hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d9abfe24a24423d51dd04a9f84591d30fdb33d58
| 160
|
py
|
Python
|
tests/no_sys_import.py
|
gflaherty/rules_pyz
|
900546b12e880f272c82534cdd2423dcb109e795
|
[
"Apache-2.0"
] | 26
|
2018-01-29T23:28:45.000Z
|
2020-12-01T21:30:32.000Z
|
tests/no_sys_import.py
|
gflaherty/rules_pyz
|
900546b12e880f272c82534cdd2423dcb109e795
|
[
"Apache-2.0"
] | 28
|
2018-03-31T19:41:12.000Z
|
2019-01-30T21:57:43.000Z
|
tests/no_sys_import.py
|
gflaherty/rules_pyz
|
900546b12e880f272c82534cdd2423dcb109e795
|
[
"Apache-2.0"
] | 17
|
2018-03-08T21:30:22.000Z
|
2019-11-22T17:16:00.000Z
|
#!/usr/bin/env python2.7
# should fail to execute: does not import sys
print 'sys.path:'
print sys.path
print os.path.dirname
print os.environ
print zipimport
| 17.777778
| 45
| 0.7625
| 28
| 160
| 4.357143
| 0.678571
| 0.131148
| 0.196721
| 0.278689
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014493
| 0.1375
| 160
| 8
| 46
| 20
| 0.869565
| 0.41875
| 0
| 0
| 0
| 0
| 0.098901
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.2
| null | null | 1
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
d9bffdb7fe0afade4ed497dc6995b676e124a804
| 137
|
py
|
Python
|
homebot/listener/__init__.py
|
HazardDede/homebot
|
7a44f5470bdd84c1e7660cf48955d44a9e4c317a
|
[
"MIT"
] | null | null | null |
homebot/listener/__init__.py
|
HazardDede/homebot
|
7a44f5470bdd84c1e7660cf48955d44a9e4c317a
|
[
"MIT"
] | null | null | null |
homebot/listener/__init__.py
|
HazardDede/homebot
|
7a44f5470bdd84c1e7660cf48955d44a9e4c317a
|
[
"MIT"
] | null | null | null |
"""Listener package."""
from homebot.listener.base import Listener
from homebot.listener import slack
__all__ = ['slack', 'Listener']
| 17.125
| 42
| 0.744526
| 16
| 137
| 6.125
| 0.5
| 0.22449
| 0.387755
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.124088
| 137
| 7
| 43
| 19.571429
| 0.816667
| 0.124088
| 0
| 0
| 0
| 0
| 0.114035
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d9dc4fa9d72065523dd26d37e52035acfceea673
| 215
|
py
|
Python
|
torchbenchmark/models/dlrm/install.py
|
Chillee/benchmark
|
91e1b2871327e44b9b7d24d173ca93720fb6565b
|
[
"BSD-3-Clause"
] | null | null | null |
torchbenchmark/models/dlrm/install.py
|
Chillee/benchmark
|
91e1b2871327e44b9b7d24d173ca93720fb6565b
|
[
"BSD-3-Clause"
] | null | null | null |
torchbenchmark/models/dlrm/install.py
|
Chillee/benchmark
|
91e1b2871327e44b9b7d24d173ca93720fb6565b
|
[
"BSD-3-Clause"
] | null | null | null |
import subprocess
import sys
def pip_install_requirements():
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-r', 'requirements.txt'])
if __name__ == '__main__':
pip_install_requirements()
| 23.888889
| 93
| 0.716279
| 25
| 215
| 5.64
| 0.64
| 0.212766
| 0.312057
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125581
| 215
| 8
| 94
| 26.875
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0.176744
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| true
| 0
| 0.333333
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
8a0a92697e30046db57ab947797f3dc188141af2
| 34,412
|
py
|
Python
|
ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/flowrange_2fd5d93183298b6434d0d9d422fee83c.py
|
OpenIxia/ixnetwork_restpy
|
f628db450573a104f327cf3c737ca25586e067ae
|
[
"MIT"
] | 20
|
2019-05-07T01:59:14.000Z
|
2022-02-11T05:24:47.000Z
|
ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/flowrange_2fd5d93183298b6434d0d9d422fee83c.py
|
OpenIxia/ixnetwork_restpy
|
f628db450573a104f327cf3c737ca25586e067ae
|
[
"MIT"
] | 60
|
2019-04-03T18:59:35.000Z
|
2022-02-22T12:05:05.000Z
|
ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/flowrange_2fd5d93183298b6434d0d9d422fee83c.py
|
OpenIxia/ixnetwork_restpy
|
f628db450573a104f327cf3c737ca25586e067ae
|
[
"MIT"
] | 13
|
2019-05-20T10:48:31.000Z
|
2021-10-06T07:45:44.000Z
|
# MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
from typing import List, Any, Union
class FlowRange(Base):
"""This object allows you to define the number of Flow Ranges for this Interface.
The FlowRange class encapsulates a list of flowRange resources that are managed by the user.
A list of resources can be retrieved from the server using the FlowRange.find() method.
The list can be managed by using the FlowRange.add() and FlowRange.remove() methods.
"""
__slots__ = ()
_SDM_NAME = 'flowRange'
_SDM_ATT_MAP = {
'CheckOverlap': 'checkOverlap',
'Description': 'description',
'DontAddOnChannelUp': 'dontAddOnChannelUp',
'EmergencyFlow': 'emergencyFlow',
'Enabled': 'enabled',
'EthernetDestination': 'ethernetDestination',
'EthernetSource': 'ethernetSource',
'EthernetType': 'ethernetType',
'FlowModStatus': 'flowModStatus',
'HardTimeout': 'hardTimeout',
'IdleTimeout': 'idleTimeout',
'InPort': 'inPort',
'IpDscp': 'ipDscp',
'IpProtocol': 'ipProtocol',
'Ipv4Destination': 'ipv4Destination',
'Ipv4Source': 'ipv4Source',
'MatchType': 'matchType',
'Priority': 'priority',
'SendFlowRemoved': 'sendFlowRemoved',
'TotalFlowCount': 'totalFlowCount',
'TransportDestinationIcmpCode': 'transportDestinationIcmpCode',
'TransportSourceIcmpType': 'transportSourceIcmpType',
'VlanId': 'vlanId',
'VlanPriority': 'vlanPriority',
}
_SDM_ENUM_MAP = {
'matchType': ['strict', 'loose'],
}
def __init__(self, parent, list_op=False):
super(FlowRange, self).__init__(parent, list_op)
@property
def FlowRangeAction(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.flowrangeaction_03657c6035891000482947c3eb53c6ea.FlowRangeAction): An instance of the FlowRangeAction class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.flowrangeaction_03657c6035891000482947c3eb53c6ea import FlowRangeAction
if self._properties.get('FlowRangeAction', None) is not None:
return self._properties.get('FlowRangeAction')
else:
return FlowRangeAction(self)
@property
def CheckOverlap(self):
# type: () -> bool
"""
Returns
-------
- bool: If true, Ixia enables the Check Overlap flag while sending OpenFlow flow modification messages.
"""
return self._get_attribute(self._SDM_ATT_MAP['CheckOverlap'])
@CheckOverlap.setter
def CheckOverlap(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['CheckOverlap'], value)
@property
def Description(self):
# type: () -> str
"""
Returns
-------
- str: A name that describes the Flow Range.
"""
return self._get_attribute(self._SDM_ATT_MAP['Description'])
@Description.setter
def Description(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['Description'], value)
@property
def DontAddOnChannelUp(self):
# type: () -> bool
"""
Returns
-------
- bool: If true, no flow add or delete packet is sent out when OpenFlow channel comes up or when flow entry is enabled/disabled in the IxNetwork GUI. This facility is useful to send flow add,delete, and modify for ad-hoc flows through Test Composer.
"""
return self._get_attribute(self._SDM_ATT_MAP['DontAddOnChannelUp'])
@DontAddOnChannelUp.setter
def DontAddOnChannelUp(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['DontAddOnChannelUp'], value)
@property
def EmergencyFlow(self):
# type: () -> bool
"""
Returns
-------
- bool: If true, Ixia enables the Emergency flag while sending OpenFlow flow modification messages.
"""
return self._get_attribute(self._SDM_ATT_MAP['EmergencyFlow'])
@EmergencyFlow.setter
def EmergencyFlow(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['EmergencyFlow'], value)
@property
def Enabled(self):
# type: () -> bool
"""
Returns
-------
- bool: If true, enables the flow Range object in the protocol.
"""
return self._get_attribute(self._SDM_ATT_MAP['Enabled'])
@Enabled.setter
def Enabled(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['Enabled'], value)
@property
def EthernetDestination(self):
# type: () -> str
"""
Returns
-------
- str: Indicates the Ethernet destination address for the flow range. This attribute is of string type and can take wildcard as input. It is composed of sub-attributes like, startValue, stepValue, repeatCount, wrapCount, and incrementMode.
"""
return self._get_attribute(self._SDM_ATT_MAP['EthernetDestination'])
@EthernetDestination.setter
def EthernetDestination(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['EthernetDestination'], value)
@property
def EthernetSource(self):
# type: () -> str
"""
Returns
-------
- str: Indicates the Ethernet source address for the flow range. This attribute is of string type and can take wildcard as input. It is composed of sub-attributes like, startValue, stepValue, repeatCount, wrapCount, and incrementMode.
"""
return self._get_attribute(self._SDM_ATT_MAP['EthernetSource'])
@EthernetSource.setter
def EthernetSource(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['EthernetSource'], value)
@property
def EthernetType(self):
# type: () -> str
"""
Returns
-------
- str: Indicates the type of Ethernet to be used. This attribute is of string type and can take wildcard as input. It is composed of sub-attributes like, startValue, stepValue, repeatCount, wrapCount, and incrementMode.
"""
return self._get_attribute(self._SDM_ATT_MAP['EthernetType'])
@EthernetType.setter
def EthernetType(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['EthernetType'], value)
@property
def FlowModStatus(self):
# type: () -> str
"""
Returns
-------
- str: Reflects the status of the selected flow range which is modified at runtime.
"""
return self._get_attribute(self._SDM_ATT_MAP['FlowModStatus'])
@property
def HardTimeout(self):
# type: () -> int
"""
Returns
-------
- number: Indicates the inactive time in seconds after which the Flow range will hard timeout and close.
"""
return self._get_attribute(self._SDM_ATT_MAP['HardTimeout'])
@HardTimeout.setter
def HardTimeout(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['HardTimeout'], value)
@property
def IdleTimeout(self):
# type: () -> int
"""
Returns
-------
- number: Indicates the inactive time in seconds after which the Flow range will timeout and become idle.
"""
return self._get_attribute(self._SDM_ATT_MAP['IdleTimeout'])
@IdleTimeout.setter
def IdleTimeout(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['IdleTimeout'], value)
@property
def InPort(self):
# type: () -> str
"""
Returns
-------
- str: Indicates the In port value for flow range. This attribute is of string type and can take wildcard as input. It is composed of sub-attributes like, startValue, stepValue, repeatCount, wrapCount, and incrementMode.
"""
return self._get_attribute(self._SDM_ATT_MAP['InPort'])
@InPort.setter
def InPort(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['InPort'], value)
@property
def IpDscp(self):
# type: () -> str
"""
Returns
-------
- str: Specifies the IP DSCP value. This attribute is of string type and can take wildcard as input. It is composed of sub-attributes like, startValue, stepValue, repeatCount, wrapCount, and incrementMode.
"""
return self._get_attribute(self._SDM_ATT_MAP['IpDscp'])
@IpDscp.setter
def IpDscp(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['IpDscp'], value)
@property
def IpProtocol(self):
# type: () -> str
"""
Returns
-------
- str: Specifies the IP Protocol to be used for the flow range. This attribute is of string type and can take wildcard as input. It is composed of sub-attributes like, startValue, stepValue, repeatCount, wrapCount, and incrementMode.
"""
return self._get_attribute(self._SDM_ATT_MAP['IpProtocol'])
@IpProtocol.setter
def IpProtocol(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['IpProtocol'], value)
@property
def Ipv4Destination(self):
# type: () -> str
"""
Returns
-------
- str: Indicates the IPv4 destination address mask value. This attribute is of string type and can take wildcard as input. It is composed of sub-attributes like, startValue, stepValue, repeatCount, wrapCount, and incrementMode.
"""
return self._get_attribute(self._SDM_ATT_MAP['Ipv4Destination'])
@Ipv4Destination.setter
def Ipv4Destination(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['Ipv4Destination'], value)
@property
def Ipv4Source(self):
# type: () -> str
"""
Returns
-------
- str: Indicates the IPv4 source address. This attribute is of string type and can take wildcard as input. It is composed of sub-attributes like, startValue, stepValue, repeatCount, wrapCount, and incrementMode.
"""
return self._get_attribute(self._SDM_ATT_MAP['Ipv4Source'])
@Ipv4Source.setter
def Ipv4Source(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['Ipv4Source'], value)
@property
def MatchType(self):
# type: () -> str
"""
Returns
-------
- str(strict | loose): Indicates the type of match to be configured.
"""
return self._get_attribute(self._SDM_ATT_MAP['MatchType'])
@MatchType.setter
def MatchType(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['MatchType'], value)
@property
def Priority(self):
# type: () -> int
"""
Returns
-------
- number: Indicates the priority level for the Flow Range.
"""
return self._get_attribute(self._SDM_ATT_MAP['Priority'])
@Priority.setter
def Priority(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['Priority'], value)
@property
def SendFlowRemoved(self):
# type: () -> bool
"""
Returns
-------
- bool: If true, Ixia enables the Send Flow Removed flag while sending OpenFlow flow modification messages.
"""
return self._get_attribute(self._SDM_ATT_MAP['SendFlowRemoved'])
@SendFlowRemoved.setter
def SendFlowRemoved(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['SendFlowRemoved'], value)
@property
def TotalFlowCount(self):
# type: () -> int
"""
Returns
-------
- number: Specifies the number of flows.
"""
return self._get_attribute(self._SDM_ATT_MAP['TotalFlowCount'])
@TotalFlowCount.setter
def TotalFlowCount(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['TotalFlowCount'], value)
@property
def TransportDestinationIcmpCode(self):
# type: () -> str
"""
Returns
-------
- str: Specifies the Transport destination address. This attribute is of string type and can take wildcard as input. It is composed of sub-attributes like, startValue, stepValue, repeatCount, wrapCount, and incrementMode.
"""
return self._get_attribute(self._SDM_ATT_MAP['TransportDestinationIcmpCode'])
@TransportDestinationIcmpCode.setter
def TransportDestinationIcmpCode(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['TransportDestinationIcmpCode'], value)
@property
def TransportSourceIcmpType(self):
# type: () -> str
"""
Returns
-------
- str: Specifies the Transport Source address. This attribute is of string type and can take wildcard as input. It is composed of sub-attributes like, startValue, stepValue, repeatCount, wrapCount, and incrementMode.
"""
return self._get_attribute(self._SDM_ATT_MAP['TransportSourceIcmpType'])
@TransportSourceIcmpType.setter
def TransportSourceIcmpType(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['TransportSourceIcmpType'], value)
@property
def VlanId(self):
# type: () -> str
"""
Returns
-------
- str: Indicates the VLAN identifier value. This attribute is of string type and can take wildcard as input. It is composed of sub-attributes like, startValue, stepValue, repeatCount, wrapCount, and incrementMode.
"""
return self._get_attribute(self._SDM_ATT_MAP['VlanId'])
@VlanId.setter
def VlanId(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['VlanId'], value)
@property
def VlanPriority(self):
# type: () -> str
"""
Returns
-------
- str: Indicates the VLAN priority value. This attribute is of string type and can take wildcard as input. It is composed of sub-attributes like, startValue, stepValue, repeatCount, wrapCount, and incrementMode.
"""
return self._get_attribute(self._SDM_ATT_MAP['VlanPriority'])
@VlanPriority.setter
def VlanPriority(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['VlanPriority'], value)
def update(self, CheckOverlap=None, Description=None, DontAddOnChannelUp=None, EmergencyFlow=None, Enabled=None, EthernetDestination=None, EthernetSource=None, EthernetType=None, HardTimeout=None, IdleTimeout=None, InPort=None, IpDscp=None, IpProtocol=None, Ipv4Destination=None, Ipv4Source=None, MatchType=None, Priority=None, SendFlowRemoved=None, TotalFlowCount=None, TransportDestinationIcmpCode=None, TransportSourceIcmpType=None, VlanId=None, VlanPriority=None):
# type: (bool, str, bool, bool, bool, str, str, str, int, int, str, str, str, str, str, str, int, bool, int, str, str, str, str) -> FlowRange
"""Updates flowRange resource on the server.
Args
----
- CheckOverlap (bool): If true, Ixia enables the Check Overlap flag while sending OpenFlow flow modification messages.
- Description (str): A name that describes the Flow Range.
- DontAddOnChannelUp (bool): If true, no flow add or delete packet is sent out when OpenFlow channel comes up or when flow entry is enabled/disabled in the IxNetwork GUI. This facility is useful to send flow add,delete, and modify for ad-hoc flows through Test Composer.
- EmergencyFlow (bool): If true, Ixia enables the Emergency flag while sending OpenFlow flow modification messages.
- Enabled (bool): If true, enables the flow Range object in the protocol.
- EthernetDestination (str): Indicates the Ethernet destination address for the flow range. This attribute is of string type and can take wildcard as input. It is composed of sub-attributes like, startValue, stepValue, repeatCount, wrapCount, and incrementMode.
- EthernetSource (str): Indicates the Ethernet source address for the flow range. This attribute is of string type and can take wildcard as input. It is composed of sub-attributes like, startValue, stepValue, repeatCount, wrapCount, and incrementMode.
- EthernetType (str): Indicates the type of Ethernet to be used. This attribute is of string type and can take wildcard as input. It is composed of sub-attributes like, startValue, stepValue, repeatCount, wrapCount, and incrementMode.
- HardTimeout (number): Indicates the inactive time in seconds after which the Flow range will hard timeout and close.
- IdleTimeout (number): Indicates the inactive time in seconds after which the Flow range will timeout and become idle.
- InPort (str): Indicates the In port value for flow range. This attribute is of string type and can take wildcard as input. It is composed of sub-attributes like, startValue, stepValue, repeatCount, wrapCount, and incrementMode.
- IpDscp (str): Specifies the IP DSCP value. This attribute is of string type and can take wildcard as input. It is composed of sub-attributes like, startValue, stepValue, repeatCount, wrapCount, and incrementMode.
- IpProtocol (str): Specifies the IP Protocol to be used for the flow range. This attribute is of string type and can take wildcard as input. It is composed of sub-attributes like, startValue, stepValue, repeatCount, wrapCount, and incrementMode.
- Ipv4Destination (str): Indicates the IPv4 destination address mask value. This attribute is of string type and can take wildcard as input. It is composed of sub-attributes like, startValue, stepValue, repeatCount, wrapCount, and incrementMode.
- Ipv4Source (str): Indicates the IPv4 source address. This attribute is of string type and can take wildcard as input. It is composed of sub-attributes like, startValue, stepValue, repeatCount, wrapCount, and incrementMode.
- MatchType (str(strict | loose)): Indicates the type of match to be configured.
- Priority (number): Indicates the priority level for the Flow Range.
- SendFlowRemoved (bool): If true, Ixia enables the Send Flow Removed flag while sending OpenFlow flow modification messages.
- TotalFlowCount (number): Specifies the number of flows.
- TransportDestinationIcmpCode (str): Specifies the Transport destination address. This attribute is of string type and can take wildcard as input. It is composed of sub-attributes like, startValue, stepValue, repeatCount, wrapCount, and incrementMode.
- TransportSourceIcmpType (str): Specifies the Transport Source address. This attribute is of string type and can take wildcard as input. It is composed of sub-attributes like, startValue, stepValue, repeatCount, wrapCount, and incrementMode.
- VlanId (str): Indicates the VLAN identifier value. This attribute is of string type and can take wildcard as input. It is composed of sub-attributes like, startValue, stepValue, repeatCount, wrapCount, and incrementMode.
- VlanPriority (str): Indicates the VLAN priority value. This attribute is of string type and can take wildcard as input. It is composed of sub-attributes like, startValue, stepValue, repeatCount, wrapCount, and incrementMode.
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def add(self, CheckOverlap=None, Description=None, DontAddOnChannelUp=None, EmergencyFlow=None, Enabled=None, EthernetDestination=None, EthernetSource=None, EthernetType=None, HardTimeout=None, IdleTimeout=None, InPort=None, IpDscp=None, IpProtocol=None, Ipv4Destination=None, Ipv4Source=None, MatchType=None, Priority=None, SendFlowRemoved=None, TotalFlowCount=None, TransportDestinationIcmpCode=None, TransportSourceIcmpType=None, VlanId=None, VlanPriority=None):
# type: (bool, str, bool, bool, bool, str, str, str, int, int, str, str, str, str, str, str, int, bool, int, str, str, str, str) -> FlowRange
"""Adds a new flowRange resource on the server and adds it to the container.
Args
----
- CheckOverlap (bool): If true, Ixia enables the Check Overlap flag while sending OpenFlow flow modification messages.
- Description (str): A name that describes the Flow Range.
- DontAddOnChannelUp (bool): If true, no flow add or delete packet is sent out when OpenFlow channel comes up or when flow entry is enabled/disabled in the IxNetwork GUI. This facility is useful to send flow add,delete, and modify for ad-hoc flows through Test Composer.
- EmergencyFlow (bool): If true, Ixia enables the Emergency flag while sending OpenFlow flow modification messages.
- Enabled (bool): If true, enables the flow Range object in the protocol.
- EthernetDestination (str): Indicates the Ethernet destination address for the flow range. This attribute is of string type and can take wildcard as input. It is composed of sub-attributes like, startValue, stepValue, repeatCount, wrapCount, and incrementMode.
- EthernetSource (str): Indicates the Ethernet source address for the flow range. This attribute is of string type and can take wildcard as input. It is composed of sub-attributes like, startValue, stepValue, repeatCount, wrapCount, and incrementMode.
- EthernetType (str): Indicates the type of Ethernet to be used. This attribute is of string type and can take wildcard as input. It is composed of sub-attributes like, startValue, stepValue, repeatCount, wrapCount, and incrementMode.
- HardTimeout (number): Indicates the inactive time in seconds after which the Flow range will hard timeout and close.
- IdleTimeout (number): Indicates the inactive time in seconds after which the Flow range will timeout and become idle.
- InPort (str): Indicates the In port value for flow range. This attribute is of string type and can take wildcard as input. It is composed of sub-attributes like, startValue, stepValue, repeatCount, wrapCount, and incrementMode.
- IpDscp (str): Specifies the IP DSCP value. This attribute is of string type and can take wildcard as input. It is composed of sub-attributes like, startValue, stepValue, repeatCount, wrapCount, and incrementMode.
- IpProtocol (str): Specifies the IP Protocol to be used for the flow range. This attribute is of string type and can take wildcard as input. It is composed of sub-attributes like, startValue, stepValue, repeatCount, wrapCount, and incrementMode.
- Ipv4Destination (str): Indicates the IPv4 destination address mask value. This attribute is of string type and can take wildcard as input. It is composed of sub-attributes like, startValue, stepValue, repeatCount, wrapCount, and incrementMode.
- Ipv4Source (str): Indicates the IPv4 source address. This attribute is of string type and can take wildcard as input. It is composed of sub-attributes like, startValue, stepValue, repeatCount, wrapCount, and incrementMode.
- MatchType (str(strict | loose)): Indicates the type of match to be configured.
- Priority (number): Indicates the priority level for the Flow Range.
- SendFlowRemoved (bool): If true, Ixia enables the Send Flow Removed flag while sending OpenFlow flow modification messages.
- TotalFlowCount (number): Specifies the number of flows.
- TransportDestinationIcmpCode (str): Specifies the Transport destination address. This attribute is of string type and can take wildcard as input. It is composed of sub-attributes like, startValue, stepValue, repeatCount, wrapCount, and incrementMode.
- TransportSourceIcmpType (str): Specifies the Transport Source address. This attribute is of string type and can take wildcard as input. It is composed of sub-attributes like, startValue, stepValue, repeatCount, wrapCount, and incrementMode.
- VlanId (str): Indicates the VLAN identifier value. This attribute is of string type and can take wildcard as input. It is composed of sub-attributes like, startValue, stepValue, repeatCount, wrapCount, and incrementMode.
- VlanPriority (str): Indicates the VLAN priority value. This attribute is of string type and can take wildcard as input. It is composed of sub-attributes like, startValue, stepValue, repeatCount, wrapCount, and incrementMode.
Returns
-------
- self: This instance with all currently retrieved flowRange resources using find and the newly added flowRange resources available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
def remove(self):
    """Deletes all the contained flowRange resources in this instance from the server.

    Issues a DELETE for every flowRange currently held by this container;
    returns nothing.

    Raises
    ------
    - NotFoundError: The requested resource does not exist on the server
    - ServerError: The server has encountered an uncategorized error condition
    """
    self._delete()
def find(self, CheckOverlap=None, Description=None, DontAddOnChannelUp=None, EmergencyFlow=None, Enabled=None, EthernetDestination=None, EthernetSource=None, EthernetType=None, FlowModStatus=None, HardTimeout=None, IdleTimeout=None, InPort=None, IpDscp=None, IpProtocol=None, Ipv4Destination=None, Ipv4Source=None, MatchType=None, Priority=None, SendFlowRemoved=None, TotalFlowCount=None, TransportDestinationIcmpCode=None, TransportSourceIcmpType=None, VlanId=None, VlanPriority=None):
    # type: (bool, str, bool, bool, bool, str, str, str, str, int, int, str, str, str, str, str, str, int, bool, int, str, str, str, str) -> FlowRange
    """Finds and retrieves flowRange resources from the server.

    All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve flowRange resources from the server.
    To retrieve an exact match ensure the parameter value starts with ^ and ends with $
    By default the find method takes no parameters and will retrieve all flowRange resources from the server.

    Args
    ----
    - CheckOverlap (bool): If true, Ixia enables the Check Overlap flag while sending OpenFlow flow modification messages.
    - Description (str): A name that describes the Flow Range.
    - DontAddOnChannelUp (bool): If true, no flow add or delete packet is sent out when OpenFlow channel comes up or when flow entry is enabled/disabled in the IxNetwork GUI. This facility is useful to send flow add,delete, and modify for ad-hoc flows through Test Composer.
    - EmergencyFlow (bool): If true, Ixia enables the Emergency flag while sending OpenFlow flow modification messages.
    - Enabled (bool): If true, enables the flow Range object in the protocol.
    - EthernetDestination (str): Indicates the Ethernet destination address for the flow range. This attribute is of string type and can take wildcard as input. It is composed of sub-attributes like, startValue, stepValue, repeatCount, wrapCount, and incrementMode.
    - EthernetSource (str): Indicates the Ethernet source address for the flow range. This attribute is of string type and can take wildcard as input. It is composed of sub-attributes like, startValue, stepValue, repeatCount, wrapCount, and incrementMode.
    - EthernetType (str): Indicates the type of Ethernet to be used. This attribute is of string type and can take wildcard as input. It is composed of sub-attributes like, startValue, stepValue, repeatCount, wrapCount, and incrementMode.
    - FlowModStatus (str): Reflects the status of the selected flow range which is modified at runtime.
    - HardTimeout (number): Indicates the inactive time in seconds after which the Flow range will hard timeout and close.
    - IdleTimeout (number): Indicates the inactive time in seconds after which the Flow range will timeout and become idle.
    - InPort (str): Indicates the In port value for flow range. This attribute is of string type and can take wildcard as input. It is composed of sub-attributes like, startValue, stepValue, repeatCount, wrapCount, and incrementMode.
    - IpDscp (str): Specifies the IP DSCP value. This attribute is of string type and can take wildcard as input. It is composed of sub-attributes like, startValue, stepValue, repeatCount, wrapCount, and incrementMode.
    - IpProtocol (str): Specifies the IP Protocol to be used for the flow range. This attribute is of string type and can take wildcard as input. It is composed of sub-attributes like, startValue, stepValue, repeatCount, wrapCount, and incrementMode.
    - Ipv4Destination (str): Indicates the IPv4 destination address mask value. This attribute is of string type and can take wildcard as input. It is composed of sub-attributes like, startValue, stepValue, repeatCount, wrapCount, and incrementMode.
    - Ipv4Source (str): Indicates the IPv4 source address. This attribute is of string type and can take wildcard as input. It is composed of sub-attributes like, startValue, stepValue, repeatCount, wrapCount, and incrementMode.
    - MatchType (str(strict | loose)): Indicates the type of match to be configured.
    - Priority (number): Indicates the priority level for the Flow Range.
    - SendFlowRemoved (bool): If true, Ixia enables the Send Flow Removed flag while sending OpenFlow flow modification messages.
    - TotalFlowCount (number): Specifies the number of flows.
    - TransportDestinationIcmpCode (str): Specifies the Transport destination address. This attribute is of string type and can take wildcard as input. It is composed of sub-attributes like, startValue, stepValue, repeatCount, wrapCount, and incrementMode.
    - TransportSourceIcmpType (str): Specifies the Transport Source address. This attribute is of string type and can take wildcard as input. It is composed of sub-attributes like, startValue, stepValue, repeatCount, wrapCount, and incrementMode.
    - VlanId (str): Indicates the VLAN identifier value. This attribute is of string type and can take wildcard as input. It is composed of sub-attributes like, startValue, stepValue, repeatCount, wrapCount, and incrementMode.
    - VlanPriority (str): Indicates the VLAN priority value. This attribute is of string type and can take wildcard as input. It is composed of sub-attributes like, startValue, stepValue, repeatCount, wrapCount, and incrementMode.

    Returns
    -------
    - self: This instance with matching flowRange resources retrieved from the server available through an iterator or index

    Raises
    ------
    - ServerError: The server has encountered an uncategorized error condition
    """
    # locals() is captured before any new locals are created, so it maps the
    # keyword arguments above onto the SDM attribute names for the select.
    return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
def read(self, href):
    """Retrieve the single flowRange instance identified by *href*.

    Args
    ----
    - href (str): An href to the instance to be retrieved

    Returns
    -------
    - self: This instance with the flowRange resources from the server available through an iterator or index

    Raises
    ------
    - NotFoundError: The requested resource does not exist on the server
    - ServerError: The server has encountered an uncategorized error condition
    """
    # Delegate to the base-class reader, which populates this container.
    result = self._read(href)
    return result
def UpdateFlowMod(self, *args, **kwargs):
    # type: (*Any, **Any) -> Union[bool, None]
    """Executes the updateFlowMod operation on the server.

    updateFlowMod(Arg2=enum, async_operation=bool)bool
    --------------------------------------------------
    - Arg2 (str(sendFlowAdd | sendFlowModify | sendFlowRemove)): NOT DEFINED
    - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
    - Returns bool: NOT DEFINED

    Raises
    ------
    - NotFoundError: The requested resource does not exist on the server
    - ServerError: The server has encountered an uncategorized error condition
    """
    # Arg1 is always this resource's href; positional arguments become
    # Arg2, Arg3, ... and keyword arguments pass through unchanged.
    payload = {"Arg1": self.href}
    for position, value in enumerate(args, start=2):
        payload['Arg%s' % position] = value
    payload.update(kwargs)
    return self._execute('updateFlowMod', payload=payload, response_object=None)
| 58.52381
| 490
| 0.692694
| 4,179
| 34,412
| 5.634841
| 0.085666
| 0.012995
| 0.019492
| 0.027603
| 0.794165
| 0.781722
| 0.755775
| 0.751869
| 0.732928
| 0.718108
| 0
| 0.003656
| 0.221086
| 34,412
| 587
| 491
| 58.623509
| 0.874869
| 0.636929
| 0
| 0.125
| 0
| 0
| 0.123397
| 0.019666
| 0
| 0
| 0
| 0
| 0
| 1
| 0.275
| false
| 0
| 0.02
| 0
| 0.475
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
8a0e3b7c0375d93791d5077e5ce254bee9064784
| 10,118
|
py
|
Python
|
tests/agent/test_agent.py
|
dazzag24/prefect
|
9d36c989c95cbbed091b071932553286edf25bb6
|
[
"Apache-2.0"
] | null | null | null |
tests/agent/test_agent.py
|
dazzag24/prefect
|
9d36c989c95cbbed091b071932553286edf25bb6
|
[
"Apache-2.0"
] | null | null | null |
tests/agent/test_agent.py
|
dazzag24/prefect
|
9d36c989c95cbbed091b071932553286edf25bb6
|
[
"Apache-2.0"
] | null | null | null |
from unittest.mock import MagicMock
import pytest
from prefect.agent import Agent
from prefect.engine.state import Scheduled
from prefect.utilities.configuration import set_temporary_config
from prefect.utilities.exceptions import AuthorizationError
from prefect.utilities.graphql import GraphQLResult
def test_agent_init(runner_token):
    """A bare Agent can be constructed when a runner token is present."""
    constructed = Agent()
    assert constructed
def test_agent_config_options(runner_token):
    """The agent picks up its auth token from the temporary config."""
    overrides = {"cloud.agent.auth_token": "TEST_TOKEN"}
    with set_temporary_config(overrides):
        configured = Agent()
        assert configured.client.get_auth_token() == "TEST_TOKEN"
        assert configured.logger
def test_agent_log_level(runner_token):
    """The default logger level is INFO (numeric level 20)."""
    with set_temporary_config({"cloud.agent.auth_token": "TEST_TOKEN"}):
        assert Agent().logger.level == 20
def test_agent_log_level_debug(runner_token):
    """Setting cloud.agent.level to DEBUG drops the logger to level 10."""
    overrides = {
        "cloud.agent.auth_token": "TEST_TOKEN",
        "cloud.agent.level": "DEBUG",
    }
    with set_temporary_config(overrides):
        assert Agent().logger.level == 10
def test_agent_fails_no_auth_token():
    """Without an auth token, querying the tenant id is rejected."""
    with pytest.raises(AuthorizationError):
        Agent().query_tenant_id()
def test_agent_fails_no_runner_token(monkeypatch):
    """A USER-scoped API token is not accepted as a runner token."""
    api_response = MagicMock()
    api_response.json.return_value = dict(
        data=dict(authInfo=MagicMock(apiTokenScope="USER"))
    )
    session_cls = MagicMock()
    session_cls.return_value.post = MagicMock(return_value=api_response)
    monkeypatch.setattr("requests.Session", session_cls)
    with pytest.raises(AuthorizationError):
        Agent().query_tenant_id()
def test_query_tenant_id(monkeypatch, runner_token):
    """query_tenant_id returns the id of the first tenant in the response."""
    api_response = MagicMock()
    api_response.json.return_value = dict(data=dict(tenant=[dict(id="id")]))
    session_cls = MagicMock()
    session_cls.return_value.post = MagicMock(return_value=api_response)
    monkeypatch.setattr("requests.Session", session_cls)
    assert Agent().query_tenant_id() == "id"
def test_query_tenant_id_not_found(monkeypatch, runner_token):
    """An empty tenant list yields a falsy tenant id."""
    api_response = MagicMock()
    api_response.json.return_value = dict(data=dict(tenant=[]))
    session_cls = MagicMock()
    session_cls.return_value.post = MagicMock(return_value=api_response)
    monkeypatch.setattr("requests.Session", session_cls)
    assert not Agent().query_tenant_id()
def test_query_flow_runs(monkeypatch, runner_token):
    """query_flow_runs returns the flow runs named by the run queue."""
    gql_result = MagicMock()
    gql_result.data = MagicMock(
        getRunsInQueue=MagicMock(flow_run_ids=["id"]), flow_run=[{"id": "id"}]
    )
    client_cls = MagicMock()
    client_cls.return_value.graphql = MagicMock(return_value=gql_result)
    monkeypatch.setattr("prefect.agent.agent.Client", client_cls)
    runs = Agent().query_flow_runs(tenant_id="id")
    assert runs == [{"id": "id"}]
def test_update_states_passes_empty(monkeypatch, runner_token):
    """update_states with no flow runs is a no-op returning falsy."""
    gql_data = MagicMock(set_flow_run_state=None, set_task_run_state=None)
    client_cls = MagicMock()
    client_cls.return_value.graphql = MagicMock(
        return_value=MagicMock(data=gql_data)
    )
    monkeypatch.setattr("prefect.agent.agent.Client", client_cls)
    assert not Agent().update_states(flow_runs=[])
def test_update_states_passes_no_task_runs(monkeypatch, runner_token):
    """A flow run without task runs is processed without error."""
    gql_data = MagicMock(set_flow_run_state=None, set_task_run_state=None)
    client_cls = MagicMock()
    client_cls.return_value.graphql = MagicMock(
        return_value=MagicMock(data=gql_data)
    )
    monkeypatch.setattr("prefect.agent.agent.Client", client_cls)
    flow_run = GraphQLResult(
        {
            "id": "id",
            "serialized_state": Scheduled().serialize(),
            "version": 1,
            "task_runs": [],
        }
    )
    assert not Agent().update_states(flow_runs=[flow_run])
def test_update_states_passes_task_runs(monkeypatch, runner_token):
    """A flow run carrying task runs is processed without error."""
    gql_data = MagicMock(set_flow_run_state=None, set_task_run_state=None)
    client_cls = MagicMock()
    client_cls.return_value.graphql = MagicMock(
        return_value=MagicMock(data=gql_data)
    )
    monkeypatch.setattr("prefect.agent.agent.Client", client_cls)
    task_run = GraphQLResult(
        {
            "id": "id",
            "version": 1,
            "serialized_state": Scheduled().serialize(),
        }
    )
    flow_run = GraphQLResult(
        {
            "id": "id",
            "serialized_state": Scheduled().serialize(),
            "version": 1,
            "task_runs": [task_run],
        }
    )
    assert not Agent().update_states(flow_runs=[flow_run])
def test_deploy_flows_passes_base_agent(runner_token):
    """The base Agent's deploy_flows is a no-op returning falsy."""
    assert not Agent().deploy_flows([])
def test_heartbeat_passes_base_agent(runner_token):
    """The base Agent's heartbeat is a no-op returning falsy."""
    assert not Agent().heartbeat()
def test_agent_connect(monkeypatch, runner_token):
    """agent_connect resolves and returns the tenant id."""
    api_response = MagicMock()
    api_response.json.return_value = dict(data=dict(tenant=[dict(id="id")]))
    session_cls = MagicMock()
    session_cls.return_value.post = MagicMock(return_value=api_response)
    monkeypatch.setattr("requests.Session", session_cls)
    assert Agent().agent_connect() == "id"
def test_agent_connect_no_tenant_id(monkeypatch, runner_token):
    """A null tenant id makes agent_connect raise ConnectionError."""
    api_response = MagicMock()
    api_response.json.return_value = dict(data=dict(tenant=[dict(id=None)]))
    session_cls = MagicMock()
    session_cls.return_value.post = MagicMock(return_value=api_response)
    monkeypatch.setattr("requests.Session", session_cls)
    agent = Agent()
    with pytest.raises(ConnectionError):
        assert agent.agent_connect()
def test_agent_process(monkeypatch, runner_token):
    """agent_process returns truthy when the work queue yields a flow run."""
    # A single GraphQL mock serves every call the agent makes: the run-queue
    # lookup, the flow-run fetch, and both state updates.
    gql_return = MagicMock(
        return_value=MagicMock(
            data=MagicMock(
                set_flow_run_state=None,
                set_task_run_state=None,
                getRunsInQueue=MagicMock(flow_run_ids=["id"]),
                flow_run=[
                    GraphQLResult(
                        {
                            "id": "id",
                            "serialized_state": Scheduled().serialize(),
                            "version": 1,
                            "task_runs": [
                                GraphQLResult(
                                    {
                                        "id": "id",
                                        "version": 1,
                                        "serialized_state": Scheduled().serialize(),
                                    }
                                )
                            ],
                        }
                    )
                ],
            )
        )
    )
    client = MagicMock()
    client.return_value.graphql = gql_return
    monkeypatch.setattr("prefect.agent.agent.Client", client)
    # Assert it doesn't return everything but all functions are called properly
    agent = Agent()
    assert agent.agent_process("id")
def test_agent_process_no_runs_found(monkeypatch, runner_token):
    """agent_process is falsy when the queue names ids but no runs exist."""
    gql_data = MagicMock(
        set_flow_run_state=None,
        set_task_run_state=None,
        getRunsInQueue=MagicMock(flow_run_ids=["id"]),
        flow_run=[],
    )
    client_cls = MagicMock()
    client_cls.return_value.graphql = MagicMock(
        return_value=MagicMock(data=gql_data)
    )
    monkeypatch.setattr("prefect.agent.agent.Client", client_cls)
    # Nothing is returned because the queued id resolved to no flow run.
    assert not Agent().agent_process("id")
def test_agent_logs_flow_run_exceptions(monkeypatch, runner_token):
    """_log_flow_run_exceptions writes an ERROR log entry for each flow run."""
    gql_return = MagicMock(
        return_value=MagicMock(data=MagicMock(writeRunLog=MagicMock(success=True)))
    )
    client = MagicMock()
    # NOTE(review): this assigns client.return_value.write_run_log, but the
    # patched Client below is MagicMock(return_value=client), so the agent
    # calls client.write_run_log — an auto-created MagicMock — and this
    # assignment appears unused. The assertions below still pass via the
    # auto-mock; confirm the wiring is as intended.
    client.return_value.write_run_log = gql_return
    monkeypatch.setattr("prefect.agent.agent.Client", MagicMock(return_value=client))
    agent = Agent()
    agent._log_flow_run_exceptions(
        flow_runs=[
            GraphQLResult(
                {
                    "id": "id",
                    "serialized_state": Scheduled().serialize(),
                    "version": 1,
                    "task_runs": [
                        GraphQLResult(
                            {
                                "id": "id",
                                "version": 1,
                                "serialized_state": Scheduled().serialize(),
                            }
                        )
                    ],
                }
            )
        ],
        exc=ValueError("Error Here"),
    )
    assert client.write_run_log.called
    client.write_run_log.assert_called_with(
        flow_run_id="id", level="ERROR", message="Error Here", name="agent"
    )
def test_agent_logs_flow_run_exceptions_no_flow_runs(monkeypatch, runner_token):
    """With no flow runs, _log_flow_run_exceptions writes no log entries."""
    gql_return = MagicMock(
        return_value=MagicMock(data=MagicMock(writeRunLog=MagicMock(success=True)))
    )
    client = MagicMock()
    # NOTE(review): the patched Client returns `client` itself, so the agent
    # would call client.write_run_log (auto-mock); this assignment to
    # client.return_value.write_run_log looks unused — confirm.
    client.return_value.write_run_log = gql_return
    monkeypatch.setattr("prefect.agent.agent.Client", MagicMock(return_value=client))
    agent = Agent()
    agent._log_flow_run_exceptions(flow_runs=[], exc=ValueError("Error Here"))
    assert not client.write_run_log.called
def test_agent_process_raises_exception_and_logs(monkeypatch, runner_token):
    """A GraphQL failure propagates out of agent_process."""
    client = MagicMock()
    client.return_value.graphql.side_effect = ValueError("Error")
    monkeypatch.setattr("prefect.agent.agent.Client", client)
    agent = Agent()
    with pytest.raises(Exception):
        agent.agent_process("id")
        # NOTE(review): this assert is unreachable — agent_process raises on
        # the previous line, so pytest.raises exits the block before it runs.
        # If the log-write check matters it should move outside the `with`.
        assert client.write_run_log.called
| 30.293413
| 87
| 0.587962
| 1,019
| 10,118
| 5.561335
| 0.105005
| 0.068819
| 0.070584
| 0.066526
| 0.839598
| 0.7916
| 0.753309
| 0.732839
| 0.725428
| 0.71537
| 0
| 0.00158
| 0.312117
| 10,118
| 333
| 88
| 30.384384
| 0.812644
| 0.014529
| 0
| 0.560886
| 0
| 0
| 0.07504
| 0.030096
| 0
| 0
| 0
| 0
| 0.077491
| 1
| 0.077491
| false
| 0.01845
| 0.02583
| 0
| 0.103321
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
8a1d106fb9d7a1b202193072eb3cc7096cff6b03
| 103
|
py
|
Python
|
df_filter/empty_filter.py
|
emirhanaydin/npp-data-miner
|
e65bf929d66569d7f583ab935a8976336cbc48ec
|
[
"Unlicense"
] | 1
|
2022-01-30T22:51:46.000Z
|
2022-01-30T22:51:46.000Z
|
df_filter/empty_filter.py
|
emirhanaydin/npp-data-miner
|
e65bf929d66569d7f583ab935a8976336cbc48ec
|
[
"Unlicense"
] | null | null | null |
df_filter/empty_filter.py
|
emirhanaydin/npp-data-miner
|
e65bf929d66569d7f583ab935a8976336cbc48ec
|
[
"Unlicense"
] | null | null | null |
from pandas import DataFrame
def empty_filter(df: DataFrame, subset=None):
    """Return *df* with rows that have missing values in *subset* removed.

    Args:
        df: Input frame.
        subset: Column labels to check for NaN. Defaults to ``['Front']``,
            preserving the original single-column behaviour.

    Returns:
        A new DataFrame (the input is not modified) without the rows whose
        checked columns contain NaN.
    """
    # Generalized from the hard-coded ['Front'] so other columns can be
    # filtered; the default keeps existing callers unchanged.
    return df.dropna(subset=subset if subset is not None else ['Front'])
| 17.166667
| 38
| 0.737864
| 14
| 103
| 5.357143
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.145631
| 103
| 5
| 39
| 20.6
| 0.852273
| 0
| 0
| 0
| 0
| 0
| 0.048544
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
8a2983904ae4ae0984387b0fc23890ff8f4721a9
| 3,008
|
py
|
Python
|
tests/helper_methods.py
|
ywalakamar/store-manager-api-v2-revisted
|
ab326735f702719ee884263f5eb395b9e7a50011
|
[
"Apache-2.0"
] | null | null | null |
tests/helper_methods.py
|
ywalakamar/store-manager-api-v2-revisted
|
ab326735f702719ee884263f5eb395b9e7a50011
|
[
"Apache-2.0"
] | 91
|
2019-01-02T13:12:50.000Z
|
2019-09-04T22:45:17.000Z
|
tests/helper_methods.py
|
ywalakamar/store-manager-api-v2-revisted
|
ab326735f702719ee884263f5eb395b9e7a50011
|
[
"Apache-2.0"
] | null | null | null |
from flask import current_app
def user_registration(self, data):
    """Register a user via POST api/v1/register with a JSON body."""
    return self.client.post(
        'api/v1/register',
        data=data,
        content_type='application/json')
def user_login(self, data):
    """Log a user in via POST api/v1/login with a JSON body."""
    return self.client.post(
        'api/v1/login',
        data=data,
        content_type='application/json')
def user_logout(self, token):
    """Log the user out via POST api/v1/logout using a bearer token."""
    auth_header = dict(Authorization="Bearer " + token)
    return self.client.post(
        'api/v1/logout',
        content_type='application/json',
        headers=auth_header)
def get_all_users(self):
    """Fetch every registered user via GET api/v1/register."""
    response = self.client.get(
        'api/v1/register',
        content_type='application/json')
    return response
def get_specific_user(self):
    """Fetch the user with id 1 via GET api/v1/register/1."""
    response = self.client.get(
        'api/v1/register/1',
        content_type='application/json')
    return response
def create_product(self, data, token):
    """Create a product via POST /api/v1/products with a bearer token."""
    auth_header = dict(Authorization="Bearer " + token)
    return self.client.post(
        '/api/v1/products',
        data=data,
        content_type='application/json',
        headers=auth_header)
def get_all_products(self, token):
    """Fetch every product via GET /api/v1/products with a bearer token."""
    auth_header = dict(Authorization="Bearer " + token)
    return self.client.get(
        '/api/v1/products',
        content_type='application/json',
        headers=auth_header)
def get_specific_product(self, token):
    """Fetch product 1 via GET /api/v1/products/1 with a bearer token."""
    auth_header = dict(Authorization="Bearer " + token)
    return self.client.get(
        '/api/v1/products/1',
        content_type='application/json',
        headers=auth_header)
def get_non_existing_product(self, token):
    """Request product 100 (expected missing) via GET /api/v1/products/100."""
    auth_header = dict(Authorization="Bearer " + token)
    return self.client.get(
        '/api/v1/products/100',
        content_type='application/json',
        headers=auth_header)
def product_update(self, data, token):
    """Update product 1 via PUT /api/v1/products/1 with a bearer token."""
    auth_header = dict(Authorization="Bearer " + token)
    return self.client.put(
        '/api/v1/products/1',
        data=data,
        content_type='application/json',
        headers=auth_header)
def make_sale(self, data, token):
    """Record a sale via POST /api/v1/sales with a bearer token."""
    auth_header = dict(Authorization="Bearer " + token)
    return self.client.post(
        '/api/v1/sales',
        data=data,
        content_type='application/json',
        headers=auth_header)
def get_all_sales(self, token):
    """Fetch every sale via GET /api/v1/sales with a bearer token."""
    auth_header = dict(Authorization="Bearer " + token)
    return self.client.get(
        '/api/v1/sales',
        content_type='application/json',
        headers=auth_header)
def get_specific_sale(self, token):
    """Fetch sale 1 via GET /api/v1/sales/1 with a bearer token."""
    auth_header = dict(Authorization="Bearer " + token)
    return self.client.get(
        '/api/v1/sales/1',
        content_type='application/json',
        headers=auth_header)
def delete_specific_product(self, token):
    """Delete product 1 via DELETE /api/v1/products/1 with a bearer token."""
    auth_header = dict(Authorization="Bearer " + token)
    return self.client.delete(
        '/api/v1/products/1',
        content_type='application/json',
        headers=auth_header)
| 34.181818
| 62
| 0.552527
| 309
| 3,008
| 5.255663
| 0.142395
| 0.086207
| 0.137931
| 0.224138
| 0.876232
| 0.837438
| 0.819581
| 0.776478
| 0.68165
| 0.562808
| 0
| 0.010902
| 0.329122
| 3,008
| 88
| 63
| 34.181818
| 0.793855
| 0.011636
| 0
| 0.694444
| 0
| 0
| 0.173135
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.194444
| false
| 0
| 0.013889
| 0.166667
| 0.402778
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 6
|
8a3dcd5bbd05289c983c2e8ca8c86b8ab72f24d7
| 29
|
py
|
Python
|
src/nukleus/spice/__init__.py
|
spielhuus/nukleus
|
55d7ae6051720213024fa20c8c9a92110f5566ce
|
[
"MIT"
] | null | null | null |
src/nukleus/spice/__init__.py
|
spielhuus/nukleus
|
55d7ae6051720213024fa20c8c9a92110f5566ce
|
[
"MIT"
] | null | null | null |
src/nukleus/spice/__init__.py
|
spielhuus/nukleus
|
55d7ae6051720213024fa20c8c9a92110f5566ce
|
[
"MIT"
] | null | null | null |
from .ngspice import ngspice
| 14.5
| 28
| 0.827586
| 4
| 29
| 6
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137931
| 29
| 1
| 29
| 29
| 0.96
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8a53c3e24225b3269ebe2649a9debc245d6daa57
| 142
|
py
|
Python
|
python/dgl/ops/__init__.py
|
khaled-rahman/FusedMM4DGL
|
e3c46fc1ff886969774c382c944fdb49773a788f
|
[
"Apache-2.0"
] | null | null | null |
python/dgl/ops/__init__.py
|
khaled-rahman/FusedMM4DGL
|
e3c46fc1ff886969774c382c944fdb49773a788f
|
[
"Apache-2.0"
] | null | null | null |
python/dgl/ops/__init__.py
|
khaled-rahman/FusedMM4DGL
|
e3c46fc1ff886969774c382c944fdb49773a788f
|
[
"Apache-2.0"
] | null | null | null |
"""dgl operator module."""
from .spmm import *
from .sddmm import *
from .fusedmm import *
from .edge_softmax import *
from .segment import *
| 20.285714
| 27
| 0.71831
| 19
| 142
| 5.315789
| 0.578947
| 0.39604
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.161972
| 142
| 6
| 28
| 23.666667
| 0.84874
| 0.140845
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8a936df01bd144c694eb48eb19791a0912253187
| 34
|
py
|
Python
|
monailabel/utils/datastore/__init__.py
|
finalelement/MONAILabel
|
3f63ffd4f49161076e77b7c74c733f6ce5cce78c
|
[
"Apache-2.0"
] | 1
|
2021-07-27T12:45:36.000Z
|
2021-07-27T12:45:36.000Z
|
monailabel/utils/datastore/__init__.py
|
finalelement/MONAILabel
|
3f63ffd4f49161076e77b7c74c733f6ce5cce78c
|
[
"Apache-2.0"
] | null | null | null |
monailabel/utils/datastore/__init__.py
|
finalelement/MONAILabel
|
3f63ffd4f49161076e77b7c74c733f6ce5cce78c
|
[
"Apache-2.0"
] | 1
|
2021-07-27T12:45:38.000Z
|
2021-07-27T12:45:38.000Z
|
from .local import LocalDatastore
| 17
| 33
| 0.852941
| 4
| 34
| 7.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.117647
| 34
| 1
| 34
| 34
| 0.966667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8ac455266f1a89d6921653852cf976c9312c6bf9
| 33,361
|
py
|
Python
|
src/azure-cli/azure/cli/command_modules/netappfiles/tests/latest/test_volume_commands.py
|
ZengTaoxu/azure-cli
|
6be96de450da5ac9f07aafb22dd69880bea04792
|
[
"MIT"
] | null | null | null |
src/azure-cli/azure/cli/command_modules/netappfiles/tests/latest/test_volume_commands.py
|
ZengTaoxu/azure-cli
|
6be96de450da5ac9f07aafb22dd69880bea04792
|
[
"MIT"
] | null | null | null |
src/azure-cli/azure/cli/command_modules/netappfiles/tests/latest/test_volume_commands.py
|
ZengTaoxu/azure-cli
|
6be96de450da5ac9f07aafb22dd69880bea04792
|
[
"MIT"
] | null | null | null |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.core.azclierror import ValidationError
from azure.cli.testsdk import ScenarioTest, ResourceGroupPreparer
from azure.cli.testsdk.decorators import serial_test
import time
# Default CLI argument fragments reused by most tests below.
POOL_DEFAULT = "--service-level 'Premium' --size 4"
VOLUME_DEFAULT = "--service-level 'Premium' --usage-threshold 100"
# Regions: primary resource group, data-protection (replication) target,
# and the location used for the delegated vnet/subnet.
RG_LOCATION = "southcentralusstage"
DP_RG_LOCATION = "eastus2euap"
VNET_LOCATION = "southcentralus"
GIB_SCALE = 1024 * 1024 * 1024  # bytes per GiB
# No tidy up of tests required. The resource group is automatically removed
class AzureNetAppFilesVolumeServiceScenarioTest(ScenarioTest):
    def setup_vnet(self, rg, vnet_name, subnet_name, ip_pre, location):
        """Create a vnet (ip_pre/16) plus a /24 subnet delegated to Microsoft.Netapp/volumes."""
        self.cmd("az network vnet create -n %s --resource-group %s -l %s --address-prefix %s/16" % (vnet_name, rg, location, ip_pre))
        self.cmd("az network vnet subnet create -n %s -g %s --vnet-name %s --address-prefixes '%s/24' --delegations 'Microsoft.Netapp/volumes'" % (subnet_name, rg, vnet_name, ip_pre))
def current_subscription(self):
subs = self.cmd("az account show").get_output_in_json()
return subs['id']
    def create_volume(self, account_name, pool_name, volume_name1, rg, tags=None, volume_name2=None, protocols=None,
                      pool_payload=POOL_DEFAULT, volume_payload=VOLUME_DEFAULT, rule_index=1, allowed_clients="0.0.0.0/0"):
        """Create account, pool and one (optionally two) volumes; return the first volume's JSON."""
        vnet_name = self.create_random_name(prefix='cli-vnet-', length=24)
        subnet_name = self.create_random_name(prefix='cli-subnet-', length=16)
        file_path = volume_name1  # creation_token
        # Optional CLI fragments collapse to empty strings when not requested.
        protocol_types = "--protocol-types %s" % protocols if protocols is not None else ""
        tag = "--tags %s" % tags if tags is not None else ""
        self.prepare_for_volume_creation(rg, account_name, pool_name, vnet_name, subnet_name, pool_payload, tags)
        volume1 = self.cmd("az netappfiles volume create --resource-group %s --account-name %s --pool-name %s "
                           "--volume-name %s -l %s %s --file-path %s --vnet %s --subnet %s %s %s --rule-index %s "
                           "--allowed-clients %s" %
                           (rg, account_name, pool_name, volume_name1, RG_LOCATION, volume_payload, file_path,
                            vnet_name, subnet_name, protocol_types, tag, rule_index, allowed_clients)).get_output_in_json()
        # The second volume shares the vnet/subnet and always gets tags.
        if volume_name2:
            file_path = volume_name2
            self.cmd("az netappfiles volume create -g %s -a %s -p %s -v %s -l %s %s --file-path %s --vnet %s --subnet %s --tags %s" % (rg, account_name, pool_name, volume_name2, RG_LOCATION, VOLUME_DEFAULT, file_path, vnet_name, subnet_name, tags)).get_output_in_json()
        return volume1
    def prepare_for_volume_creation(self, rg, account_name, pool_name, vnet_name, subnet_name,
                                    pool_payload=POOL_DEFAULT, tags=None):
        """Create the vnet/subnet, NetApp account and capacity pool a volume needs."""
        tag = "--tags %s" % tags if tags is not None else ""
        self.setup_vnet(rg, vnet_name, subnet_name, '10.0.0.0', VNET_LOCATION)
        self.cmd("az netappfiles account create -g %s -a '%s' -l %s" %
                 (rg, account_name, RG_LOCATION)).get_output_in_json()
        self.cmd("az netappfiles pool create -g %s -a %s -p %s -l %s %s %s" %
                 (rg, account_name, pool_name, RG_LOCATION, pool_payload, tag)).get_output_in_json()
    def wait_for_replication_status(self, target_state, rg_r, account_name_r, pool_name_r, volume_name_r):
        """Poll replication status until mirrorState equals *target_state* (max 10 attempts)."""
        # python isn't good at do-while loops but loop until we get the target state
        attempts = 0
        # Extra settle-time and an initial status fetch when waiting for
        # "Mirrored" against the live service. NOTE(review): this request is
        # part of the recorded session — removing it would change the call
        # count and break playback; confirm before refactoring.
        if (self.is_live or self.in_recording) and target_state == "Mirrored":
            time.sleep(20)
            replication_status = self.cmd("az netappfiles volume replication status -g %s -a %s -p %s -v %s" %
                                          (rg_r, account_name_r, pool_name_r, volume_name_r)).get_output_in_json()
        while attempts < 10:
            attempts += 1
            replication_status = self.cmd("az netappfiles volume replication status -g %s -a %s -p %s -v %s" %
                                          (rg_r, account_name_r, pool_name_r, volume_name_r)).get_output_in_json()
            if replication_status['mirrorState'] == target_state:
                break
            # Sleeping only matters against the live service; playback is instant.
            if self.is_live or self.in_recording:
                time.sleep(60)
        assert replication_status['mirrorState'] == target_state
    @serial_test()
    @ResourceGroupPreparer(name_prefix='cli_netappfiles_test_volume_', additional_tags={'owner': 'cli_test'})
    def test_create_delete_volumes(self):
        """Create an NFSv3 volume with tags, verify its properties, then delete it."""
        account_name = self.create_random_name(prefix='cli-acc-', length=24)
        pool_name = self.create_random_name(prefix='cli-pool-', length=24)
        volume_name = self.create_random_name(prefix='cli-vol-', length=24)
        tags = "Tag1=Value1 Tag2=Value2"
        protocol_types = "NFSv3"
        volume = self.create_volume(account_name, pool_name, volume_name, '{rg}', tags=tags, protocols=protocol_types)
        assert volume['name'] == account_name + '/' + pool_name + '/' + volume_name
        assert volume['tags']['Tag1'] == 'Value1'
        assert volume['tags']['Tag2'] == 'Value2'
        # default export policy still present
        assert volume['exportPolicy']['rules'][0]['allowedClients'] == '0.0.0.0/0'
        assert not volume['exportPolicy']['rules'][0]['cifs']
        assert volume['exportPolicy']['rules'][0]['ruleIndex'] == 1
        # check a mount target is present
        assert len(volume['mountTargets']) == 1
        # specified protocol type
        assert len(volume['protocolTypes']) == 1
        assert volume['protocolTypes'][0] == 'NFSv3'
        # replication
        assert volume['volumeType'] is None
        assert volume['dataProtection'] is None
        assert volume['kerberosEnabled'] is False
        assert volume['securityStyle'] == 'Unix'
        # NOTE(review): some commands below omit the leading "az" —
        # presumably ScenarioTest.cmd accepts both forms; confirm.
        volume_list = self.cmd("netappfiles volume list --resource-group {rg} --account-name %s --pool-name %s" % (account_name, pool_name)).get_output_in_json()
        assert len(volume_list) == 1
        self.cmd("az netappfiles volume delete --resource-group {rg} --account-name %s --pool-name %s --volume-name %s --force" % (account_name, pool_name, volume_name))
        volume_list = self.cmd("netappfiles volume list --resource-group {rg} -a %s -p %s" % (account_name, pool_name)).get_output_in_json()
        assert len(volume_list) == 0
    @ResourceGroupPreparer(name_prefix='cli_netappfiles_test_volume_', additional_tags={'owner': 'cli_test'})
    def test_create_volume_with_subnet_in_different_rg(self):
        """A volume can reference a subnet that lives in a different resource group."""
        account_name = self.create_random_name(prefix='cli-acc-', length=24)
        pool_name = self.create_random_name(prefix='cli-pool-', length=24)
        volume_name = self.create_random_name(prefix='cli-vol-', length=24)
        vnet_name = self.create_random_name(prefix='cli-vnet-', length=24)
        file_path = volume_name  # creation_token
        subnet_name = self.create_random_name(prefix='cli-subnet-', length=16)
        subnet_rg = self.create_random_name(prefix='cli-rg-subnet', length=24)
        subs_id = self.current_subscription()
        # The subnet's resource group is created manually (not by the
        # preparer), so it is also deleted manually at the end.
        self.cmd("az group create -n %s --subscription %s -l %s --tags 'owner=cli_test'" % (subnet_rg, subs_id, VNET_LOCATION)).get_output_in_json()
        rg = '{rg}'
        self.setup_vnet(subnet_rg, vnet_name, subnet_name, '10.0.0.0', VNET_LOCATION)
        self.cmd("az netappfiles account create -g %s -a %s -l %s" % (rg, account_name, RG_LOCATION)).get_output_in_json()
        self.cmd("az netappfiles pool create -g %s -a %s -p %s -l %s %s" % (rg, account_name, pool_name, RG_LOCATION, POOL_DEFAULT)).get_output_in_json()
        # Cross-group subnets must be passed as a full ARM resource id.
        subnet_id = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/virtualNetworks/%s/subnets/%s" % (subs_id, subnet_rg, vnet_name, subnet_name)
        volume = self.cmd("az netappfiles volume create --resource-group %s --account-name %s --pool-name %s --volume-name %s -l %s %s --file-path %s --vnet %s --subnet %s" % (rg, account_name, pool_name, volume_name, RG_LOCATION, VOLUME_DEFAULT, file_path, vnet_name, subnet_id)).get_output_in_json()
        assert volume['name'] == account_name + '/' + pool_name + '/' + volume_name
        self.cmd("az netappfiles volume delete --resource-group %s --account-name %s --pool-name %s --volume-name %s" % (rg, account_name, pool_name, volume_name))
        self.cmd("az group delete --yes -n %s" % (subnet_rg))
@ResourceGroupPreparer(name_prefix='cli_netappfiles_test_volume_', additional_tags={'owner': 'cli_test'})
@ResourceGroupPreparer(name_prefix='cli_netappfiles_test_volume2_', parameter_name='replication_resourcegroup', additional_tags={'owner': 'cli_test'})
def test_perform_replication(self, resource_group, replication_resourcegroup):
    """Exercise the cross-region replication lifecycle: create a source and a
    destination (DataProtection) volume, approve the replication, break it,
    resume it, force-break it and finally remove it."""
    # create source volume
    account_name = self.create_random_name(prefix='cli-acc-', length=24)
    account_name_r = self.create_random_name(prefix='cli-acc-', length=24)
    pool_name = self.create_random_name(prefix='cli-pool-', length=24)
    pool_name_r = self.create_random_name(prefix='cli-pool-', length=24)
    volume_name = self.create_random_name(prefix='cli-vol-', length=24)
    volume_name_r = self.create_random_name(prefix='cli-vol-', length=24)
    rg = '{rg}'  # placeholder resolved by self.cmd against the first prepared group
    src_volume = self.create_volume(account_name, pool_name, volume_name, '{rg}')
    assert src_volume['id'] is not None
    # create destination volume in other region/rg and with its own vnet
    vnet_name = self.create_random_name(prefix='cli-vnet-', length=24)
    file_path = volume_name_r  # creation_token
    subnet_name = self.create_random_name(prefix='cli-subnet-', length=16)
    # rg_r = self.create_random_name(prefix='cli-rg-', length=24)
    rg_r = replication_resourcegroup
    subs_id = self.current_subscription()
    self.setup_vnet(rg_r, vnet_name, subnet_name, '10.1.0.0', DP_RG_LOCATION)
    self.cmd("az netappfiles account create -g %s -a %s -l %s" % (rg_r, account_name_r, DP_RG_LOCATION)).get_output_in_json()
    self.cmd("az netappfiles pool create -g %s -a %s -p %s -l %s %s" % (rg_r, account_name_r, pool_name_r, DP_RG_LOCATION, POOL_DEFAULT)).get_output_in_json()
    subnet_id = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/virtualNetworks/%s/subnets/%s" % (subs_id, rg_r, vnet_name, subnet_name)
    # "--volume-type DataProtection --endpoint-type dst" plus the source volume id
    # makes this volume the replication destination.
    dst_volume = self.cmd("az netappfiles volume create --resource-group %s --account-name %s --pool-name %s --volume-name %s -l %s %s --file-path %s --vnet %s --subnet %s --volume-type %s --endpoint-type %s --replication-schedule %s --remote-volume-resource-id %s" % (rg_r, account_name_r, pool_name_r, volume_name_r, DP_RG_LOCATION, VOLUME_DEFAULT, file_path, vnet_name, subnet_id, "DataProtection", "dst", "_10minutely", src_volume['id'])).get_output_in_json()
    assert dst_volume['dataProtection'] is not None
    assert dst_volume['id'] is not None
    # Give the service time before approving; skipped during recorded playback.
    if self.is_live or self.in_recording:
        time.sleep(90)
    # approve (issued against the *source* volume, pointing at the destination id)
    self.cmd("az netappfiles volume replication approve -g %s -a %s -p %s -v %s --remote-volume-resource-id %s" % (rg, account_name, pool_name, volume_name, dst_volume['id']))
    self.wait_for_replication_status("Mirrored", rg_r, account_name_r, pool_name_r, volume_name_r)
    # break
    self.cmd("az netappfiles volume replication suspend -g %s -a %s -p %s -v %s" % (rg_r, account_name_r, pool_name_r, volume_name_r))
    self.wait_for_replication_status("Broken", rg_r, account_name_r, pool_name_r, volume_name_r)
    # resume
    self.cmd("az netappfiles volume replication resume -g %s -a %s -p %s -v %s" % (rg_r, account_name_r, pool_name_r, volume_name_r))
    self.wait_for_replication_status("Mirrored", rg_r, account_name_r, pool_name_r, volume_name_r)
    # break again, this time with force (-f True)
    self.cmd("az netappfiles volume replication suspend -g %s -a %s -p %s -v %s -f %s" % (rg_r, account_name_r, pool_name_r, volume_name_r, True))
    self.wait_for_replication_status("Broken", rg_r, account_name_r, pool_name_r, volume_name_r)
    # delete the replication itself (volumes remain)
    self.cmd("az netappfiles volume replication remove -g %s -a %s -p %s -v %s" % (rg_r, account_name_r, pool_name_r, volume_name_r))
    if self.is_live or self.in_recording:
        time.sleep(2)
@ResourceGroupPreparer(name_prefix='cli_netappfiles_test_volume_', additional_tags={'owner': 'cli_test'})
def test_list_volumes(self):
    """List the volumes of a pool and verify the count drops after a delete."""
    account_name = self.create_random_name(prefix='cli-acc-', length=24)
    pool_name = self.create_random_name(prefix='cli-pool-', length=24)
    first_volume = self.create_random_name(prefix='cli-vol-', length=24)
    second_volume = self.create_random_name(prefix='cli-vol-', length=24)
    # The helper creates both volumes in one go via the volume_name2 argument.
    self.create_volume(account_name, pool_name, first_volume, '{rg}', tags="Tag1=Value1", volume_name2=second_volume)
    volumes = self.cmd("netappfiles volume list --resource-group {rg} -a '%s' -p '%s'" % (account_name, pool_name)).get_output_in_json()
    assert len(volumes) == 2
    # Delete one volume and confirm that only the other remains listed.
    self.cmd("az netappfiles volume delete -g {rg} -a %s -p %s -v %s" % (account_name, pool_name, first_volume))
    volumes = self.cmd("netappfiles volume list -g {rg} -a '%s' -p '%s'" % (account_name, pool_name)).get_output_in_json()
    assert len(volumes) == 1
@serial_test()
@ResourceGroupPreparer(name_prefix='cli_netappfiles_test_volume_', additional_tags={'owner': 'cli_test'})
def test_get_volume_by_name(self):
    """Create an NFSv4.1 volume, then read it back both by name and by ARM id."""
    account_name = self.create_random_name(prefix='cli-acc-', length=24)
    pool_name = self.create_random_name(prefix='cli-pool-', length=24)
    volume_name = self.create_random_name(prefix='cli-vol-', length=24)
    tags = "Tag2=Value1"
    protocol_types = "NFSv4.1"
    volume = self.create_volume(account_name, pool_name, volume_name, '{rg}', tags=tags, protocols=protocol_types, rule_index=1)
    assert volume['name'] == account_name + '/' + pool_name + '/' + volume_name
    # specified protocol type
    assert len(volume['protocolTypes']) == 1
    assert volume['protocolTypes'][0] == 'NFSv4.1'
    # The single export-policy rule must reflect the NFSv4.1 protocol choice.
    assert len(volume['exportPolicy']['rules']) == 1
    assert volume['exportPolicy']['rules'][0]['ruleIndex'] == 1
    assert volume['exportPolicy']['rules'][0]['nfsv41']
    assert not volume['exportPolicy']['rules'][0]['nfsv3']
    # Lookup by resource group / account / pool / volume name.
    volume = self.cmd("az netappfiles volume show --resource-group {rg} -a %s -p %s -v %s" % (account_name, pool_name, volume_name)).get_output_in_json()
    assert volume['name'] == account_name + '/' + pool_name + '/' + volume_name
    assert volume['tags']['Tag2'] == 'Value1'
    # Lookup by full ARM resource id via --ids.
    volume_from_id = self.cmd("az netappfiles volume show --ids %s" % volume['id']).get_output_in_json()
    assert volume_from_id['name'] == account_name + '/' + pool_name + '/' + volume_name
@ResourceGroupPreparer(name_prefix='cli_netappfiles_test_volume_', additional_tags={'owner': 'cli_test'})
def test_update_volume(self):
    """Update tags and quota on a volume; untouched properties must persist."""
    account_name = self.create_random_name(prefix='cli-acc-', length=24)
    pool_name = self.create_random_name(prefix='cli-pool-', length=24)
    volume_name = self.create_random_name(prefix='cli-vol-', length=24)
    expected_name = account_name + '/' + pool_name + '/' + volume_name
    created = self.create_volume(account_name, pool_name, volume_name, '{rg}')
    assert created['name'] == expected_name
    # Defaults: a single NFSv3 protocol entry and a 100 GiB quota.
    assert len(created['protocolTypes']) == 1
    assert created['protocolTypes'][0] == 'NFSv3'
    assert created['usageThreshold'] == 100 * GIB_SCALE
    updated = self.cmd("az netappfiles volume update --resource-group {rg} -a %s -p %s -v %s --tags %s --usage-threshold 200" % (account_name, pool_name, volume_name, "Tag1=Value2")).get_output_in_json()
    assert updated['name'] == expected_name
    assert updated['serviceLevel'] == "Premium"  # unchanged
    assert updated['usageThreshold'] == 200 * GIB_SCALE
    assert updated['tags']['Tag1'] == 'Value2'
    # default export policy still present
    assert updated['exportPolicy']['rules'][0]['allowedClients'] == '0.0.0.0/0'
    assert not updated['exportPolicy']['rules'][0]['cifs']
    assert updated['exportPolicy']['rules'][0]['ruleIndex'] == 1
@ResourceGroupPreparer(name_prefix='cli_netappfiles_test_volume_', additional_tags={'owner': 'cli_test'})
def test_export_policy(self):
    """Add two export-policy rules, list them, remove one and verify the count."""
    account_name = self.create_random_name(prefix='cli-acc-', length=24)
    pool_name = self.create_random_name(prefix='cli-pool-', length=24)
    volume_name = self.create_random_name(prefix='cli-vol-', length=24)
    volume = self.create_volume(account_name, pool_name, volume_name, '{rg}')
    assert volume['name'] == account_name + '/' + pool_name + '/' + volume_name
    # now add an export policy
    # there is already one default rule present
    vol_with_export_policy = self.cmd("netappfiles volume export-policy add -g {rg} -a %s -p %s -v %s --allowed-clients '1.2.3.0/24' --rule-index 3 --unix-read-only true --unix-read-write false --cifs false --nfsv3 true --nfsv41 false" % (account_name, pool_name, volume_name)).get_output_in_json()
    assert vol_with_export_policy['name'] == account_name + '/' + pool_name + '/' + volume_name
    assert vol_with_export_policy['exportPolicy']['rules'][0]['allowedClients'] == '1.2.3.0/24'
    assert vol_with_export_policy['exportPolicy']['rules'][0]['ruleIndex'] == 3
    assert vol_with_export_policy['exportPolicy']['rules'][0]['cifs'] is False
    # and add another export policy
    vol_with_export_policy = self.cmd("netappfiles volume export-policy add -g {rg} -a %s -p %s -v %s --allowed-clients '1.2.4.0/24' --rule-index 2 --unix-read-only true --unix-read-write false --cifs true --nfsv3 true --nfsv41 false" % (account_name, pool_name, volume_name)).get_output_in_json()
    assert vol_with_export_policy['name'] == account_name + '/' + pool_name + '/' + volume_name
    # The new rule (index 2) is expected first, pushing the index-3 rule to [1].
    assert vol_with_export_policy['exportPolicy']['rules'][1]['allowedClients'] == '1.2.3.0/24'
    assert vol_with_export_policy['exportPolicy']['rules'][0]['allowedClients'] == '1.2.4.0/24'
    assert vol_with_export_policy['exportPolicy']['rules'][0]['cifs'] is True
    assert len(vol_with_export_policy['exportPolicy']['rules']) == 3
    # list the policies
    export_policy = self.cmd("netappfiles volume export-policy list -g {rg} -a %s -p %s -v %s" % (account_name, pool_name, volume_name)).get_output_in_json()
    assert len(export_policy['rules']) == 3
    # and remove one
    self.cmd("netappfiles volume export-policy remove -g {rg} -a %s -p %s -v %s --rule-index 3" % (account_name, pool_name, volume_name)).get_output_in_json()
    # Give the service time to apply the removal before reading the volume back
    # (skipped during recorded playback).
    if self.is_live or self.in_recording:
        time.sleep(240)
    volume = self.cmd("az netappfiles volume show --resource-group {rg} -a %s -p %s -v %s" % (account_name, pool_name, volume_name)).get_output_in_json()
    assert volume['name'] == account_name + '/' + pool_name + '/' + volume_name
    assert len(volume['exportPolicy']['rules']) == 2
@ResourceGroupPreparer(name_prefix='cli_netappfiles_test_volume_', additional_tags={'owner': 'cli_test'})
def test_non_default_export_policy(self):
    """Adding an export policy works on a volume created with a non-default
    service level and usage threshold."""
    # tests that adding export policy works with non-default service level/usage threshold
    account_name = self.create_random_name(prefix='cli-acc-', length=24)
    pool_name = self.create_random_name(prefix='cli-pool-', length=24)
    volume_name = self.create_random_name(prefix='cli-vol-', length=24)
    pool_payload = "--service-level 'Standard' --size 8"
    volume_payload = "--service-level 'Standard' --usage-threshold 200"
    volume = self.create_volume(account_name, pool_name, volume_name, '{rg}', pool_payload=pool_payload, volume_payload=volume_payload)
    assert volume['name'] == account_name + '/' + pool_name + '/' + volume_name
    # check the specified volume properties
    assert volume['usageThreshold'] == 200 * GIB_SCALE
    assert volume['serviceLevel'] == "Standard"
    # now add an export policy
    # there is already one default rule present
    vol_with_export_policy = self.cmd("netappfiles volume export-policy add -g {rg} -a %s -p %s -v %s --allowed-clients '1.2.3.0/24' --rule-index 3 --unix-read-only true --unix-read-write false --cifs false --nfsv3 true --nfsv41 false" % (account_name, pool_name, volume_name)).get_output_in_json()
    assert vol_with_export_policy['name'] == account_name + '/' + pool_name + '/' + volume_name
    assert vol_with_export_policy['exportPolicy']['rules'][0]['allowedClients'] == '1.2.3.0/24'
    assert vol_with_export_policy['exportPolicy']['rules'][0]['ruleIndex'] == 3
    assert vol_with_export_policy['exportPolicy']['rules'][0]['cifs'] is False
    # and recheck the other properties are unchanged
    # NOTE(review): these assertions re-check the `volume` response captured
    # *before* the export-policy add, not a fresh GET — so they can only catch
    # mutation of the local dict. Refetching the volume would make this a real
    # verification; confirm intent before changing.
    assert volume['usageThreshold'] == 200 * GIB_SCALE
    assert volume['serviceLevel'] == "Standard"
@ResourceGroupPreparer(name_prefix='cli_netappfiles_test_volume_', additional_tags={'owner': 'cli_test'})
def test_create_volume_with_non_default_export_policy(self):
    """Create a volume passing export-policy flags inline. NFSv4.1 requires
    both --allowed-clients and --rule-index; each missing flag must raise a
    ValidationError before the full creation succeeds."""
    account_name = self.create_random_name(prefix='cli-acc-', length=24)
    pool_name = self.create_random_name(prefix='cli-pool-', length=24)
    volume_name = self.create_random_name(prefix='cli-vol-', length=24)
    file_path = volume_name  # creation_token
    vnet_name = self.create_random_name(prefix='cli-vnet-', length=24)
    subnet_name = self.create_random_name(prefix='cli-subnet-', length=16)
    protocol_types = "NFSv4.1"
    tag = "Tag2=Value1"
    # Export-policy values expected back on the created volume.
    rule_index = 2
    unix_read_only = False
    unix_read_write = True
    cifs = False
    nfsv3 = False
    nfsv41 = True
    allowed_clients = '1.2.3.0/24'
    self.prepare_for_volume_creation('{rg}', account_name, pool_name, vnet_name, subnet_name)
    # Error when allowed-clients not set on NFSv4.1
    with self.assertRaises(ValidationError):
        self.cmd("az netappfiles volume create -g {rg} -a %s -p %s -v %s -l %s %s --file-path %s --vnet %s "
                 "--subnet %s --protocol-types %s --tags %s --rule-index %s --unix-read-only %s "
                 "--unix-read-write %s --cifs %s" %
                 (account_name, pool_name, volume_name, RG_LOCATION, VOLUME_DEFAULT, file_path, vnet_name,
                  subnet_name, protocol_types, tag, rule_index, unix_read_only, unix_read_write, cifs))
    # Error when rule-index not set on NFSv4.1
    with self.assertRaises(ValidationError):
        self.cmd("az netappfiles volume create -g {rg} -a %s -p %s -v %s -l %s %s --file-path %s --vnet %s "
                 "--subnet %s --protocol-types %s --tags %s --unix-read-only %s --unix-read-write %s --cifs %s "
                 "--allowed-clients %s" %
                 (account_name, pool_name, volume_name, RG_LOCATION, VOLUME_DEFAULT, file_path, vnet_name,
                  subnet_name, protocol_types, tag, unix_read_only, unix_read_write, cifs, allowed_clients))
    # With both flags present the creation succeeds.
    volume = self.cmd("az netappfiles volume create -g {rg} -a %s -p %s -v %s -l %s %s --file-path %s --vnet %s "
                      "--subnet %s --protocol-types %s --tags %s --rule-index %s --unix-read-only %s "
                      "--unix-read-write %s --cifs %s --allowed-clients %s" %
                      (account_name, pool_name, volume_name, RG_LOCATION, VOLUME_DEFAULT, file_path,
                       vnet_name, subnet_name, protocol_types, tag, rule_index, unix_read_only, unix_read_write,
                       cifs, allowed_clients)).get_output_in_json()
    assert volume['name'] == account_name + '/' + pool_name + '/' + volume_name
    # specified protocol type
    assert len(volume['protocolTypes']) == 1
    assert volume['protocolTypes'][0] == 'NFSv4.1'
    # The single export-policy rule mirrors every flag passed on the command line.
    assert len(volume['exportPolicy']['rules']) == 1
    assert volume['exportPolicy']['rules'][0]['ruleIndex'] == rule_index
    assert volume['exportPolicy']['rules'][0]['unixReadOnly'] == unix_read_only
    assert volume['exportPolicy']['rules'][0]['unixReadWrite'] == unix_read_write
    assert volume['exportPolicy']['rules'][0]['nfsv41'] == nfsv41
    assert volume['exportPolicy']['rules'][0]['nfsv3'] == nfsv3
    assert volume['exportPolicy']['rules'][0]['cifs'] == cifs
    assert volume['exportPolicy']['rules'][0]['allowedClients'] == allowed_clients
@ResourceGroupPreparer(name_prefix='cli_netappfiles_test_volume_', additional_tags={'owner': 'cli_test'})
def test_change_pool(self):
    """Move a volume to another capacity pool with `volume pool-change`."""
    account_name = self.create_random_name(prefix='cli-acc-', length=24)
    pool_name = self.create_random_name(prefix='cli-pool-', length=24)
    volume_name = self.create_random_name(prefix='cli-vol-', length=24)
    pool2_name = self.create_random_name(prefix='cli-pool-', length=24)
    source_volume = self.create_volume(account_name, pool_name, volume_name, '{rg}')
    assert source_volume['name'] == account_name + '/' + pool_name + '/' + volume_name
    # A second pool is needed as the target of the move.
    target_pool = self.cmd("az netappfiles pool create -g %s -a %s -p %s -l %s %s" % ('{rg}', account_name, pool2_name, RG_LOCATION, POOL_DEFAULT)).get_output_in_json()
    assert target_pool['name'] == account_name + '/' + pool2_name
    # Move the volume, then read it back from the target pool to confirm.
    self.cmd("az netappfiles volume pool-change -g {rg} -a %s -p %s -v %s -d %s" % (account_name, pool_name, volume_name, target_pool['id']))
    moved_volume = self.cmd("az netappfiles volume show -g {rg} -a %s -p %s -v %s" % (account_name, pool2_name, volume_name)).get_output_in_json()
    assert moved_volume['name'] == account_name + '/' + pool2_name + '/' + volume_name
@ResourceGroupPreparer(name_prefix='cli_netappfiles_test_volume_', additional_tags={'owner': 'cli_test'})
def test_volume_parameters(self):
    """Create a volume with the optional SMB/encryption/LDAP/quota/AVS flags
    and verify each one is reflected on the created resource."""
    vnet_name = self.create_random_name(prefix='cli-vnet-', length=24)
    subnet_name = self.create_random_name(prefix='cli-subnet-', length=16)
    account_name = self.create_random_name(prefix='cli-acc-', length=24)
    pool_name = self.create_random_name(prefix='cli-pool-', length=24)
    volume_name = self.create_random_name(prefix='cli-vol-', length=24)
    # Non-default optional parameters under test.
    smb_encryption = False
    smb_continuously_avl = False
    encryption_key_source = "Microsoft.NetApp"
    ldap_enabled = False
    is_default_quota_enabled = False
    avs_data_store = "Disabled"
    self.prepare_for_volume_creation('{rg}', account_name, pool_name, vnet_name, subnet_name)
    volume = self.cmd("az netappfiles volume create --resource-group {rg} --account-name %s --pool-name %s "
                      "--volume-name %s -l %s %s --file-path %s --vnet %s --subnet %s --smb-encryption %s "
                      "--smb-continuously-avl %s --encryption-key-source %s --ldap-enabled %s "
                      "--is-def-quota-enabled %s --avs-data-store %s" %
                      (account_name, pool_name, volume_name, RG_LOCATION, VOLUME_DEFAULT, volume_name, vnet_name,
                       subnet_name, smb_encryption, smb_continuously_avl, encryption_key_source, ldap_enabled,
                       is_default_quota_enabled, avs_data_store)).get_output_in_json()
    assert volume['name'] == account_name + '/' + pool_name + '/' + volume_name
    # Each optional flag must round-trip onto the created volume.
    assert volume['smbEncryption'] == smb_encryption
    assert volume['smbContinuouslyAvailable'] == smb_continuously_avl
    assert volume['encryptionKeySource'] == encryption_key_source
    assert volume['ldapEnabled'] == ldap_enabled
    assert volume['isDefaultQuotaEnabled'] == is_default_quota_enabled
    assert volume['avsDataStore'] == avs_data_store
@ResourceGroupPreparer(name_prefix='cli_netappfiles_test_volume_', additional_tags={'owner': 'cli_test'})
def test_nfsv3_with_no_export_policy_provided_is_successful(self):
    """NFSv3 volumes do not require explicit export-policy flags: creation
    succeeds and a default NFSv3 rule is present on the result."""
    vnet_name = self.create_random_name(prefix='cli-vnet-', length=24)
    subnet_name = self.create_random_name(prefix='cli-subnet-', length=16)
    account_name = self.create_random_name(prefix='cli-acc-', length=24)
    pool_name = self.create_random_name(prefix='cli-pool-', length=24)
    volume_name = self.create_random_name(prefix='cli-vol-', length=24)
    self.prepare_for_volume_creation('{rg}', account_name, pool_name, vnet_name, subnet_name)
    volume = self.cmd("az netappfiles volume create --resource-group {rg} --account-name %s --pool-name %s "
                      "--volume-name %s -l %s %s --file-path %s --vnet %s --subnet %s --protocol-types NFSv3" %
                      (account_name, pool_name, volume_name, RG_LOCATION, VOLUME_DEFAULT, volume_name, vnet_name,
                       subnet_name)).get_output_in_json()
    assert volume['name'] == account_name + '/' + pool_name + '/' + volume_name
    # A single default rule with NFSv3 enabled is created automatically.
    assert len(volume['exportPolicy']['rules']) == 1
    assert volume['exportPolicy']['rules'][0]['nfsv3']
@ResourceGroupPreparer(name_prefix='cli_netappfiles_test_volume_', additional_tags={'owner': 'cli_test'})
def test_add_export_policy_with_no_rule_index(self):
    """When --rule-index is omitted, export-policy add picks the next free
    index (highest existing index + 1)."""
    account_name = self.create_random_name(prefix='cli-acc-', length=24)
    pool_name = self.create_random_name(prefix='cli-pool-', length=24)
    volume_name = self.create_random_name(prefix='cli-vol-', length=24)
    pool_payload = "--service-level 'Standard' --size 8"
    volume_payload = "--service-level 'Standard' --usage-threshold 200"
    volume = self.create_volume(account_name, pool_name, volume_name, '{rg}', pool_payload=pool_payload, volume_payload=volume_payload)
    # add an export policy
    # there is already one default rule present
    vol_with_export_policy = self.cmd("netappfiles volume export-policy add -g {rg} -a %s -p %s -v %s --allowed-clients '1.2.3.0/24' --rule-index 3 --unix-read-only true --unix-read-write false --cifs false --nfsv3 true --nfsv41 false" % (account_name, pool_name, volume_name)).get_output_in_json()
    assert vol_with_export_policy['name'] == account_name + '/' + pool_name + '/' + volume_name
    assert vol_with_export_policy['exportPolicy']['rules'][0]['ruleIndex'] == 3
    # add another export policy with no rule_index,
    # should result in default rule index of 4 since highest existing rule index is 3
    vol_with_export_policy = self.cmd("netappfiles volume export-policy add -g {rg} -a %s -p %s -v %s --allowed-clients '1.2.3.0/24' --unix-read-only true --unix-read-write false --cifs false --nfsv3 true --nfsv41 false" % (account_name, pool_name, volume_name)).get_output_in_json()
    assert vol_with_export_policy['exportPolicy']['rules'][0]['ruleIndex'] == 4
@ResourceGroupPreparer(name_prefix='cli_netappfiles_test_volume_', additional_tags={'owner': 'cli_test'})
def test_add_export_policy_with_invalid_rule_index(self):
    """Adding an export-policy rule with an index that is already taken must
    fail with a ValidationError."""
    account_name = self.create_random_name(prefix='cli-acc-', length=24)
    pool_name = self.create_random_name(prefix='cli-pool-', length=24)
    volume_name = self.create_random_name(prefix='cli-vol-', length=24)
    pool_payload = "--service-level 'Standard' --size 8"
    volume_payload = "--service-level 'Standard' --usage-threshold 200"
    self.create_volume(account_name, pool_name, volume_name, '{rg}', pool_payload=pool_payload, volume_payload=volume_payload)
    # add an export policy
    # there is already one default rule present
    vol_with_export_policy = self.cmd("netappfiles volume export-policy add -g {rg} -a %s -p %s -v %s --allowed-clients '1.2.3.0/24' --rule-index 3 --unix-read-only true --unix-read-write false --cifs false --nfsv3 true --nfsv41 false" % (account_name, pool_name, volume_name)).get_output_in_json()
    assert vol_with_export_policy['name'] == account_name + '/' + pool_name + '/' + volume_name
    assert vol_with_export_policy['exportPolicy']['rules'][0]['ruleIndex'] == 3
    # add another export policy with same rule_index, should result in validation error
    with self.assertRaisesRegex(ValidationError, "Rule index 3 already exist"):
        self.cmd("netappfiles volume export-policy add -g {rg} -a %s -p %s -v %s --allowed-clients '1.2.3.0/24' --rule-index 3 --unix-read-only true --unix-read-write false --cifs false --nfsv3 true --nfsv41 false" % (account_name, pool_name, volume_name)).get_output_in_json()
| 67.532389
| 467
| 0.661401
| 4,568
| 33,361
| 4.577058
| 0.067207
| 0.057346
| 0.047255
| 0.059977
| 0.816769
| 0.79515
| 0.758083
| 0.731729
| 0.715946
| 0.695667
| 0
| 0.016937
| 0.196517
| 33,361
| 493
| 468
| 67.669371
| 0.763067
| 0.055304
| 0
| 0.479564
| 0
| 0.13624
| 0.282536
| 0.026887
| 0
| 0
| 0
| 0
| 0.264305
| 1
| 0.051771
| false
| 0
| 0.010899
| 0
| 0.070845
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
76d01b755fb74b639ff07b1669f23f676e9406d5
| 21,822
|
py
|
Python
|
python/orca/src/bigdl/orca/data/file.py
|
DirkFi/BigDL
|
7493209165c046116470b9a1e1c8f527915d6f1e
|
[
"Apache-2.0"
] | null | null | null |
python/orca/src/bigdl/orca/data/file.py
|
DirkFi/BigDL
|
7493209165c046116470b9a1e1c8f527915d6f1e
|
[
"Apache-2.0"
] | null | null | null |
python/orca/src/bigdl/orca/data/file.py
|
DirkFi/BigDL
|
7493209165c046116470b9a1e1c8f527915d6f1e
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import subprocess
import logging
import shutil
import glob
from distutils.dir_util import copy_tree
logger = logging.getLogger(__name__)
def open_text(path):
    """
    Read a text file into a list of stripped lines.
    It supports local, hdfs, s3 file systems.

    :param path: text file path (plain local, "file://", "hdfs://" or "s3://").
    :return: list of lines with leading/trailing whitespace stripped.
    """
    if path.startswith("hdfs"):  # hdfs://url:port/file_path
        import pyarrow as pa
        fs = pa.hdfs.connect()
        with fs.open(path, 'rb') as f:
            lines = f.read().decode("utf-8").strip().split("\n")
    elif path.startswith("s3"):  # s3://bucket/file_path
        access_key_id = os.environ["AWS_ACCESS_KEY_ID"]
        secret_access_key = os.environ["AWS_SECRET_ACCESS_KEY"]
        import boto3
        s3_client = boto3.Session(
            aws_access_key_id=access_key_id,
            aws_secret_access_key=secret_access_key).client('s3', verify=False)
        path_parts = path.split("://")[1].split('/')
        bucket = path_parts.pop(0)
        key = "/".join(path_parts)
        data = s3_client.get_object(Bucket=bucket, Key=key)
        lines = data["Body"].read().decode("utf-8").strip().split("\n")
    else:  # Local path
        if path.startswith("file://"):
            path = path[len("file://"):]
        # Use a context manager so the handle is closed deterministically;
        # the original `for line in open(path)` leaked the file descriptor
        # until garbage collection.
        with open(path) as f:
            lines = f.readlines()
    return [line.strip() for line in lines]
def open_image(path):
    """
    Open an image file. It supports local, hdfs, s3 file systems.

    :param path: an image file path.
    :return: An :py:class:`~PIL.Image.Image` object.
    """
    from PIL import Image
    from io import BytesIO
    if path.startswith("hdfs"):  # hdfs://url:port/file_path
        import pyarrow as pa
        fs = pa.hdfs.connect()
        with fs.open(path, 'rb') as f:
            # Read the whole file before the handle closes: PIL decodes
            # lazily, so returning Image.open(f) on a handle that `with`
            # closes can fail later when the pixel data is first accessed.
            return Image.open(BytesIO(f.read()))
    elif path.startswith("s3"):  # s3://bucket/file_path
        access_key_id = os.environ["AWS_ACCESS_KEY_ID"]
        secret_access_key = os.environ["AWS_SECRET_ACCESS_KEY"]
        import boto3
        s3_client = boto3.Session(
            aws_access_key_id=access_key_id,
            aws_secret_access_key=secret_access_key).client('s3', verify=False)
        path_parts = path.split("://")[1].split('/')
        bucket = path_parts.pop(0)
        key = "/".join(path_parts)
        data = s3_client.get_object(Bucket=bucket, Key=key)
        return Image.open(BytesIO(data["Body"].read()))
    else:  # Local path
        if path.startswith("file://"):
            path = path[len("file://"):]
        return Image.open(path)
def load_numpy(path):
    """
    Load arrays or pickled objects from ``.npy``, ``.npz`` or pickled files.
    It supports local, hdfs, s3 file systems.

    :param path: file path.
    :return: array, tuple, dict, etc. — the data stored in the file. For
        ``.npz`` files the returned NpzFile instance must be closed to avoid
        leaking file descriptors.
    """
    import numpy as np
    if path.startswith("hdfs"):  # hdfs://url:port/file_path
        import pyarrow as pa
        hdfs_fs = pa.hdfs.connect()
        with hdfs_fs.open(path, 'rb') as stream:
            return np.load(stream)
    if path.startswith("s3"):  # s3://bucket/file_path
        access_key_id = os.environ["AWS_ACCESS_KEY_ID"]
        secret_access_key = os.environ["AWS_SECRET_ACCESS_KEY"]
        import boto3
        from io import BytesIO
        client = boto3.Session(
            aws_access_key_id=access_key_id,
            aws_secret_access_key=secret_access_key).client('s3', verify=False)
        # "s3://bucket/a/b" -> bucket="bucket", key="a/b"
        parts = path.split("://")[1].split('/')
        bucket = parts.pop(0)
        key = "/".join(parts)
        raw = client.get_object(Bucket=bucket, Key=key)["Body"].read()
        return np.load(BytesIO(raw))
    # Local path (with or without a "file://" prefix).
    local_path = path[len("file://"):] if path.startswith("file://") else path
    return np.load(local_path)
def exists(path):
    """
    Check if a path exists or not. It supports local, hdfs, s3 file systems.

    :param path: file or directory path string.
    :return: True if the path exists, False otherwise.
    """
    if path.startswith("s3"):  # s3://bucket/file_path
        access_key_id = os.environ["AWS_ACCESS_KEY_ID"]
        secret_access_key = os.environ["AWS_SECRET_ACCESS_KEY"]
        import boto3
        s3_client = boto3.Session(
            aws_access_key_id=access_key_id,
            aws_secret_access_key=secret_access_key).client('s3', verify=False)
        path_parts = path.split("://")[1].split('/')
        bucket = path_parts.pop(0)
        key = "/".join(path_parts)
        # NOTE: get_object downloads the object body just to probe existence;
        # head_object would be cheaper but reports a different error code, so
        # the existing behavior is kept.
        try:
            s3_client.get_object(Bucket=bucket, Key=key)
        except Exception as ex:
            # Only botocore's ClientError carries a ``response`` attribute;
            # use getattr so other exception types don't raise AttributeError
            # while being inspected here.
            error = getattr(ex, "response", {}).get("Error", {})
            if error.get("Code") == "NoSuchKey":
                return False
            raise  # bare raise preserves the original traceback
        return True
    elif path.startswith("hdfs://"):
        import pyarrow as pa
        host_port = path.split("://")[1].split("/")[0].split(":")
        classpath = subprocess.Popen(["hadoop", "classpath", "--glob"],
                                     stdout=subprocess.PIPE).communicate()[0]
        os.environ["CLASSPATH"] = classpath.decode("utf-8")
        fs = pa.hdfs.connect(host=host_port[0], port=int(host_port[1]))
        return fs.exists(path)
    else:
        if path.startswith("file://"):
            path = path[len("file://"):]
        return os.path.exists(path)
def makedirs(path):
    """
    Make a directory, creating intermediate directories as needed.
    It supports local, hdfs, s3 file systems.

    :param path: directory path string to be created.
    """
    if path.startswith("s3"):  # s3://bucket/file_path
        access_key_id = os.environ["AWS_ACCESS_KEY_ID"]
        secret_access_key = os.environ["AWS_SECRET_ACCESS_KEY"]
        import boto3
        client = boto3.Session(
            aws_access_key_id=access_key_id,
            aws_secret_access_key=secret_access_key).client('s3', verify=False)
        parts = path.split("://")[1].split('/')
        bucket = parts.pop(0)
        key = "/".join(parts)
        # S3 has no real directories; an empty object stands in for one.
        return client.put_object(Bucket=bucket, Key=key, Body='')
    if path.startswith("hdfs://"):
        import pyarrow as pa
        host_port = path.split("://")[1].split("/")[0].split(":")
        hadoop_cp = subprocess.Popen(["hadoop", "classpath", "--glob"],
                                     stdout=subprocess.PIPE).communicate()[0]
        os.environ["CLASSPATH"] = hadoop_cp.decode("utf-8")
        fs = pa.hdfs.connect(host=host_port[0], port=int(host_port[1]))
        return fs.mkdir(path)
    # Local path (with or without a "file://" prefix).
    local_path = path[len("file://"):] if path.startswith("file://") else path
    return os.makedirs(local_path)
def write_text(path, text):
    """
    Write text to a file. It supports local, hdfs, s3 file systems.

    :param path: file path.
    :param text: text string to write.
    :return: number of bytes/characters written, or the AWS put_object
        response for s3 paths.
    """
    if path.startswith("hdfs"):  # hdfs://url:port/file_path
        import pyarrow as pa
        fs = pa.hdfs.connect()
        # The ``with`` block closes the handle; the original also called
        # f.close() inside it, which was redundant.
        with fs.open(path, 'wb') as f:
            return f.write(text.encode('utf-8'))
    elif path.startswith("s3"):  # s3://bucket/file_path
        access_key_id = os.environ["AWS_ACCESS_KEY_ID"]
        secret_access_key = os.environ["AWS_SECRET_ACCESS_KEY"]
        import boto3
        s3_client = boto3.Session(
            aws_access_key_id=access_key_id,
            aws_secret_access_key=secret_access_key).client('s3', verify=False)
        path_parts = path.split("://")[1].split('/')
        bucket = path_parts.pop(0)
        key = "/".join(path_parts)
        return s3_client.put_object(Bucket=bucket, Key=key, Body=text)
    else:
        if path.startswith("file://"):
            path = path[len("file://"):]
        with open(path, 'w') as f:
            return f.write(text)
def is_file(path):
    """
    Check if a path is a file or not. It supports local, hdfs, s3 file systems.

    :param path: path string.
    :return: True if the path refers to a file, False otherwise.
    """
    if path.startswith("s3"):  # s3://bucket/file_path
        access_key_id = os.environ["AWS_ACCESS_KEY_ID"]
        secret_access_key = os.environ["AWS_SECRET_ACCESS_KEY"]
        import boto3
        client = boto3.Session(
            aws_access_key_id=access_key_id,
            aws_secret_access_key=secret_access_key).client('s3', verify=False)
        parts = path.split("://")[1].split('/')
        bucket = parts.pop(0)
        key = "/".join(parts)
        # A key counts as a "file" when it exists as an object but there are
        # no objects under the "key/" prefix (i.e. it is not a directory).
        obj_listing = client.list_objects(Bucket=bucket, Prefix=key,
                                          Delimiter='/', MaxKeys=1)
        if 'Contents' not in obj_listing:
            return False
        dir_listing = client.list_objects(Bucket=bucket, Prefix=key + '/',
                                          Delimiter='/', MaxKeys=1)
        return 'Contents' not in dir_listing
    if path.startswith("hdfs://"):
        import pyarrow as pa
        host_port = path.split("://")[1].split("/")[0].split(":")
        hadoop_cp = subprocess.Popen(["hadoop", "classpath", "--glob"],
                                     stdout=subprocess.PIPE).communicate()[0]
        os.environ["CLASSPATH"] = hadoop_cp.decode("utf-8")
        fs = pa.hdfs.connect(host=host_port[0], port=int(host_port[1]))
        return fs.isfile(path)
    # Local path (with or without a "file://" prefix).
    from pathlib import Path
    local_path = path[len("file://"):] if path.startswith("file://") else path
    return Path(local_path).is_file()
def put_local_dir_to_remote(local_dir, remote_dir):
    """
    Copy every file directly under ``local_dir`` to ``remote_dir``
    (non-recursive for hdfs/s3). It supports local, hdfs, s3 destinations.

    :param local_dir: local source directory.
    :param remote_dir: destination directory (local, "file://", "hdfs://" or "s3://").
    """
    if remote_dir.startswith("hdfs"):  # hdfs://url:port/file_path
        import pyarrow as pa
        host_port = remote_dir.split("://")[1].split("/")[0].split(":")
        hadoop_cp = subprocess.Popen(["hadoop", "classpath", "--glob"],
                                     stdout=subprocess.PIPE).communicate()[0]
        os.environ["CLASSPATH"] = hadoop_cp.decode("utf-8")
        fs = pa.hdfs.connect(host=host_port[0], port=int(host_port[1]))
        # Create the destination directory on first use.
        if not fs.exists(remote_dir):
            fs.mkdir(remote_dir)
        for name in os.listdir(local_dir):
            with open(os.path.join(local_dir, name), "rb") as src:
                fs.upload(os.path.join(remote_dir, name), src)
    elif remote_dir.startswith("s3"):  # s3://bucket/file_path
        access_key_id = os.environ["AWS_ACCESS_KEY_ID"]
        secret_access_key = os.environ["AWS_SECRET_ACCESS_KEY"]
        import boto3
        client = boto3.Session(
            aws_access_key_id=access_key_id,
            aws_secret_access_key=secret_access_key).client('s3', verify=False)
        parts = remote_dir.split("://")[1].split('/')
        bucket = parts.pop(0)
        prefix = "/".join(parts)
        for name in os.listdir(local_dir):
            with open(os.path.join(local_dir, name), "rb") as src:
                client.upload_fileobj(src, Bucket=bucket, Key=prefix + '/' + name)
    else:
        if remote_dir.startswith("file://"):
            remote_dir = remote_dir[len("file://"):]
        copy_tree(local_dir, remote_dir)
def put_local_dir_tree_to_remote(local_dir, remote_dir):
    """Recursively copy the tree under ``local_dir`` to ``remote_dir``.

    Supports HDFS, S3 and local destinations.  Returns 0 on success and a
    non-zero value on failure (mirroring the shell-style return codes used
    by the sibling helpers).
    """
    if remote_dir.startswith("hdfs"):  # hdfs://url:port/file_path
        # Probe for the target directory; create it when it does not exist.
        probe = subprocess.Popen('hdfs dfs -ls {}'.format(remote_dir),
                                 shell=True,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
        _, err = probe.communicate()
        if probe.returncode != 0:
            message = err.decode('utf-8')
            if 'No such file or directory' not in message:
                # `ls` failed for some other reason — give up.
                logger.warning(message)
                return -1
            status = subprocess.Popen(
                'hdfs dfs -mkdir -p {}'.format(remote_dir),
                shell=True).wait()
            if status != 0:
                return status
        uploader = subprocess.Popen(
            'hdfs dfs -put -f {}/* {}/'.format(local_dir, remote_dir),
            shell=True)
        return uploader.wait()
    elif remote_dir.startswith("s3"):  # s3://bucket/file_path
        key_id = os.environ["AWS_ACCESS_KEY_ID"]
        secret = os.environ["AWS_SECRET_ACCESS_KEY"]
        import boto3
        client = boto3.Session(
            aws_access_key_id=key_id,
            aws_secret_access_key=secret).client('s3', verify=False)
        parts = remote_dir.split("://")[1].split('/')
        bucket = parts.pop(0)
        prefix = "/".join(parts)
        for dirpath, _, filenames in os.walk(local_dir):
            for name in filenames:
                full_path = os.path.join(dirpath, name)
                try:
                    with open(full_path, "rb") as src:
                        # Key keeps the path relative to local_dir.
                        client.upload_fileobj(
                            src, Bucket=bucket,
                            Key=prefix + '/' + full_path[len(local_dir) + 1:])
                except Exception as e:
                    logger.error('cannot upload file to s3: {}'.format(str(e)))
                    return -1
        return 0
    else:
        dest = remote_dir
        if dest.startswith("file://"):
            dest = dest[len("file://"):]
        try:
            copy_tree(local_dir, dest)
        except Exception as e:
            logger.warning(str(e))
            return -1
        return 0
def put_local_file_to_remote(local_path, remote_path, filemode=None):
    """Upload a single local file to a remote path.

    Supports HDFS (``hdfs://``), S3 (``s3://``) and local destinations
    (with an optional ``file://`` prefix).

    :param local_path: local source file path.
    :param remote_path: destination file URI.
    :param filemode: optional permission bits applied to the uploaded file.
    :return: 0 on success, -1 on failure (errors are logged, not raised).
    """
    if remote_path.startswith("hdfs"):  # hdfs://url:port/file_path
        import pyarrow as pa
        host_port = remote_path.split("://")[1].split("/")[0].split(":")
        classpath = subprocess.Popen(["hadoop", "classpath", "--glob"],
                                     stdout=subprocess.PIPE).communicate()[0]
        # pyarrow's HDFS driver needs the hadoop classpath in the environment
        os.environ["CLASSPATH"] = classpath.decode("utf-8")
        try:
            fs = pa.hdfs.connect(host=host_port[0], port=int(host_port[1]))
            remote_dir = os.path.dirname(remote_path)
            if not fs.exists(remote_dir):
                fs.mkdir(remote_dir)
            with open(local_path, "rb") as f:
                fs.upload(remote_path, f)
            if filemode:
                fs.chmod(remote_path, filemode)
        except Exception as e:
            # BUGFIX: the format string was missing the third placeholder,
            # so the actual error text was silently dropped.
            logger.error("Cannot upload file {} to {}: error: {}"
                         .format(local_path, remote_path, str(e)))
            return -1
        return 0
    elif remote_path.startswith("s3"):  # s3://bucket/file_path
        access_key_id = os.environ["AWS_ACCESS_KEY_ID"]
        secret_access_key = os.environ["AWS_SECRET_ACCESS_KEY"]
        import boto3
        try:
            s3_client = boto3.Session(
                aws_access_key_id=access_key_id,
                aws_secret_access_key=secret_access_key).client('s3', verify=False)
            path_parts = remote_path.split("://")[1].split('/')
            bucket = path_parts.pop(0)
            prefix = "/".join(path_parts)
            with open(local_path, "rb") as f:
                s3_client.upload_fileobj(f, Bucket=bucket, Key=prefix)
        except Exception as e:
            logger.error("Cannot upload file {} to {}: error: {}"
                         .format(local_path, remote_path, str(e)))
            return -1
        return 0
    else:
        if remote_path.startswith("file://"):
            remote_path = remote_path[len("file://"):]
        try:
            shutil.copy(local_path, remote_path)
            if filemode:
                os.chmod(remote_path, filemode)
        except Exception as e:
            logger.error("Cannot upload file {} to {}: error: {}"
                         .format(local_path, remote_path, str(e)))
            return -1
        return 0
def put_local_files_with_prefix_to_remote(local_path_prefix, remote_dir):
    """Upload every local file whose path starts with ``local_path_prefix``
    into ``remote_dir``.

    Supports HDFS, S3 and local destinations (optional ``file://`` prefix).

    :param local_path_prefix: path prefix; ``glob(prefix + "*")`` selects files.
    :param remote_dir: destination directory URI.
    :return: 0 on success, non-zero on failure.
    """
    file_list = glob.glob(local_path_prefix + "*")
    if remote_dir.startswith("hdfs"):  # hdfs://url:port/file_path
        cmd = 'hdfs dfs -put -f {}* {}'.format(local_path_prefix, remote_dir)
        process = subprocess.Popen(cmd, shell=True)
        return process.wait()
    elif remote_dir.startswith("s3"):  # s3://bucket/file_path
        access_key_id = os.environ["AWS_ACCESS_KEY_ID"]
        secret_access_key = os.environ["AWS_SECRET_ACCESS_KEY"]
        import boto3
        s3_client = boto3.Session(
            aws_access_key_id=access_key_id,
            aws_secret_access_key=secret_access_key).client('s3', verify=False)
        path_parts = remote_dir.split("://")[1].split('/')
        bucket = path_parts.pop(0)
        prefix = "/".join(path_parts)
        try:
            # BUGFIX: glob already returns full paths, so the previous code
            # double-joined them with local_dir and — worse — passed the
            # (possibly absolute) local path into os.path.join(prefix, ...),
            # which discards the S3 prefix entirely.  Upload each globbed
            # path directly and key it by prefix + basename.
            for file_path in file_list:
                s3_client.upload_file(
                    file_path, bucket,
                    os.path.join(prefix, os.path.basename(file_path)))
        except Exception as e:
            logger.error(str(e))
            return -1
        return 0
    else:
        if remote_dir.startswith("file://"):
            remote_dir = remote_dir[len("file://"):]
        try:
            for local_file in file_list:
                shutil.copy(local_file, remote_dir)
        except Exception as e:
            logger.error(str(e))
            return -1
        return 0
def get_remote_file_to_local(remote_path, local_path):
    """Download a single remote file to a local path.

    Supports HDFS, S3 and local sources (optional ``file://`` prefix).

    :param remote_path: source file URI.
    :param local_path: local destination path.
    :return: 0 on success, non-zero on failure.
    """
    if remote_path.startswith("hdfs"):  # hdfs://url:port/file_path
        cmd = 'hdfs dfs -get {} {}'.format(remote_path, local_path)
        process = subprocess.Popen(cmd, shell=True)
        return process.wait()
    elif remote_path.startswith("s3"):  # s3://bucket/file_path
        access_key_id = os.environ["AWS_ACCESS_KEY_ID"]
        secret_access_key = os.environ["AWS_SECRET_ACCESS_KEY"]
        import boto3
        s3_client = boto3.Session(
            aws_access_key_id=access_key_id,
            aws_secret_access_key=secret_access_key).client('s3', verify=False)
        path_parts = remote_path.split("://")[1].split('/')
        bucket = path_parts.pop(0)
        key = "/".join(path_parts)
        try:
            s3_client.download_file(bucket, key, local_path)
            return 0
        except Exception as e:
            # Consistency fix: use the module logger like the sibling
            # helpers instead of a bare print().
            logger.error(str(e))
            return -1
    else:
        if remote_path.startswith("file://"):
            remote_path = remote_path[len("file://"):]
        shutil.copy(remote_path, local_path)
        return 0
def get_remote_dir_to_local(remote_dir, local_dir):
    """Download the contents of a remote directory into a local directory.

    Supports HDFS, S3 (non-recursive: only keys directly under the prefix
    page returned by ``list_objects_v2``) and local sources.

    :param remote_dir: source directory URI.
    :param local_dir: local destination directory.
    :return: 0 on success, non-zero on failure (S3 errors are re-raised).
    """
    if remote_dir.startswith("hdfs"):  # hdfs://url:port/file_path
        cmd = 'hdfs dfs -get {} {}'.format(remote_dir, local_dir)
        process = subprocess.Popen(cmd, shell=True)
        return process.wait()
    elif remote_dir.startswith("s3"):  # s3://bucket/file_path
        access_key_id = os.environ["AWS_ACCESS_KEY_ID"]
        secret_access_key = os.environ["AWS_SECRET_ACCESS_KEY"]
        import boto3
        s3_client = boto3.Session(
            aws_access_key_id=access_key_id,
            aws_secret_access_key=secret_access_key).client('s3', verify=False)
        path_parts = remote_dir.split("://")[1].split('/')
        bucket = path_parts.pop(0)
        prefix = "/".join(path_parts)
        try:
            response = s3_client.list_objects_v2(Bucket=bucket, Prefix=prefix + "/")
            keys = [item['Key'] for item in response['Contents']]
            for key in keys:
                # BUGFIX: was os.path.basename(keys) — passing the whole
                # list raised a TypeError on every download.
                s3_client.download_file(
                    bucket, key,
                    os.path.join(local_dir, os.path.basename(key)))
        except Exception as e:
            logger.error(str(e))
            raise e
        return 0
    else:
        if remote_dir.startswith("file://"):
            remote_dir = remote_dir[len("file://"):]
        copy_tree(remote_dir, local_dir)
        return 0
def get_remote_files_with_prefix_to_local(remote_path_prefix, local_dir):
    """Download every remote file whose path starts with ``remote_path_prefix``
    into ``local_dir``.

    Supports HDFS and S3 sources only (no local/``file://`` branch: any
    other scheme falls through and returns None, preserving the original
    contract).

    :param remote_path_prefix: remote path prefix (hdfs:// or s3://).
    :param local_dir: local destination directory.
    :return: HDFS: the `hdfs dfs -get` exit code; S3: the local path prefix
             of the downloaded files (S3 errors are re-raised).
    """
    prefix = os.path.basename(remote_path_prefix)
    if remote_path_prefix.startswith("hdfs"):  # hdfs://url:port/file_path
        cmd = 'hdfs dfs -get -f {}* {}'.format(remote_path_prefix, local_dir)
        process = subprocess.Popen(cmd, shell=True)
        return process.wait()
    elif remote_path_prefix.startswith("s3"):  # s3://bucket/file_path
        access_key_id = os.environ["AWS_ACCESS_KEY_ID"]
        secret_access_key = os.environ["AWS_SECRET_ACCESS_KEY"]
        import boto3
        s3_client = boto3.Session(
            aws_access_key_id=access_key_id,
            aws_secret_access_key=secret_access_key).client('s3', verify=False)
        path_parts = remote_path_prefix.split("://")[1].split('/')
        bucket = path_parts.pop(0)
        key_prefix = "/".join(path_parts)
        try:
            response = s3_client.list_objects_v2(Bucket=bucket, Prefix=key_prefix)
            keys = [item['Key'] for item in response['Contents']]
            for key in keys:
                # BUGFIX: was os.path.basename(keys) — passing the whole
                # list raised a TypeError on every download.
                s3_client.download_file(
                    bucket, key,
                    os.path.join(local_dir, os.path.basename(key)))
        except Exception as e:
            logger.error(str(e))
            raise e
        return os.path.join(local_dir, prefix)
| 40.336414
| 98
| 0.592201
| 2,827
| 21,822
| 4.360453
| 0.087018
| 0.081772
| 0.049972
| 0.0318
| 0.775615
| 0.75217
| 0.739515
| 0.720613
| 0.718423
| 0.705362
| 0
| 0.013451
| 0.277747
| 21,822
| 540
| 99
| 40.411111
| 0.76867
| 0.110943
| 0
| 0.752273
| 0
| 0
| 0.080013
| 0.015335
| 0
| 0
| 0
| 0
| 0
| 1
| 0.031818
| false
| 0
| 0.077273
| 0
| 0.218182
| 0.006818
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0a1f6cfff5c5e0ad83f94a7b8a76237f412c6987
| 167
|
py
|
Python
|
odoo-13.0/odoo/addons/test_action_bindings/models.py
|
VaibhavBhujade/Blockchain-ERP-interoperability
|
b5190a037fb6615386f7cbad024d51b0abd4ba03
|
[
"MIT"
] | null | null | null |
odoo-13.0/odoo/addons/test_action_bindings/models.py
|
VaibhavBhujade/Blockchain-ERP-interoperability
|
b5190a037fb6615386f7cbad024d51b0abd4ba03
|
[
"MIT"
] | null | null | null |
odoo-13.0/odoo/addons/test_action_bindings/models.py
|
VaibhavBhujade/Blockchain-ERP-interoperability
|
b5190a037fb6615386f7cbad024d51b0abd4ba03
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from odoo import models
class A(models.Model):
    """Minimal test model registered as ``tab.a``."""
    _name = 'tab.a'
    _description = 'tab.a'
class B(models.Model):
    """Minimal test model registered as ``tab.b``."""
    _name = 'tab.b'
    _description = 'tab.b'
| 16.7
| 34
| 0.634731
| 23
| 167
| 4.434783
| 0.608696
| 0.215686
| 0.294118
| 0.509804
| 0.568627
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007519
| 0.203593
| 167
| 9
| 35
| 18.555556
| 0.759399
| 0.125749
| 0
| 0
| 0
| 0
| 0.069444
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 1.4
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
0a3d50db51d065daa03722a04f8c6ddea5b81304
| 47
|
py
|
Python
|
PRF/__init__.py
|
dcodrut/PRF
|
1741ec6fc923ba6ba0a9f62ff53117b0a7998827
|
[
"MIT"
] | 92
|
2018-11-16T04:37:29.000Z
|
2022-03-09T23:04:56.000Z
|
PRF/__init__.py
|
dcodrut/PRF
|
1741ec6fc923ba6ba0a9f62ff53117b0a7998827
|
[
"MIT"
] | 3
|
2019-03-05T08:02:45.000Z
|
2021-09-15T09:16:50.000Z
|
PRF/__init__.py
|
dcodrut/PRF
|
1741ec6fc923ba6ba0a9f62ff53117b0a7998827
|
[
"MIT"
] | 14
|
2018-11-20T11:11:06.000Z
|
2022-03-27T07:54:09.000Z
|
from .PRF import RandomForestClassifier as prf
| 23.5
| 46
| 0.851064
| 6
| 47
| 6.666667
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12766
| 47
| 1
| 47
| 47
| 0.97561
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
6a5135ca4293df9ae4cc44634ce627e563dffc43
| 21
|
py
|
Python
|
build/lib/simple_connect/__init__.py
|
saeed-abdul-rahim/simple_connect
|
c1d6945f5116825d9635915793f5ebbb488ef413
|
[
"MIT"
] | 1
|
2022-01-31T14:06:44.000Z
|
2022-01-31T14:06:44.000Z
|
simple_connect/__init__.py
|
saeed-abdul-rahim/simple_connect
|
c1d6945f5116825d9635915793f5ebbb488ef413
|
[
"MIT"
] | null | null | null |
simple_connect/__init__.py
|
saeed-abdul-rahim/simple_connect
|
c1d6945f5116825d9635915793f5ebbb488ef413
|
[
"MIT"
] | null | null | null |
from . import connect
| 21
| 21
| 0.809524
| 3
| 21
| 5.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 21
| 1
| 21
| 21
| 0.944444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0a85271266410c37a70ab0d4b2afd3af852410a4
| 44,481
|
py
|
Python
|
src/amuse/test/suite/codes_tests/test_multiples.py
|
rknop/amuse
|
85d5bdcc29cfc87dc69d91c264101fafd6658aec
|
[
"Apache-2.0"
] | 131
|
2015-06-04T09:06:57.000Z
|
2022-02-01T12:11:29.000Z
|
src/amuse/test/suite/codes_tests/test_multiples.py
|
rknop/amuse
|
85d5bdcc29cfc87dc69d91c264101fafd6658aec
|
[
"Apache-2.0"
] | 690
|
2015-10-17T12:18:08.000Z
|
2022-03-31T16:15:58.000Z
|
src/amuse/test/suite/codes_tests/test_multiples.py
|
rieder/amuse
|
3ac3b6b8f922643657279ddee5c8ab3fc0440d5e
|
[
"Apache-2.0"
] | 102
|
2015-01-22T10:00:29.000Z
|
2022-02-09T13:29:43.000Z
|
# nosetests --nocapture --nologcapture -w test/codes_tests --tests=test_multiples
from amuse.test.amusetest import TestWithMPI
import os
import sys
import numpy
import time
import math
from amuse.community.hermite.interface import Hermite
from amuse.community.kepler.interface import Kepler
from amuse.community.smalln.interface import SmallN
from amuse.units import nbody_system
from amuse.units import units
from amuse.units import constants
from amuse import datamodel
from amuse.ic import plummer
from amuse.couple import multiples
from amuse.couple import encounters
from amuse import io
class TestSimpleMultiples(TestWithMPI):
previous = None
def new_smalln(self):
    """Return a fresh SmallN integrator, stopping the previously created one."""
    if self.previous is not None:
        self.previous.stop()
    smalln = SmallN()
    smalln.parameters.timestep_parameter = 0.1
    smalln.parameters.cm_index = 2001
    # Remember the instance so the next call can stop it cleanly.
    self.previous = smalln
    return smalln
def new_kepler_si(self):
    """Return an initialized Kepler solver using an MSun/AU unit converter."""
    converter = nbody_system.nbody_to_si(
        1.0 | units.MSun,
        1.0 | units.AU
    )
    solver = Kepler(converter)
    solver.initialize_code()
    return solver
def new_kepler(self):
    """Return an initialized Kepler solver in n-body units."""
    solver = Kepler()
    solver.initialize_code()
    return solver
def new_smalln_si(self):
    """Return a fresh SI-units SmallN integrator, stopping the previous one.

    Bug fix: the original checked and stopped ``self.previous`` but never
    recorded the new instance (unlike ``new_smalln``), so the stop-previous
    guard was dead code and old workers were never shut down.
    """
    if not self.previous is None:
        self.previous.stop()
    converter = nbody_system.nbody_to_si(units.MSun, units.parsec)
    result = SmallN(converter)
    result.parameters.timestep_parameter = 0.1
    result.parameters.cm_index = 2001
    # Record the instance so the next call can stop it (mirrors new_smalln).
    self.previous = result
    return result
def new_binary(self, mass1, mass2, semi_major_axis,
               eccentricity = 0, keyoffset = -1):
    """Build a two-particle binary at perihelion, centred on the barycentre.

    :param mass1: mass of the first component.
    :param mass2: mass of the second component.
    :param semi_major_axis: orbital semi-major axis.
    :param eccentricity: orbital eccentricity (default circular).
    :param keyoffset: when >= 0, assign particle keys keyoffset, keyoffset+1.
    """
    total_mass = mass1 + mass2
    fraction1 = mass1 / total_mass
    if keyoffset >= 0:
        pair = datamodel.Particles(keys=list(range(keyoffset, keyoffset + 2)))
    else:
        pair = datamodel.Particles(2)
    pair[0].mass = mass1
    pair[1].mass = mass2
    mu = nbody_system.G * total_mass
    # Vis-viva speed and separation at perihelion.
    v_peri = numpy.sqrt(mu / semi_major_axis * ((1.0 + eccentricity) / (1.0 - eccentricity)))
    r_peri = semi_major_axis * (1.0 - eccentricity)
    # Place components on the x-axis, moving along y, about the barycentre.
    pair[0].position = (1.0 - fraction1) * r_peri * [1.0, 0.0, 0.0]
    pair[1].position = -(fraction1 * r_peri * [1.0, 0.0, 0.0])
    pair[0].velocity = (1.0 - fraction1) * v_peri * [0.0, 1.0, 0.0]
    pair[1].velocity = -(fraction1 * v_peri * [0.0, 1.0, 0.0])
    return pair
def create_binaries(self, center_of_mass_particles, mass1, mass2, semi_major_axis,
                    eccentricity = 0):
    """Attach a two-body system to each centre-of-mass particle.

    Returns the (updated) centre-of-mass set and a flat set containing all
    component particles.

    NOTE(review): ``eccentricity`` is accepted but not forwarded to
    ``new_binary`` (its default of 0 is always used) — preserved as-is.
    """
    singles_in_binaries = datamodel.Particles()
    for com in center_of_mass_particles:
        pair = self.new_binary(
            mass1,
            mass2,
            semi_major_axis
        )
        pair.radius = semi_major_axis
        com.child1 = pair[0]
        com.child2 = pair[1]
        com.mass = mass1 + mass2
        # Shift the pair from barycentric coordinates onto its host.
        pair.position += com.position
        pair.velocity += com.velocity
        singles_in_binaries.add_particles(pair)
    return center_of_mass_particles, singles_in_binaries
def test0(self):
    """Two close stars form exactly one multiple and one binary."""
    code = Hermite()
    stars = datamodel.Particles(2)
    stars.mass = 1 | nbody_system.mass
    stars.position = [
        [0.0, 0,0],
        [1.2, 0, 0]
    ]|nbody_system.length
    stars.velocity = [
        [0.0,0,0],
        [0,0.1, 0]
    ]|nbody_system.speed
    stars.radius = 0.5 | nbody_system.length
    encounter_code = encounters.HandleEncounter(
        kepler_code = self.new_kepler(),
        resolve_collision_code = self.new_smalln(),
        interaction_over_code = None
    )
    encounter_code.parameters.hard_binary_factor = 1
    encounter_code.small_scale_factor = 1
    multiples_code = encounters.Multiples(
        gravity_code = code,
        handle_encounter_code = encounter_code
    )
    multiples_code.particles.add_particles(stars)
    multiples_code.commit_particles()
    multiples_code.evolve_model(0.6|nbody_system.time)
    self.assertEqual(len(multiples_code.multiples), 1)
    self.assertEqual(len(multiples_code.binaries), 1)
def test1(self):
    """Four stars: a multiple forms, persists, and singles escape over time."""
    code = Hermite()
    stars = datamodel.Particles(keys = (1,2,3, 4))
    stars.mass = 1 | nbody_system.mass
    stars.position = [
        [0.0,0,0],
        [0.5, 0, 0],
        [2.0, 0, 0],
        [-10.0, 0, 0],
    ]|nbody_system.length
    stars.velocity = [
        [0.0,0,0],
        [0,0.1, 0],
        [0,-0.1, 0],
        [0,0.2, 0],
    ]|nbody_system.speed
    stars.radius = 0.5 | nbody_system.length
    encounter_code = encounters.HandleEncounter(
        kepler_code = self.new_kepler(),
        resolve_collision_code = self.new_smalln(),
        interaction_over_code = None
    )
    multiples_code = encounters.Multiples(
        gravity_code = code,
        handle_encounter_code = encounter_code
    )
    multiples_code.particles.add_particles(stars)
    multiples_code.commit_particles()
    multiples_code.evolve_model(0.6|nbody_system.time)
    self.assertEqual(len(multiples_code.multiples), 1)
    self.assertEqual(len(multiples_code.binaries), 1)
    # All original singles keep their radius; the multiple gets a new one.
    self.assertAlmostRelativeEquals(multiples_code.particles[:-1].radius, 0.5 | nbody_system.length)
    self.assertAlmostRelativeEquals(multiples_code.particles[-1].radius, 0.4446| nbody_system.length, 3)
    multiples_code.evolve_model(2|nbody_system.time)
    self.assertEqual(len(multiples_code.multiples), 1)
    self.assertEqual(len(multiples_code.binaries), 1)
    multiples_code.evolve_model(3|nbody_system.time)
    self.assertEqual(len(multiples_code.multiples), 1)
    self.assertEqual(len(multiples_code.particles), 2)
    self.assertEqual(len(multiples_code.binaries), 1)
def test2(self):
    """Four stars evolve to one 2-component multiple plus two free singles."""
    code = Hermite()
    stars = datamodel.Particles(keys = (1,2,3, 4))
    stars.mass = 1 | nbody_system.mass
    stars.position = [
        [0.0,0,0],
        [0.5, 0, 0],
        [3, 0, 0],
        [-10, 0, 0],
    ]|nbody_system.length
    stars.velocity = [
        [0.0,0,0],
        [0,0.1, 0],
        [0.0,-0.5, 0],
        [0,0.2, 0],
    ]|nbody_system.speed
    stars.radius = 0.5 | nbody_system.length
    encounter_code = encounters.HandleEncounter(
        kepler_code = self.new_kepler(),
        resolve_collision_code = self.new_smalln(),
        interaction_over_code = None
    )
    multiples_code = encounters.Multiples(
        gravity_code = code,
        handle_encounter_code = encounter_code
    )
    multiples_code.particles.add_particles(stars)
    multiples_code.commit_particles()
    multiples_code.evolve_model(3|nbody_system.time)
    self.assertEqual(len(multiples_code.multiples), 1)
    print(multiples_code.multiples[0].components)
    self.assertEqual(len(multiples_code.multiples[0].components), 2)
    self.assertEqual(len(multiples_code.particles), 3)
    self.assertEqual(len(multiples_code.binaries), 1)
    self.assertEqual(len(multiples_code.singles), 2)
def test3(self):
    """A pre-built binary committed to Multiples yields one multiple."""
    code = Hermite()
    particles_in_binary = self.new_binary(
        0.1 | nbody_system.mass,
        0.1 | nbody_system.mass,
        0.01 | nbody_system.length,
        keyoffset = 1
    )
    particles_in_binary.radius = 0.001 | nbody_system.length
    binary = datamodel.Particle(key = 3)
    binary.child1 = particles_in_binary[0]
    binary.child2 = particles_in_binary[1]
    binary.radius = 0.5 | nbody_system.length
    binary.mass = 0.2 | nbody_system.mass
    encounter_code = encounters.HandleEncounter(
        kepler_code = self.new_kepler(),
        resolve_collision_code = self.new_smalln(),
        interaction_over_code = None
    )
    multiples_code = encounters.Multiples(
        gravity_code = code,
        handle_encounter_code = encounter_code
    )
    multiples_code.singles_in_binaries.add_particles(particles_in_binary)
    multiples_code.binaries.add_particle(binary)
    self.assertEqual(len(multiples_code.singles_in_binaries), 2)
    # child1 must resolve into the singles_in_binaries set itself.
    self.assertEqual(id(multiples_code.binaries[0].child1.particles_set), id(multiples_code.singles_in_binaries))
    multiples_code.commit_particles()
    self.assertEqual(len(multiples_code.multiples), 1)
    self.assertEqual(len(multiples_code.components_of_multiples), 2)
def test4(self):
    """multiples_change_detection fires on formation and dissolution."""
    code = Hermite()
    stars = datamodel.Particles(keys = (1,2,3, 4))
    stars.mass = 1 | nbody_system.mass
    stars.position = [
        [0.0,0,0],
        [0.5, 0, 0],
        [2, 0, 0],
        [-10, 0, 0],
    ]|nbody_system.length
    stars.velocity = [
        [0,0,0],
        [0,0.2, 0],
        [0,-0.2, 0],
        [0,0.3, 0],
    ]|nbody_system.speed
    stars.radius = 0.5 | nbody_system.length
    encounter_code = encounters.HandleEncounter(
        kepler_code = self.new_kepler(),
        resolve_collision_code = self.new_smalln(),
        interaction_over_code = None
    )
    multiples_code = encounters.Multiples(
        gravity_code = code,
        handle_encounter_code = encounter_code
    )
    multiples_code.particles.add_particles(stars)
    multiples_code.commit_particles()
    stopping_condition = multiples_code.stopping_conditions.multiples_change_detection
    stopping_condition.enable()
    multiples_code.evolve_model(3|nbody_system.time)
    self.assertTrue(stopping_condition.is_set())
    self.assertAlmostRelativeEquals(multiples_code.model_time , 0.0075 | nbody_system.time, 4)
    self.assertEqual(len(stopping_condition.particles(0)), 1)
    self.assertEqual(len(stopping_condition.particles(1)), 0)
    self.assertEqual(len(multiples_code.multiples), 1)
    self.assertEqual(len(multiples_code.multiples[0].components), 2)
    self.assertEqual(len(multiples_code.particles), 3) # 1 multiples with 2 singles, plus 2 singles free
    self.assertEqual(len(multiples_code.binaries), 1)
    self.assertEqual(len(multiples_code.singles), 2)
    # Resume: the condition triggers again when the multiple changes.
    multiples_code.evolve_model(3|nbody_system.time)
    self.assertTrue(stopping_condition.is_set())
    self.assertAlmostRelativeEquals(multiples_code.model_time , 1.2195 | nbody_system.time, 4)
    self.assertEqual(len(stopping_condition.particles(0)), 1) # 1 new multiple
    self.assertEqual(len(stopping_condition.particles(1)), 1) # 1 dissolved multiple
    self.assertEqual(len(multiples_code.multiples[0].components), 3)
    self.assertEqual(len(multiples_code.particles), 2) # 1 multiple, plus 1 single free
    self.assertEqual(len(multiples_code.binaries), 1)
    self.assertEqual(len(multiples_code.singles), 1)
def test5(self):
    """Same as test0 but in SI units via an MSun/parsec converter."""
    converter = nbody_system.nbody_to_si(units.MSun, units.parsec)
    code = Hermite(converter)
    stars = datamodel.Particles(keys=(1,2))
    stars.mass = converter.to_si(1 | nbody_system.mass)
    stars.position = converter.to_si([
        [0,0,0],
        [1.2, 0, 0]
    ]|nbody_system.length)
    stars.velocity = converter.to_si([
        [0,0,0],
        [0,0.1, 0]
    ]|nbody_system.speed)
    stars.radius = converter.to_si(0.5 | nbody_system.length)
    encounter_code = encounters.HandleEncounter(
        kepler_code = self.new_kepler_si(),
        resolve_collision_code = self.new_smalln_si(),
        interaction_over_code = None,
        G = constants.G
    )
    encounter_code.parameters.hard_binary_factor = 1
    multiples_code = encounters.Multiples(
        gravity_code = code,
        handle_encounter_code = encounter_code,
        G = constants.G
    )
    end_time = converter.to_si(1.0|nbody_system.time)
    multiples_code.particles.add_particles(stars)
    multiples_code.commit_particles()
    multiples_code.evolve_model(end_time)
    self.assertEqual(len(multiples_code.particles),1) # 1 multiples with 2 singles
    self.assertEqual(len(multiples_code.multiples), 1)
    self.assertEqual(len(multiples_code.multiples[0].components), 2)
    self.assertEqual(len(multiples_code.binaries), 1)
    self.assertEqual(len(multiples_code.singles), 0)
def test6(self):
    """Two well-separated pairs form two multiples simultaneously (SI units)."""
    converter = nbody_system.nbody_to_si(units.MSun, units.parsec)
    code = Hermite(converter)
    stars = datamodel.Particles(keys=(1,2,3,4))
    stars.mass = converter.to_si(1 | nbody_system.mass)
    stars.position = converter.to_si([
        [0,0,0],
        [1.2, 0, 0],
        [100, 0, 0],
        [100, 1.2, 0]
    ]|nbody_system.length)
    stars.velocity = converter.to_si([
        [0,0,0],
        [0,0.1, 0],
        [0,0,0],
        [0,0,0.1],
    ]|nbody_system.speed)
    stars.radius = converter.to_si(0.5 | nbody_system.length)
    encounter_code = encounters.HandleEncounter(
        kepler_code = self.new_kepler_si(),
        resolve_collision_code = self.new_smalln_si(),
        interaction_over_code = None,
        G = constants.G
    )
    encounter_code.small_scale_factor = 1.0
    multiples_code = encounters.Multiples(
        gravity_code = code,
        handle_encounter_code = encounter_code,
        G = constants.G
    )
    # Allow both encounters to be handled in one stopping-condition pass.
    multiples_code.must_handle_one_encounter_per_stopping_condition = False
    multiples_code.particles.add_particles(stars)
    multiples_code.commit_particles()
    stopping_condition = multiples_code.stopping_conditions.multiples_change_detection
    stopping_condition.enable()
    end_time = converter.to_si(3.0|nbody_system.time)
    print(end_time.as_quantity_in(units.Myr))
    multiples_code.evolve_model(end_time)
    self.assertTrue(stopping_condition.is_set())
    print(multiples_code.model_time.as_quantity_in(units.Myr))
    self.assertAlmostRelativeEquals(multiples_code.model_time , 7.99844 | units.Myr, 4)
    self.assertEqual(len(stopping_condition.particles(0)), 2)
    self.assertEqual(len(stopping_condition.particles(1)), 0)
    self.assertEqual(len(multiples_code.particles), 2) # 1 multiples with 2 singles
    self.assertEqual(len(multiples_code.multiples), 2)
    self.assertEqual(len(multiples_code.binaries), 2)
    self.assertEqual(len(multiples_code.multiples[0].components), 2)
    self.assertEqual(len(multiples_code.multiples[1].components), 2)
    self.assertEqual(len(multiples_code.singles), 0)
    self.assertEqual(len(multiples_code.all_singles), 4)
def test7(self):
    """An encounter conserves centre of mass, momentum and total energy."""
    converter = nbody_system.nbody_to_si(units.MSun, units.parsec)
    code = Hermite(converter)
    stars = datamodel.Particles(keys=(1,2))
    stars.mass = converter.to_si(1 | nbody_system.mass)
    stars.position = converter.to_si([
        [0,0,0],
        [1.1, 0, 0],
    ]|nbody_system.length)
    stars.velocity = converter.to_si([
        [0,0,0],
        [-0.5,1.5, 0],
    ]|nbody_system.speed)
    stars.radius = converter.to_si(0.55 | nbody_system.length)
    encounter_code = encounters.HandleEncounter(
        kepler_code = self.new_kepler_si(),
        resolve_collision_code = self.new_smalln_si(),
        interaction_over_code = None,
        G = constants.G
    )
    encounter_code.small_scale_factor = 1.0
    encounter_code.parameters.hard_binary_factor = 1
    multiples_code = encounters.Multiples(
        gravity_code = code,
        handle_encounter_code = encounter_code,
        G = constants.G
    )
    multiples_code.must_handle_one_encounter_per_stopping_condition = False
    multiples_code.singles.add_particles(stars)
    multiples_code.commit_particles()
    stopping_condition = multiples_code.stopping_conditions.encounter_detection
    stopping_condition.enable()
    end_time = converter.to_si(3.0|nbody_system.time)
    print(end_time.as_quantity_in(units.Myr))
    multiples_code.evolve_model(end_time)
    self.assertTrue(stopping_condition.is_set())
    print(multiples_code.model_time.as_quantity_in(units.Myr))
    #self.assertAlmostRelativeEquals(multiples_code.model_time , 5.96955 | units.Myr, 4)
    self.assertEqual(len(stopping_condition.particles(0)), 1)
    model = stopping_condition.particles(0)[0]
    self.assertEqual(len(model.particles_before_encounter), 2)
    self.assertEqual(len(model.particles_after_encounter), 2)
    before = model.particles_before_encounter
    after = model.particles_after_encounter
    # Conservation checks across the encounter.
    self.assertAlmostRelativeEquals(before.center_of_mass(), after.center_of_mass(), 7)
    self.assertAlmostRelativeEquals(before.center_of_mass_velocity(), after.center_of_mass_velocity(), 7)
    total_energy_before = before.kinetic_energy() + before.potential_energy(G=constants.G)
    total_energy_after = after.kinetic_energy() + after.potential_energy(G=constants.G)
    self.assertAlmostRelativeEquals(total_energy_before, total_energy_after, 7)
def test8(self):
    """Total energy is conserved through a binary/field-star encounter."""
    code = Hermite()
    particles_in_binary = self.new_binary(
        0.1 | nbody_system.mass,
        0.1 | nbody_system.mass,
        0.01 | nbody_system.length,
        keyoffset = 1
    )
    particles_in_binary.radius = 0.001 | nbody_system.length
    binary = datamodel.Particle(key = 3)
    binary.child1 = particles_in_binary[0]
    binary.child2 = particles_in_binary[1]
    binary.radius = 0.5 | nbody_system.length
    binary.mass = 0.2 | nbody_system.mass
    binary.position = [0.0,0.0,0.0] | nbody_system.length
    binary.velocity = [0.0,0.0,0.0] | nbody_system.speed
    encounter_code = encounters.HandleEncounter(
        kepler_code = self.new_kepler(),
        resolve_collision_code = self.new_smalln(),
        interaction_over_code = None
    )
    encounter_code.parameters.hard_binary_factor = 1
    encounter_code.small_scale_factor = 1
    multiples_code = encounters.Multiples(
        gravity_code = code,
        handle_encounter_code = encounter_code
    )
    multiples_code.singles_in_binaries.add_particles(particles_in_binary)
    multiples_code.binaries.add_particle(binary)
    multiples_code.must_handle_one_encounter_per_stopping_condition = False
    # A heavier field star placed near the binary to force an encounter.
    field_particle = datamodel.Particle(key = 4)
    field_particle.mass = 0.5 | nbody_system.mass
    field_particle.radius = 0.1 | nbody_system.length
    field_particle.position = [0.0,0.2,0.0]| nbody_system.length
    field_particle.velocity = [0.0,0.0,0.0] | nbody_system.speed
    multiples_code.singles.add_particle(field_particle)
    self.assertEqual(len(multiples_code.singles_in_binaries), 2)
    self.assertEqual(id(multiples_code.binaries[0].child1.particles_set), id(multiples_code.singles_in_binaries))
    multiples_code.commit_particles()
    multiples_code.multiples.radius = 0.5 | nbody_system.length
    initial_energy = multiples_code.get_total_energy()
    self.assertEqual(len(multiples_code.multiples), 1)
    self.assertEqual(len(multiples_code.components_of_multiples), 2)
    self.assertEqual(len(multiples_code.particles), 2)
    stopping_condition = multiples_code.stopping_conditions.encounter_detection
    stopping_condition.enable()
    # Energy of the resolved singles must match the Multiples bookkeeping.
    singles = datamodel.Particles()
    singles.add_particles(particles_in_binary)
    singles.add_particle(field_particle)
    singles_energy = singles.kinetic_energy() + singles.potential_energy(G=nbody_system.G)
    self.assertAlmostRelativeEquals(initial_energy, singles_energy, 3)
    multiples_code.evolve_model(2 |nbody_system.time)
    final_energy = multiples_code.get_total_energy()
    self.assertTrue(stopping_condition.is_set())
    self.assertAlmostRelativeEquals(initial_energy, final_energy, 7)
def test9(self):
    """update_model propagates component mass changes to the gravity code."""
    code = Hermite()
    particles_in_binary = self.new_binary(
        0.1 | nbody_system.mass,
        0.1 | nbody_system.mass,
        0.01 | nbody_system.length,
        keyoffset = 1
    )
    particles_in_binary.radius = 0.001 | nbody_system.length
    binary = datamodel.Particle(key = 3)
    binary.child1 = particles_in_binary[0]
    binary.child2 = particles_in_binary[1]
    binary.radius = 0.5 | nbody_system.length
    binary.mass = 0.2 | nbody_system.mass
    encounter_code = encounters.HandleEncounter(
        kepler_code = self.new_kepler(),
        resolve_collision_code = self.new_smalln(),
    )
    # NOTE(review): sibling tests use keys=(...); 'key=' here may be a typo
    # — confirm against the datamodel.Particles signature.
    others = datamodel.Particles(key = [4,5,6])
    for i in range(3):
        others[i].position = [i, 0, 0] | nbody_system.length
        others[i].velocity = [0, 0, i] | nbody_system.speed
        others[i].mass = 1 | nbody_system.mass
        others[i].radius = 0 | nbody_system.length
    multiples_code = encounters.Multiples(
        gravity_code = code,
        handle_encounter_code = encounter_code
    )
    multiples_code.singles_in_binaries.add_particles(particles_in_binary)
    multiples_code.binaries.add_particle(binary)
    multiples_code.singles.add_particles(others)
    multiples_code.commit_particles()
    self.assertEqual(len(multiples_code.multiples), 1)
    self.assertEqual(len(multiples_code.components_of_multiples), 2)
    self.assertEqual(len(multiples_code.singles), 3)
    self.assertEqual(len(multiples_code.particles), 4)
    self.assertEqual(len(code.particles), 4)
    self.assertAlmostRelativeEquals(multiples_code.particles[-1].mass,0.2 | nbody_system.mass)
    self.assertAlmostRelativeEquals(code.particles[-1].mass,0.2 | nbody_system.mass)
    self.assertAlmostRelativeEquals(code.particles[-1].position, [0,0,0] | nbody_system.length, 6)
    self.assertAlmostRelativeEquals(code.particles[-1].velocity, [0,0, 0] | nbody_system.speed, 6)
    # A no-op update must leave the mirrored state unchanged.
    multiples_code.update_model()
    self.assertAlmostRelativeEquals(multiples_code.particles[-1].mass, 0.2 | nbody_system.mass)
    self.assertAlmostRelativeEquals(code.particles[-1].mass, 0.2 | nbody_system.mass)
    self.assertAlmostRelativeEquals(code.particles[-1].position, [0,0,0] | nbody_system.length, 6)
    self.assertAlmostRelativeEquals(code.particles[-1].velocity, [0,0, 0] | nbody_system.speed, 6)
    # Changing one component's mass must shift the multiple's mass,
    # barycentre and barycentric velocity in the gravity code.
    multiples_code.singles_in_binaries[0].mass = 0.2 | nbody_system.mass
    multiples_code.update_model()
    print(code.particles.mass)
    self.assertAlmostRelativeEquals(multiples_code.particles[-1].mass, 0.3 | nbody_system.mass)
    self.assertAlmostRelativeEquals(code.particles[-1].mass, 0.3 | nbody_system.mass)
    print(code.particles[-1].position)
    print(code.particles[-1].velocity)
    self.assertAlmostRelativeEquals(code.particles[-1].position, [0.00166666666667,0,0] | nbody_system.length, 6)
    self.assertAlmostRelativeEquals(code.particles[-1].velocity, [0, 0.7453559925, 0] | nbody_system.speed, 6)
def test10(self):
code = Hermite()
particles_in_binary = self.new_binary(
0.1 | nbody_system.mass,
0.1 | nbody_system.mass,
0.01 | nbody_system.length,
keyoffset = 1
)
particles_in_binary.radius = 0.001 | nbody_system.length
encounter_code = encounters.HandleEncounter(
kepler_code = self.new_kepler(),
resolve_collision_code = self.new_smalln(),
)
encounter_code.parameters.hard_binary_factor = 1
encounter_code.small_scale_factor = 1
others = datamodel.Particles(key = [4,5,6])
for i in range(3):
others[i].position = [i, 0, 0] | nbody_system.length
others[i].velocity = [0, 0, i] | nbody_system.speed
others[i].mass = 1 | nbody_system.mass
others[i].radius = 0.05 | nbody_system.length
multiples_code = encounters.Multiples(
gravity_code = code,
handle_encounter_code = encounter_code
)
multiples_code.must_handle_one_encounter_per_stopping_condition = False
multiples_code.singles.add_particles(particles_in_binary)
multiples_code.singles.add_particles(others)
multiples_code.commit_particles()
multiples_code.evolve_model(1 | nbody_system.time)
self.assertEqual(len(multiples_code.multiples), 1)
self.assertEqual(len(multiples_code.components_of_multiples), 2)
self.assertEqual(len(multiples_code.singles), 3)
self.assertEqual(len(multiples_code.particles), 4)
self.assertEqual(len(code.particles), 4)
self.assertEqual(id(multiples_code.singles_in_binaries), id(multiples_code.binaries[0].child1.particles_set))
self.assertEqual(id(multiples_code.components_of_multiples), id(multiples_code.multiples[0].components[0].particles_set))
#multiples_code.singles_in_binaries[0].mass = 0.2 | nbody_system.mass
print(multiples_code.particles.mass)
self.assertAlmostRelativeEquals(multiples_code.particles[-1].mass, 1.1 | nbody_system.mass)
self.assertAlmostRelativeEquals(multiples_code.particles.mass.sum(), 0.1 + 0.1 + 3.0 | nbody_system.mass)
multiples_code.update_model()
self.assertAlmostRelativeEquals(multiples_code.particles[-1].mass, 1.1 | nbody_system.mass)
index = -1
if not code.particles[index].mass > 1.0| nbody_system.mass:
index = -2
self.assertAlmostRelativeEquals(code.particles[index].mass, 1.1 | nbody_system.mass)
multiples_code.singles_in_binaries[0].mass += 0.2 | nbody_system.mass
multiples_code.update_model()
self.assertAlmostRelativeEquals(multiples_code.particles[-1].mass, 1.3 | nbody_system.mass)
self.assertAlmostRelativeEquals(code.particles[index].mass, 1.3 | nbody_system.mass)
    def test11(self):
        """Binary formation is reported via the binaries_change_detection
        stopping condition: first as a new binary, on a later evolve as an
        updated one, while total mass stays conserved."""
        code = Hermite()
        # Tight equal-mass binary (1.0 + 1.0 mass, 0.001 separation).
        particles_in_binary = self.new_binary(
            1.0 | nbody_system.mass,
            1.0 | nbody_system.mass,
            0.001 | nbody_system.length,
            keyoffset = 1
        )
        particles_in_binary.radius = 0.01 | nbody_system.length
        encounter_code = encounters.HandleEncounter(
            kepler_code = self.new_kepler(),
            resolve_collision_code = self.new_smalln(),
        )
        # Three light field particles on the x-axis, initially at rest.
        others = datamodel.Particles(keys = [4,5,6])
        for i in range(3):
            others[i].position = [i, 0, 0] | nbody_system.length
            others[i].velocity = [0, 0, 0] | nbody_system.speed
            others[i].mass = 0.2 | nbody_system.mass
            others[i].radius = 0.05 | nbody_system.length
        multiples_code = encounters.Multiples(
            gravity_code = code,
            handle_encounter_code = encounter_code
        )
        multiples_code.singles.add_particles(particles_in_binary)
        multiples_code.singles.add_particles(others)
        stopping_condition = multiples_code.stopping_conditions.binaries_change_detection
        stopping_condition.enable()
        multiples_code.commit_particles()
        multiples_code.evolve_model(1 | nbody_system.time)
        # The pair was detected and became one multiple; the three field
        # particles remain singles (3 + 1 = 4 top-level particles).
        self.assertEqual(len(multiples_code.multiples), 1)
        self.assertEqual(len(multiples_code.binaries), 1)
        self.assertEqual(len(multiples_code.components_of_multiples), 2)
        self.assertEqual(len(multiples_code.singles), 3)
        self.assertEqual(len(multiples_code.particles), 4)
        self.assertEqual(len(code.particles), 4)
        self.assertTrue(stopping_condition.is_set())
        # Zero the multiple's velocity before pushing state back — presumably
        # to keep it in place for the second evolve (TODO confirm intent).
        multiples_code.particles[-1].velocity = [0, 0, 0] | nbody_system.speed
        multiples_code.update_model()
        print(multiples_code.particles.key)
        # particles(0)=new, particles(1)=removed, particles(2)=updated
        # binaries (labels match the prints in test13/test14/test15).
        self.assertEqual(len(stopping_condition.particles(0)), 1)
        self.assertEqual(len(stopping_condition.particles(1)), 0)
        self.assertEqual(len(stopping_condition.particles(2)), 0)
        self.assertAlmostRelativeEquals(multiples_code.multiples[0].mass, 2.0 | nbody_system.mass)
        self.assertAlmostRelativeEquals(multiples_code.particles.mass.sum(), 2.6 | nbody_system.mass)
        print(multiples_code.particles.velocity)
        multiples_code.evolve_model(2 | nbody_system.time)
        # Second evolve: the same binary is now reported as updated, not new;
        # masses are unchanged.
        self.assertTrue(stopping_condition.is_set())
        self.assertEqual(len(stopping_condition.particles(0)), 0)
        self.assertEqual(len(stopping_condition.particles(1)), 0)
        self.assertEqual(len(stopping_condition.particles(2)), 1)
        self.assertAlmostRelativeEquals(multiples_code.multiples[0].mass, 2.0 | nbody_system.mass)
        self.assertAlmostRelativeEquals(multiples_code.particles.mass.sum(), 2.6 | nbody_system.mass)
    def test12(self):
        """Committing a pre-made binary (supplied through singles_in_binaries
        plus a center-of-mass particle in binaries) yields a top-level
        particle at the binary's position."""
        code = Hermite()
        particles_in_binary = self.new_binary(
            1.0 | nbody_system.mass,
            1.0 | nbody_system.mass,
            0.001 | nbody_system.length,
            keyoffset = 10
        )
        particles_in_binary.radius = 0.01 | nbody_system.length
        encounter_code = encounters.HandleEncounter(
            kepler_code = self.new_kepler(),
            resolve_collision_code = self.new_smalln(),
        )
        # Center-of-mass particle referencing both components as children.
        binary = datamodel.Particle(key=20)
        binary.child1 = particles_in_binary[0]
        binary.child2 = particles_in_binary[1]
        binary.position = [1,0,1] | nbody_system.length
        # Shift the components by the same offset so they stay consistent
        # with the center-of-mass position.
        particles_in_binary.position += [1,0,1] | nbody_system.length
        # Three light field particles, widely separated (10 units apart).
        others = datamodel.Particles(keys = [4,5,6])
        for i in range(3):
            others[i].position = [i*10, 0, 0] | nbody_system.length
            others[i].velocity = [0, 0, 0] | nbody_system.speed
            others[i].mass = 0.2 | nbody_system.mass
            others[i].radius = 0.05 | nbody_system.length
        multiples_code = encounters.Multiples(
            gravity_code = code,
            handle_encounter_code = encounter_code
        )
        multiples_code.particles.add_particles(others)
        multiples_code.singles_in_binaries.add_particles(particles_in_binary)
        multiples_code.binaries.add_particle(binary)
        multiples_code.commit_particles()
        print(multiples_code.particles)
        # 3 singles + 1 center-of-mass particle; the latter sits where the
        # binary was placed.
        self.assertEqual(len(multiples_code.particles), 4)
        self.assertAlmostRelativeEquals(multiples_code.particles[-1].position, [1,0,1] | nbody_system.length)
    def test13(self):
        """Evolve five unequal-mass binaries (1 vs 0.01 mass) and check the
        single/binary bookkeeping stays balanced after encounters."""
        code = Hermite()
        encounter_code = encounters.HandleEncounter(
            kepler_code = self.new_kepler(),
            resolve_collision_code = self.new_smalln(),
        )
        # Five centers of mass spaced 1 length unit apart on the x-axis,
        # all at rest.
        center_of_mass_particles = datamodel.Particles(5)
        center_of_mass_particles.position = (numpy.asarray(range(5))).reshape(5,1) * ([1.0, 0.0, 0.0] | nbody_system.length)
        center_of_mass_particles.velocity = [0.0, 0.0, 0.0] | nbody_system.speed
        center_of_mass_particles.radius = 0.05 | nbody_system.length
        # create_binaries is a helper defined elsewhere in this class;
        # presumably (primary mass, secondary mass, separation) per center —
        # verify against its definition.
        binaries, singles_in_binaries = self.create_binaries(
            center_of_mass_particles,
            1 | nbody_system.mass,
            0.01 | nbody_system.mass,
            0.0001 | nbody_system.length
        )
        multiples_code = encounters.Multiples(
            gravity_code = code,
            handle_encounter_code = encounter_code
        )
        multiples_code.singles_in_binaries.add_particles(singles_in_binaries)
        multiples_code.binaries.add_particles(binaries)
        multiples_code.commit_particles()
        #stopping_condition = multiples_code.stopping_conditions.encounter_detection
        #stopping_condition.enable()
        stopping_condition = multiples_code.stopping_conditions.binaries_change_detection
        stopping_condition.enable()
        for x in multiples_code.binaries:
            print(x.key, x.child1.key, x.child2.key)
        multiples_code.evolve_model(1 | nbody_system.time)
        self.assertTrue(stopping_condition.is_set())
        for x in multiples_code.binaries:
            print(x.key, x.child1.key, x.child2.key)
        # particles(0)=new, particles(1)=removed, particles(2)=updated binaries.
        for x in stopping_condition.particles(0):
            print("NEW:", x.key, x.child1.key, x.child2.key)
        for x in stopping_condition.particles(1):
            print("REMOVED:", x.key, x.child1.key, x.child2.key)
        for x in stopping_condition.particles(2):
            print("UPDATED:", x.key, x.child1.key, x.child2.key)
        for x in multiples_code.singles:
            print(x.key, x.mass)
        # No component lost: every star is either a single or inside a binary.
        self.assertEqual(len(multiples_code.singles_in_binaries) + len(multiples_code.singles), 2*len(center_of_mass_particles))
        # Binary count balances against the reported new/removed binaries.
        self.assertEqual(len(multiples_code.binaries) - len(stopping_condition.particles(0)) + len(stopping_condition.particles(1)), len(center_of_mass_particles))
    def test14(self):
        """Variant of test13: heavier secondaries (0.1 mass), far tighter
        orbits (1e-8 separation), evolved for 2 time units; same bookkeeping
        invariants must hold."""
        code = Hermite()
        encounter_code = encounters.HandleEncounter(
            kepler_code = self.new_kepler(),
            resolve_collision_code = self.new_smalln(),
        )
        # Five centers of mass, 1 length unit apart, at rest.
        center_of_mass_particles = datamodel.Particles(5)
        center_of_mass_particles.position = (numpy.asarray(range(5))).reshape(5,1) * ([1.0, 0.0, 0.0] | nbody_system.length)
        center_of_mass_particles.velocity = [0.0, 0.0, 0.0] | nbody_system.speed
        center_of_mass_particles.radius = 0.05 | nbody_system.length
        # Extremely small separation compared to test13 (1e-8 vs 1e-4).
        binaries, singles_in_binaries = self.create_binaries(
            center_of_mass_particles,
            1 | nbody_system.mass,
            0.1 | nbody_system.mass,
            0.00000001 | nbody_system.length
        )
        multiples_code = encounters.Multiples(
            gravity_code = code,
            handle_encounter_code = encounter_code
        )
        multiples_code.singles_in_binaries.add_particles(singles_in_binaries)
        multiples_code.binaries.add_particles(binaries)
        multiples_code.commit_particles()
        #stopping_condition = multiples_code.stopping_conditions.encounter_detection
        #stopping_condition.enable()
        stopping_condition = multiples_code.stopping_conditions.binaries_change_detection
        stopping_condition.enable()
        for x in multiples_code.binaries:
            print(x.key, x.child1.key, x.child2.key)
        multiples_code.evolve_model(2 | nbody_system.time)
        self.assertTrue(stopping_condition.is_set())
        for x in multiples_code.binaries:
            print(x.key, x.child1.key, x.child2.key)
        # particles(0)=new, particles(1)=removed, particles(2)=updated binaries.
        for x in stopping_condition.particles(0):
            print("NEW:", x.key, x.child1.key, x.child2.key)
        for x in stopping_condition.particles(1):
            print("REMOVED:", x.key, x.child1.key, x.child2.key)
        for x in stopping_condition.particles(2):
            print("UPDATED:", x.key, x.child1.key, x.child2.key)
        for x in multiples_code.singles:
            print(x.key, x.mass)
        # No component lost: every star is either a single or inside a binary.
        self.assertEqual(len(multiples_code.singles_in_binaries) + len(multiples_code.singles), 2*len(center_of_mass_particles))
        # Binary count balances against the reported new/removed binaries.
        self.assertEqual(len(multiples_code.binaries) - len(stopping_condition.particles(0)) + len(stopping_condition.particles(1)), len(center_of_mass_particles))
    def test15(self):
        """Same binary-change bookkeeping check as test13/test14, but
        starting from a deterministic 10-body Plummer distribution of
        binaries (seeded RandomState, velocities zeroed)."""
        code = Hermite()
        encounter_code = encounters.HandleEncounter(
            kepler_code = self.new_kepler(),
            resolve_collision_code = self.new_smalln(),
        )
        n = 10
        # Fixed seed makes the spatial distribution reproducible.
        center_of_mass_particles = plummer.new_plummer_model(n, random=numpy.random.mtrand.RandomState(1))
        center_of_mass_particles.radius = 0.5 | nbody_system.length
        center_of_mass_particles.velocity *= 0
        # Total system mass stays 1: each binary carries 1/n, split
        # 0.999/0.001 between primary and secondary.
        binaries, singles_in_binaries = self.create_binaries(
            center_of_mass_particles,
            0.999 * ((1.0 | nbody_system.mass) / n),
            0.001 * ((1.0 | nbody_system.mass) / n),
            0.00001 | nbody_system.length
        )
        multiples_code = encounters.Multiples(
            gravity_code = code,
            handle_encounter_code = encounter_code
        )
        multiples_code.singles_in_binaries.add_particles(singles_in_binaries)
        multiples_code.binaries.add_particles(binaries)
        multiples_code.commit_particles()
        #stopping_condition = multiples_code.stopping_conditions.encounter_detection
        #stopping_condition.enable()
        stopping_condition = multiples_code.stopping_conditions.binaries_change_detection
        stopping_condition.enable()
        for x in multiples_code.binaries:
            print(x.key, x.child1.key, x.child2.key)
        multiples_code.evolve_model(2 | nbody_system.time)
        self.assertTrue(stopping_condition.is_set())
        for x in multiples_code.binaries:
            print(x.key, x.child1.key, x.child2.key)
        # particles(0)=new, particles(1)=removed, particles(2)=updated binaries.
        for x in stopping_condition.particles(0):
            print("NEW:", x.key, x.child1.key, x.child2.key)
        for x in stopping_condition.particles(1):
            print("REMOVED:", x.key, x.child1.key, x.child2.key)
        for x in stopping_condition.particles(2):
            print("UPDATED:", x.key, x.child1.key, x.child2.key)
        for x in multiples_code.singles:
            print(x.key, x.mass)
        # Binary count balances against the reported new/removed binaries.
        self.assertEqual(len(multiples_code.binaries) - len(stopping_condition.particles(0)) + len(stopping_condition.particles(1)), len(center_of_mass_particles))
    def test16(self):
        """Round-trip the full Multiples bookkeeping state through an HDF5
        file and verify a restarted code evolves to identical positions."""
        code = Hermite()
        n = 10
        # Ten unit-mass particles at rest at x = 0, 1, 4, 9, ... with large
        # radii (0.5) so the closest pair encounters quickly.
        singles = datamodel.Particles(keys = list(range(1,n+1)))
        singles.mass = 1 | nbody_system.mass
        for x in range(n):
            singles[x].position = [x*x, 0, 0] | nbody_system.length
        singles.velocity = [0,0,0] | nbody_system.speed
        singles.radius = 0.5 | nbody_system.length
        # StickyHandleEncounter — presumably merges encountering particles
        # directly into a multiple (no kepler/smalln codes needed).
        multiples_code = encounters.Multiples(
            gravity_code = code,
            handle_encounter_code = encounters.StickyHandleEncounter()
        )
        multiples_code.singles.add_particles(singles)
        multiples_code.commit_particles()
        multiples_code.evolve_model(1 | nbody_system.time)
        print(len(multiples_code.multiples))
        # One encounter: two of the ten singles became a binary inside a
        # single multiple (10 -> 8 singles, 9 top-level particles).
        self.assertEqual(len(multiples_code.multiples), 1)
        self.assertEqual(len(multiples_code.particles), 9)
        self.assertEqual(len(multiples_code.singles), 8)
        self.assertEqual(len(multiples_code.binaries), 1)
        self.assertEqual(len(multiples_code.singles_in_binaries), 2)
        self.assertEqual(id(multiples_code.components_of_multiples), id(multiples_code.multiples[0].components[0].particles_set))
        print(multiples_code.multiples[0].components)
        # Persist all five bookkeeping sets under explicit names
        # (storage version 2.0).
        io.write_set_to_file(
            (
                multiples_code.singles,
                multiples_code.singles_in_binaries,
                multiples_code.binaries,
                multiples_code.components_of_multiples,
                multiples_code.multiples
            ),
            "multiples.hdf5",
            "hdf5",
            version = "2.0",
            names = (
                "singles",
                "singles_in_binaries",
                "binaries",
                "components_of_multiples",
                "multiples"
            )
        )
        # Fresh code instances to simulate a restart from file.
        multiples_code_loaded = encounters.Multiples(
            gravity_code = Hermite(),
            handle_encounter_code = encounters.StickyHandleEncounter()
        )
        (
            singles,
            singles_in_binaries,
            binaries,
            components_of_multiples,
            multiples
        ) = io.read_set_from_file(
            "multiples.hdf5",
            "hdf5",
            version = "2.0",
            names = (
                "singles",
                "singles_in_binaries",
                "binaries",
                "components_of_multiples",
                "multiples"
            )
        )
        # Loaded sets mirror the saved ones.
        self.assertEqual(len(multiples), 1)
        self.assertEqual(len(singles), 8)
        self.assertEqual(len(binaries), 1)
        self.assertEqual(len(singles_in_binaries), 2)
        #self.assertEquals(id(components_of_multiples), id(multiples[0].components[0].particles_set))
        multiples_code_loaded.singles.add_particles(singles)
        multiples_code_loaded.singles_in_binaries.add_particles(singles_in_binaries)
        multiples_code_loaded.binaries.add_particles(binaries)
        multiples_code_loaded.components_of_multiples.add_particles(components_of_multiples)
        multiples_code_loaded.multiples.add_particles(multiples)
        multiples_code_loaded.commit_particles()
        # Restarted code reproduces the saved bookkeeping after commit.
        self.assertEqual(len(multiples_code_loaded.multiples), 1)
        self.assertEqual(len(multiples_code_loaded.particles), 9)
        self.assertEqual(len(multiples_code_loaded.singles), 8)
        self.assertEqual(len(multiples_code_loaded.binaries), 1)
        self.assertEqual(len(multiples_code_loaded.singles_in_binaries), 2)
        #self.assertEquals(id(multiples_code_loaded.components_of_multiples), id(multiples_code_loaded.multiples[0].components[0].particles_set))
        multiples_code.evolve_model(4 | nbody_system.time)
        # need to use 3 here as the model_time is reset when doing a restart and we did not set it after creating Hermite
        multiples_code_loaded.evolve_model(3.0 | nbody_system.time)
        print(len(multiples_code.multiples), multiples_code.particles)
        print(multiples_code.particles.position - multiples_code_loaded.particles.position)
        # Original and restarted runs must land on identical positions.
        self.assertAlmostRelativeEquals(multiples_code.particles.position - multiples_code_loaded.particles.position, [0,0,0] | nbody_system.length)
        for code in [multiples_code, multiples_code_loaded]:
            self.assertEqual(len(code.multiples), 1)
            self.assertEqual(len(code.particles), 8)
            self.assertEqual(len(code.singles), 7)
            self.assertEqual(len(code.binaries), 1)
            self.assertEqual(len(code.singles_in_binaries), 2)
            self.assertEqual(len(code.components_of_multiples), 3)
            self.assertEqual(id(code.components_of_multiples), id(code.multiples[0].components[0].particles_set))
| 41.923657
| 164
| 0.633866
| 5,149
| 44,481
| 5.227811
| 0.045252
| 0.12122
| 0.066201
| 0.072219
| 0.885913
| 0.852589
| 0.816814
| 0.774649
| 0.742291
| 0.727617
| 0
| 0.031312
| 0.268384
| 44,481
| 1,060
| 165
| 41.963208
| 0.795839
| 0.023516
| 0
| 0.669767
| 0
| 0
| 0.00539
| 0.00106
| 0
| 0
| 0
| 0
| 0.173256
| 1
| 0.026744
| false
| 0
| 0.019767
| 0
| 0.055814
| 0.039535
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0aa2e3f77ab7844dadccac7d7828ec021a54add2
| 108
|
py
|
Python
|
parquet_to_root/__init__.py
|
ponyisi/parquet_to_root
|
b978c0fba33d98428de35abbad2f424092e4f0c7
|
[
"Apache-2.0"
] | 4
|
2020-12-19T22:48:31.000Z
|
2021-08-17T23:01:03.000Z
|
parquet_to_root/__init__.py
|
ponyisi/parquet_to_root
|
b978c0fba33d98428de35abbad2f424092e4f0c7
|
[
"Apache-2.0"
] | 2
|
2021-01-14T17:15:50.000Z
|
2021-01-16T19:38:30.000Z
|
parquet_to_root/__init__.py
|
ponyisi/parquet_to_root
|
b978c0fba33d98428de35abbad2f424092e4f0c7
|
[
"Apache-2.0"
] | 1
|
2020-12-19T22:55:17.000Z
|
2020-12-19T22:55:17.000Z
|
from .parquet_to_root_pyroot import parquet_to_root_pyroot as parquet_to_root
__all__ = ['parquet_to_root']
| 36
| 77
| 0.861111
| 18
| 108
| 4.388889
| 0.444444
| 0.455696
| 0.658228
| 0.481013
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 108
| 2
| 78
| 54
| 0.79798
| 0
| 0
| 0
| 0
| 0
| 0.138889
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
0ad631cdf6597275c367b066b7c97fe3721e1d65
| 224
|
py
|
Python
|
pylogger/formats/__init__.py
|
agSant01/pylogger
|
99a5d08b0f486c43dc4936cd89474e21a86f377a
|
[
"MIT"
] | null | null | null |
pylogger/formats/__init__.py
|
agSant01/pylogger
|
99a5d08b0f486c43dc4936cd89474e21a86f377a
|
[
"MIT"
] | null | null | null |
pylogger/formats/__init__.py
|
agSant01/pylogger
|
99a5d08b0f486c43dc4936cd89474e21a86f377a
|
[
"MIT"
] | null | null | null |
from .timestamp import Timestamp
from .caller import ClassCaller, FunctionCaller, FileCaller, FileLine
from .format import Format
__all__ = ['Timestamp', 'ClassCaller', 'FunctionCaller', 'FileCaller', 'FileLine', 'Format']
| 37.333333
| 92
| 0.776786
| 22
| 224
| 7.727273
| 0.454545
| 0.294118
| 0.411765
| 0.505882
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.107143
| 224
| 5
| 93
| 44.8
| 0.85
| 0
| 0
| 0
| 0
| 0
| 0.258929
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.75
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
0adb0ed8f059ff05d15ce76e6992b8fe0da30683
| 96
|
py
|
Python
|
venv/lib/python3.8/site-packages/pip/_vendor/urllib3/contrib/pyopenssl.py
|
Retraces/UkraineBot
|
3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71
|
[
"MIT"
] | 2
|
2022-03-13T01:58:52.000Z
|
2022-03-31T06:07:54.000Z
|
venv/lib/python3.8/site-packages/pip/_vendor/urllib3/contrib/pyopenssl.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | 19
|
2021-11-20T04:09:18.000Z
|
2022-03-23T15:05:55.000Z
|
venv/lib/python3.8/site-packages/pip/_vendor/urllib3/contrib/pyopenssl.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | null | null | null |
/home/runner/.cache/pip/pool/95/82/31/185593a2ca9b7cb9e465738183b8a063bd6246f33735439a0f4b2d510f
| 96
| 96
| 0.895833
| 9
| 96
| 9.555556
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.489583
| 0
| 96
| 1
| 96
| 96
| 0.40625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0ae659e7c9b9fd9429031373b7a90323f1071015
| 3,913
|
py
|
Python
|
testo.py
|
radiojam11/esame_immobili
|
7c1863b629fe2f9602f7ff634b2511d20fdeab72
|
[
"MIT"
] | null | null | null |
testo.py
|
radiojam11/esame_immobili
|
7c1863b629fe2f9602f7ff634b2511d20fdeab72
|
[
"MIT"
] | null | null | null |
testo.py
|
radiojam11/esame_immobili
|
7c1863b629fe2f9602f7ff634b2511d20fdeab72
|
[
"MIT"
] | null | null | null |
banner = """
_ _ _ _ _____ _
| | | | | | (_) |_ _| (_)
| | | | __ _| | ___ _ __ _ ___ | | ___ __ _ _ __ ___ _________
| | | |/ _` | |/ _ \ '__| |/ _ \ | |/ _ \ / _` | '_ \ / _ \_ /_ / |
\ \_/ / (_| | | __/ | | | (_) | | | (_) | (_| | | | | (_) / / / /| |
\___/ \__,_|_|\___|_| |_|\___/ \_/\___/ \__, |_| |_|\___/___/___|_|
__/ |
|___/
_____ ___________ _____ _____
/ __ \ |_ _| ___|_ _/ ___|
| / \/ ___ _ __ ___ ___ ______ | | | |_ | | \ `--.
| | / _ \| '__/ __|/ _ \ |______| | | | _| | | `--. \
| \__/\ (_) | | \__ \ (_) | _| |_| | | | /\__/ /
\____/\___/|_| |___/\___/ \___/\_| \_/ \____/
________ ___ ___ ______ _____ ___ ___ ___ _ _ _ _______ ___ _____ _____ _ _______ _____ _ _ _____
/ ___| \/ | / _ \ | ___ \_ _| | \/ | / _ \ | \ | | | | | ___/ _ \/ __ \_ _| | | | ___ \_ _| \ | | __ \
\ `--.| . . |/ /_\ \| |_/ / | | | . . |/ /_\ \| \| | | | | |_ / /_\ \ / \/ | | | | | | |_/ / | | | \| | | \/
`--. \ |\/| || _ || / | | | |\/| || _ || . ` | | | | _|| _ | | | | | | | | / | | | . ` | | __
/\__/ / | | || | | || |\ \ | | | | | || | | || |\ | |_| | | | | | | \__/\ | | | |_| | |\ \ _| |_| |\ | |_\ \
\____/\_| |_/\_| |_/\_| \_| \_/ \_| |_/\_| |_/\_| \_/\___/\_| \_| |_/\____/ \_/ \___/\_| \_|\___/\_| \_/\____/
______ _____ _ _ _____ _ ___________ ___________ ___ _____
| _ \ ___| | | | ___| | | _ | ___ \ ___| ___ \ / || _ |
| | | | |__ | | | | |__ | | | | | | |_/ / |__ | |_/ / / /| || |/' |
| | | | __|| | | | __|| | | | | | __/| __|| / / /_| || /| |
| |/ /| |___\ \_/ / |___| |___\ \_/ / | | |___| |\ \ \___ |\ |_/ /
|___/ \____/ \___/\____/\_____/\___/\_| \____/\_| \_| |_(_)___/
"""
| 108.694444
| 117
| 0.131613
| 1
| 3,913
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.715308
| 3,913
| 36
| 118
| 108.694444
| 0.005386
| 0
| 0
| 0
| 0
| 0.642857
| 0.995912
| 0.006643
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0af626ba927d65a27879f2fd54b2f9098e64d60c
| 42
|
py
|
Python
|
python/testData/copyPaste/WholeIndentedLineSelectedWithoutIndentAndReplacedWithWord.after.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 2
|
2019-04-28T07:48:50.000Z
|
2020-12-11T14:18:08.000Z
|
python/testData/copyPaste/WholeIndentedLineSelectedWithoutIndentAndReplacedWithWord.after.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 173
|
2018-07-05T13:59:39.000Z
|
2018-08-09T01:12:03.000Z
|
python/testData/copyPaste/WholeIndentedLineSelectedWithoutIndentAndReplacedWithWord.after.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 2
|
2020-03-15T08:57:37.000Z
|
2020-04-07T04:48:14.000Z
|
if True:
if True:
pass
foo
| 10.5
| 12
| 0.452381
| 6
| 42
| 3.166667
| 0.666667
| 0.631579
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.5
| 42
| 4
| 13
| 10.5
| 0.904762
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.25
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
7c633a13154c19b4e19c51e2ed490797f2c3c324
| 23
|
py
|
Python
|
torchsolver/nn/__init__.py
|
killf/torchsolver
|
d9d651a646cff3a00ff36c455704cb009d45aa9a
|
[
"MIT"
] | 1,816
|
2018-02-14T01:59:51.000Z
|
2022-03-31T17:09:20.000Z
|
torchsolver/nn/__init__.py
|
killf/torchsolver
|
d9d651a646cff3a00ff36c455704cb009d45aa9a
|
[
"MIT"
] | 340
|
2018-02-11T00:27:26.000Z
|
2022-03-21T12:00:24.000Z
|
torchsolver/nn/__init__.py
|
killf/torchsolver
|
d9d651a646cff3a00ff36c455704cb009d45aa9a
|
[
"MIT"
] | 144
|
2018-03-18T00:08:16.000Z
|
2022-02-26T01:51:58.000Z
|
from .view import View
| 11.5
| 22
| 0.782609
| 4
| 23
| 4.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173913
| 23
| 1
| 23
| 23
| 0.947368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7c72c941ecc1bbef2168e6287b2f0f9479a55321
| 197,642
|
py
|
Python
|
pirates/leveleditor/worldData/pvpShipIsland1.py
|
Willy5s/Pirates-Online-Rewritten
|
7434cf98d9b7c837d57c181e5dabd02ddf98acb7
|
[
"BSD-3-Clause"
] | 81
|
2018-04-08T18:14:24.000Z
|
2022-01-11T07:22:15.000Z
|
pirates/leveleditor/worldData/pvpShipIsland1.py
|
Willy5s/Pirates-Online-Rewritten
|
7434cf98d9b7c837d57c181e5dabd02ddf98acb7
|
[
"BSD-3-Clause"
] | 4
|
2018-09-13T20:41:22.000Z
|
2022-01-08T06:57:00.000Z
|
pirates/leveleditor/worldData/pvpShipIsland1.py
|
Willy5s/Pirates-Online-Rewritten
|
7434cf98d9b7c837d57c181e5dabd02ddf98acb7
|
[
"BSD-3-Clause"
] | 26
|
2018-05-26T12:49:27.000Z
|
2021-09-11T09:11:59.000Z
|
from pandac.PandaModules import Point3, VBase3, Vec4, Vec3
objectStruct = {'Objects': {'1196970080.56sdnaik': {'Type': 'Island','Name': 'Team 1 HQ','File': '','Environment': 'OpenSky','Minimap': False,'Objects': {'1196991789.28sdnaik': {'Type': 'Dinghy','Aggro Radius': '20.0000','Hpr': VBase3(-179.869, 0.0, 0.0),'Location': 'Water','Pos': Point3(-82.5, -223.874, 0.746),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/shipparts/dingy-geometry_High'}},'1196991806.06sdnaik': {'Type': 'Player Spawn Node','GridPos': Point3(-17.056, -199.424, 10.028),'Hpr': VBase3(22.12, 0.0, 0.0),'Index': 1,'Pos': Point3(-29.493, -212.199, 4.482),'Scale': VBase3(1.0, 1.0, 1.0),'Spawnables': 'All','Visual': {'Color': (0.5, 0.5, 0.5, 1),'Model': 'models/misc/smiley'}},'1201548250.81kmuller': {'Type': 'Dinghy','Aggro Radius': '20.0000','Hpr': VBase3(-157.877, 0.0, 0.0),'Location': 'Water','Pos': Point3(140.894, 301.46, 0.667),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/shipparts/dingy-geometry_High'}},'1201548362.32kmuller': {'Type': 'Dinghy','Aggro Radius': '20.0000','Hpr': VBase3(119.549, 0.0, 0.0),'Location': 'Water','Pos': Point3(309.464, 198.649, -0.016),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/shipparts/dingy-geometry_High'}},'1201548416.07kmuller': {'Type': 'Building Exterior','File': '','ExtUid': '1201548416.07kmuller0','Hpr': VBase3(81.927, 0.326, -4.632),'Objects': {},'Pos': Point3(-235.488, -242.127, 2.436),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.699999988079071, 0.699999988079071, 0.699999988079071, 1.0),'Door': 'models/buildings/shanty_guildhall_door','Model': 'models/buildings/shanty_repairshop_exterior','SignFrame': '','SignImage': 'models/buildings/sign1_eng_a_icon_barber'}},'1201548787.04kmuller': {'Type': 'Townsperson','Category': 'Shipwright','AnimSet': 'default','AuraFX': 'None','Boss': False,'CustomModel': 'None','GhostColor': 'None','GhostFX': 0,'Greeting Animation': '','GridPos': Point3(-235.317, -219.749, 5.658),'Hpr': VBase3(-12.194, 1.423, 4.012),'Instanced World': 
'None','Level': '37','Notice Animation 1': '','Notice Animation 2': '','Patrol Radius': '1.0000','Pos': Point3(-234.17, -220.997, 4.184),'PoseAnim': '','PoseFrame': '','Private Status': 'All','PropFXLeft': 'None','PropFXRight': 'None','PropLeft': 'None','PropRight': 'None','Respawns': True,'Scale': VBase3(1.0, 1.0, 1.0),'ShopID': 'PORT_ROYAL_DEFAULTS','Start State': 'Idle','StartFrame': '0','Team': 'Player','TrailFX': 'None','TrailLeft': 'None','TrailRight': 'None','Zombie': False,'spawnTimeAlt': '','spawnTimeBegin': 0.0,'spawnTimeEnd': 0.0},'1201558950.67kmuller': {'Type': 'Player Spawn Node','GridPos': Point3(-363.542, -174.738, 7.863),'Hpr': VBase3(-69.741, 0.0, 0.0),'Index': -1,'Pos': Point3(-337.744, -189.396, 3.611),'Scale': VBase3(1.0, 1.0, 1.0),'Spawnables': 'All','Visual': {'Color': (0.5, 0.5, 0.5, 1),'Model': 'models/misc/smiley'}},'1201558963.15kmuller': {'Type': 'Player Spawn Node','GridPos': Point3(-115.402, -186.912, 5.989),'Hpr': VBase3(27.663, 0.0, 0.0),'Index': -1,'Pos': Point3(-124.913, -193.373, 4.287),'Scale': VBase3(1.0, 1.0, 1.0),'Spawnables': 'All','Visual': {'Color': (0.5, 0.5, 0.5, 1),'Model': 'models/misc/smiley'}},'1202419919.16akelts': {'Type': 'Townsperson','Category': 'Gunsmith','AnimSet': 'default','AuraFX': 'None','Boss': False,'CustomModel': 'None','DNA': '1202419919.16akelts','GhostColor': 'None','GhostFX': 0,'Greeting Animation': '','HelpID': 'NONE','Hpr': VBase3(141.505, 0.0, 0.0),'Instanced World': 'None','Level': '37','Notice Animation 1': '','Notice Animation 2': '','Patrol Radius': '6.6084','Pos': Point3(-47.741, -120.761, 21.824),'PoseAnim': '','PoseFrame': '','Private Status': 'All','PropFXLeft': 'None','PropFXRight': 'None','PropLeft': 'None','PropRight': 'None','Respawns': True,'Scale': VBase3(1.0, 1.0, 1.0),'ShopID': 'PORT_ROYAL_DEFAULTS','Start State': 'Walk','StartFrame': '0','Team': 'Player','TrailFX': 'None','TrailLeft': 'None','TrailRight': 'None','Zombie': False,'spawnTimeAlt': '','spawnTimeBegin': 
0.0,'spawnTimeEnd': 0.0},'1202519353.83akelts': {'Type': 'Building Exterior','File': '','ExtUid': '1202519353.83akelts0','Hpr': VBase3(31.5, 1.976, 359.642),'Objects': {'1210704930.45kmuller': {'Type': 'Door Locator Node','Name': 'door_locator','Hpr': VBase3(-180.0, 0.0, 0.0),'Pos': Point3(-0.277, -13.756, 1.561),'Scale': VBase3(1.0, 1.0, 1.0)}},'Pos': Point3(-308.94, -20.683, 33.328),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.9200000166893005, 0.8999999761581421, 0.7900000214576721, 1.0),'Door': 'models/buildings/shanty_guildhall_door','Model': 'models/buildings/shanty_guildhall_exterior','SignFrame': '','SignImage': 'models/buildings/sign1_eng_a_icon_doctor'}},'1202519757.13akelts': {'Type': 'Townsperson','Category': 'Commoner','AnimSet': 'drunk','AuraFX': 'None','Boss': False,'CustomModel': 'models/char/plf_zero','GhostColor': 'None','GhostFX': 0,'Greeting Animation': '','GridPos': Point3(307.519, 22.278, 59.769),'HelpID': 'NONE','Hpr': VBase3(208.49, 0.0, 0.0),'Instanced World': 'None','Level': '37','Notice Animation 1': '','Notice Animation 2': '','Objects': {},'Patrol Radius': '6.2651','Pos': Point3(-312.659, -30.506, 61.591),'PoseAnim': '','PoseFrame': '','Private Status': 'All','PropFXLeft': 'None','PropFXRight': 'None','PropLeft': 'None','PropRight': 'None','Respawns': True,'Scale': VBase3(1.0, 1.0, 1.0),'ShopID': 'PORT_ROYAL_DEFAULTS','Start State': 'Idle','StartFrame': '0','Team': 'Villager','TrailFX': 'None','TrailLeft': 'None','TrailRight': 'None','Zombie': False,'spawnTimeAlt': '','spawnTimeBegin': 0.0,'spawnTimeEnd': 0.0},'1202521042.56akelts': {'Type': 'Ship Wreck','Hpr': VBase3(141.206, 0.0, -19.14),'Pos': Point3(-47.861, -98.694, 22.343),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/pir_m_shp_wrk_sloop_fore'}},'1202521625.98akelts': {'Type': 'Door Locator Node','Name': 'door_locator','Hpr': VBase3(0.171, 0.0, 0.0),'Pos': Point3(0.498, 4.479, 0.952),'Scale': VBase3(1.0, 1.0, 1.0),'TargetUIDs': 
[]},'1202521699.22akelts': {'Type': 'Prop_Groups','DisableCollision': True,'Hpr': VBase3(160.849, -9.225, 3.535),'Pos': Point3(-41.575, -95.77, 26.038),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/prop_group_A'}},'1202521744.44akelts': {'Type': 'Prop_Groups','DisableCollision': True,'Hpr': VBase3(-13.079, 3.848, -3.509),'Pos': Point3(-18.449, -91.278, 27.894),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/prop_group_B'}},'1202521770.64akelts': {'Type': 'Prop_Groups','DisableCollision': True,'Hpr': VBase3(-117.926, -4.388, 7.674),'Pos': Point3(-33.557, -110.624, 23.869),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/prop_group_C'}},'1202521854.64akelts': {'Type': 'Ship_Props','DisableCollision': True,'Hpr': VBase3(-156.676, 0.0, 0.0),'Pos': Point3(-42.62, -105.983, 24.564),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/cannonball_stack_triangle'}},'1202521861.92akelts': {'Type': 'Ship_Props','DisableCollision': True,'Hpr': VBase3(162.089, 0.0, 0.0),'Pos': Point3(-38.083, -108.828, 24.314),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/cannonball_stack_square'}},'1202521909.2akelts': {'Type': 'Ship_Props','DisableCollision': True,'Hpr': VBase3(66.887, 0.0, -12.422),'Pos': Point3(-33.015, -116.94, 23.689),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/cannon_stack_01'}},'1202521931.22akelts': {'Type': 'Ship_Props','DisableCollision': True,'Hpr': VBase3(-180.0, 0.0, 0.0),'Pos': Point3(-31.266, -95.35, 27.148),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/cannon_stack_02'}},'1202522010.05akelts': {'Type': 'Rock','DisableCollision': True,'Hpr': VBase3(-157.446, 0.0, 0.0),'Pos': Point3(-48.466, -85.119, 24.635),'Scale': VBase3(2.463, 2.463, 3.195),'Visual': {'Color': (0.89, 0.85, 0.796078431372549, 1.0),'Model': 'models/props/rock_group_5_floor'}},'1202522090.09akelts': {'Type': 'Tree','DisableCollision': True,'Hpr': VBase3(-180.0, 0.0, 
0.0),'Pos': Point3(-70.485, -56.698, 26.49),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/vegetation/palm_tree_a'}},'1202522096.61akelts': {'Type': 'Tree','DisableCollision': False,'Hpr': VBase3(117.765, 0.0, 0.0),'Pos': Point3(-76.934, -54.282, 26.597),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/vegetation/palm_tree_b'}},'1202522133.41akelts': {'Type': 'Bush','DisableCollision': True,'Hpr': VBase3(-136.434, 0.0, 0.0),'Pos': Point3(-73.095, -55.381, 26.565),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/vegetation/bush_a'}},'1202839503.38akelts': {'Type': 'Tree','DisableCollision': True,'Hpr': VBase3(-180.0, 0.0, 0.0),'Pos': Point3(87.291, -122.025, 42.188),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/vegetation/palm_tree_a'}},'1202839506.13akelts': {'Type': 'Tree','DisableCollision': True,'Hpr': VBase3(-180.0, 0.0, 0.0),'Pos': Point3(209.743, 0.877, 68.654),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/vegetation/palm_tree_d'}},'1202839507.11akelts': {'Type': 'Tree','DisableCollision': True,'Hpr': VBase3(-180.0, 0.0, 0.0),'Pos': Point3(-51.106, 47.516, 92.662),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/vegetation/palm_tree_a'}},'1202839581.52akelts': {'Type': 'Tree','DisableCollision': True,'Hpr': VBase3(-157.564, 0.0, 0.0),'Pos': Point3(-199.205, 64.125, 94.062),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/vegetation/palm_tree_b'}},'1202839582.81akelts': {'Type': 'Tree','DisableCollision': True,'Hpr': VBase3(117.765, 0.0, 0.0),'Pos': Point3(166.801, -255.754, 31.025),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/vegetation/palm_tree_b'}},'1202839585.47akelts': {'Type': 'Tree','DisableCollision': False,'GridPos': Point3(443.172, -116.196, 55.301),'Hpr': VBase3(117.765, 0.0, 0.0),'Pos': Point3(-195.155, -214.069, 2.395),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/vegetation/palm_tree_d'}},'1202839586.58akelts': {'Type': 
'Tree','DisableCollision': False,'Hpr': VBase3(117.765, 0.0, 0.0),'Pos': Point3(315.224, 143.437, 7.736),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/vegetation/palm_tree_e'}},'1202839590.66akelts': {'Type': 'Tree','DisableCollision': True,'Hpr': VBase3(169.215, 0.0, 0.0),'Pos': Point3(-176.21, 176.026, 36.132),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/vegetation/palm_tree_b'}},'1202839593.34akelts': {'Type': 'Tree','DisableCollision': True,'Hpr': VBase3(101.682, 0.0, 0.0),'Pos': Point3(25.489, 123.851, 23.624),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/vegetation/palm_tree_a'}},'1202839599.58akelts': {'Type': 'Rock','DisableCollision': False,'Hpr': VBase3(-33.854, 0.0, 0.0),'Objects': {},'Pos': Point3(-150.694, -12.186, 18.163),'Scale': VBase3(8.581, 8.581, 8.581),'Visual': {'Color': (0.6000000238418579, 0.6000000238418579, 0.6000000238418579, 1.0),'Model': 'models/props/rock_group_5_floor'}},'1202839600.83akelts': {'Type': 'Rock','DisableCollision': False,'Hpr': VBase3(-67.597, 0.0, 7.49),'Pos': Point3(-230.313, 16.964, 32.291),'Scale': VBase3(2.565, 2.565, 2.177),'Visual': {'Color': (0.4000000059604645, 0.4000000059604645, 0.4000000059604645, 1.0),'Model': 'models/props/rock_group_5_floor'}},'1202839604.0akelts': {'Type': 'Rock','DisableCollision': True,'Hpr': VBase3(-0.081, -4.343, 10.293),'Pos': Point3(39.895, 154.253, 17.689),'Scale': VBase3(7.465, 7.465, 7.465),'Visual': {'Color': (0.5, 0.5, 0.5, 1.0),'Model': 'models/props/rock_group_5_floor'}},'1202839606.13akelts': {'Type': 'Rock','DisableCollision': False,'Hpr': VBase3(-2.205, 0.0, 0.0),'Pos': Point3(331.724, 139.654, 9.52),'Scale': VBase3(3.499, 3.499, 3.499),'Visual': {'Model': 'models/props/rock_group_3_floor'}},'1202839607.27akelts': {'Type': 'Rock','DisableCollision': False,'Hpr': VBase3(-67.171, -11.295, -6.112),'Pos': Point3(446.574, 127.396, 2.648),'Scale': VBase3(5.597, 5.597, 7.798),'Visual': {'Color': (0.5, 0.5, 0.5, 1.0),'Model': 
'models/props/rock_group_2_sphere'}},'1202839608.95akelts': {'Type': 'Rock','DisableCollision': False,'Hpr': VBase3(161.532, 1.784, 5.488),'Pos': Point3(337.98, 147.835, 1.803),'Scale': VBase3(7.572, 7.572, 7.572),'Visual': {'Model': 'models/props/rock_group_4_floor'}},'1202843883.48akelts': {'Type': 'Rock','DisableCollision': False,'Hpr': VBase3(-180.0, 0.0, 0.0),'Pos': Point3(-18.196, -482.595, -64.502),'Scale': VBase3(0.843, 0.843, 1.078),'Visual': {'Model': 'models/props/mound_light_med'}},'1202844035.08akelts': {'Type': 'Rock','DisableCollision': False,'Hpr': VBase3(174.705, 0.0, 0.0),'Pos': Point3(79.339, -326.786, -62.925),'Scale': VBase3(1.36, 1.36, 1.257),'Visual': {'Color': (0.788235294117647, 0.71, 0.6039215686274509, 1.0),'Model': 'models/props/mound_light_med2'}},'1202844089.09akelts': {'Type': 'Rock','DisableCollision': False,'Hpr': VBase3(-180.0, 0.0, 0.0),'Pos': Point3(-373.02, -150.177, -36.069),'Scale': VBase3(0.343, 0.235, 0.348),'Visual': {'Model': 'models/props/mound_light_lrg'}},'1202844133.89akelts': {'Type': 'Rock','DisableCollision': False,'Hpr': VBase3(51.414, 0.0, 0.0),'Pos': Point3(382.734, 133.916, -61.116),'Scale': VBase3(0.801, 0.801, 1.047),'Visual': {'Color': (0.5, 0.5, 0.5019607843137255, 1.0),'Model': 'models/props/mound_light_med2'}},'1202844200.77akelts': {'Type': 'Shanty Tents','Hpr': VBase3(38.053, 1.069, 0.298),'Pos': Point3(-150.503, 277.399, 34.238),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/buildings/shanty_tent_house_body'}},'1202846001.67akelts': {'Type': 'Bush','DisableCollision': True,'Hpr': VBase3(-3.673, 0.0, 0.0),'Pos': Point3(25.208, 125.712, 25.364),'Scale': VBase3(0.843, 0.843, 0.843),'Visual': {'Model': 'models/vegetation/bush_a'}},'1202846053.19akelts': {'Type': 'Building Exterior','File': 'pvpShipIsland1_int_tavern','ExtUid': '1202846053.19akelts0','Hpr': VBase3(-101.348, 0.096, 2.224),'Objects': {'1210704940.31kmuller': {'Type': 'Door Locator Node','Name': 'door_locator','Hpr': 
VBase3(-179.829, 0.0, 0.0),'Pos': Point3(-0.498, -4.914, 0.952),'Scale': VBase3(1.0, 1.0, 1.0)},'1210704940.34kmuller': {'Type': 'Door Locator Node','Name': 'door_locator_2','Hpr': VBase3(0.0, 0.0, 0.0),'Pos': Point3(-6.626, 20.947, 1.006),'Scale': VBase3(1.0, 1.0, 1.0)}},'Pos': Point3(67.163, -229.356, 28.427),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Door': 'models/buildings/shanty_guildhall_door','Model': 'models/buildings/shanty_tavern_exterior','SignFrame': 'models/buildings/sign1_shanty_a_frame','SignImage': 'models/buildings/sign1_eng_a_icon_tavern'}},'1202846053.25akelts': {'Type': 'Door Locator Node','Name': 'door_locator','GridPos': Point3(171.384, -63.442, 95.932),'Hpr': VBase3(78.823, -0.103, -2.224),'Pos': Point3(62.86, -228.023, 29.39),'Scale': VBase3(1.0, 1.0, 1.0)},'1202846053.28akelts': {'Type': 'Door Locator Node','Name': 'door_locator_2','GridPos': Point3(165.256, -38.122, 95.986),'Hpr': VBase3(-101.348, 0.096, 2.224),'Pos': Point3(88.89, -227.004, 29.724),'Scale': VBase3(1.0, 1.0, 1.0)},'1202846446.94akelts': {'Type': 'Door Locator Node','Name': 'door_locator_2','Hpr': VBase3(0.0, 0.0, 0.0),'Pos': Point3(-6.626, 20.947, 1.006),'Scale': VBase3(1.0, 1.0, 1.0),'TargetUIDs': []},'1203009085.13akelts': {'Type': 'Bridge','DisableCollision': False,'GridPos': Point3(266.129, -23.355, 82.555),'Hpr': VBase3(121.571, -18.131, -1.503),'Objects': {},'Pos': Point3(-264.313, 21.771, 76.478),'Scale': VBase3(0.437, 0.511, 0.485),'Visual': {'Model': 'models/props/shanty_rope_bridge'}},'1203009093.11akelts': {'Type': 'Bridge','DisableCollision': False,'Hpr': VBase3(126.059, -19.215, -1.652),'Pos': Point3(-245.503, 37.919, 85.156),'Scale': VBase3(0.645, 0.645, 0.645),'Visual': {'Model': 'models/props/shanty_rope_bridge_post'}},'1203028879.95akelts': {'Type': 'Door Locator Node','Name': 'door_locator','Hpr': VBase3(-179.829, 0.0, 0.0),'Pos': Point3(-0.498, -4.479, 0.952),'Scale': VBase3(1.0, 1.0, 1.0)},'1203029906.03akelts': {'Type': 'Bridge','DisableCollision': 
False,'GridPos': Point3(283.656, -11.581, 77.963),'Hpr': VBase3(121.84, -3.987, -1.765),'Objects': {},'Pos': Point3(-282.635, 10.423, 74.91),'Scale': VBase3(0.441, 0.489, 0.489),'Visual': {'Model': 'models/props/shanty_rope_bridge'}},'1203029906.05akelts': {'Type': 'Bridge','DisableCollision': False,'GridPos': Point3(268.093, -19.065, 77.603),'Hpr': VBase3(122.305, 10.989, -1.794),'Objects': {},'Pos': Point3(-300.577, -0.885, 78.94),'Scale': VBase3(0.441, 0.489, 0.489),'Visual': {'Model': 'models/props/shanty_rope_bridge'}},'1203030236.98akelts': {'Type': 'Bridge','DisableCollision': False,'Hpr': VBase3(114.876, -18.572, -5.284),'Pos': Point3(-240.23, 30.84, 85.085),'Scale': VBase3(0.645, 0.645, 0.645),'Visual': {'Model': 'models/props/shanty_rope_bridge_post'}},'1203030255.86akelts': {'Type': 'Bridge','DisableCollision': False,'GridPos': Point3(303.618, -2.095, 79.383),'Hpr': VBase3(-54.499, -20.046, -3.644),'Pos': Point3(-302.621, 1.879, 79.363),'Scale': VBase3(0.515, 0.515, 0.454),'Visual': {'Model': 'models/props/shanty_rope_bridge_post'}},'1203030322.05akelts': {'Type': 'Bridge','DisableCollision': False,'GridPos': Point3(299.49, 4.038, 79.189),'Hpr': VBase3(-64.549, -10.414, 6.048),'Pos': Point3(-298.75, -4.42, 79.168),'Scale': VBase3(0.555, 0.555, 0.49),'Visual': {'Model': 'models/props/shanty_rope_bridge_post'}},'1203031365.84akelts': {'Type': 'Building Exterior','File': '','ExtUid': '1203031365.84akelts0','Hpr': VBase3(34.146, 0.0, 0.0),'Pos': Point3(-56.148, -107.761, 21.739),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.79, 0.5882352941176471, 0.3843137254901961, 1.0),'Door': 'models/buildings/shanty_guildhall_door','Model': 'models/buildings/shanty_signpost','SignFrame': 'models/buildings/sign1_shanty_a_frame','SignImage': 'models/buildings/sign1_eng_a_icon_weapons'}},'1203446211.75akelts': {'Type': 'Tree','DisableCollision': True,'Hpr': VBase3(117.765, 0.0, 0.0),'Pos': Point3(195.742, -117.943, 64.054),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': 
{'Model': 'models/vegetation/palm_tree_b'}},'1203446214.03akelts': {'Type': 'Tree','DisableCollision': True,'Hpr': VBase3(117.765, 0.0, 0.0),'Pos': Point3(277.43, -113.713, 58.755),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/vegetation/palm_tree_c'}},'1203446215.28akelts': {'Type': 'Tree','DisableCollision': True,'Hpr': VBase3(117.765, 0.0, 0.0),'Pos': Point3(235.4, -81.351, 59.805),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/vegetation/palm_tree_e'}},'1203446268.36akelts': {'Type': 'Tree','DisableCollision': False,'Hpr': VBase3(50.885, 0.0, 0.0),'Pos': Point3(-251.68, 132.391, 41.387),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/vegetation/palm_tree_f'}},'1203446290.13akelts': {'Type': 'Rock','DisableCollision': False,'Hpr': VBase3(-157.446, 10.359, 0.0),'Pos': Point3(-253.55, 115.449, 41.45),'Scale': VBase3(4.324, 4.324, 2.749),'Visual': {'Color': (0.6000000238418579, 0.6000000238418579, 0.6000000238418579, 1.0),'Model': 'models/props/rock_group_5_floor'}},'1203446376.64akelts': {'Type': 'Rock','DisableCollision': True,'Hpr': VBase3(49.51, -1.693, -14.496),'Pos': Point3(11.922, -71.895, 37.201),'Scale': VBase3(3.856, 3.856, 3.856),'Visual': {'Color': (0.8999999761581421, 0.8999999761581421, 0.8999999761581421, 1.0),'Model': 'models/props/rock_group_5_floor'}},'1203446441.06akelts': {'Type': 'Rock','DisableCollision': True,'Hpr': VBase3(170.09, 0.0, 0.0),'Pos': Point3(-61.575, -68.227, 24.541),'Scale': VBase3(2.734, 2.734, 4.003),'Visual': {'Color': (0.78, 0.7, 0.5843137254901961, 1.0),'Model': 'models/props/rock_group_5_floor'}},'1203446504.67akelts': {'Type': 'Rock','DisableCollision': True,'Hpr': VBase3(-104.832, 0.056, 1.764),'Pos': Point3(-228.258, 145.714, 88.067),'Scale': VBase3(2.463, 2.463, 3.292),'Visual': {'Color': (0.4, 0.45, 0.4, 1.0),'Model': 'models/props/rock_group_1_sphere'}},'1203446683.59akelts': {'Type': 'Rock','DisableCollision': False,'Hpr': VBase3(153.234, -12.708, 13.03),'Pos': Point3(142.827, 
-257.634, 17.677),'Scale': VBase3(7.96, 7.96, 7.96),'Visual': {'Color': (0.76, 0.68, 0.5607843137254902, 1.0),'Model': 'models/props/rock_group_4_sphere'}},'1203446729.5akelts': {'Type': 'Rock','DisableCollision': False,'Hpr': VBase3(-180.0, 8.505, 0.0),'Pos': Point3(-177.626, 168.709, 36.119),'Scale': VBase3(2.55, 2.55, 2.55),'Visual': {'Color': (0.5, 0.5, 0.5, 1.0),'Model': 'models/props/rock_group_3_sphere'}},'1203447159.59akelts': {'Type': 'Rock','DisableCollision': True,'Hpr': VBase3(12.036, 5.331, -1.619),'Pos': Point3(449.828, -77.443, 57.57),'Scale': VBase3(6.175, 6.175, 6.175),'Visual': {'Color': (0.5176470588235295, 0.4196078431372549, 0.2784313725490196, 1.0),'Model': 'models/props/rock_group_5_floor'}},'1203447249.77akelts': {'Type': 'Rock','DisableCollision': True,'Hpr': VBase3(-25.28, 5.224, 1.938),'Pos': Point3(314.345, -128.582, 45.91),'Scale': VBase3(7.621, 7.621, 7.621),'Visual': {'Color': (0.5176470588235295, 0.4196078431372549, 0.2784313725490196, 1.0),'Model': 'models/props/rock_group_3_sphere'}},'1203447289.66akelts': {'Type': 'Rock','DisableCollision': False,'Hpr': VBase3(-78.275, 2.453, 9.902),'Pos': Point3(141.715, -47.545, 62.244),'Scale': VBase3(1.906, 1.906, 1.906),'Visual': {'Color': (0.62, 0.51, 0.3333333333333333, 1.0),'Model': 'models/props/rock_group_2_floor'}},'1203447492.48akelts': {'Type': 'Rock','DisableCollision': True,'Hpr': VBase3(-1.617, 4.756, 4.431),'Pos': Point3(392.98, 102.088, 72.895),'Scale': VBase3(4.429, 4.429, 7.515),'Visual': {'Color': (0.73, 0.64, 0.5098039215686274, 1.0),'Model': 'models/props/rock_group_2_floor'}},'1203447521.73akelts': {'Type': 'Rock','DisableCollision': False,'Hpr': VBase3(101.22, 1.433, 3.472),'Pos': Point3(232.463, 55.224, 70.511),'Scale': VBase3(3.287, 3.287, 3.287),'Visual': {'Color': (0.7176470588235294, 0.62, 0.4823529411764706, 1.0),'Model': 'models/props/rock_group_5_floor'}},'1203447554.77akelts': {'Type': 'Tree','DisableCollision': True,'Hpr': VBase3(138.376, 0.0, 0.0),'Pos': 
Point3(-199.863, 61.464, 93.931),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/vegetation/palm_tree_c'}},'1203447596.81akelts': {'Type': 'Bush','DisableCollision': False,'Hpr': VBase3(-136.434, 0.0, 0.0),'Pos': Point3(-199.179, 63.872, 94.086),'Scale': VBase3(1.447, 1.447, 1.447),'Visual': {'Model': 'models/vegetation/bush_a'}},'1203447611.5akelts': {'Type': 'Bush','DisableCollision': True,'Hpr': VBase3(-101.07, 0.0, 0.0),'Pos': Point3(-50.77, 46.33, 92.61),'Scale': VBase3(1.447, 1.447, 1.447),'Visual': {'Model': 'models/vegetation/bush_c'}},'1203447627.69akelts': {'Type': 'Bush','DisableCollision': True,'Hpr': VBase3(-34.304, 0.0, 0.0),'Pos': Point3(-48.932, 79.607, 92.06),'Scale': VBase3(2.211, 2.211, 2.211),'Visual': {'Model': 'models/vegetation/bush_d'}},'1203447638.27akelts': {'Type': 'Bush','DisableCollision': True,'Hpr': VBase3(166.654, 0.0, 0.0),'Pos': Point3(-37.31, 53.479, 91.273),'Scale': VBase3(2.479, 2.479, 2.479),'Visual': {'Model': 'models/vegetation/bush_d'}},'1203447680.0akelts': {'Type': 'Bush','DisableCollision': False,'Hpr': VBase3(97.925, 0.0, 0.0),'Pos': Point3(-139.604, 182.466, 37.606),'Scale': VBase3(1.447, 1.447, 1.447),'Visual': {'Color': (0.699999988079071, 0.699999988079071, 0.699999988079071, 1.0),'Model': 'models/vegetation/bush_c'}},'1203447779.02akelts': {'Type': 'Rock','DisableCollision': False,'Hpr': VBase3(163.66, 0.0, 0.0),'Pos': Point3(35.385, -411.864, -34.59),'Scale': VBase3(0.664, 0.664, 1.225),'Visual': {'Model': 'models/props/mound_light_small'}},'1203447839.47akelts': {'Type': 'Bush','DisableCollision': True,'Hpr': VBase3(130.721, 0.0, 0.0),'Pos': Point3(68.687, -243.651, 27.867),'Scale': VBase3(0.947, 0.947, 0.947),'Visual': {'Model': 'models/vegetation/bush_a'}},'1203447866.33akelts': {'Type': 'Bush','DisableCollision': False,'Hpr': VBase3(102.581, 0.0, 0.0),'Pos': Point3(77.52, -209.514, 30.35),'Scale': VBase3(0.947, 0.947, 0.947),'Visual': {'Model': 'models/vegetation/bush_f'}},'1203447889.61akelts': 
{'Type': 'Bush','DisableCollision': True,'Hpr': VBase3(-171.938, 0.0, 0.0),'Pos': Point3(93.481, -249.314, 27.867),'Scale': VBase3(0.947, 0.947, 0.947),'Visual': {'Model': 'models/vegetation/bush_d'}},'1203447955.42akelts': {'Type': 'Bush','DisableCollision': True,'Hpr': VBase3(76.904, 0.0, 0.0),'Pos': Point3(69.887, -250.803, 27.867),'Scale': VBase3(0.947, 0.947, 0.947),'Visual': {'Model': 'models/vegetation/bush_d'}},'1203447972.22akelts': {'Type': 'Bush','DisableCollision': True,'Hpr': VBase3(-5.979, 0.0, 0.0),'Pos': Point3(88.352, -251.082, 27.869),'Scale': VBase3(0.947, 0.947, 0.947),'Visual': {'Model': 'models/vegetation/bush_g'}},'1203448602.5akelts': {'Type': 'Bush','DisableCollision': True,'Hpr': VBase3(74.498, 0.0, 0.0),'Pos': Point3(-372.892, -112.957, 23.017),'Scale': VBase3(0.947, 0.947, 0.947),'Visual': {'Model': 'models/vegetation/bush_a'}},'1203448974.69akelts': {'Type': 'Collision Barrier','DisableCollision': False,'GridPos': Point3(341.154, -150.57, 43.714),'Hpr': VBase3(-48.099, 0.0, 0.0),'Pos': Point3(322.636, -185.406, 26.809),'Scale': VBase3(5.799, 3.086, 8.416),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1203449100.89akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(-92.237, 0.0, 0.0),'Pos': Point3(360.019, 81.029, 61.463),'Scale': VBase3(6.408, 3.086, 7.845),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1203449450.05akelts': {'Type': 'Rock','DisableCollision': False,'Hpr': VBase3(-156.411, 0.0, 0.0),'Pos': Point3(-194.694, -230.806, -1.227),'Scale': VBase3(4.172, 4.172, 4.172),'Visual': {'Color': (0.6000000238418579, 0.6000000238418579, 0.6000000238418579, 1.0),'Model': 'models/props/rock_group_5_floor'}},'1203469222.88akelts': {'Type': 'Tree','DisableCollision': False,'Hpr': VBase3(-180.0, 0.0, 0.0),'Pos': Point3(181.737, -72.996, 84.51),'Scale': VBase3(1.203, 1.203, 1.203),'Visual': {'Model': 'models/vegetation/gen_tree_canopy'}},'1203469248.22akelts': {'Type': 
'Tree','DisableCollision': False,'Hpr': VBase3(-180.0, 0.0, 0.0),'Pos': Point3(364.732, -61.815, 71.698),'Scale': VBase3(2.219, 2.219, 2.219),'Visual': {'Model': 'models/vegetation/gen_tree_canopy'}},'1203469277.86akelts': {'Type': 'Tree','DisableCollision': False,'Hpr': VBase3(-180.0, 0.0, 0.0),'Pos': Point3(277.818, -13.742, 100.833),'Scale': VBase3(2.07, 2.07, 1.416),'Visual': {'Model': 'models/vegetation/gen_tree_canopy'}},'1203469364.98akelts': {'Type': 'Rock','DisableCollision': False,'Hpr': VBase3(-80.598, 0.0, 0.0),'Pos': Point3(-199.729, 371.331, -9.632),'Scale': VBase3(1.24, 1.24, 1.146),'Visual': {'Color': (0.5, 0.5, 0.5, 1.0),'Model': 'models/props/mound_light_med'}},'1203469432.73akelts': {'Type': 'Rock','DisableCollision': False,'Hpr': VBase3(-111.799, 30.817, -22.906),'Pos': Point3(-63.873, 137.066, 60.091),'Scale': VBase3(0.664, 0.664, 1.225),'Visual': {'Model': 'models/props/mound_light_small'}},'1203469480.03akelts': {'Type': 'Rock','DisableCollision': False,'Hpr': VBase3(-180.0, 0.0, 0.0),'Pos': Point3(-62.737, 115.266, 84.372),'Scale': VBase3(0.236, 0.162, 0.24),'Visual': {'Color': (0.72, 0.62, 0.47843137254901963, 1.0),'Model': 'models/props/mound_light_lrg'}},'1203469532.7akelts': {'Type': 'Rock','DisableCollision': False,'Hpr': VBase3(-72.089, 0.0, 0.0),'Pos': Point3(530.95, -110.608, -5.295),'Scale': VBase3(0.47, 0.47, 0.549),'Visual': {'Color': (0.76, 0.68, 0.5568627450980392, 1.0),'Model': 'models/props/mound_light_lrg'}},'1203469661.41akelts': {'Type': 'Rock','DisableCollision': True,'Hpr': VBase3(-113.825, 0.0, 0.0),'Pos': Point3(-354.425, 38.826, -11.22),'Scale': VBase3(13.148, 13.148, 13.148),'Visual': {'Color': (0.6000000238418579, 0.6000000238418579, 0.6000000238418579, 1.0),'Model': 'models/props/rock_group_5_sphere'}},'1203469718.22akelts': {'Type': 'Rock','DisableCollision': True,'Hpr': VBase3(-31.621, 0.867, -1.848),'Pos': Point3(487.853, -178.038, -13.172),'Scale': VBase3(13.804, 13.804, 19.743),'Visual': {'Color': (0.65, 0.6, 
0.5215686274509804, 1.0),'Model': 'models/props/rock_group_5_floor'}},'1203469953.31akelts': {'Type': 'Rock','DisableCollision': False,'Hpr': VBase3(77.01, 0.0, -2.793),'Pos': Point3(-27.244, -30.058, 26.532),'Scale': VBase3(0.168, 0.262, 0.317),'Visual': {'Color': (0.59, 0.5607843137254902, 0.47058823529411764, 1.0),'Model': 'models/props/mound_light_lrg'}},'1203470019.06akelts': {'Type': 'Rock','DisableCollision': False,'Hpr': VBase3(53.373, 0.0, 0.0),'Pos': Point3(26.923, -41.057, 36.312),'Scale': VBase3(0.235, 0.249, 0.355),'Visual': {'Color': (0.75, 0.66, 0.5333333333333333, 1.0),'Model': 'models/props/mound_light_med2'}},'1203470074.14akelts': {'Type': 'Rock','DisableCollision': True,'Hpr': VBase3(-56.244, 8.355, 20.396),'Pos': Point3(52.903, -23.547, 52.525),'Scale': VBase3(10.848, 10.848, 10.848),'Visual': {'Color': (0.6078431372549019, 0.6, 0.5490196078431373, 1.0),'Model': 'models/props/rock_3_sphere'}},'1203470326.73akelts': {'Type': 'Rock','DisableCollision': True,'Hpr': VBase3(164.042, -9.694, 3.622),'Pos': Point3(295.467, -150.276, 45.373),'Scale': VBase3(1.906, 1.906, 1.906),'Visual': {'Color': (0.52, 0.42, 0.2784313725490196, 1.0),'Model': 'models/props/rock_group_2_floor'}},'1203470396.59akelts': {'Type': 'Rock','DisableCollision': False,'Hpr': VBase3(-72.089, 0.0, 0.0),'Pos': Point3(349.37, -17.025, 61.745),'Scale': VBase3(0.301, 0.179, 0.525),'Visual': {'Color': (0.52, 0.42, 0.2784313725490196, 1.0),'Model': 'models/props/mound_light_lrg'}},'1203470427.94akelts': {'Type': 'Tree','DisableCollision': False,'Hpr': VBase3(-180.0, 0.0, 0.0),'Pos': Point3(204.546, -27.822, 99.532),'Scale': VBase3(1.723, 1.723, 1.081),'Visual': {'Model': 'models/vegetation/gen_tree_canopy'}},'1203470693.56akelts': {'Type': 'Stairs','DisableCollision': False,'Hpr': VBase3(28.098, 9.351, -0.438),'Pos': Point3(-296.19, -43.519, 25.878),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.6000000238418579, 0.6000000238418579, 0.6000000238418579, 1.0),'Model': 
'models/buildings/stone_ramp_quadruple'}},'1203470872.34akelts': {'Type': 'Stairs','DisableCollision': False,'Hpr': VBase3(-58.325, 3.464, 0.0),'Pos': Point3(-243.107, 34.168, 83.781),'Scale': VBase3(1.304, 1.0, 0.678),'Visual': {'Color': (0.6000000238418579, 0.6000000238418579, 0.6000000238418579, 1.0),'Model': 'models/buildings/landing_double'}},'1203471038.22akelts': {'Type': 'Bush','DisableCollision': False,'Hpr': VBase3(-34.304, 10.291, 0.0),'Pos': Point3(91.995, -113.654, 45.611),'Scale': VBase3(2.211, 2.211, 2.211),'Visual': {'Model': 'models/vegetation/bush_d'}},'1203471069.7akelts': {'Type': 'Bush','DisableCollision': True,'Hpr': VBase3(-34.304, 6.015, 3.996),'Pos': Point3(367.737, -167.8, 45.167),'Scale': VBase3(2.211, 2.211, 2.211),'Visual': {'Model': 'models/vegetation/bush_e'}},'1203471110.84akelts': {'Type': 'Bush','DisableCollision': True,'Hpr': VBase3(-26.474, 6.501, 3.141),'Pos': Point3(348.825, -135.794, 49.256),'Scale': VBase3(2.211, 2.211, 2.211),'Visual': {'Color': (1.0, 0.800000011920929, 0.6000000238418579, 1.0),'Model': 'models/vegetation/bush_c'}},'1203965063.7akelts': {'Type': 'Tree','DisableCollision': False,'Hpr': VBase3(-180.0, 0.0, 0.0),'Pos': Point3(376.308, 4.658, 78.653),'Scale': VBase3(2.219, 2.219, 2.219),'Visual': {'Model': 'models/vegetation/gen_tree_canopy'}},'1203965125.84akelts': {'Type': 'Tree','DisableCollision': False,'Hpr': VBase3(-180.0, -6.883, 0.0),'Pos': Point3(227.093, -85.355, 63.706),'Scale': VBase3(2.219, 2.219, 2.219),'Visual': {'Model': 'models/vegetation/gen_tree_canopy'}},'1203965293.91akelts': {'Type': 'Rock','DisableCollision': False,'Hpr': VBase3(53.68, 5.784, -8.414),'Pos': Point3(137.398, 4.097, 70.319),'Scale': VBase3(5.018, 5.018, 3.271),'Visual': {'Color': (0.47, 0.47, 0.32941176470588235, 1.0),'Model': 'models/props/rock_group_3_floor'}},'1203965371.95akelts': {'Type': 'Bush','DisableCollision': True,'Hpr': VBase3(-102.006, -1.433, 7.074),'Pos': Point3(439.511, 48.689, 68.962),'Scale': VBase3(2.951, 
2.951, 2.951),'Visual': {'Color': (0.796078431372549, 0.73, 0.6196078431372549, 1.0),'Model': 'models/vegetation/bush_d'}},'1203965503.47akelts': {'Type': 'Bush','DisableCollision': False,'Hpr': VBase3(112.863, -2.888, -6.617),'Pos': Point3(186.726, 35.455, 75.416),'Scale': VBase3(2.951, 2.951, 2.951),'Visual': {'Color': (0.796078431372549, 0.73, 0.6196078431372549, 1.0),'Model': 'models/vegetation/bush_b'}},'1203965538.14akelts': {'Type': 'Bush','DisableCollision': False,'Hpr': VBase3(-58.95, 3.803, 6.138),'Pos': Point3(212.986, 45.881, 73.466),'Scale': VBase3(2.951, 2.951, 2.951),'Visual': {'Color': (0.796078431372549, 0.73, 0.6196078431372549, 1.0),'Model': 'models/vegetation/bush_c'}},'1203974170.3akelts': {'Type': 'Bush','DisableCollision': False,'Hpr': VBase3(-38.767, 0.55, 0.0),'Pos': Point3(288.408, 67.739, 73.23),'Scale': VBase3(1.311, 1.311, 1.311),'Visual': {'Model': 'models/vegetation/bush_d'}},'1203974235.06akelts': {'Type': 'Rock','DisableCollision': False,'Hpr': VBase3(173.884, -10.52, 0.656),'Pos': Point3(274.425, 60.988, 70.665),'Scale': VBase3(2.52, 2.52, 2.52),'Visual': {'Color': (0.82, 0.83, 0.7725490196078432, 1.0),'Model': 'models/props/rock_group_4_floor'}},'1204225487.34akelts': {'Type': 'Bridge','DisableCollision': False,'GridPos': Point3(318.747, 6.389, 62.397),'Hpr': VBase3(-149.315, 37.278, 1.813),'Objects': {},'Pos': Point3(-318.746, -6.389, 62.396),'Scale': VBase3(0.486, 0.486, 0.486),'Visual': {'Model': 'models/props/shanty_rope_bridge'}},'1204232954.13akelts': {'Type': 'Stairs','DisableCollision': False,'Hpr': VBase3(31.634, -14.333, -0.772),'Objects': {},'Pos': Point3(-339.25, 28.575, 39.421),'Scale': VBase3(1.354, 1.354, 1.354),'Visual': {'Color': (0.6, 0.6, 0.61, 1.0),'Model': 'models/buildings/landing_double'}},'1204233112.66akelts': {'Type': 'Bridge','DisableCollision': False,'GridPos': Point3(342.546, -24.105, 40.559),'Hpr': VBase3(-138.264, 11.108, 0.755),'Pos': Point3(-342.732, 24.406, 40.468),'Scale': VBase3(0.532, 0.532, 
0.532),'Visual': {'Model': 'models/props/shanty_rope_bridge_post'}},'1204233142.94akelts': {'Type': 'Bridge','DisableCollision': False,'GridPos': Point3(333.734, -29.168, 40.864),'Hpr': VBase3(-158.579, 9.463, -4.121),'Pos': Point3(-333.919, 29.469, 40.774),'Scale': VBase3(0.5, 0.5, 0.5),'Visual': {'Model': 'models/props/shanty_rope_bridge_post'}},'1204234158.86akelts': {'Type': 'Bridge','DisableCollision': False,'GridPos': Point3(327.46, -8.299, 49.469),'Hpr': VBase3(-148.836, 25.581, 1.565),'Objects': {},'Pos': Point3(-327.46, 8.299, 49.469),'Scale': VBase3(0.485, 0.485, 0.485),'Visual': {'Model': 'models/props/shanty_rope_bridge'}},'1204234736.47akelts': {'Type': 'Bush','DisableCollision': False,'GridPos': Point3(312.715, 48.787, 29.405),'Hpr': VBase3(-106.08, 0.0, 0.0),'Pos': Point3(-312.631, -49.676, 29.238),'Scale': VBase3(0.876, 0.876, 0.876),'Visual': {'Model': 'models/vegetation/bush_a'}},'1204234767.58akelts': {'Type': 'Bush','DisableCollision': True,'GridPos': Point3(312.715, 48.787, 29.405),'Hpr': VBase3(153.141, 0.0, 0.0),'Pos': Point3(-282.256, -31.459, 29.518),'Scale': VBase3(0.876, 0.876, 0.876),'Visual': {'Model': 'models/vegetation/bush_c'}},'1204234795.08akelts': {'Type': 'Bush','DisableCollision': True,'GridPos': Point3(312.715, 48.787, 29.405),'Hpr': VBase3(141.967, 0.615, 0.644),'Pos': Point3(-88.988, 132.385, 92.304),'Scale': VBase3(1.275, 1.275, 1.275),'Visual': {'Model': 'models/vegetation/bush_c'}},'1204234960.88akelts': {'Type': 'Prop_Groups','DisableCollision': True,'GridPos': Point3(320.732, 13.576, 77.981),'Hpr': VBase3(125.951, 0.0, 0.0),'Pos': Point3(-320.733, -13.576, 77.981),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/prop_group_A'}},'1204235002.48akelts': {'Type': 'Prop_Groups','DisableCollision': True,'GridPos': Point3(305.222, 39.157, 77.172),'Hpr': VBase3(161.297, -1.986, -0.295),'Pos': Point3(-305.223, -39.156, 77.172),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 
'models/props/prop_group_D'}},'1204235062.94akelts': {'Type': 'Rock','DisableCollision': False,'GridPos': Point3(302.366, 0.195, 33.97),'Hpr': VBase3(141.678, 0.884, -6.315),'Pos': Point3(-302.397, -0.177, 33.656),'Scale': VBase3(3.025, 3.025, 2.317),'Visual': {'Color': (0.7900000214576721, 0.7799999713897705, 0.699999988079071, 1.0),'Model': 'models/props/rock_group_5_floor'}},'1204235175.16akelts': {'Type': 'Rock','DisableCollision': False,'Hpr': VBase3(-33.945, 0.0, 0.0),'Pos': Point3(-253.789, 70.535, 30.087),'Scale': VBase3(0.554, 0.554, 0.783),'Visual': {'Color': (0.8705882352941177, 0.8392156862745098, 0.803921568627451, 1.0),'Model': 'models/props/mound_light_med2'}},'1208537531.58akelts': {'Type': 'Wall_Hangings','DisableCollision': False,'GridPos': Point3(-297.019, -39.431, 58.982),'Hpr': VBase3(31.723, 0.0, 0.0),'Pos': Point3(-297.02, -39.431, 58.982),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/flag_hanging_french'}},'1208537612.06akelts': {'Type': 'Townsperson','Category': 'Commoner','AnimSet': 'default','AuraFX': 'None','Boss': False,'CustomModel': 'None','GhostColor': 'None','GhostFX': 0,'Greeting Animation': '','Hpr': VBase3(-130.463, 0.0, 0.0),'Instanced World': 'None','Level': '37','Notice Animation 1': '','Notice Animation 2': '','Patrol Radius': '5.0060','Pos': Point3(-147.597, 273.375, 34.613),'PoseAnim': '','PoseFrame': '','Private Status': 'All','PropFXLeft': 'None','PropFXRight': 'None','PropLeft': 'None','PropRight': 'None','Respawns': True,'Scale': VBase3(1.0, 1.0, 1.0),'ShopID': 'PORT_ROYAL_DEFAULTS','Start State': 'Walk','StartFrame': '0','Team': 'Player','TrailFX': 'None','TrailLeft': 'None','TrailRight': 'None','Zombie': False,'spawnTimeAlt': '','spawnTimeBegin': 0.0,'spawnTimeEnd': 0.0},'1208538744.23akelts': {'Type': 'Door Locator Node','Name': 'door_locator','Hpr': VBase3(-179.829, 0.0, 0.0),'Pos': Point3(-0.498, -4.479, 0.952),'Scale': VBase3(1.0, 1.0, 1.0),'TargetUIDs': []},'1208538745.98akelts': {'Type': 'Door 
Locator Node','Name': 'door_locator_2','Hpr': VBase3(0.0, 0.0, 0.0),'Pos': Point3(-6.626, 20.841, 1.006),'Scale': VBase3(1.0, 1.0, 1.0),'TargetUIDs': []},'1208541269.28akelts': {'Type': 'Jungle_Props','DisableCollision': False,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-253.393, 138.905, 39.936),'Scale': VBase3(1.654, 1.654, 1.654),'Visual': {'Model': 'models/vegetation/jungle_fern_a'}},'1208541296.17akelts': {'Type': 'Bush','DisableCollision': False,'Hpr': VBase3(170.684, 5.783, -0.385),'Pos': Point3(-230.471, 154.256, 39.0),'Scale': VBase3(1.739, 1.739, 1.739),'Visual': {'Model': 'models/vegetation/bush_d'}},'1208541345.56akelts': {'Type': 'Bush','DisableCollision': True,'Hpr': VBase3(-27.453, -1.015, -1.344),'Pos': Point3(-171.923, 285.857, 35.253),'Scale': VBase3(1.35, 1.35, 1.35),'Visual': {'Model': 'models/vegetation/bush_d'}},'1208541410.31akelts': {'Type': 'Bush','DisableCollision': True,'Hpr': VBase3(-144.958, 1.297, 0.0),'Pos': Point3(-129.445, 287.917, 34.898),'Scale': VBase3(1.193, 1.193, 1.193),'Visual': {'Color': (0.699999988079071, 0.699999988079071, 0.699999988079071, 1.0),'Model': 'models/vegetation/bush_c'}},'1208541454.5akelts': {'Type': 'Bush','DisableCollision': False,'Hpr': VBase3(12.235, 0.0, 0.0),'Pos': Point3(162.611, -261.905, 30.284),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/vegetation/bush_a'}},'1208541471.16akelts': {'Type': 'Bush','DisableCollision': False,'Hpr': VBase3(-169.75, 0.0, 0.0),'Pos': Point3(178.145, -253.051, 32.481),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/vegetation/bush_a'}},'1208541481.64akelts': {'Type': 'Bush','DisableCollision': False,'Hpr': VBase3(-103.077, 0.0, 0.0),'Pos': Point3(174.017, -234.279, 32.73),'Scale': VBase3(1.347, 1.347, 1.347),'Visual': {'Model': 'models/vegetation/bush_c'}},'1208541557.61akelts': {'Type': 'Tree','DisableCollision': True,'Hpr': VBase3(162.565, 0.0, 0.0),'Pos': Point3(402.522, -166.697, 45.961),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 
'models/vegetation/palm_tree_f'}},'1208541582.89akelts': {'Type': 'Bush','DisableCollision': True,'Hpr': VBase3(-42.642, 4.099, 7.218),'Pos': Point3(393.903, -166.151, 46.033),'Scale': VBase3(1.841, 1.841, 1.841),'Visual': {'Color': (1.0, 0.800000011920929, 0.6000000238418579, 1.0),'Model': 'models/vegetation/bush_c'}},'1208541687.83akelts': {'Type': 'Collision Barrier','DisableCollision': False,'GridPos': Point3(341.154, -150.57, 43.714),'Hpr': VBase3(-31.803, 0.0, 0.0),'Pos': Point3(277.438, -148.042, 43.69),'Scale': VBase3(6.344, 3.086, 5.102),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1208541829.66akelts': {'Type': 'Tree','DisableCollision': False,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(276.012, -223.365, 44.451),'Scale': VBase3(1.084, 1.084, 1.084),'Visual': {'Model': 'models/vegetation/gen_tree_b'}},'1208541881.3akelts': {'Type': 'Tree','DisableCollision': True,'Hpr': VBase3(34.062, 0.0, 0.0),'Pos': Point3(420.593, 82.554, 80.989),'Scale': VBase3(1.084, 1.084, 1.084),'Visual': {'Model': 'models/vegetation/gen_tree_g'}},'1208541909.53akelts': {'Type': 'Tree','DisableCollision': True,'Hpr': VBase3(-15.49, 0.0, 0.0),'Pos': Point3(-98.813, 134.803, 91.682),'Scale': VBase3(1.084, 1.084, 1.084),'Visual': {'Model': 'models/vegetation/gen_tree_g'}},'1208541918.48akelts': {'Type': 'Tree','DisableCollision': True,'Hpr': VBase3(34.062, 0.0, 0.0),'Pos': Point3(-120.13, 284.624, 33.68),'Scale': VBase3(1.084, 1.084, 1.084),'Visual': {'Model': 'models/vegetation/gen_tree_h'}},'1208541988.52akelts': {'Type': 'Tree','DisableCollision': True,'Hpr': VBase3(160.328, 0.0, 0.0),'Pos': Point3(-14.847, 113.295, 91.185),'Scale': VBase3(0.76, 0.76, 0.76),'Visual': {'Model': 'models/vegetation/gen_tree_g'}},'1208542009.2akelts': {'Type': 'Tree','DisableCollision': False,'Hpr': VBase3(172.646, 0.0, 0.0),'Pos': Point3(159.606, 27.547, 73.764),'Scale': VBase3(0.762, 0.762, 0.762),'Visual': {'Model': 'models/vegetation/gen_tree_g'}},'1208542108.5akelts': 
{'Type': 'Tree','DisableCollision': False,'Hpr': VBase3(14.798, 0.0, 0.0),'Pos': Point3(-211.076, 66.396, 93.41),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/vegetation/fern_tree_a'}},'1208542139.22akelts': {'Type': 'Tree','DisableCollision': True,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(304.358, -140.723, 47.592),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/vegetation/fern_tree_e'}},'1208542139.63akelts': {'Type': 'Tree','DisableCollision': False,'Hpr': VBase3(0.0, 6.69, 0.0),'Pos': Point3(141.03, -32.572, 66.146),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/vegetation/fern_tree_d'}},'1208542140.41akelts': {'Type': 'Tree','DisableCollision': False,'Hpr': VBase3(38.628, -0.492, 19.002),'Pos': Point3(-49.528, 132.687, 87.123),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/vegetation/fern_tree_b'}},'1208542167.61akelts': {'Type': 'Tree','DisableCollision': True,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(102.836, -106.751, 45.42),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/vegetation/fern_tree_e'}},'1208542176.02akelts': {'Type': 'Tree','DisableCollision': True,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(25.035, 152.225, 23.714),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/vegetation/fern_tree_e'}},'1208542185.47akelts': {'Type': 'Tree','DisableCollision': True,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(363.763, 79.385, 75.059),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/vegetation/fern_tree_a'}},'1208542191.47akelts': {'Type': 'Tree','DisableCollision': True,'Hpr': VBase3(-94.481, 0.0, 0.0),'Pos': Point3(65.684, -253.375, 27.867),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/vegetation/fern_tree_d'}},'1208542196.03akelts': {'Type': 'Tree','DisableCollision': False,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-375.272, -105.376, 25.046),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/vegetation/fern_tree_b'}},'1208542200.48akelts': {'Type': 
'Tree','DisableCollision': True,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-236.121, 165.669, 37.351),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/vegetation/fern_tree_b'}},'1208542220.23akelts': {'Type': 'Tree','DisableCollision': False,'Hpr': VBase3(22.533, 0.0, 0.0),'Pos': Point3(164.697, -236.291, 31.542),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/vegetation/fern_tree_c'}},'1208542597.31akelts': {'Type': 'Bush','DisableCollision': False,'Hpr': VBase3(0.58, 0.0, 0.0),'Pos': Point3(264.4, -231.069, 38.118),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/vegetation/bush_a'}},'1208542717.61akelts': {'Type': 'Tree','DisableCollision': True,'Hpr': VBase3(0.0, 27.183, 0.0),'Pos': Point3(215.957, -120.61, 63.366),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/vegetation/fern_tree_d'}},'1208542760.47akelts': {'Type': 'Bush','DisableCollision': False,'Hpr': VBase3(43.11, 0.315, -11.744),'Pos': Point3(169.593, -97.198, 66.617),'Scale': VBase3(2.211, 2.211, 2.211),'Visual': {'Model': 'models/vegetation/bush_leaves'}},'1208542802.39akelts': {'Type': 'Bush','DisableCollision': False,'Hpr': VBase3(-87.974, 8.704, -16.791),'Pos': Point3(277.874, 11.461, 86.713),'Scale': VBase3(2.211, 2.211, 2.211),'Visual': {'Model': 'models/vegetation/bush_leaves'}},'1208542846.92akelts': {'Type': 'Bush','DisableCollision': True,'Hpr': VBase3(-142.733, -2.719, 12.449),'Pos': Point3(383.624, 97.892, 71.622),'Scale': VBase3(2.211, 2.211, 2.211),'Visual': {'Model': 'models/vegetation/bush_leaves'}},'1208542897.36akelts': {'Type': 'Tree','DisableCollision': False,'Hpr': VBase3(160.328, 0.0, 0.0),'Pos': Point3(68.44, 20.9, 85.966),'Scale': VBase3(1.119, 1.119, 1.119),'Visual': {'Model': 'models/vegetation/gen_tree_b'}},'1208542933.77akelts': {'Type': 'Tree','DisableCollision': False,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(120.571, 47.459, 116.473),'Scale': VBase3(1.884, 1.884, 1.884),'Visual': {'Model': 
'models/vegetation/gen_tree_canopy'}},'1208542952.97akelts': {'Type': 'Tree','DisableCollision': False,'Hpr': VBase3(160.328, 0.0, 0.0),'Pos': Point3(120.118, 55.714, 77.661),'Scale': VBase3(3.579, 3.579, 3.579),'Visual': {'Model': 'models/vegetation/gen_tree_trunk_only'}},'1208542990.66akelts': {'Type': 'Tree','DisableCollision': False,'Hpr': VBase3(-125.874, 0.102, 0.074),'Pos': Point3(116.478, 49.446, 86.707),'Scale': VBase3(1.884, 1.884, 1.884),'Visual': {'Model': 'models/vegetation/gen_tree_canopy'}},'1208543133.05akelts': {'Type': 'Tree','DisableCollision': True,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-29.134, 53.282, 90.349),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/vegetation/fern_tree_a'}},'1208543149.84akelts': {'Type': 'Tree','DisableCollision': True,'Hpr': VBase3(176.793, -6.68, -0.373),'Pos': Point3(-49.597, 74.123, 94.819),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/vegetation/fern_tree_b'}},'1208559537.25akelts': {'Type': 'Door Locator Node','Name': 'door_locator','Hpr': VBase3(-179.829, 0.0, 0.0),'Pos': Point3(-0.498, -4.479, 0.952),'Scale': VBase3(1.0, 1.0, 1.0)},'1208559538.48akelts': {'Type': 'Door Locator Node','Name': 'door_locator','Hpr': VBase3(78.823, -0.103, -2.224),'Pos': Point3(62.86, -228.023, 29.39),'Scale': VBase3(1.0, 1.0, 1.0)},'1208559538.5akelts': {'Type': 'Door Locator Node','Name': 'door_locator_2','Hpr': VBase3(-101.348, 0.096, 2.224),'Pos': Point3(88.889, -227.004, 29.724),'Scale': VBase3(1.0, 1.0, 1.0)},'1208559585.09akelts': {'Type': 'Tree','DisableCollision': True,'Hpr': VBase3(-113.276, 0.0, 0.0),'Pos': Point3(-248.739, 100.517, 95.87),'Scale': VBase3(1.121, 1.121, 1.286),'Visual': {'Model': 'models/vegetation/gen_tree_b'}},'1208559993.3akelts': {'Type': 'Bush','DisableCollision': True,'Hpr': VBase3(120.111, -1.764, -8.107),'Pos': Point3(327.437, -166.838, 46.195),'Scale': VBase3(1.841, 1.841, 1.841),'Visual': {'Color': (1.0, 0.800000011920929, 0.6000000238418579, 1.0),'Model': 
'models/vegetation/bush_a'}},'1208560253.42akelts': {'Type': 'Rock','DisableCollision': True,'Hpr': VBase3(-164.706, 3.362, -1.673),'Pos': Point3(380.254, 59.822, 71.641),'Scale': VBase3(5.775, 5.775, 5.775),'Visual': {'Color': (0.5176470588235295, 0.4196078431372549, 0.2784313725490196, 1.0),'Model': 'models/props/rock_group_3_floor'}},'1208560331.47akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(-156.893, 0.0, 0.0),'Pos': Point3(198.113, 52.848, 72.034),'Scale': VBase3(5.0, 3.086, 5.774),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1208560363.91akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(-90.357, 0.0, 0.0),'Pos': Point3(175.665, 30.305, 71.521),'Scale': VBase3(2.746, 3.086, 5.774),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1208560474.95akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(-107.787, 0.0, 0.0),'Pos': Point3(433.738, 122.705, 1.151),'Scale': VBase3(8.353, 8.353, 8.353),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_sphere'}},'1208560742.78akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(174.874, 14.218, -1.992),'Pos': Point3(344.089, 159.804, 2.975),'Scale': VBase3(5.241, 5.241, 5.241),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_sphere'}},'1208560837.39akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(-167.512, 0.0, 0.0),'Pos': Point3(22.622, 156.549, 19.181),'Scale': VBase3(7.391, 3.086, 7.845),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1208560860.78akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(103.776, 0.0, 0.0),'Pos': Point3(61.315, 152.539, 19.074),'Scale': VBase3(2.532, 3.086, 7.845),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1208560878.19akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(55.886, 0.0, 0.0),'Pos': 
Point3(52.987, 123.596, 19.061),'Scale': VBase3(4.16, 3.086, 7.845),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1208560915.7akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(-161.756, 0.0, 0.0),'Pos': Point3(-45.681, 144.007, 35.2),'Scale': VBase3(0.985, 1.201, 2.083),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1208561074.45akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(24.612, 0.0, 0.0),'Pos': Point3(-125.297, 281.136, 33.058),'Scale': VBase3(1.837, 1.61, 1.843),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1208561128.33akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(96.433, 0.0, 0.0),'Pos': Point3(-117.519, 288.157, 32.883),'Scale': VBase3(0.668, 1.61, 1.843),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1208561221.5akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(85.28, 0.0, 0.0),'Pos': Point3(-164.813, 290.752, 31.588),'Scale': VBase3(2.388, 1.969, 2.827),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1208561255.91akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(22.09, 0.0, 0.0),'Pos': Point3(-175.054, 267.633, 34.333),'Scale': VBase3(1.995, 1.969, 2.254),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1208564843.16akelts': {'Type': 'Bush','DisableCollision': True,'Hpr': VBase3(130.516, 0.0, 0.0),'Pos': Point3(-183.998, 282.878, 33.629),'Scale': VBase3(1.447, 1.447, 1.447),'Visual': {'Color': (0.7, 0.7, 0.7, 1.0),'Model': 'models/vegetation/bush_d'}},'1208565012.22akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(121.752, 0.0, 0.0),'Pos': Point3(-220.188, 166.844, 35.915),'Scale': VBase3(1.465, 1.786, 3.098),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1208565157.44akelts': {'Type': 'Rock','DisableCollision': False,'Holiday': '','Hpr': 
VBase3(-153.934, 0.0, 0.0),'Pos': Point3(14.065, 79.558, 71.545),'Scale': VBase3(0.653, 0.28, 0.664),'VisSize': '','Visual': {'Color': (0.8, 0.74, 0.6392156862745098, 1.0),'Model': 'models/props/mound_light_med'}},'1208565470.34akelts': {'Type': 'Bush','DisableCollision': True,'Hpr': VBase3(-9.373, 7.134, 1.096),'Pos': Point3(474.094, -99.455, 57.624),'Scale': VBase3(2.211, 2.211, 2.211),'Visual': {'Color': (1.0, 0.800000011920929, 0.6000000238418579, 1.0),'Model': 'models/vegetation/bush_c'}},'1208797450.2akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(27.644, 0.0, 0.0),'Pos': Point3(-282.393, -32.12, 28.501),'Scale': VBase3(2.357, 1.55, 2.432),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_cube'}},'1208797943.19akelts': {'Type': 'Collision Barrier','DisableCollision': False,'GridPos': Point3(-296.491, -34.081, 77.164),'Hpr': VBase3(167.007, 0.0, 0.0),'Pos': Point3(-296.491, -34.081, 77.163),'Scale': VBase3(1.118, 1.0, 1.0),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1208797989.22akelts': {'Type': 'Collision Barrier','DisableCollision': False,'GridPos': Point3(-325.023, -19.65, 77.998),'Hpr': VBase3(120.337, 1.307, 1.524),'Pos': Point3(-325.023, -19.65, 77.997),'Scale': VBase3(1.848, 1.641, 1.363),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_cube'}},'1208798071.03akelts': {'Type': 'Rock','DisableCollision': False,'Hpr': VBase3(75.684, 0.0, 0.0),'Pos': Point3(-154.673, 14.771, 84.233),'Scale': VBase3(0.355, 0.526, 0.365),'Visual': {'Color': (0.87, 0.82, 0.7490196078431373, 1.0),'Model': 'models/props/mound_light_med'}},'1208798220.89akelts': {'Type': 'Tree','DisableCollision': True,'Hpr': VBase3(174.558, 1.136, 3.886),'Pos': Point3(-64.496, -11.881, 91.711),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/vegetation/fern_tree_c'}},'1208798241.98akelts': {'Type': 'Bush','DisableCollision': True,'Hpr': VBase3(-78.467, 0.0, 0.0),'Pos': Point3(-63.901, -9.897, 91.77),'Scale': 
VBase3(1.447, 1.447, 1.447),'Visual': {'Model': 'models/vegetation/bush_c'}},'1208798284.11akelts': {'Type': 'Bush','DisableCollision': False,'Hpr': VBase3(150.621, -5.949, 0.538),'Pos': Point3(-191.677, 13.736, 89.635),'Scale': VBase3(1.447, 1.447, 1.447),'Visual': {'Model': 'models/vegetation/bush_a'}},'1208798332.25akelts': {'Type': 'Bush','DisableCollision': True,'Hpr': VBase3(57.056, 4.476, -6.405),'Pos': Point3(-116.485, -1.736, 93.903),'Scale': VBase3(1.447, 1.447, 1.447),'Visual': {'Model': 'models/vegetation/bush_d'}},'1208798382.77akelts': {'Type': 'Bush','DisableCollision': True,'Hpr': VBase3(175.642, -7.763, -0.844),'Pos': Point3(-82.154, -8.242, 89.863),'Scale': VBase3(1.447, 1.447, 1.447),'Visual': {'Model': 'models/vegetation/bush_a'}},'1208798460.47akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(157.552, 0.0, 0.0),'Pos': Point3(-108.076, 0.354, 93.422),'Scale': VBase3(2.505, 1.371, 1.371),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1208798474.59akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(-157.716, 0.0, 0.0),'Pos': Point3(-89.038, -1.169, 92.54),'Scale': VBase3(1.742, 1.371, 1.535),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1208798499.8akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(160.751, 0.0, 0.0),'Pos': Point3(-67.621, -2.741, 91.317),'Scale': VBase3(2.965, 1.371, 1.772),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1208798528.09akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(126.421, 0.0, 0.0),'Pos': Point3(-49.624, -13.391, 88.699),'Scale': VBase3(1.554, 1.371, 2.29),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1208799106.09akelts': {'Type': 'Tree','DisableCollision': True,'Hpr': VBase3(-26.609, 0.0, 0.0),'Pos': Point3(59.795, 90.957, 84.247),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 
'models/vegetation/palm_tree_a'}},'1208799145.66akelts': {'Type': 'Rock','DisableCollision': False,'Hpr': VBase3(-136.511, -6.952, 2.694),'Pos': Point3(310.695, 64.694, 71.465),'Scale': VBase3(3.287, 3.287, 2.492),'Visual': {'Color': (0.66, 0.6, 0.49, 1.0),'Model': 'models/props/rock_group_5_floor'}},'1208799165.69akelts': {'Type': 'Tree','DisableCollision': False,'Hpr': VBase3(117.765, 0.0, 0.0),'Pos': Point3(307.375, 80.718, 74.547),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/vegetation/palm_tree_d'}},'1208799215.61akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-227.742, 150.619, 88.596),'Scale': VBase3(4.263, 4.263, 4.263),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_sphere'}},'1208801100.09akelts': {'Type': 'Rock','DisableCollision': True,'Hpr': VBase3(171.521, 3.751, -0.177),'Pos': Point3(378.439, 78.834, 71.677),'Scale': VBase3(4.897, 4.897, 6.883),'Visual': {'Color': (0.5176470588235295, 0.4196078431372549, 0.2784313725490196, 1.0),'Model': 'models/props/rock_3_sphere'}},'1208801182.16akelts': {'Type': 'Tree','DisableCollision': True,'Hpr': VBase3(16.115, 0.0, 0.0),'Pos': Point3(361.569, 114.132, 78.661),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/vegetation/palm_tree_a'}},'1208801336.25akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(133.95, 0.0, 0.0),'Pos': Point3(237.343, 45.258, 69.555),'Scale': VBase3(5.0, 3.086, 6.23),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1208801439.14akelts': {'Type': 'Rock','DisableCollision': False,'Hpr': VBase3(-107.091, 2.617, 4.753),'Pos': Point3(359.724, 72.268, 74.247),'Scale': VBase3(3.287, 3.287, 2.066),'Visual': {'Color': (0.71, 0.6, 0.5, 1.0),'Model': 'models/props/rock_group_3_floor'}},'1208801653.48akelts': {'Type': 'Bush','DisableCollision': True,'Hpr': VBase3(151.405, -0.541, 0.097),'Pos': Point3(369.414, 109.368, 77.809),'Scale': VBase3(1.311, 1.311, 
1.311),'Visual': {'Model': 'models/vegetation/bush_f'}},'1208801677.42akelts': {'Type': 'Bush','DisableCollision': True,'Hpr': VBase3(152.069, -0.54, 0.103),'Pos': Point3(367.675, 79.544, 75.341),'Scale': VBase3(1.311, 1.311, 1.311),'Visual': {'Color': (0.75, 0.64, 0.44, 1.0),'Model': 'models/vegetation/bush_c'}},'1208801740.33akelts': {'Type': 'Bush','DisableCollision': False,'Hpr': VBase3(-49.785, 5.344, 0.105),'Pos': Point3(357.254, 53.202, 73.645),'Scale': VBase3(1.311, 1.311, 1.311),'Visual': {'Color': (0.97, 0.85, 0.69, 1.0),'Model': 'models/vegetation/bush_d'}},'1208801858.2akelts': {'Type': 'Tree','DisableCollision': True,'Hpr': VBase3(34.062, 0.0, 0.0),'Pos': Point3(501.496, 5.165, 64.779),'Scale': VBase3(1.234, 1.234, 1.234),'Visual': {'Model': 'models/vegetation/gen_tree_b'}},'1208801896.34akelts': {'Type': 'Tree','DisableCollision': False,'Hpr': VBase3(-180.0, 0.0, 0.0),'Pos': Point3(433.104, 31.446, 94.549),'Scale': VBase3(2.07, 2.07, 1.88),'Visual': {'Model': 'models/vegetation/gen_tree_canopy'}},'1208801950.42akelts': {'Type': 'Tree','DisableCollision': True,'Hpr': VBase3(62.348, -34.636, 34.91),'Pos': Point3(359.473, 35.098, 90.388),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/vegetation/fern_tree_d'}},'1208802034.64akelts': {'Type': 'Rock','DisableCollision': True,'Hpr': VBase3(-138.15, -3.824, 4.054),'Pos': Point3(370.553, -178.844, 39.498),'Scale': VBase3(7.621, 7.621, 7.621),'Visual': {'Color': (0.5176470588235295, 0.4196078431372549, 0.2784313725490196, 1.0),'Model': 'models/props/rock_group_3_sphere'}},'1208802151.95akelts': {'Type': 'Bush','DisableCollision': True,'Hpr': VBase3(-40.357, 4.385, 7.049),'Pos': Point3(314.048, -154.924, 46.06),'Scale': VBase3(1.841, 1.841, 1.841),'Visual': {'Color': (0.6000000238418579, 0.6000000238418579, 0.6000000238418579, 1.0),'Model': 'models/vegetation/bush_g'}},'1208802333.55akelts': {'Type': 'Rock','DisableCollision': False,'Hpr': VBase3(91.801, -0.692, -10.175),'Pos': Point3(180.208, 
-135.561, 46.777),'Scale': VBase3(1.906, 1.906, 1.906),'Visual': {'Color': (0.66, 0.5, 0.39215686274509803, 1.0),'Model': 'models/props/rock_group_2_floor'}},'1208802346.91akelts': {'Type': 'Rock','DisableCollision': False,'Hpr': VBase3(-144.363, -8.102, 6.214),'Pos': Point3(197.088, -148.654, 44.757),'Scale': VBase3(1.906, 1.906, 1.906),'Visual': {'Color': (0.69, 0.59, 0.4392156862745098, 1.0),'Model': 'models/props/rock_group_1_floor'}},'1208802361.61akelts': {'Type': 'Rock','DisableCollision': False,'Hpr': VBase3(-25.617, 9.364, 4.059),'Pos': Point3(186.306, -152.635, 44.198),'Scale': VBase3(1.906, 1.906, 1.906),'Visual': {'Color': (0.42, 0.35, 0.22745098039215686, 1.0),'Model': 'models/props/rock_group_3_floor'}},'1208802379.75akelts': {'Type': 'Rock','DisableCollision': False,'Hpr': VBase3(76.886, -5.889, -5.434),'Pos': Point3(211.304, -145.114, 45.802),'Scale': VBase3(1.906, 1.906, 1.906),'Visual': {'Color': (0.52, 0.42, 0.2784313725490196, 1.0),'Model': 'models/props/rock_group_4_floor'}},'1208802426.88akelts': {'Type': 'Rock','DisableCollision': False,'Hpr': VBase3(-135.985, -3.668, 4.196),'Pos': Point3(214.951, -135.403, 44.474),'Scale': VBase3(7.621, 7.621, 5.021),'Visual': {'Color': (0.5176470588235295, 0.4196078431372549, 0.2784313725490196, 1.0),'Model': 'models/props/rock_group_3_sphere'}},'1208802585.83akelts': {'Type': 'Rock','DisableCollision': False,'Hpr': VBase3(-95.024, -0.534, 10.184),'Pos': Point3(162.168, -119.61, 41.696),'Scale': VBase3(11.284, 11.284, 11.284),'Visual': {'Color': (0.52, 0.42, 0.2784313725490196, 1.0),'Model': 'models/props/rock_3_sphere'}},'1208802593.55akelts': {'Type': 'Rock','DisableCollision': False,'Hpr': VBase3(-146.036, -12.865, 11.646),'Pos': Point3(144.224, -69.368, 57.104),'Scale': VBase3(1.906, 1.906, 1.906),'Visual': {'Color': (0.53, 0.51, 0.27450980392156865, 1.0),'Model': 'models/props/rock_group_4_floor'}},'1208802604.41akelts': {'Type': 'Rock','DisableCollision': False,'Hpr': VBase3(-70.671, 7.177, 
9.507),'Pos': Point3(151.738, -101.344, 50.622),'Scale': VBase3(1.906, 1.906, 1.906),'Visual': {'Color': (0.52, 0.42, 0.2784313725490196, 1.0),'Model': 'models/props/rock_group_5_floor'}},'1208802673.66akelts': {'Type': 'Rock','DisableCollision': False,'Hpr': VBase3(137.074, -10.759, -5.095),'Pos': Point3(149.85, -114.076, 49.01),'Scale': VBase3(1.906, 1.906, 1.906),'Visual': {'Color': (0.49411764705882355, 0.47, 0.3058823529411765, 1.0),'Model': 'models/props/rock_group_1_floor'}},'1208802699.16akelts': {'Type': 'Tree','DisableCollision': False,'Hpr': VBase3(4.924, 2.967, -0.256),'Pos': Point3(137.172, -103.465, 48.326),'Scale': VBase3(1.096, 1.096, 1.096),'Visual': {'Model': 'models/vegetation/fern_tree_e'}},'1208802757.09akelts': {'Type': 'Tree','DisableCollision': False,'Hpr': VBase3(78.859, 0.576, -2.922),'Pos': Point3(187.223, -149.882, 45.214),'Scale': VBase3(1.096, 1.096, 1.096),'Visual': {'Model': 'models/vegetation/fern_tree_c'}},'1208803223.69akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(79.447, -246.301, 27.867),'Scale': VBase3(3.624, 3.624, 3.624),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_sphere'}},'1208803320.33akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(169.007, -4.664, -13.481),'Pos': Point3(-4.635, -24.264, 77.08),'Scale': VBase3(6.169, 1.792, 1.792),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1208803487.38akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-46.499, 64.943, 92.283),'Scale': VBase3(5.293, 5.293, 5.293),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_sphere'}},'1208803525.8akelts': {'Type': 'Bush','DisableCollision': True,'Hpr': VBase3(-9.767, 0.0, 0.0),'Pos': Point3(57.987, 90.863, 84.198),'Scale': VBase3(0.808, 0.808, 0.808),'Visual': {'Model': 'models/vegetation/bush_a'}},'1208803619.64akelts': {'Type': 'Collision 
Barrier','DisableCollision': False,'Hpr': VBase3(-16.004, 0.0, 0.0),'Pos': Point3(-89.958, 134.617, 90.878),'Scale': VBase3(5.361, 2.476, 2.476),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_cube'}},'1208803829.39akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(-63.659, 0.0, 0.0),'Pos': Point3(-61.064, -82.55, 25.489),'Scale': VBase3(2.522, 1.65, 1.65),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1208803903.89akelts': {'Type': 'Rock','DisableCollision': True,'Hpr': VBase3(91.76, 0.0, 0.0),'Pos': Point3(-59.071, -74.372, 25.028),'Scale': VBase3(4.331, 4.331, 11.099),'Visual': {'Color': (0.78, 0.71, 0.615686274509804, 1.0),'Model': 'models/props/rock_4_floor'}},'1208803961.48akelts': {'Type': 'Rock','DisableCollision': True,'Hpr': VBase3(91.76, 0.0, 0.0),'Pos': Point3(-32.696, -54.806, 27.652),'Scale': VBase3(3.112, 3.112, 7.975),'Visual': {'Color': (0.6862745098039216, 0.6, 0.47058823529411764, 1.0),'Model': 'models/props/rock_1_floor'}},'1208803995.58akelts': {'Type': 'Rock','DisableCollision': True,'Hpr': VBase3(36.877, 2.872, 6.209),'Pos': Point3(-81.385, -32.517, 16.054),'Scale': VBase3(21.129, 21.129, 46.799),'Visual': {'Color': (0.62, 0.6, 0.5254901960784314, 1.0),'Model': 'models/props/rock_4_sphere'}},'1208804069.98akelts': {'Type': 'Tree','DisableCollision': True,'Hpr': VBase3(-41.141, 5.048, 4.396),'Pos': Point3(-53.159, -56.325, 26.978),'Scale': VBase3(0.738, 0.738, 0.971),'Visual': {'Model': 'models/vegetation/fern_tree_d'}},'1208823264.25akelts': {'Type': 'Door Locator Node','Name': 'door_locator','Hpr': VBase3(-179.829, 0.0, 0.0),'Pos': Point3(-0.498, -4.479, 0.952),'Scale': VBase3(1.0, 1.0, 1.0)},'1208823266.13akelts': {'Type': 'Door Locator Node','Name': 'door_locator','Hpr': VBase3(78.823, -0.103, -2.224),'Pos': Point3(62.86, -228.023, 29.39),'Scale': VBase3(1.0, 1.0, 1.0)},'1208823266.14akelts': {'Type': 'Door Locator Node','Name': 'door_locator_2','Hpr': VBase3(-101.348, 0.096, 
2.224),'Pos': Point3(88.889, -227.004, 29.724),'Scale': VBase3(1.0, 1.0, 1.0)},'1208824084.88akelts': {'Type': 'Bush','DisableCollision': False,'Hpr': VBase3(-59.818, 9.306, 4.413),'Pos': Point3(95.113, -129.033, 41.882),'Scale': VBase3(1.712, 1.712, 1.712),'Visual': {'Model': 'models/vegetation/bush_a'}},'1208824151.67akelts': {'Type': 'Bush','DisableCollision': True,'Hpr': VBase3(13.951, 0.21, -1.644),'Pos': Point3(-67.842, 68.807, 93.932),'Scale': VBase3(1.059, 1.059, 1.059),'Visual': {'Model': 'models/vegetation/bush_a'}},'1208824198.13akelts': {'Type': 'Bush','DisableCollision': True,'Hpr': VBase3(-34.285, 5.973, -0.028),'Pos': Point3(-243.637, 49.144, 88.132),'Scale': VBase3(1.447, 1.447, 1.447),'Visual': {'Model': 'models/vegetation/bush_b'}},'1208824267.17akelts': {'Type': 'Bush','DisableCollision': True,'Hpr': VBase3(-138.578, 2.64, 2.506),'Pos': Point3(-238.805, 90.848, 92.81),'Scale': VBase3(1.447, 1.447, 1.447),'Visual': {'Model': 'models/vegetation/bush_c'}},'1208824362.53akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(100.758, 0.0, 0.0),'Pos': Point3(-234.305, 92.703, 92.374),'Scale': VBase3(1.411, 1.443, 1.443),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1208824405.69akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(143.527, 0.0, 0.0),'Pos': Point3(-249.961, 96.329, 85.014),'Scale': VBase3(3.898, 4.44, 2.908),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_cube'}},'1208824613.52akelts': {'Type': 'Rock','DisableCollision': True,'Hpr': VBase3(113.485, 84.382, 104.933),'Pos': Point3(-234.198, 142.148, 92.334),'Scale': VBase3(3.63, 3.63, 4.852),'Visual': {'Color': (0.4, 0.45, 0.4, 1.0),'Model': 'models/props/rock_group_5_floor'}},'1208824790.17akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(0.0, 0.0, 0.0),'Pos': Point3(-176.754, -46.62, 23.919),'Scale': VBase3(2.628, 2.628, 2.628),'Visual': {'Model': 
'models/misc/pir_m_prp_lev_cambarrier_sphere'}},'1208824810.09akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(0.0, 0.0, 0.0),'Pos': Point3(-154.175, -25.576, 25.005),'Scale': VBase3(4.734, 4.734, 4.734),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_sphere'}},'1208824829.36akelts': {'Type': 'Collision Barrier','DisableCollision': False,'GridPos': Point3(-200.819, -7.771, 24.987),'Hpr': VBase3(0.0, 0.0, 0.0),'Pos': Point3(-200.822, -7.771, 24.985),'Scale': VBase3(4.325, 4.325, 4.325),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_sphere'}},'1208886510.41akelts': {'Type': 'Bush','DisableCollision': False,'Hpr': VBase3(55.31, 0.0, 0.0),'Pos': Point3(-178.816, -28.467, 25.318),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/vegetation/bush_a'}},'1208886578.03akelts': {'Type': 'Bush','DisableCollision': False,'Hpr': VBase3(171.175, -0.24, 3.631),'Pos': Point3(-203.062, 146.399, 91.567),'Scale': VBase3(1.447, 1.447, 1.447),'Visual': {'Model': 'models/vegetation/bush_a'}},'1208890140.34akelts': {'Type': 'Dinghy','Aggro Radius': '20.0000','Hpr': VBase3(119.454, 0.0, 0.0),'Location': 'Water','Pos': Point3(-362.241, -214.481, 0.125),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/shipparts/dingy-geometry_High'}},'1208890226.42akelts': {'Type': 'Bush','DisableCollision': True,'GridPos': Point3(-326.502, 10.367, 37.276),'Hpr': VBase3(-108.716, -1.193, 2.814),'Pos': Point3(-326.646, 9.978, 36.51),'Scale': VBase3(0.679, 0.679, 0.679),'Visual': {'Model': 'models/vegetation/bush_a'}},'1208890285.03akelts': {'Type': 'Bush','DisableCollision': True,'GridPos': Point3(-326.502, 10.367, 37.276),'Hpr': VBase3(-175.764, 0.0, 0.0),'Pos': Point3(-332.8, 12.715, 38.828),'Scale': VBase3(0.679, 0.679, 0.679),'Visual': {'Model': 'models/vegetation/bush_b'}},'1208890307.03akelts': {'Type': 'Bush','DisableCollision': True,'GridPos': Point3(-326.502, 10.367, 37.276),'Hpr': VBase3(-4.906, 0.0, 0.0),'Pos': Point3(-328.15, 
16.954, 39.555),'Scale': VBase3(0.679, 0.679, 0.679),'Visual': {'Model': 'models/vegetation/bush_b'}},'1208890332.34akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(115.113, 0.0, 0.0),'Pos': Point3(-324.794, 15.449, 37.358),'Scale': VBase3(1.523, 1.523, 1.474),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1208890370.53akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(-56.834, 0.0, 0.0),'Pos': Point3(-334.838, 9.189, 36.974),'Scale': VBase3(1.523, 1.523, 1.474),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1208890410.26akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(32.402, 0.0, 0.0),'Pos': Point3(-326.204, 5.768, 37.136),'Scale': VBase3(1.125, 1.523, 1.474),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1208890537.5akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(0.0, 0.0, 0.0),'Pos': Point3(-200.357, -226.2, 1.469),'Scale': VBase3(2.342, 2.342, 2.342),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_sphere'}},'1208890571.39akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(0.0, 0.0, 0.0),'Pos': Point3(-179.699, -212.376, 1.367),'Scale': VBase3(2.176, 2.176, 2.176),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_sphere'}},'1208890789.8akelts': {'Type': 'Tree - Animated','DisableCollision': False,'Hpr': VBase3(165.11, 0.0, 0.0),'Pos': Point3(-195.439, 13.529, 90.971),'Scale': VBase3(1.0, 1.0, 1.0),'SubObjs': {'Top Model': {'Visual': {'Animate': 'models/vegetation/palm_leaf_a_idle','Attach': ['trunk', 'def_trunk_attach'],'Model': 'models/vegetation/palm_leaf_c_hi','PartName': 'leaf'}}},'Visual': {'Animate': 'models/vegetation/palm_trunk_a_idle','Model': 'models/vegetation/palm_trunk_a_hi','PartName': 'trunk'}},'1208896939.3akelts': {'Type': 'Door Locator Node','Name': 'door_locator','Hpr': VBase3(31.5, 1.976, -0.358),'Pos': Point3(229.522, 
-287.381, 1.245),'Scale': VBase3(1.0, 1.0, 1.0)},'1208896940.48akelts': {'Type': 'Door Locator Node','Name': 'door_locator','Hpr': VBase3(-101.348, 0.096, 2.224),'Pos': Point3(229.522, -287.381, 1.245),'Scale': VBase3(1.0, 1.0, 1.0)},'1208896940.5akelts': {'Type': 'Door Locator Node','Name': 'door_locator_2','Hpr': VBase3(-101.348, 0.096, 2.224),'Pos': Point3(229.522, -287.381, 1.245),'Scale': VBase3(1.0, 1.0, 1.0)},'1208896984.84akelts': {'Type': 'Building Exterior','File': '','ExtUid': '1208896984.84akelts0','Hpr': VBase3(152.305, 2.951, -1.668),'Objects': {},'Pos': Point3(-26.337, 149.659, 35.756),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Door': 'models/buildings/shanty_guildhall_door','Model': 'models/buildings/shanty_leanto_B','SignFrame': '','SignImage': 'models/buildings/sign1_eng_a_icon_barber'}},'1208898135.26akelts': {'Type': 'Townsperson','Category': 'Commoner','AnimSet': 'default','AuraFX': 'None','Boss': False,'CustomModel': 'None','GhostColor': 'None','GhostFX': 0,'Greeting Animation': '','GridPos': Point3(-29.276, 146.564, 36.591),'Hpr': VBase3(32.394, 1.119, 1.716),'Instanced World': 'None','Level': '37','Notice Animation 1': '','Notice Animation 2': '','Patrol Radius': '3.9759','Pos': Point3(-29.276, 146.564, 36.591),'PoseAnim': '','PoseFrame': '','Private Status': 'All','PropFXLeft': 'None','PropFXRight': 'None','PropLeft': 'None','PropRight': 'None','Respawns': True,'Scale': VBase3(1.0, 1.0, 1.0),'ShopID': 'PORT_ROYAL_DEFAULTS','Start State': 'Idle','StartFrame': '0','Team': 'Player','TrailFX': 'None','TrailLeft': 'None','TrailRight': 'None','Visual': {},'Zombie': False,'spawnTimeAlt': '','spawnTimeBegin': 0.0,'spawnTimeEnd': 0.0},'1208899163.01akelts': {'Type': 'Furniture','DisableCollision': False,'GridPos': Point3(-22.431, 139.687, 36.41),'Hpr': VBase3(-135.186, 0.825, -3.289),'Pos': Point3(-22.431, 139.688, 36.41),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.6000000238418579, 0.6000000238418579, 0.6000000238418579, 1.0),'Model': 
'models/props/bed_shanty'}},'1208899214.44akelts': {'Type': 'Furniture','DisableCollision': False,'GridPos': Point3(-19.686, 148.575, 34.701),'Hpr': VBase3(-34.563, -9.894, -0.516),'Pos': Point3(-19.636, 148.595, 36.06),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.6000000238418579, 0.6000000238418579, 0.6000000238418579, 1.0),'Model': 'models/props/chair_shanty'}},'1208899379.06akelts': {'Type': 'Furniture','DisableCollision': False,'Hpr': VBase3(57.689, 0.0, 0.0),'Pos': Point3(-36.346, 144.827, 36.253),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.6000000238418579, 0.6000000238418579, 0.6000000238418579, 1.0),'Model': 'models/props/table_shanty_2'}},'1208899615.03akelts': {'Type': 'Rock','DisableCollision': False,'Hpr': VBase3(177.583, 0.0, 0.0),'Pos': Point3(-32.571, 156.224, 31.885),'Scale': VBase3(1.375, 1.375, 1.591),'Visual': {'Model': 'models/props/dirt_pile_cave_volcano'}},'1208899664.2akelts': {'Type': 'Rock','DisableCollision': False,'Hpr': VBase3(-9.002, -16.234, -24.714),'Pos': Point3(-40.033, 154.437, 34.492),'Scale': VBase3(1.957, 1.957, 1.201),'Visual': {'Color': (0.51, 0.48, 0.39215686274509803, 1.0),'Model': 'models/props/rock_2_floor'}},'1208899711.3akelts': {'Type': 'Rock','DisableCollision': False,'Hpr': VBase3(73.193, 0.0, 7.799),'Pos': Point3(-47.306, 157.113, 33.58),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.7803921568627451, 0.76, 0.6784313725490196, 1.0),'Model': 'models/props/rock_group_2_floor'}},'1208899895.08akelts': {'Type': 'Tree - Animated','DisableCollision': False,'Hpr': VBase3(-78.997, 0.0, 0.0),'Pos': Point3(85.863, -139.205, 39.12),'Scale': VBase3(1.0, 1.0, 1.0),'SubObjs': {'Top Model': {'Visual': {'Animate': 'models/vegetation/palm_leaf_a_idle','Attach': ['trunk', 'def_trunk_attach'],'Model': 'models/vegetation/palm_leaf_a_hi','PartName': 'leaf'}}},'Visual': {'Animate': 'models/vegetation/palm_trunk_a_idle','Model': 'models/vegetation/palm_trunk_a_hi','PartName': 'trunk'}},'1208899917.34akelts': 
{'Type': 'Tree - Animated','DisableCollision': False,'Hpr': VBase3(-30.973, 0.0, 0.0),'Pos': Point3(-47.209, 59.335, 92.313),'Scale': VBase3(1.0, 1.0, 1.0),'SubObjs': {'Top Model': {'Visual': {'Animate': 'models/vegetation/palm_leaf_a_idle','Attach': ['trunk', 'def_trunk_attach'],'Model': 'models/vegetation/palm_leaf_a_hi','PartName': 'leaf'}}},'Visual': {'Animate': 'models/vegetation/palm_trunk_a_idle','Model': 'models/vegetation/palm_trunk_a_hi','PartName': 'trunk'}},'1208899957.64akelts': {'Type': 'Tree - Animated','DisableCollision': False,'Hpr': VBase3(-165.511, 0.0, 0.0),'Pos': Point3(-72.966, 75.735, 93.536),'Scale': VBase3(1.552, 1.552, 1.552),'SubObjs': {'Top Model': {'Visual': {'Animate': 'models/vegetation/fern_leaf_a_idle','Attach': ['trunk', 'def_trunk_attach'],'Model': 'models/vegetation/fern_leaf_a_hi','PartName': 'leaf'}}},'Visual': {'Animate': 'models/vegetation/fern_trunk_a_idle','Model': 'models/vegetation/fern_trunk_c_hi','PartName': 'trunk'}},'1208900010.97akelts': {'Type': 'Tree - Animated','DisableCollision': False,'Hpr': VBase3(-27.608, 0.0, 0.0),'Pos': Point3(-185.651, 273.711, 34.397),'Scale': VBase3(1.283, 1.283, 1.283),'SubObjs': {'Top Model': {'Visual': {'Animate': 'models/vegetation/fern_leaf_a_idle','Attach': ['trunk', 'def_trunk_attach'],'Model': 'models/vegetation/fern_leaf_a_hi','PartName': 'leaf'}}},'Visual': {'Animate': 'models/vegetation/fern_trunk_a_idle','Model': 'models/vegetation/fern_trunk_b_hi','PartName': 'trunk'}},'1208900045.91akelts': {'Type': 'Tree - Animated','DisableCollision': False,'Hpr': VBase3(32.517, 0.0, 0.0),'Pos': Point3(329.184, 134.97, 10.677),'Scale': VBase3(1.283, 1.283, 1.283),'SubObjs': {'Top Model': {'Visual': {'Animate': 'models/vegetation/palm_leaf_a_idle','Attach': ['trunk', 'def_trunk_attach'],'Model': 'models/vegetation/palm_leaf_a_hi','PartName': 'leaf'}}},'Visual': {'Animate': 'models/vegetation/palm_trunk_a_idle','Model': 'models/vegetation/palm_trunk_a_hi','PartName': 
'trunk'}},'1208900097.17akelts': {'Type': 'Tree - Animated','DisableCollision': False,'Hpr': VBase3(-172.581, 0.0, 0.0),'Pos': Point3(296.71, 69.342, 73.293),'Scale': VBase3(1.283, 1.283, 1.283),'SubObjs': {'Top Model': {'Visual': {'Animate': 'models/vegetation/palm_leaf_a_idle','Attach': ['trunk', 'def_trunk_attach'],'Model': 'models/vegetation/palm_leaf_b_hi','PartName': 'leaf'}}},'Visual': {'Animate': 'models/vegetation/palm_trunk_a_idle','Model': 'models/vegetation/palm_trunk_a_hi','PartName': 'trunk'}},'1208900115.67akelts': {'Type': 'Tree - Animated','DisableCollision': False,'Hpr': VBase3(156.944, 0.0, 0.0),'Pos': Point3(209.544, -157.701, 43.628),'Scale': VBase3(1.283, 1.283, 1.283),'SubObjs': {'Top Model': {'Visual': {'Animate': 'models/vegetation/fern_short_leaf_c_idle','Attach': ['trunk', 'def_trunk_attach'],'Model': 'models/vegetation/fern_short_leaf_c_hi','PartName': 'leaf'}}},'Visual': {'Animate': 'models/vegetation/fern_short_trunk_d_idle','Model': 'models/vegetation/fern_short_trunk_e_hi','PartName': 'trunk'}},'1208900165.75akelts': {'Type': 'Tree - Animated','DisableCollision': False,'Hpr': VBase3(88.912, 0.0, 0.0),'Pos': Point3(370.006, -131.754, 48.812),'Scale': VBase3(1.283, 1.283, 1.283),'SubObjs': {'Top Model': {'Visual': {'Animate': 'models/vegetation/fern_leaf_a_idle','Attach': ['trunk', 'def_trunk_attach'],'Model': 'models/vegetation/fern_leaf_b_hi','PartName': 'leaf'}}},'Visual': {'Animate': 'models/vegetation/fern_trunk_a_idle','Model': 'models/vegetation/fern_trunk_a_hi','PartName': 'trunk'}},'1208900227.64akelts': {'Type': 'Rock','DisableCollision': True,'Hpr': VBase3(-22.5, 0.0, 0.0),'Pos': Point3(-224.612, -273.579, -8.216),'Scale': VBase3(7.114, 7.114, 7.114),'Visual': {'Color': (0.6000000238418579, 0.6000000238418579, 0.6000000238418579, 1.0),'Model': 'models/props/rock_group_5_sphere'}},'1208900324.75akelts': {'Type': 'Furniture','DisableCollision': False,'Hpr': VBase3(0.651, 6.172, -6.033),'Pos': Point3(-66.758, -98.929, 
22.886),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/table_shanty_2'}},'1208900373.67akelts': {'Type': 'Furniture','DisableCollision': False,'Hpr': VBase3(-20.754, 0.0, 0.0),'Objects': {},'Pos': Point3(-63.626, -97.597, 23.265),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/chair_shanty'}},'1208900380.39akelts': {'Type': 'Furniture','DisableCollision': False,'Hpr': VBase3(96.709, -5.377, 0.0),'Pos': Point3(-70.661, -99.708, 22.338),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/chair_shanty'}},'1208900382.17akelts': {'Type': 'Furniture','DisableCollision': False,'Hpr': VBase3(0.0, -19.076, 0.0),'Pos': Point3(-68.557, -105.115, 22.397),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/chair_shanty'}},'1208900438.95akelts': {'Type': 'Townsperson','Category': 'Commoner','AnimSet': 'sit_sleep','AuraFX': 'None','Boss': False,'CustomModel': 'None','GhostColor': 'None','GhostFX': 0,'Greeting Animation': '','GridPos': Point3(-64.156, -98.014, 23.33),'HelpID': 'NONE','Holiday': '','Hpr': VBase3(161.453, 0.0, 0.0),'Instanced World': 'None','Level': '37','Notice Animation 1': '','Notice Animation 2': '','Patrol Radius': '5.0060','Pos': Point3(-63.778, -98.579, 23.255),'PoseAnim': '','PoseFrame': '','Private Status': 'All','PropFXLeft': 'None','PropFXRight': 'None','PropLeft': 'None','PropRight': 'None','Respawns': True,'Scale': VBase3(1.0, 1.0, 1.0),'ShopID': 'PORT_ROYAL_DEFAULTS','Start State': 'Idle','StartFrame': '0','Team': 'Player','TrailFX': 'None','TrailLeft': 'None','TrailRight': 'None','VisSize': '','Zombie': False,'spawnTimeAlt': '','spawnTimeBegin': 0.0,'spawnTimeEnd': 0.0},'1208900732.64akelts': {'Type': 'Tree - Animated','DisableCollision': False,'Hpr': VBase3(-103.307, 0.0, 0.0),'Pos': Point3(-178.067, -27.669, 25.445),'Scale': VBase3(1.552, 1.552, 1.552),'SubObjs': {'Top Model': {'Visual': {'Animate': 'models/vegetation/fern_leaf_a_idle','Attach': ['trunk', 'def_trunk_attach'],'Model': 
'models/vegetation/fern_leaf_a_hi','PartName': 'leaf'}}},'Visual': {'Animate': 'models/vegetation/fern_trunk_a_idle','Model': 'models/vegetation/fern_trunk_c_hi','PartName': 'trunk'}},'1208905499.84akelts': {'Type': 'Door Locator Node','Name': 'door_locator','Hpr': VBase3(-58.512, 0.358, 1.976),'Pos': Point3(-301.98, -32.583, 33.874),'Scale': VBase3(1.0, 1.0, 1.0)},'1208905501.14akelts': {'Type': 'Door Locator Node','Name': 'door_locator','Hpr': VBase3(78.823, -0.103, -2.224),'Pos': Point3(62.86, -228.023, 29.39),'Scale': VBase3(1.0, 1.0, 1.0)},'1208905501.16akelts': {'Type': 'Door Locator Node','Name': 'door_locator_2','Hpr': VBase3(-101.348, 0.096, 2.224),'Pos': Point3(88.889, -227.004, 29.724),'Scale': VBase3(1.0, 1.0, 1.0)},'1209081343.05akelts': {'Type': 'Rock','DisableCollision': False,'Hpr': VBase3(31.778, -5.177, 5.841),'Pos': Point3(-78.16, 160.019, 31.694),'Scale': VBase3(15.935, 15.935, 15.935),'Visual': {'Color': (0.7372549019607844, 0.75, 0.7137254901960784, 1.0),'Model': 'models/props/rock_4_sphere'}},'1209081415.41akelts': {'Type': 'Rock','DisableCollision': False,'Hpr': VBase3(121.445, 5.835, 5.184),'Pos': Point3(39.94, 61.915, 73.013),'Scale': VBase3(12.863, 12.863, 12.863),'Visual': {'Color': (0.5, 0.5, 0.5, 1.0),'Model': 'models/props/rock_3_sphere'}},'1209081604.39akelts': {'Type': 'Rock','DisableCollision': True,'Hpr': VBase3(-137.674, -2.237, 0.0),'Pos': Point3(-343.97, -123.642, 14.67),'Scale': VBase3(7.235, 7.235, 7.235),'Visual': {'Color': (0.55, 0.55, 0.47058823529411764, 1.0),'Model': 'models/props/rock_2_sphere'}},'1209142352.5akelts': {'Type': 'Rock','DisableCollision': False,'Hpr': VBase3(117.238, 2.433, 0.057),'Objects': {'1231795765.84akelts': {'Type': 'Collision Barrier','DisableCollision': False,'GridPos': Point3(21.114, 62.93, 86.943),'Holiday': '','Hpr': VBase3(-159.476, 2.299, -0.799),'Pos': Point3(3.594, -0.409, 1.073),'Scale': VBase3(0.378, 0.378, 0.678),'VisSize': '','Visual': {'Model': 
'models/misc/pir_m_prp_lev_cambarrier_plane'}}},'Pos': Point3(24.402, 53.913, 84.159),'Scale': VBase3(2.649, 2.649, 2.649),'Visual': {'Color': (0.61, 0.6, 0.5490196078431373, 1.0),'Model': 'models/props/rock_group_1_floor'}},'1209142420.69akelts': {'Type': 'Bush','DisableCollision': False,'Holiday': '','Hpr': VBase3(-92.588, -2.845, -0.715),'Pos': Point3(33.592, 43.418, 86.564),'Scale': VBase3(1.447, 1.447, 1.447),'VisSize': '','Visual': {'Model': 'models/vegetation/bush_c'}},'1209142456.2akelts': {'Type': 'Rock','DisableCollision': False,'Hpr': VBase3(-20.619, -7.967, -2.985),'Pos': Point3(-206.91, 260.472, 17.553),'Scale': VBase3(24.106, 24.106, 33.192),'Visual': {'Color': (0.5, 0.5, 0.5, 1.0),'Model': 'models/props/rock_4_sphere'}},'1209142659.83akelts': {'Type': 'Rock','DisableCollision': False,'Hpr': VBase3(-2.671, 0.778, 2.864),'Pos': Point3(124.412, 65.071, 78.938),'Scale': VBase3(1.974, 1.974, 1.974),'Visual': {'Color': (0.7176470588235294, 0.62, 0.4823529411764706, 1.0),'Model': 'models/props/rock_group_2_floor'}},'1209142702.38akelts': {'Type': 'Bush','DisableCollision': True,'Hpr': VBase3(11.255, 4.203, -1.482),'Pos': Point3(123.373, 52.511, 80.011),'Scale': VBase3(1.426, 1.426, 1.426),'Visual': {'Color': (0.796078431372549, 0.73, 0.6196078431372549, 1.0),'Model': 'models/vegetation/bush_g'}},'1209142753.88akelts': {'Type': 'Bush','DisableCollision': True,'Hpr': VBase3(11.255, 4.203, -1.482),'Pos': Point3(39.65, -26.306, 71.379),'Scale': VBase3(1.581, 1.581, 1.581),'Visual': {'Color': (0.796078431372549, 0.73, 0.6196078431372549, 1.0),'Model': 'models/vegetation/bush_h'}},'1209142792.05akelts': {'Type': 'Bush','DisableCollision': True,'Hpr': VBase3(11.255, 4.203, -1.482),'Pos': Point3(-98.561, -4.973, 93.0),'Scale': VBase3(1.426, 1.426, 1.426),'Visual': {'Color': (0.796078431372549, 0.73, 0.6196078431372549, 1.0),'Model': 'models/vegetation/bush_h'}},'1209142795.05akelts': {'Type': 'Bush','DisableCollision': True,'Hpr': VBase3(11.255, 4.203, 
-1.482),'Pos': Point3(-70.676, 60.5, 93.414),'Scale': VBase3(1.426, 1.426, 1.426),'Visual': {'Color': (0.796078431372549, 0.73, 0.6196078431372549, 1.0),'Model': 'models/vegetation/bush_h'}},'1209142800.44akelts': {'Type': 'Bush','DisableCollision': True,'Hpr': VBase3(11.255, 4.203, -1.482),'Pos': Point3(-24.075, 56.663, 90.511),'Scale': VBase3(1.426, 1.426, 1.426),'Visual': {'Color': (0.796078431372549, 0.73, 0.6196078431372549, 1.0),'Model': 'models/vegetation/bush_h'}},'1209142807.2akelts': {'Type': 'Bush','DisableCollision': True,'Hpr': VBase3(11.255, 4.203, -1.482),'Pos': Point3(-242.167, 46.698, 87.86),'Scale': VBase3(1.426, 1.426, 1.426),'Visual': {'Color': (0.796078431372549, 0.73, 0.6196078431372549, 1.0),'Model': 'models/vegetation/bush_h'}},'1209142816.58akelts': {'Type': 'Bush','DisableCollision': True,'Hpr': VBase3(11.255, 4.203, -1.482),'Pos': Point3(-210.88, 136.976, 90.867),'Scale': VBase3(1.426, 1.426, 1.426),'Visual': {'Color': (0.796078431372549, 0.73, 0.6196078431372549, 1.0),'Model': 'models/vegetation/bush_h'}},'1209142823.27akelts': {'Type': 'Bush','DisableCollision': True,'Hpr': VBase3(11.255, 4.203, -1.482),'Pos': Point3(-162.388, 263.766, 34.351),'Scale': VBase3(1.426, 1.426, 1.426),'Visual': {'Color': (0.796078431372549, 0.73, 0.6196078431372549, 1.0),'Model': 'models/vegetation/bush_h'}},'1209142841.72akelts': {'Type': 'Bush','DisableCollision': True,'Hpr': VBase3(11.255, 4.203, -1.482),'Pos': Point3(-221.533, 156.436, 37.405),'Scale': VBase3(1.426, 1.426, 1.426),'Visual': {'Color': (0.796078431372549, 0.73, 0.6196078431372549, 1.0),'Model': 'models/vegetation/bush_h'}},'1209142852.56akelts': {'Type': 'Bush','DisableCollision': True,'Hpr': VBase3(11.255, 4.203, -1.482),'Pos': Point3(-135.479, 175.72, 38.361),'Scale': VBase3(1.426, 1.426, 1.426),'Visual': {'Color': (0.796078431372549, 0.73, 0.6196078431372549, 1.0),'Model': 'models/vegetation/bush_h'}},'1209142858.83akelts': {'Type': 'Bush','DisableCollision': True,'Hpr': VBase3(11.255, 
4.203, -1.482),'Pos': Point3(-44.351, 141.285, 37.103),'Scale': VBase3(1.426, 1.426, 1.426),'Visual': {'Color': (0.796078431372549, 0.73, 0.6196078431372549, 1.0),'Model': 'models/vegetation/bush_h'}},'1209142884.73akelts': {'Type': 'Bush','DisableCollision': True,'Hpr': VBase3(11.255, 4.203, -1.482),'Pos': Point3(-11.355, 144.525, 33.161),'Scale': VBase3(1.426, 1.426, 1.426),'Visual': {'Color': (0.796078431372549, 0.73, 0.6196078431372549, 1.0),'Model': 'models/vegetation/bush_h'}},'1209142891.28akelts': {'Type': 'Bush','DisableCollision': True,'Hpr': VBase3(47.348, 2.525, -3.673),'Pos': Point3(-20.11, 132.684, 34.565),'Scale': VBase3(1.426, 1.426, 1.426),'Visual': {'Color': (0.796078431372549, 0.73, 0.6196078431372549, 1.0),'Model': 'models/vegetation/bush_g'}},'1209142941.7akelts': {'Type': 'Bush','DisableCollision': True,'Hpr': VBase3(11.255, 4.203, -1.482),'Pos': Point3(-124.18, 294.627, 33.582),'Scale': VBase3(1.426, 1.426, 1.426),'Visual': {'Color': (0.796078431372549, 0.73, 0.6196078431372549, 1.0),'Model': 'models/vegetation/bush_h'}},'1209142955.89akelts': {'Type': 'Bush','DisableCollision': True,'Hpr': VBase3(11.255, 4.203, -1.482),'Pos': Point3(-364.683, -109.998, 23.139),'Scale': VBase3(1.426, 1.426, 1.426),'Visual': {'Color': (0.796078431372549, 0.73, 0.6196078431372549, 1.0),'Model': 'models/vegetation/bush_h'}},'1209142969.11akelts': {'Type': 'Tree - Animated','DisableCollision': False,'Hpr': VBase3(165.11, 0.0, 0.0),'Pos': Point3(-198.361, 288.567, 32.316),'Scale': VBase3(1.0, 1.0, 1.0),'SubObjs': {'Top Model': {'Visual': {'Animate': 'models/vegetation/palm_leaf_a_idle','Attach': ['trunk', 'def_trunk_attach'],'Model': 'models/vegetation/palm_leaf_c_hi','PartName': 'leaf'}}},'Visual': {'Animate': 'models/vegetation/palm_trunk_a_idle','Model': 'models/vegetation/palm_trunk_a_hi','PartName': 'trunk'}},'1209142984.83akelts': {'Type': 'Tree - Animated','DisableCollision': False,'Hpr': VBase3(165.11, 0.0, 0.0),'Pos': Point3(98.187, -253.81, 
27.867),'Scale': VBase3(1.0, 1.0, 1.0),'SubObjs': {'Top Model': {'Visual': {'Animate': 'models/vegetation/palm_leaf_a_idle','Attach': ['trunk', 'def_trunk_attach'],'Model': 'models/vegetation/palm_leaf_b_hi','PartName': 'leaf'}}},'Visual': {'Animate': 'models/vegetation/palm_trunk_a_idle','Model': 'models/vegetation/palm_trunk_a_hi','PartName': 'trunk'}},'1209143021.88akelts': {'Type': 'Bush','DisableCollision': False,'Hpr': VBase3(-42.642, 4.099, 10.274),'Pos': Point3(69.109, 16.928, 80.517),'Scale': VBase3(1.841, 1.841, 1.841),'Visual': {'Color': (1.0, 0.800000011920929, 0.6000000238418579, 1.0),'Model': 'models/vegetation/bush_c'}},'1209143109.45akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(105.455, 0.0, 0.0),'Pos': Point3(232.782, 43.277, 71.507),'Scale': VBase3(2.395, 2.395, 2.395),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1209143149.36akelts': {'Type': 'Rock','DisableCollision': False,'Hpr': VBase3(87.025, 6.384, 24.384),'Pos': Point3(162.122, 92.129, 16.046),'Scale': VBase3(9.478, 9.478, 9.478),'Visual': {'Color': (0.56, 0.52, 0.4588235294117647, 1.0),'Model': 'models/props/rock_1_floor'}},'1209143725.8akelts': {'Type': 'Rock','DisableCollision': False,'Hpr': VBase3(31.778, -5.177, 5.841),'Pos': Point3(7.208, 271.377, -19.208),'Scale': VBase3(15.935, 15.935, 15.935),'Visual': {'Color': (0.7372549019607844, 0.75, 0.7137254901960784, 1.0),'Model': 'models/props/rock_3_sphere'}},'1209143741.25akelts': {'Type': 'Rock','DisableCollision': False,'Hpr': VBase3(42.608, -10.339, 10.589),'Pos': Point3(31.14, 261.199, 0.972),'Scale': VBase3(2.876, 2.876, 2.876),'Visual': {'Color': (0.7803921568627451, 0.76, 0.6784313725490196, 1.0),'Model': 'models/props/rock_group_2_floor'}},'1209143836.75akelts': {'Type': 'Tree - Animated','DisableCollision': False,'Hpr': VBase3(-27.608, 0.0, 0.0),'Pos': Point3(12.063, 253.112, 3.562),'Scale': VBase3(1.283, 1.283, 1.283),'SubObjs': {'Top Model': {'Visual': {'Animate': 
'models/vegetation/fern_leaf_a_idle','Attach': ['trunk', 'def_trunk_attach'],'Model': 'models/vegetation/fern_leaf_a_hi','PartName': 'leaf'}}},'Visual': {'Animate': 'models/vegetation/fern_trunk_a_idle','Model': 'models/vegetation/fern_trunk_b_hi','PartName': 'trunk'}},'1209143869.5akelts': {'Type': 'Rock','DisableCollision': False,'Hpr': VBase3(6.843, -7.151, 3.121),'Pos': Point3(-96.822, 325.85, -10.096),'Scale': VBase3(28.461, 28.461, 28.461),'Visual': {'Color': (0.7372549019607844, 0.75, 0.7137254901960784, 1.0),'Model': 'models/props/rock_4_sphere'}},'1209144075.11akelts': {'Type': 'Wall','DisableCollision': False,'Hpr': VBase3(-54.422, 1.403, -10.128),'Pos': Point3(15.251, -268.499, 13.983),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/pir_m_prp_fnc_wood20'}},'1209144120.39akelts': {'Type': 'Wall','DisableCollision': False,'Hpr': VBase3(-66.268, 3.471, -6.68),'Pos': Point3(7.275, -250.261, 11.721),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/pir_m_prp_fnc_wood20'}},'1209144137.89akelts': {'Type': 'Wall','DisableCollision': False,'Hpr': VBase3(-38.593, -3.009, -12.712),'Pos': Point3(-8.11, -238.364, 7.535),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/pir_m_prp_fnc_wood20'}},'1209144226.33akelts': {'Type': 'Bush','DisableCollision': True,'Hpr': VBase3(130.721, 0.0, 0.0),'Pos': Point3(25.79, -283.525, 17.029),'Scale': VBase3(0.947, 0.947, 0.947),'Visual': {'Model': 'models/vegetation/bush_e'}},'1209144240.61akelts': {'Type': 'Bush','DisableCollision': True,'Hpr': VBase3(123.285, 0.0, 0.0),'Pos': Point3(3.425, -252.585, 11.511),'Scale': VBase3(0.947, 0.947, 0.947),'Visual': {'Model': 'models/vegetation/bush_g'}},'1209144260.67akelts': {'Type': 'Bush','DisableCollision': False,'Hpr': VBase3(80.849, -16.621, 0.0),'Pos': Point3(-11.839, -239.951, 6.338),'Scale': VBase3(0.947, 0.947, 0.947),'Visual': {'Model': 'models/vegetation/bush_c'}},'1209145262.98akelts': {'Type': 'Wall','DisableCollision': False,'Hpr': 
VBase3(129.98, -17.072, -6.992),'Pos': Point3(-227.002, 15.762, 83.223),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.6000000238418579, 0.6000000238418579, 0.6000000238418579, 1.0),'Model': 'models/props/pir_m_prp_fnc_wood20'}},'1209145315.78akelts': {'Type': 'Wall','DisableCollision': False,'Hpr': VBase3(158.061, -18.332, 6.139),'Pos': Point3(-208.822, 7.51, 85.396),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.699999988079071, 0.699999988079071, 0.699999988079071, 1.0),'Model': 'models/props/pir_m_prp_fnc_wood20'}},'1209145346.77akelts': {'Type': 'Wall','DisableCollision': False,'Hpr': VBase3(161.644, -17.932, 2.199),'Pos': Point3(-190.128, 0.934, 86.885),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.800000011920929, 0.800000011920929, 0.800000011920929, 1.0),'Model': 'models/props/pir_m_prp_fnc_wood20'}},'1209145511.8akelts': {'Type': 'Rock','DisableCollision': False,'Hpr': VBase3(-141.081, -9.091, 27.002),'Objects': {},'Pos': Point3(-239.782, 25.415, 81.994),'Scale': VBase3(4.438, 4.438, 4.438),'Visual': {'Color': (0.6000000238418579, 0.6000000238418579, 0.6000000238418579, 1.0),'Model': 'models/props/rock_4_sphere'}},'1209145586.08akelts': {'Type': 'Rock','DisableCollision': True,'Hpr': VBase3(-141.081, -9.091, 27.002),'Objects': {},'Pos': Point3(-184.661, 0.735, 85.109),'Scale': VBase3(4.438, 4.438, 4.438),'Visual': {'Color': (0.6000000238418579, 0.6000000238418579, 0.6000000238418579, 1.0),'Model': 'models/props/rock_3_sphere'}},'1209145608.59akelts': {'Type': 'Rock','DisableCollision': True,'Hpr': VBase3(137.878, -9.075, 0.86),'Objects': {},'Pos': Point3(-121.839, -8.262, 84.12),'Scale': VBase3(7.915, 7.915, 7.915),'Visual': {'Color': (0.6000000238418579, 0.6000000238418579, 0.6000000238418579, 1.0),'Model': 'models/props/rock_3_sphere'}},'1209158582.38akelts': {'Type': 'Bush','DisableCollision': False,'Hpr': VBase3(-61.944, 13.374, -9.961),'Pos': Point3(52.004, -256.242, 25.392),'Scale': VBase3(1.403, 1.403, 1.403),'Visual': 
{'Model': 'models/vegetation/bush_c'}},'1209158803.08akelts': {'Type': 'Bush','DisableCollision': True,'Hpr': VBase3(130.972, -10.835, 12.681),'Pos': Point3(27.778, -270.931, 18.466),'Scale': VBase3(0.947, 0.947, 0.947),'Visual': {'Model': 'models/vegetation/bush_a'}},'1209158804.3akelts': {'Type': 'Bush','DisableCollision': False,'Hpr': VBase3(167.304, -1.057, 16.588),'Pos': Point3(39.288, -260.354, 21.947),'Scale': VBase3(1.683, 1.683, 1.683),'Visual': {'Model': 'models/vegetation/bush_b'}},'1209158884.19akelts': {'Type': 'Grass','DisableCollision': False,'Hpr': VBase3(0.0, 0.0, -15.788),'Pos': Point3(54.276, -251.218, 25.552),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/vegetation/grass_18feet'}},'1209158907.77akelts': {'Type': 'Grass','DisableCollision': False,'Hpr': VBase3(-135.305, 11.247, 14.018),'Pos': Point3(42.157, -253.071, 21.857),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/vegetation/grass_18feet'}},'1209158922.67akelts': {'Type': 'Grass','DisableCollision': False,'Hpr': VBase3(-174.547, -0.402, 16.938),'Pos': Point3(31.353, -256.734, 18.428),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/vegetation/grass_18feet'}},'1209158942.52akelts': {'Type': 'Grass','DisableCollision': False,'Hpr': VBase3(-130.757, 11.624, 12.413),'Pos': Point3(22.997, -262.145, 16.131),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/vegetation/grass_18feet'}},'1209158955.7akelts': {'Type': 'Grass','DisableCollision': False,'Hpr': VBase3(147.836, -10.838, 13.102),'Pos': Point3(1.768, -243.873, 10.144),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/vegetation/grass_18feet'}},'1209486396.61akelts': {'Type': 'Rock','DisableCollision': True,'Hpr': VBase3(-125.788, 0.0, 0.0),'Pos': Point3(344.413, 143.42, 9.702),'Scale': VBase3(2.572, 2.572, 4.484),'Visual': {'Color': (0.800000011920929, 0.800000011920929, 0.800000011920929, 1.0),'Model': 'models/props/rock_2_sphere'}},'1209486468.75akelts': {'Type': 
'Rock','DisableCollision': True,'Hpr': VBase3(-23.891, 0.0, 0.0),'Pos': Point3(369.785, 175.047, -4.829),'Scale': VBase3(6.091, 6.091, 19.805),'Visual': {'Color': (0.800000011920929, 0.800000011920929, 0.800000011920929, 1.0),'Model': 'models/props/rock_3_floor'}},'1209486534.94akelts': {'Type': 'Rock','DisableCollision': False,'Hpr': VBase3(-161.12, 3.252, -4.89),'Pos': Point3(76.786, 92.988, 80.724),'Scale': VBase3(2.371, 2.371, 2.371),'Visual': {'Color': (0.7176470588235294, 0.62, 0.4823529411764706, 1.0),'Model': 'models/props/rock_group_1_floor'}},'1209486589.19akelts': {'Type': 'Rock','DisableCollision': False,'Hpr': VBase3(-164.674, 3.549, -4.68),'Pos': Point3(168.395, 82.302, 75.383),'Scale': VBase3(2.371, 2.371, 2.371),'Visual': {'Color': (0.7176470588235294, 0.62, 0.4823529411764706, 1.0),'Model': 'models/props/rock_1_floor'}},'1209486614.53akelts': {'Type': 'Rock','DisableCollision': False,'Hpr': VBase3(47.322, -0.524, 1.74),'Pos': Point3(178.778, 86.464, 73.119),'Scale': VBase3(7.484, 7.484, 11.678),'Visual': {'Color': (0.7176470588235294, 0.62, 0.4823529411764706, 1.0),'Model': 'models/props/rock_4_sphere'}},'1209486677.22akelts': {'Type': 'Rock','DisableCollision': False,'Hpr': VBase3(47.322, -0.524, -6.173),'Pos': Point3(223.015, -285.294, 27.077),'Scale': VBase3(6.507, 6.507, 10.154),'Visual': {'Color': (0.7176470588235294, 0.62, 0.4823529411764706, 1.0),'Model': 'models/props/rock_4_sphere'}},'1209486694.28akelts': {'Type': 'Rock','DisableCollision': False,'Hpr': VBase3(-119.113, 1.962, 5.877),'Pos': Point3(208.774, -286.996, 21.058),'Scale': VBase3(6.507, 6.507, 10.154),'Visual': {'Color': (0.7176470588235294, 0.62, 0.4823529411764706, 1.0),'Model': 'models/props/rock_3_sphere'}},'1209486728.45akelts': {'Type': 'Rock','DisableCollision': False,'Hpr': VBase3(-10.964, 4.984, -3.684),'Pos': Point3(184.085, -243.157, 31.757),'Scale': VBase3(1.706, 1.706, 3.043),'Visual': {'Color': (0.7176470588235294, 0.62, 0.4823529411764706, 1.0),'Model': 
'models/props/rock_1_floor'}},'1209486790.02akelts': {'Type': 'Tree','DisableCollision': True,'Hpr': VBase3(124.236, 36.527, -94.285),'Pos': Point3(331.324, -177.038, 47.945),'Scale': VBase3(2.256, 2.256, 2.256),'Visual': {'Model': 'models/vegetation/gen_tree_trunk_only'}},'1209486893.03akelts': {'Type': 'Bush','DisableCollision': True,'Hpr': VBase3(-44.822, 3.821, 7.369),'Pos': Point3(327.543, -178.245, 42.977),'Scale': VBase3(1.841, 1.841, 1.841),'Visual': {'Color': (0.6000000238418579, 0.6000000238418579, 0.6000000238418579, 1.0),'Model': 'models/vegetation/bush_g'}},'1209486909.75akelts': {'Type': 'Bush','DisableCollision': True,'Hpr': VBase3(-165.14, -6.958, 1.921),'Pos': Point3(336.66, -189.739, 41.289),'Scale': VBase3(2.211, 2.211, 2.211),'Visual': {'Model': 'models/vegetation/bush_f'}},'1209486962.83akelts': {'Type': 'Grass','DisableCollision': False,'Hpr': VBase3(-63.774, 0.0, 8.178),'Pos': Point3(329.001, -192.796, 40.338),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/vegetation/grass_18feet'}},'1209487002.83akelts': {'Type': 'Grass','DisableCollision': False,'Hpr': VBase3(169.021, -6.53, -4.935),'Pos': Point3(320.406, -183.643, 41.951),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/vegetation/grass_18feet'}},'1209487013.41akelts': {'Type': 'Grass','DisableCollision': False,'Hpr': VBase3(-61.811, 0.282, 8.174),'Pos': Point3(313.758, -175.971, 43.369),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/vegetation/grass_18feet'}},'1209487028.42akelts': {'Type': 'Grass','DisableCollision': False,'Hpr': VBase3(-88.18, -3.398, 7.443),'Pos': Point3(172.324, -146.896, 44.978),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/vegetation/grass_18feet'}},'1209487047.83akelts': {'Type': 'Grass','DisableCollision': False,'Hpr': VBase3(20.496, 8.138, -8.149),'Pos': Point3(131.961, -113.65, 47.415),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/vegetation/grass_18feet'}},'1209487103.2akelts': {'Type': 
'Bush','DisableCollision': True,'Hpr': VBase3(-81.142, -2.322, -2.901),'Pos': Point3(121.512, 105.312, 24.973),'Scale': VBase3(2.063, 2.063, 2.063),'Visual': {'Color': (1.0, 1.0, 1.0, 1.0),'Model': 'models/vegetation/bush_c'}},'1209487274.66akelts': {'Type': 'Grass','DisableCollision': False,'Hpr': VBase3(-34.037, -0.104, 0.632),'Pos': Point3(-20.279, 154.799, 32.549),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/vegetation/grass_18feet'}},'1209487333.36akelts': {'Type': 'Grass','DisableCollision': False,'Hpr': VBase3(61.76, 0.64, 1.812),'Pos': Point3(-171.494, 271.437, 34.247),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/vegetation/grass_18feet'}},'1209487389.98akelts': {'Type': 'Bush','DisableCollision': False,'Hpr': VBase3(23.029, 7.501, -1.17),'Pos': Point3(-293.623, 53.116, 42.815),'Scale': VBase3(1.386, 1.386, 1.386),'Visual': {'Model': 'models/vegetation/bush_d'}},'1209487424.16akelts': {'Type': 'Rock','DisableCollision': False,'Hpr': VBase3(85.208, 3.439, -3.187),'Pos': Point3(-307.547, 64.675, 41.212),'Scale': VBase3(2.565, 2.565, 2.177),'Visual': {'Color': (0.4000000059604645, 0.4000000059604645, 0.4000000059604645, 1.0),'Model': 'models/props/rock_group_5_sphere'}},'1209487525.34akelts': {'Type': 'Collision Barrier','DisableCollision': False,'GridPos': Point3(-200.819, -7.771, 24.987),'Hpr': VBase3(31.38, 0.0, 0.0),'Pos': Point3(-228.31, 9.938, 30.676),'Scale': VBase3(1.089, 1.951, 1.951),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1209487554.48akelts': {'Type': 'Collision Barrier','DisableCollision': False,'GridPos': Point3(-200.819, -7.771, 24.987),'Hpr': VBase3(-18.389, 0.0, 0.0),'Pos': Point3(-235.869, 8.233, 30.708),'Scale': VBase3(0.677, 1.951, 1.951),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1209487594.08akelts': {'Type': 'Bush','DisableCollision': False,'Hpr': VBase3(151.24, -5.576, -5.159),'Pos': Point3(-284.359, 34.664, 39.695),'Scale': VBase3(1.386, 1.386, 
1.386),'Visual': {'Model': 'models/vegetation/bush_leaves'}},'1209487617.22akelts': {'Type': 'Bush','DisableCollision': False,'Hpr': VBase3(-104.832, -3.694, 6.637),'Pos': Point3(-228.626, 178.679, 39.276),'Scale': VBase3(1.386, 1.386, 1.386),'Visual': {'Model': 'models/vegetation/bush_leaves'}},'1209487642.13akelts': {'Type': 'Bush','DisableCollision': True,'Hpr': VBase3(112.809, -1.151, -7.504),'Pos': Point3(106.738, 97.311, 23.391),'Scale': VBase3(1.386, 1.386, 1.386),'Visual': {'Model': 'models/vegetation/bush_half_d'}},'1209487701.09akelts': {'Type': 'Collision Barrier','DisableCollision': False,'GridPos': Point3(-200.819, -7.771, 24.987),'Hpr': VBase3(1.541, 0.0, 0.0),'Pos': Point3(-275.181, 33.715, 39.199),'Scale': VBase3(0.677, 1.951, 1.951),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1209487714.66akelts': {'Type': 'Bush','DisableCollision': True,'GridPos': Point3(-268.392, 33.083, 39.182),'Hpr': VBase3(-69.499, 0.846, 7.544),'Pos': Point3(-268.392, 33.083, 39.182),'Scale': VBase3(1.386, 1.386, 1.386),'Visual': {'Model': 'models/vegetation/bush_b'}},'1209491441.39akelts': {'Type': 'Door Locator Node','Name': 'door_locator','Hpr': VBase3(-58.512, 0.358, 1.976),'Pos': Point3(-301.98, -32.583, 33.874),'Scale': VBase3(1.0, 1.0, 1.0)},'1209491444.86akelts': {'Type': 'Door Locator Node','Name': 'door_locator','Hpr': VBase3(78.823, -0.103, -2.224),'Pos': Point3(62.86, -228.023, 29.39),'Scale': VBase3(1.0, 1.0, 1.0)},'1209491444.89akelts': {'Type': 'Door Locator Node','Name': 'door_locator_2','Hpr': VBase3(-101.348, 0.096, 2.224),'Pos': Point3(88.889, -227.004, 29.724),'Scale': VBase3(1.0, 1.0, 1.0)},'1209491975.06akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(-167.512, 0.0, 0.0),'Pos': Point3(119.342, 97.338, 25.004),'Scale': VBase3(4.022, 4.022, 4.022),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_sphere'}},'1209492183.72akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': 
Point3(0.0, 0.0, 0.0),'Pos': Point3(447.933, -233.624, 0.0),'Scale': VBase3(4.194, 4.194, 4.194),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_sphere'}},'1209492200.92akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(485.534, -199.793, 0.0),'Scale': VBase3(7.327, 7.327, 7.327),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_sphere'}},'1209492209.8akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(441.998, -186.999, 0.0),'Scale': VBase3(2.711, 2.711, 2.711),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_sphere'}},'1209492216.61akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(403.408, -171.686, 0.0),'Scale': VBase3(6.773, 6.773, 6.773),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_sphere'}},'1209492242.84akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-413.413, 70.025, 0.0),'Scale': VBase3(3.812, 3.812, 3.812),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_sphere'}},'1209492262.25akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-376.303, 39.387, -0.001),'Scale': VBase3(6.428, 6.428, 6.428),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_sphere'}},'1209492321.78akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-356.924, 115.361, 0.0),'Scale': VBase3(6.428, 6.428, 6.428),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_sphere'}},'1209492330.23akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-367.593, 81.925, 0.0),'Scale': VBase3(2.142, 2.142, 2.142),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_sphere'}},'1209492383.55akelts': {'Type': 'Wall','DisableCollision': False,'Hpr': 
VBase3(79.784, -7.547, -3.39),'Pos': Point3(-355.315, -0.935, 37.548),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.6000000238418579, 0.6000000238418579, 0.6000000238418579, 1.0),'Model': 'models/props/pir_m_prp_fnc_wood60'}},'1209492437.27akelts': {'Type': 'Wall','DisableCollision': False,'Hpr': VBase3(-102.421, 7.077, -0.22),'Pos': Point3(-332.21, 117.208, 40.724),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.6000000238418579, 0.6000000238418579, 0.6000000238418579, 1.0),'Model': 'models/props/pir_m_prp_fnc_wood60'}},'1209492491.13akelts': {'Type': 'Wall','DisableCollision': False,'Hpr': VBase3(-111.642, 7.732, -5.896),'Pos': Point3(-324.475, 135.511, 38.707),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.6000000238418579, 0.6000000238418579, 0.6000000238418579, 1.0),'Model': 'models/props/pir_m_prp_fnc_wood20'}},'1209492538.03akelts': {'Type': 'Bush','DisableCollision': False,'Hpr': VBase3(150.621, -5.949, 0.538),'Pos': Point3(-344.877, 59.474, 41.479),'Scale': VBase3(0.98, 0.98, 0.98),'Visual': {'Model': 'models/vegetation/bush_e'}},'1209492558.39akelts': {'Type': 'Bush','DisableCollision': True,'Hpr': VBase3(33.154, 2.273, -5.525),'Pos': Point3(-326.323, 134.857, 38.697),'Scale': VBase3(0.98, 0.98, 0.98),'Visual': {'Model': 'models/vegetation/bush_f'}},'1209492592.89akelts': {'Type': 'Bush','DisableCollision': False,'Hpr': VBase3(-24.555, 5.883, -1.036),'Pos': Point3(-355.199, 9.827, 37.975),'Scale': VBase3(0.98, 0.98, 0.98),'Visual': {'Model': 'models/vegetation/bush_f'}},'1209492625.06akelts': {'Type': 'Rock','DisableCollision': False,'Hpr': VBase3(-137.674, -2.237, 0.0),'Pos': Point3(-359.092, -4.479, 34.086),'Scale': VBase3(2.142, 2.142, 2.142),'Visual': {'Color': (0.55, 0.55, 0.47058823529411764, 1.0),'Model': 'models/props/rock_2_sphere'}},'1209492656.16akelts': {'Type': 'Rock','DisableCollision': False,'Hpr': VBase3(-168.304, -1.925, 7.414),'Pos': Point3(-328.589, 133.399, 35.956),'Scale': VBase3(2.737, 2.737, 2.737),'Visual': 
{'Color': (0.55, 0.55, 0.47058823529411764, 1.0),'Model': 'models/props/rock_3_sphere'}},'1209492696.38akelts': {'Type': 'Rock','DisableCollision': True,'Hpr': VBase3(-157.268, -0.462, 7.644),'Pos': Point3(-341.734, 88.605, 37.404),'Scale': VBase3(3.697, 3.708, 6.613),'Visual': {'Color': (0.55, 0.55, 0.47058823529411764, 1.0),'Model': 'models/props/rock_4_sphere'}},'1209492757.0akelts': {'Type': 'Grass','DisableCollision': False,'Hpr': VBase3(-94.53, -1.675, 2.073),'Pos': Point3(-347.751, 37.676, 39.466),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/vegetation/grass_18feet'}},'1209492798.52akelts': {'Type': 'Grass','DisableCollision': False,'Hpr': VBase3(87.21, 1.612, -2.123),'Pos': Point3(-343.076, 59.03, 40.654),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/vegetation/grass_18feet'}},'1209492811.14akelts': {'Type': 'Grass','DisableCollision': False,'Hpr': VBase3(90.643, 1.482, -2.216),'Pos': Point3(-345.186, 47.785, 39.962),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/vegetation/grass_8feet'}},'1209492982.39akelts': {'Type': 'Bush','DisableCollision': True,'Hpr': VBase3(150.621, -5.949, 0.538),'Pos': Point3(-342.804, 82.897, 39.893),'Scale': VBase3(0.98, 0.98, 0.98),'Visual': {'Model': 'models/vegetation/bush_h'}},'1209493001.3akelts': {'Type': 'Bush','DisableCollision': True,'Hpr': VBase3(150.562, 0.366, 0.535),'Pos': Point3(-342.147, 94.796, 39.533),'Scale': VBase3(1.338, 1.338, 1.837),'Visual': {'Model': 'models/vegetation/bush_h'}},'1209493034.36akelts': {'Type': 'Bush','DisableCollision': True,'Hpr': VBase3(167.571, 0.506, 16.845),'Pos': Point3(-336.555, 111.824, 39.374),'Scale': VBase3(0.519, 0.519, 0.712),'Visual': {'Model': 'models/vegetation/bush_half_a'}},'1209493112.39akelts': {'Type': 'Tree - Animated','DisableCollision': False,'Hpr': VBase3(165.11, 0.0, 0.0),'Pos': Point3(-348.172, 56.107, 38.167),'Scale': VBase3(1.0, 1.0, 1.0),'SubObjs': {'Top Model': {'Visual': {'Animate': 
'models/vegetation/palm_leaf_a_idle','Attach': ['trunk', 'def_trunk_attach'],'Model': 'models/vegetation/palm_leaf_c_hi','PartName': 'leaf'}}},'Visual': {'Animate': 'models/vegetation/palm_trunk_a_idle','Model': 'models/vegetation/palm_trunk_a_hi','PartName': 'trunk'}},'1209769069.2akelts': {'Type': 'Furniture','DisableCollision': False,'GridPos': Point3(-315.151, -33.893, 61.465),'Hpr': VBase3(-148.285, -0.668, 2.057),'Pos': Point3(-315.151, -33.893, 61.465),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.699999988079071, 0.699999988079071, 0.699999988079071, 1.0),'Model': 'models/props/table_shanty_2'}},'1209776668.64akelts': {'Type': 'Wall','DisableCollision': False,'Hpr': VBase3(-167.0, 0.598, -3.083),'Pos': Point3(243.135, 96.055, 72.761),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.6000000238418579, 0.6000000238418579, 0.6000000238418579, 1.0),'Model': 'models/props/pir_m_prp_fnc_wood60'}},'1209776753.91akelts': {'Type': 'Bush','DisableCollision': False,'Hpr': VBase3(37.339, 3.125, -3.178),'Pos': Point3(181.893, 81.762, 75.992),'Scale': VBase3(0.93, 0.93, 0.93),'Visual': {'Color': (0.796078431372549, 0.73, 0.6196078431372549, 1.0),'Model': 'models/vegetation/bush_g'}},'1209776791.42akelts': {'Type': 'Bush','DisableCollision': False,'Hpr': VBase3(154.818, 5.405, 3.315),'Pos': Point3(243.211, 100.397, 70.765),'Scale': VBase3(0.93, 0.93, 0.93),'Visual': {'Color': (0.796078431372549, 0.73, 0.6196078431372549, 1.0),'Model': 'models/vegetation/bush_c'}},'1209776864.33akelts': {'Type': 'Bush','DisableCollision': True,'Hpr': VBase3(94.764, -1.001, -4.343),'Pos': Point3(184.145, 86.552, 74.805),'Scale': VBase3(0.93, 0.93, 0.93),'Visual': {'Color': (0.796078431372549, 0.73, 0.6196078431372549, 1.0),'Model': 'models/vegetation/bush_g'}},'1209776884.91akelts': {'Type': 'Bush','DisableCollision': True,'Hpr': VBase3(-92.573, 0.437, 9.136),'Pos': Point3(197.324, 89.765, 74.703),'Scale': VBase3(0.93, 0.93, 0.93),'Visual': {'Color': (0.796078431372549, 0.73, 
0.6196078431372549, 1.0),'Model': 'models/vegetation/bush_f'}},'1210016877.48akelts': {'Type': 'Cups','DisableCollision': False,'GridPos': Point3(-314.053, -33.459, 64.465),'Hpr': VBase3(-124.245, 0.0, 0.0),'Pos': Point3(-314.054, -33.459, 64.465),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/beerstein'}},'1210016904.36akelts': {'Type': 'Jugs_and_Jars','DisableCollision': False,'GridPos': Point3(-316.894, -32.134, 64.442),'Hpr': VBase3(0.0, 0.0, 0.0),'Pos': Point3(-316.894, -32.133, 64.442),'Scale': VBase3(0.735, 0.735, 0.735),'Visual': {'Model': 'models/props/bottle_brown'}},'1210016931.33akelts': {'Type': 'Jugs_and_Jars','DisableCollision': False,'GridPos': Point3(-314.485, -34.974, 64.537),'Hpr': VBase3(158.126, 1.968, 0.404),'Pos': Point3(-314.485, -34.974, 64.537),'Scale': VBase3(0.735, 0.735, 0.735),'Visual': {'Model': 'models/props/bottle_tan'}},'1210016954.08akelts': {'Type': 'Cups','DisableCollision': False,'GridPos': Point3(-312.713, -35.573, 64.449),'Hpr': VBase3(62.77, 1.354, 0.376),'Pos': Point3(-312.713, -35.573, 64.449),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/beerstein'}},'1210019310.33akelts': {'Type': 'Light_Fixtures','DisableCollision': False,'GridPos': Point3(-314.375, -36.698, 64.409),'Hpr': VBase3(0.0, 0.0, 0.0),'Pos': Point3(-314.375, -36.698, 64.409),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/candle_holder'}},'1210354645.64akelts': {'Type': 'Door Locator Node','Name': 'door_locator','Hpr': VBase3(-179.829, 0.0, 0.0),'Pos': Point3(-0.498, -4.914, 0.952),'Scale': VBase3(1.0, 1.0, 1.0)},'1210354650.75akelts': {'Type': 'Door Locator Node','Name': 'door_locator','Hpr': VBase3(78.823, -0.103, -2.224),'Pos': Point3(62.86, -228.023, 29.39),'Scale': VBase3(1.0, 1.0, 1.0)},'1210354650.84akelts': {'Type': 'Door Locator Node','Name': 'door_locator_2','Hpr': VBase3(-101.348, 0.096, 2.224),'Pos': Point3(88.889, -227.004, 29.724),'Scale': VBase3(1.0, 1.0, 1.0)},'1210364025.09akelts': {'Type': 
'Collision Barrier','DisableCollision': False,'Hpr': VBase3(-60.492, 0.0, 0.0),'Pos': Point3(-100.811, -35.554, 25.666),'Scale': VBase3(3.409, 3.409, 3.409),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1210364105.42akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(-40.387, 0.0, 0.0),'Pos': Point3(-79.578, -60.831, 24.988),'Scale': VBase3(3.656, 3.409, 3.409),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1210364212.52akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(-52.104, 0.0, 0.0),'Pos': Point3(-48.233, -102.997, 24.622),'Scale': VBase3(2.522, 1.65, 1.65),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1210364242.95akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(-52.104, 0.0, 0.0),'Pos': Point3(-31.56, -112.314, 20.952),'Scale': VBase3(2.294, 2.294, 2.294),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_sphere'}},'1210364306.98akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(-52.104, 0.0, 0.0),'Pos': Point3(17.05, -74.781, 38.575),'Scale': VBase3(2.253, 2.253, 2.253),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_sphere'}},'1210364333.72akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(-52.104, 0.0, 0.0),'Pos': Point3(25.56, -85.298, 38.542),'Scale': VBase3(1.329, 1.329, 1.329),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_sphere'}},'1210367481.94akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(-52.104, 0.0, 0.0),'Pos': Point3(6.687, -93.4, 34.069),'Scale': VBase3(1.97, 1.97, 1.97),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_sphere'}},'1210367542.7akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(24.276, 0.0, 0.0),'Pos': Point3(18.055, -87.983, 34.893),'Scale': VBase3(1.117, 1.117, 1.34),'Visual': {'Model': 
'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1210367636.27akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(49.698, 0.0, 0.0),'Pos': Point3(-10.429, -107.705, 24.15),'Scale': VBase3(3.392, 1.0, 2.641),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1210367763.34akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(-19.892, 0.0, 0.0),'Pos': Point3(-26.276, -118.674, 22.874),'Scale': VBase3(1.108, 1.0, 2.262),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1210367878.2akelts': {'Type': 'Rock','DisableCollision': True,'Hpr': VBase3(72.594, -5.039, -24.041),'Pos': Point3(18.35, -60.967, 33.618),'Scale': VBase3(3.856, 3.856, 3.856),'Visual': {'Color': (0.33, 0.33, 0.3, 1.0),'Model': 'models/props/rock_group_4_floor'}},'1210367939.16akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(-52.104, 0.0, 0.0),'Pos': Point3(25.561, -60.588, 42.278),'Scale': VBase3(2.253, 2.253, 2.253),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_sphere'}},'1210369671.58akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(-127.892, 0.0, 0.0),'Pos': Point3(29.588, -25.666, 69.344),'Scale': VBase3(2.633, 0.935, 2.426),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_cube'}},'1210369772.67akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(-179.56, 0.0, 0.0),'Pos': Point3(40.257, -21.847, 72.912),'Scale': VBase3(1.0, 1.0, 1.706),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1210369842.94akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(-52.104, 0.0, 0.0),'Pos': Point3(54.799, -27.899, 57.778),'Scale': VBase3(1.96, 1.96, 1.96),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_tube'}},'1210369880.25akelts': {'Type': 'Rock','DisableCollision': True,'Hpr': VBase3(90.458, -11.502, -30.715),'Pos': Point3(45.413, -36.422, 54.527),'Scale': VBase3(1.21, 1.21, 
3.688),'Visual': {'Color': (0.45, 0.44, 0.39215686274509803, 1.0),'Model': 'models/props/rock_1_sphere'}},'1210370053.67akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(44.614, 0.0, 0.0),'Pos': Point3(47.839, -38.865, 50.583),'Scale': VBase3(1.96, 1.96, 3.486),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1210370151.59akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-344.331, -121.47, 17.437),'Scale': VBase3(4.116, 4.116, 4.116),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_sphere'}},'1210370175.25akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(159.83, 0.0, 0.0),'Pos': Point3(-364.299, -106.972, 22.912),'Scale': VBase3(2.176, 2.176, 2.176),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1210370213.61akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(91.23, 2.548, 0.0),'Pos': Point3(-356.296, -5.495, 34.885),'Scale': VBase3(0.676, 2.176, 2.618),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1210370296.83akelts': {'Type': 'Furniture','DisableCollision': False,'Hpr': VBase3(126.672, 0.0, 0.0),'Pos': Point3(-157.265, 270.239, 34.429),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.4000000059604645, 0.4000000059604645, 0.4000000059604645, 1.0),'Model': 'models/props/bed_shanty'}},'1210370345.66akelts': {'Type': 'Furniture','DisableCollision': False,'Hpr': Point3(0.0, 0.0, 0.0),'Objects': {},'Pos': Point3(-143.998, 284.196, 34.377),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.61, 0.6, 0.5490196078431373, 1.0),'Model': 'models/props/stool_shanty'}},'1210370417.3akelts': {'Type': 'Furniture','DisableCollision': False,'Hpr': VBase3(-48.445, 0.0, 0.0),'Pos': Point3(-141.603, 281.965, 34.53),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.61, 0.6, 0.5490196078431373, 1.0),'Model': 'models/props/table_shanty'}},'1210370501.63akelts': 
{'Type': 'Jugs_and_Jars','DisableCollision': False,'GridPos': Point3(-144.355, 284.077, 35.828),'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-144.355, 284.077, 35.828),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.6000000238418579, 0.6000000238418579, 0.6000000238418579, 1.0),'Model': 'models/props/jar'}},'1210370515.27akelts': {'Type': 'Jugs_and_Jars','DisableCollision': False,'GridPos': Point3(-143.845, 284.383, 35.881),'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-141.519, 282.404, 37.475),'Scale': VBase3(0.551, 0.551, 0.551),'Visual': {'Color': (0.5, 0.5, 0.5, 1.0),'Model': 'models/props/bottle_green'}},'1210370516.02akelts': {'Type': 'Jugs_and_Jars','DisableCollision': False,'GridPos': Point3(-143.845, 284.383, 35.881),'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-140.529, 281.1, 37.472),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.699999988079071, 0.699999988079071, 0.699999988079071, 1.0),'Model': 'models/props/waterpitcher'}},'1210370516.34akelts': {'Type': 'Jugs_and_Jars','DisableCollision': False,'GridPos': Point3(-143.845, 284.383, 35.881),'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-142.354, 283.397, 37.501),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.800000011920929, 0.800000011920929, 0.800000011920929, 1.0),'Model': 'models/props/winebottle_B'}},'1210370517.23akelts': {'Type': 'Jugs_and_Jars','DisableCollision': False,'GridPos': Point3(-143.845, 284.383, 35.881),'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-142.388, 282.463, 37.501),'Scale': VBase3(0.74, 0.74, 0.74),'Visual': {'Color': (0.800000011920929, 0.800000011920929, 0.800000011920929, 1.0),'Model': 'models/props/bottle_red'}},'1210370606.14akelts': {'Type': 'Jugs_and_Jars','DisableCollision': False,'GridPos': Point3(-143.733, 284.585, 35.853),'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-143.733, 284.585, 35.853),'Scale': VBase3(0.488, 0.488, 0.488),'Visual': {'Color': (0.5, 0.5, 0.5, 1.0),'Model': 'models/props/bottle_tan'}},'1210370644.22akelts': {'Type': 
'Cups','DisableCollision': False,'Hpr': VBase3(98.518, 0.0, 0.0),'Pos': Point3(-140.834, 280.488, 37.501),'Scale': VBase3(0.553, 0.553, 0.553),'Visual': {'Color': (0.55, 0.5, 0.5, 1.0),'Model': 'models/props/cup_tin'}},'1210370650.47akelts': {'Type': 'Cups','DisableCollision': False,'Hpr': VBase3(-76.467, -55.854, -77.001),'Pos': Point3(-142.02, 281.204, 37.668),'Scale': VBase3(0.668, 0.668, 0.668),'Visual': {'Color': (0.9, 0.85, 0.85, 1.0),'Model': 'models/props/cup_tin'}},'1210370734.7akelts': {'Type': 'Pots','DisableCollision': False,'Hpr': VBase3(0.0, 0.0, -7.298),'Pos': Point3(-136.512, 277.987, 35.899),'Scale': VBase3(1.739, 1.739, 1.739),'Visual': {'Model': 'models/props/pot_B'}},'1210370791.75akelts': {'Type': 'Pots','DisableCollision': False,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-156.126, 269.603, 37.443),'Scale': VBase3(1.456, 1.456, 1.456),'Visual': {'Color': (0.5, 0.5, 0.5, 1.0),'Model': 'models/props/pot_A'}},'1210370813.42akelts': {'Type': 'Rope','DisableCollision': True,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-158.703, 272.916, 37.516),'Scale': VBase3(0.68, 0.68, 0.68),'Visual': {'Color': (0.6000000238418579, 0.6000000238418579, 0.6000000238418579, 1.0),'Model': 'models/props/rope_pile'}},'1210370834.7akelts': {'Type': 'Pan','DisableCollision': False,'Hpr': VBase3(0.0, -4.176, 0.0),'Pos': Point3(-154.818, 268.174, 37.432),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/pan'}},'1210370878.17akelts': {'Type': 'Rock','DisableCollision': False,'Hpr': VBase3(0.0, -1.514, 0.0),'Objects': {},'Pos': Point3(-148.725, 264.298, 33.024),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.89, 0.78, 0.7058823529411765, 1.0),'Model': 'models/props/dirt_pile'}},'1210370944.19akelts': {'Type': 'Log_Stack','DisableCollision': True,'GridPos': Point3(-148.854, 264.612, 35.307),'Hpr': VBase3(22.412, 0.0, 0.0),'Pos': Point3(-148.854, 264.612, 35.307),'Scale': VBase3(0.594, 0.594, 0.594),'Visual': {'Color': (1.0, 0.800000011920929, 
0.6000000238418579, 1.0),'Model': 'models/vegetation/gen_log02'}},'1210370972.41akelts': {'Type': 'Log_Stack','DisableCollision': True,'GridPos': Point3(-148.452, 264.483, 35.307),'Hpr': VBase3(-82.714, 0.192, 1.502),'Pos': Point3(-148.191, 264.198, 35.499),'Scale': VBase3(0.34, 0.34, 0.34),'Visual': {'Color': (1.0, 0.800000011920929, 0.6000000238418579, 1.0),'Model': 'models/vegetation/gen_log03'}},'1210371019.55akelts': {'Type': 'Effect Node','EffectName': 'torch_effect','GridPos': Point3(-148.663, 264.064, 35.894),'Hpr': VBase3(0.0, 0.0, 0.0),'Pos': Point3(-148.663, 264.064, 35.894),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0, 0, 0.65, 1),'Model': 'models/misc/smiley'}},'1210371098.14akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-148.695, 264.305, 35.245),'Scale': VBase3(0.49, 0.49, 0.49),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_sphere'}},'1210371142.19akelts': {'Type': 'Food','DisableCollision': False,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-143.079, 282.858, 37.479),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.6000000238418579, 0.6000000238418579, 0.6000000238418579, 1.0),'Model': 'models/props/fishbasket'}},'1210371224.86akelts': {'Type': 'Pots','DisableCollision': False,'GridPos': Point3(-34.973, 142.34, 39.242),'Hpr': VBase3(0.0, 0.0, 0.0),'Pos': Point3(-34.973, 142.341, 39.242),'Scale': VBase3(1.47, 1.47, 1.47),'Visual': {'Color': (0.800000011920929, 0.800000011920929, 0.800000011920929, 1.0),'Model': 'models/props/pot_A'}},'1210371243.42akelts': {'Type': 'Sack','DisableCollision': True,'Hpr': VBase3(60.832, 0.0, 0.0),'Pos': Point3(-36.461, 143.199, 39.236),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.800000011920929, 0.800000011920929, 0.800000011920929, 1.0),'Model': 'models/props/package_sack'}},'1210371257.77akelts': {'Type': 'Sack','DisableCollision': True,'GridPos': Point3(-38.031, 143.79, 39.6),'Hpr': VBase3(19.541, -1.732, 91.972),'Pos': 
Point3(-38.032, 143.791, 39.6),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.8274509803921568, 0.81, 0.7725490196078432, 1.0),'Model': 'models/props/package_sack'}},'1210371260.97akelts': {'Type': 'Sack','DisableCollision': True,'GridPos': Point3(-38.95, 144.836, 39.224),'Hpr': VBase3(11.301, 0.0, 0.0),'Pos': Point3(-38.95, 144.836, 39.224),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.800000011920929, 0.800000011920929, 0.800000011920929, 1.0),'Model': 'models/props/package_sack'}},'1210371327.56akelts': {'Type': 'Sack','DisableCollision': True,'GridPos': Point3(-34.622, 144.151, 37.004),'Hpr': VBase3(-152.536, 1.852, -2.84),'Pos': Point3(-34.622, 144.151, 37.003),'Scale': VBase3(0.511, 0.511, 0.511),'Visual': {'Color': (0.6000000238418579, 0.6000000238418579, 0.6000000238418579, 1.0),'Model': 'models/props/Sack'}},'1210371404.84akelts': {'Type': 'Sack','DisableCollision': True,'GridPos': Point3(-36.944, 145.304, 36.973),'Hpr': VBase3(73.67, 1.624, 0.021),'Pos': Point3(-36.944, 145.304, 36.973),'Scale': VBase3(0.511, 0.511, 0.511),'Visual': {'Color': (0.6000000238418579, 0.6000000238418579, 0.6000000238418579, 1.0),'Model': 'models/props/Sack'}},'1210371434.41akelts': {'Type': 'Jugs_and_Jars','DisableCollision': False,'GridPos': Point3(-34.081, 143.867, 39.224),'Hpr': VBase3(-83.055, 0.0, 0.0),'Pos': Point3(-34.081, 143.867, 39.224),'Scale': VBase3(0.745, 0.745, 0.745),'Visual': {'Color': (0.800000011920929, 0.800000011920929, 1.0, 1.0),'Model': 'models/props/largejug_A2'}},'1210371447.73akelts': {'Type': 'Jugs_and_Jars','DisableCollision': False,'GridPos': Point3(-37.544, 145.006, 39.175),'Hpr': VBase3(-78.381, -2.228, -2.555),'Pos': Point3(-37.545, 145.005, 39.175),'Scale': VBase3(1.415, 1.415, 1.415),'Visual': {'Color': (0.699999988079071, 0.699999988079071, 0.699999988079071, 1.0),'Model': 'models/props/jug'}},'1210371638.52akelts': {'Type': 'Door Locator Node','Name': 'door_locator','Hpr': VBase3(31.5, 1.976, -0.358),'Pos': Point3(-276.882, 
-116.908, 57.185),'Scale': VBase3(1.0, 1.0, 1.0)},'1210371640.69akelts': {'Type': 'Door Locator Node','Name': 'door_locator','Hpr': VBase3(-101.348, 0.096, 2.224),'Pos': Point3(-276.882, -116.908, 57.185),'Scale': VBase3(1.0, 1.0, 1.0)},'1210371640.7akelts': {'Type': 'Door Locator Node','Name': 'door_locator_2','Hpr': VBase3(-101.348, 0.096, 2.224),'Pos': Point3(-276.882, -116.908, 57.185),'Scale': VBase3(1.0, 1.0, 1.0)},'1210710642.36kmuller': {'Type': 'Furniture','DisableCollision': False,'GridPos': Point3(-312.661, -29.957, 61.689),'Hpr': VBase3(-54.569, 0.0, 0.0),'Pos': Point3(-312.661, -29.957, 61.689),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/chair_shanty'}},'1211486169.14akelts': {'Type': 'Collision Barrier','DisableCollision': False,'GridPos': Point3(-200.819, -7.771, 24.987),'Hpr': VBase3(-31.544, 0.0, 0.0),'Pos': Point3(-362.84, 8.216, -1.361),'Scale': VBase3(1.089, 1.951, 1.951),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1211486195.58akelts': {'Type': 'Collision Barrier','DisableCollision': False,'GridPos': Point3(-200.819, -7.771, 24.987),'Hpr': VBase3(-156.083, 0.0, 0.0),'Pos': Point3(-331.806, 139.275, -1.181),'Scale': VBase3(1.089, 1.951, 1.951),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1211486216.97akelts': {'Type': 'Collision Barrier','DisableCollision': False,'GridPos': Point3(-200.819, -7.771, 24.987),'Hpr': VBase3(120.276, 0.0, 0.0),'Pos': Point3(-72.904, 306.241, -1.467),'Scale': VBase3(1.089, 1.951, 1.951),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1211486244.61akelts': {'Type': 'Collision Barrier','DisableCollision': False,'GridPos': Point3(-200.819, -7.771, 24.987),'Hpr': VBase3(133.761, 0.0, 0.0),'Pos': Point3(517.887, -52.393, -1.103),'Scale': VBase3(1.089, 1.951, 1.951),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1211486334.98akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(0.0, 0.0, 
0.0),'Pos': Point3(-223.932, -285.272, -3.032),'Scale': VBase3(3.644, 3.644, 3.644),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_sphere'}},'1211486364.42akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(0.0, 0.0, 0.0),'Pos': Point3(-265.566, -276.618, 0.533),'Scale': VBase3(3.644, 3.644, 3.644),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_sphere'}},'1211486372.42akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(0.0, 0.0, 0.0),'Pos': Point3(-240.285, -305.707, -1.416),'Scale': VBase3(1.825, 1.825, 1.825),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_sphere'}},'1211487140.7akelts': {'Type': 'Wall','DisableCollision': False,'Hpr': VBase3(-179.603, -0.105, 2.309),'Pos': Point3(364.611, 118.175, 79.858),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.6000000238418579, 0.6000000238418579, 0.6000000238418579, 1.0),'Model': 'models/props/pir_m_prp_fnc_wood60'}},'1211487341.31akelts': {'Type': 'Bush','DisableCollision': True,'Hpr': VBase3(154.818, 5.405, 3.315),'Pos': Point3(305.096, 116.822, 77.065),'Scale': VBase3(0.93, 0.93, 0.93),'Visual': {'Color': (0.796078431372549, 0.73, 0.6196078431372549, 1.0),'Model': 'models/vegetation/bush_b'}},'1211487383.75akelts': {'Type': 'Bush','DisableCollision': True,'Hpr': VBase3(-16.934, -5.823, -2.507),'Pos': Point3(302.972, 119.363, 75.846),'Scale': VBase3(0.93, 0.93, 0.93),'Visual': {'Color': (0.796078431372549, 0.73, 0.6196078431372549, 1.0),'Model': 'models/vegetation/bush_b'}},'1211487432.39akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(-92.237, 0.0, 0.0),'Pos': Point3(303.604, 118.857, 74.107),'Scale': VBase3(0.89, 0.89, 0.89),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_tube'}},'1211487485.69akelts': {'Type': 'Bush','DisableCollision': True,'Hpr': VBase3(96.085, 1.347, 14.123),'Pos': Point3(305.539, 118.995, 75.214),'Scale': VBase3(1.311, 1.311, 1.311),'Visual': {'Color': (0.97, 0.85, 
0.69, 1.0),'Model': 'models/vegetation/bush_f'}},'1211487601.42akelts': {'Type': 'Bush','DisableCollision': True,'Hpr': VBase3(-36.859, 0.549, -0.018),'Pos': Point3(360.946, 114.905, 78.031),'Scale': VBase3(1.311, 1.311, 1.311),'Visual': {'Model': 'models/vegetation/bush_g'}},'1211487635.59akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(-59.71, 0.0, 0.0),'Pos': Point3(358.27, 111.091, 77.998),'Scale': VBase3(2.051, 0.988, 2.511),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1211925120.0WDIG': {'Type': 'Townsperson','Category': 'Commoner','AnimSet': 'default','AuraFX': 'None','Boss': False,'CustomModel': 'None','DNA': '1211925120.0WDIG','GhostColor': 'None','GhostFX': 0,'Greeting Animation': '','HelpID': 'SHIP_PVP_HELP_FRENCH_B','Hpr': VBase3(-147.578, 0.0, 0.0),'Instanced World': 'None','Level': '37','Notice Animation 1': '','Notice Animation 2': '','Patrol Radius': '12.0000','Pos': Point3(-284.011, -43.592, 27.794),'PoseAnim': '','PoseFrame': '','Private Status': 'All','PropFXLeft': 'None','PropFXRight': 'None','PropLeft': 'None','PropRight': 'None','Respawns': True,'Scale': VBase3(1.0, 1.0, 1.0),'ShopID': 'PORT_ROYAL_DEFAULTS','Start State': 'Idle','StartFrame': '0','Team': 'Player','TrailFX': 'None','TrailLeft': 'None','TrailRight': 'None','Zombie': False,'spawnTimeAlt': '','spawnTimeBegin': 0.0,'spawnTimeEnd': 0.0},'1211926144.0WDIG': {'Type': 'Townsperson','Category': 'Commoner','AnimSet': 'default','AuraFX': 'None','Boss': False,'CustomModel': 'None','DNA': '1211926144.0WDIG','GhostColor': 'None','GhostFX': 0,'Greeting Animation': '','HelpID': 'SHIP_PVP_HELP_FRENCH_A','Hpr': VBase3(-104.09, 0.0, 0.0),'Instanced World': 'None','Level': '37','Notice Animation 1': '','Notice Animation 2': '','Patrol Radius': '12.0000','Pos': Point3(-221.218, -127.28, 14.2),'PoseAnim': '','PoseFrame': '','Private Status': 'All','PropFXLeft': 'None','PropFXRight': 'None','PropLeft': 'None','PropRight': 'None','Respawns': 
True,'Scale': VBase3(1.0, 1.0, 1.0),'ShopID': 'PORT_ROYAL_DEFAULTS','Start State': 'Idle','StartFrame': '0','Team': 'Player','TrailFX': 'None','TrailLeft': 'None','TrailRight': 'None','Zombie': False,'spawnTimeAlt': '','spawnTimeBegin': 0.0,'spawnTimeEnd': 0.0},'1212021925.08akelts': {'Type': 'Wall','DisableCollision': False,'Hpr': VBase3(47.513, -3.951, 4.567),'Pos': Point3(-286.862, 208.168, 31.983),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.6000000238418579, 0.6000000238418579, 0.6000000238418579, 1.0),'Model': 'models/props/pir_m_prp_fnc_wood60'}},'1212021989.13akelts': {'Type': 'Wall','DisableCollision': False,'Hpr': VBase3(63.981, -2.493, 5.499),'Pos': Point3(-313.681, 154.241, 38.055),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.800000011920929, 0.800000011920929, 0.800000011920929, 1.0),'Model': 'models/props/pir_m_prp_fnc_wood60'}},'1212022009.59akelts': {'Type': 'Wall','DisableCollision': False,'Hpr': VBase3(-0.146, 1.697, -5.061),'Pos': Point3(-245.975, 252.538, 27.417),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.6000000238418579, 0.6000000238418579, 0.6000000238418579, 1.0),'Model': 'models/props/pir_m_prp_fnc_wood20'}},'1212022057.0akelts': {'Type': 'Rock','DisableCollision': False,'Hpr': VBase3(-143.045, 10.04, -2.563),'Pos': Point3(-308.39, 138.229, 38.988),'Scale': VBase3(4.324, 4.324, 2.749),'Visual': {'Color': (0.6000000238418579, 0.6000000238418579, 0.6000000238418579, 1.0),'Model': 'models/props/rock_group_5_sphere'}},'1212022114.48akelts': {'Type': 'Bush','DisableCollision': True,'Hpr': VBase3(125.102, 0.0, 9.721),'Pos': Point3(-240.318, 257.802, 27.34),'Scale': VBase3(1.447, 1.447, 1.447),'Visual': {'Color': (0.699999988079071, 0.699999988079071, 0.699999988079071, 1.0),'Model': 'models/vegetation/bush_c'}},'1212022214.7akelts': {'Type': 'Bush','DisableCollision': False,'Hpr': VBase3(-32.74, -3.697, -8.997),'Pos': Point3(-297.858, 191.478, 33.893),'Scale': VBase3(1.099, 1.099, 1.099),'Visual': {'Color': (0.7, 0.7, 
0.7, 1.0),'Model': 'models/vegetation/bush_f'}},'1212022283.2akelts': {'Type': 'Tree - Animated','DisableCollision': True,'Hpr': VBase3(47.451, 0.0, 0.0),'Pos': Point3(-301.409, 173.207, 35.105),'Scale': VBase3(1.0, 1.0, 1.0),'SubObjs': {'Top Model': {'Visual': {'Animate': 'models/vegetation/palm_leaf_a_idle','Attach': ['trunk', 'def_trunk_attach'],'Model': 'models/vegetation/palm_leaf_c_hi','PartName': 'leaf'}}},'Visual': {'Animate': 'models/vegetation/palm_trunk_a_idle','Model': 'models/vegetation/palm_trunk_a_hi','PartName': 'trunk'}},'1212022321.92akelts': {'Type': 'Bush','DisableCollision': False,'Hpr': VBase3(-168.219, 0.0, 0.0),'Pos': Point3(-273.724, 222.675, 30.784),'Scale': VBase3(1.447, 1.447, 1.447),'Visual': {'Color': (0.699999988079071, 0.699999988079071, 0.699999988079071, 1.0),'Model': 'models/vegetation/bush_b'}},'1212022390.09akelts': {'Type': 'Grass','DisableCollision': False,'Hpr': VBase3(-120.433, 0.394, -6.965),'Pos': Point3(-285.342, 206.987, 32.089),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/vegetation/grass_18feet'}},'1212022415.19akelts': {'Type': 'Grass','DisableCollision': False,'Hpr': VBase3(-54.573, -6.202, -4.167),'Pos': Point3(-294.349, 173.687, 35.656),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/vegetation/grass_18feet'}},'1212022441.52akelts': {'Type': 'Grass','DisableCollision': False,'Hpr': VBase3(6.143, -6.678, 3.35),'Pos': Point3(-242.809, 251.733, 26.779),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/vegetation/grass_8feet'}},'1212022459.69akelts': {'Type': 'Grass','DisableCollision': False,'Hpr': VBase3(6.143, -6.678, -7.542),'Pos': Point3(-233.454, 251.277, 28.291),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/vegetation/grass_8feet'}},'1212022481.81akelts': {'Type': 'Bush','DisableCollision': False,'Hpr': VBase3(125.102, 0.0, 9.721),'Pos': Point3(-229.683, 250.556, 28.848),'Scale': VBase3(1.447, 1.447, 1.447),'Visual': {'Color': (0.699999988079071, 0.699999988079071, 
0.699999988079071, 1.0),'Model': 'models/vegetation/bush_h'}},'1212022513.88akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(-16.141, 0.0, 0.0),'Pos': Point3(-226.884, 247.379, 28.82),'Scale': VBase3(1.0, 1.0, 1.47),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1212022536.36akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(115.617, 0.0, 0.0),'Pos': Point3(-300.338, 175.519, 34.658),'Scale': VBase3(1.0, 1.0, 1.47),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1212022609.84akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(7.932, 0.0, 0.0),'Pos': Point3(-323.559, 134.206, 38.731),'Scale': VBase3(1.0, 1.0, 1.47),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1213033327.77akelts': {'Type': 'Wall','DisableCollision': False,'Hpr': VBase3(-93.212, -5.751, 5.099),'Pos': Point3(-356.375, -8.766, 37.02),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.6000000238418579, 0.6000000238418579, 0.6000000238418579, 1.0),'Model': 'models/props/pir_m_prp_fnc_wood60'}},'1213033380.48akelts': {'Type': 'Wall','DisableCollision': False,'Hpr': VBase3(-95.554, 0.4, 11.553),'Pos': Point3(-373.448, -83.666, 29.918),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.6000000238418579, 0.6000000238418579, 0.6000000238418579, 1.0),'Model': 'models/props/pir_m_prp_fnc_wood20'}},'1213033475.47akelts': {'Type': 'Wall','DisableCollision': False,'Hpr': VBase3(-132.211, -6.642, 2.965),'Pos': Point3(-360.284, -68.985, 31.526),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.6000000238418579, 0.6000000238418579, 0.6000000238418579, 1.0),'Model': 'models/props/pir_m_prp_fnc_wood20'}},'1213033675.06akelts': {'Type': 'Rock','DisableCollision': True,'Hpr': VBase3(62.198, 3.33, -12.664),'Pos': Point3(-374.774, -59.992, 32.041),'Scale': VBase3(2.565, 2.565, 2.519),'Visual': {'Color': (0.4000000059604645, 0.4000000059604645, 0.4000000059604645, 
1.0),'Model': 'models/props/rock_group_5_sphere'}},'1213033712.05akelts': {'Type': 'Bush','DisableCollision': True,'GridPos': Point3(312.715, 48.787, 29.405),'Hpr': VBase3(-106.08, 0.0, 0.0),'Pos': Point3(-368.589, -69.02, 32.536),'Scale': VBase3(1.056, 1.056, 1.056),'Visual': {'Model': 'models/vegetation/bush_b'}},'1213033740.3akelts': {'Type': 'Bush','DisableCollision': True,'GridPos': Point3(312.715, 48.787, 29.405),'Hpr': VBase3(16.704, 4.984, 0.661),'Pos': Point3(-363.497, -50.427, 34.583),'Scale': VBase3(1.056, 1.056, 1.056),'Visual': {'Model': 'models/vegetation/bush_c'}},'1213041901.75akelts': {'Type': 'Grass','DisableCollision': False,'Hpr': VBase3(90.222, 9.263, -4.056),'Pos': Point3(-357.01, -27.755, 34.911),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/vegetation/grass_18feet'}},'1216686208.0akelts0': {'Type': 'Wall','DisableCollision': False,'Hpr': VBase3(171.743, 3.0, 0.88),'Pos': Point3(-148.152, 292.987, 34.503),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.6, 0.6, 0.6, 1.0),'Model': 'models/props/pir_m_prp_fnc_wood20'}},'1216686336.0akelts': {'Type': 'Bush','DisableCollision': True,'Hpr': VBase3(11.129, -0.655, -1.478),'Pos': Point3(-167.649, 296.234, 33.517),'Scale': VBase3(0.745, 0.745, 0.745),'Visual': {'Color': (0.796078431372549, 0.73, 0.6196078431372549, 1.0),'Model': 'models/vegetation/bush_e'}},'1216686336.0akelts1': {'Type': 'Wall','DisableCollision': False,'Hpr': VBase3(15.132, -3.103, 7.901),'Pos': Point3(-117.304, 289.354, 32.382),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.6, 0.6, 0.6, 1.0),'Model': 'models/props/pir_m_prp_fnc_wood20'}},'1216686464.0akelts': {'Type': 'Wall','DisableCollision': False,'Hpr': VBase3(0.309, -5.027, 9.496),'Pos': Point3(-98.085, 294.394, 29.392),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.6, 0.6, 0.6, 1.0),'Model': 'models/props/pir_m_prp_fnc_wood20'}},'1216686464.0akelts0': {'Type': 'Bush','DisableCollision': True,'Hpr': VBase3(-87.916, -7.294, -2.124),'Pos': 
Point3(-66.316, 295.604, 23.777),'Scale': VBase3(1.35, 1.35, 1.35),'Visual': {'Model': 'models/vegetation/bush_d'}},'1216686592.0akelts': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(-33.601, 0.0, 0.0),'Pos': Point3(-88.253, 292.699, 27.172),'Scale': VBase3(0.945, 1.292, 1.479),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1216686592.0akelts0': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(-3.572, 0.484, 7.644),'Pos': Point3(-75.694, 289.713, 25.923),'Scale': VBase3(2.076, 1.292, 1.479),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1216686592.0akelts1': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(51.09, 0.0, 0.0),'Pos': Point3(-61.14, 294.576, 19.112),'Scale': VBase3(1.465, 1.791, 2.603),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1216686592.0akelts2': {'Type': 'Rock','DisableCollision': True,'Hpr': VBase3(-26.968, -7.678, -1.372),'Pos': Point3(-60.32, 299.282, 19.194),'Scale': VBase3(2.759, 2.759, 3.114),'Visual': {'Color': (0.30000001192092896, 0.30000001192092896, 0.30000001192092896, 1.0),'Model': 'models/props/rock_3_sphere'}},'1216686720.0akelts': {'Type': 'Bush','DisableCollision': True,'Hpr': VBase3(83.172, 6.88, 3.226),'Pos': Point3(-117.057, 294.538, 32.012),'Scale': VBase3(0.66, 0.66, 0.66),'Visual': {'Model': 'models/vegetation/bush_a'}},'1216686848.0akelts': {'Type': 'Bush','DisableCollision': True,'Hpr': VBase3(44.397, 3.349, 6.821),'Pos': Point3(-77.456, 293.936, 25.467),'Scale': VBase3(0.898, 0.898, 0.898),'Visual': {'Model': 'models/vegetation/bush_e'}},'1216766113.92aapatel': {'Type': 'Townsperson','Category': 'PvPRewards','AnimSet': 'default','AuraFX': 'None','Boss': False,'CustomModel': 'None','GhostColor': 'None','GhostFX': 0,'Greeting Animation': '','HelpID': 'NONE','Holiday': '','Hpr': VBase3(156.659, 0.0, 0.0),'Instanced World': 'None','Level': '37','Notice Animation 1': '','Notice Animation 2': 
'','Patrol Radius': '12.0000','Pos': Point3(-120.709, -40.882, 26.486),'PoseAnim': '','PoseFrame': '','Private Status': 'All','PropFXLeft': 'None','PropFXRight': 'None','PropLeft': 'None','PropRight': 'None','Respawns': True,'Scale': VBase3(1.0, 1.0, 1.0),'ShopID': 'PRIVATEER_TATTOOS','Start State': 'Idle','StartFrame': '0','Team': 'Villager','TrailFX': 'None','TrailLeft': 'None','TrailRight': 'None','VisSize': '','Zombie': False,'spawnTimeAlt': '','spawnTimeBegin': 0.0,'spawnTimeEnd': 0.0}},'PVPTeam': '1','Team': 1,'Undockable': False,'Visibility': 'Grid','Visual': {'Model': 'models/islands/pir_m_are_isl_pvpFrench'}}},'Node Links': [],'Layers': {'Collisions': ['1184008208.59kmuller', '1184016064.62kmuller', '1184013852.84kmuller', '1185822696.06kmuller', '1184006140.32kmuller', '1184002350.98kmuller', '1184007573.29kmuller', '1184021176.59kmuller', '1184005963.59kmuller', '1188324241.31akelts', '1184006537.34kmuller', '1184006605.81kmuller', '1187139568.33kmuller', '1188324186.98akelts', '1184006730.66kmuller', '1184007538.51kmuller', '1184006188.41kmuller', '1184021084.27kmuller', '1185824396.94kmuller', '1185824250.16kmuller', '1185823630.52kmuller', '1185823760.23kmuller', '1185824497.83kmuller', '1185824751.45kmuller', '1187739103.34akelts', '1188323993.34akelts', '1184016538.29kmuller', '1185822200.97kmuller', '1184016225.99kmuller', '1195241421.34akelts', '1195242796.08akelts', '1184020642.13kmuller', '1195237994.63akelts', '1184020756.88kmuller', '1184020833.4kmuller', '1185820992.97kmuller', '1185821053.83kmuller', '1184015068.54kmuller', '1184014935.82kmuller', '1185821432.88kmuller', '1185821701.86kmuller', '1195240137.55akelts', '1195241539.38akelts', '1195238422.3akelts', '1195238473.22akelts', '1185821453.17kmuller', '1184021269.96kmuller', '1185821310.89kmuller', '1185821165.59kmuller', '1185821199.36kmuller', '1185822035.98kmuller', '1184015806.59kmuller', '1185822059.48kmuller', '1185920461.76kmuller', '1194984449.66akelts', '1185824206.22kmuller', 
'1184003446.23kmuller', '1184003254.85kmuller', '1184003218.74kmuller', '1184002700.44kmuller', '1186705073.11kmuller', '1187658531.86akelts', '1186705214.3kmuller', '1185824927.28kmuller', '1184014204.54kmuller', '1184014152.84kmuller']},'ObjectIds': {'1196970080.56sdnaik': '["Objects"]["1196970080.56sdnaik"]','1196991789.28sdnaik': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1196991789.28sdnaik"]','1196991806.06sdnaik': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1196991806.06sdnaik"]','1201548250.81kmuller': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1201548250.81kmuller"]','1201548362.32kmuller': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1201548362.32kmuller"]','1201548416.07kmuller': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1201548416.07kmuller"]','1201548416.07kmuller0': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1201548416.07kmuller"]','1201548787.04kmuller': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1201548787.04kmuller"]','1201558950.67kmuller': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1201558950.67kmuller"]','1201558963.15kmuller': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1201558963.15kmuller"]','1202419919.16akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1202419919.16akelts"]','1202519353.83akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1202519353.83akelts"]','1202519353.83akelts0': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1202519353.83akelts"]','1202519757.13akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1202519757.13akelts"]','1202521042.56akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1202521042.56akelts"]','1202521625.98akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1202521625.98akelts"]','1202521699.22akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1202521699.22akelts"]','1202521744.44akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1202521744.44akelts"]','1202521770.64akelts': 
'["Objects"]["1196970080.56sdnaik"]["Objects"]["1202521770.64akelts"]','1202521854.64akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1202521854.64akelts"]','1202521861.92akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1202521861.92akelts"]','1202521909.2akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1202521909.2akelts"]','1202521931.22akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1202521931.22akelts"]','1202522010.05akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1202522010.05akelts"]','1202522090.09akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1202522090.09akelts"]','1202522096.61akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1202522096.61akelts"]','1202522133.41akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1202522133.41akelts"]','1202839503.38akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1202839503.38akelts"]','1202839506.13akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1202839506.13akelts"]','1202839507.11akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1202839507.11akelts"]','1202839581.52akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1202839581.52akelts"]','1202839582.81akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1202839582.81akelts"]','1202839585.47akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1202839585.47akelts"]','1202839586.58akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1202839586.58akelts"]','1202839590.66akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1202839590.66akelts"]','1202839593.34akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1202839593.34akelts"]','1202839599.58akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1202839599.58akelts"]','1202839600.83akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1202839600.83akelts"]','1202839604.0akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1202839604.0akelts"]','1202839606.13akelts': 
'["Objects"]["1196970080.56sdnaik"]["Objects"]["1202839606.13akelts"]','1202839607.27akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1202839607.27akelts"]','1202839608.95akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1202839608.95akelts"]','1202843883.48akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1202843883.48akelts"]','1202844035.08akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1202844035.08akelts"]','1202844089.09akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1202844089.09akelts"]','1202844133.89akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1202844133.89akelts"]','1202844200.77akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1202844200.77akelts"]','1202846001.67akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1202846001.67akelts"]','1202846053.19akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1202846053.19akelts"]','1202846053.19akelts0': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1202846053.19akelts"]','1202846053.25akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1202846053.25akelts"]','1202846053.28akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1202846053.28akelts"]','1202846446.94akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1202846446.94akelts"]','1203009085.13akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1203009085.13akelts"]','1203009093.11akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1203009093.11akelts"]','1203028879.95akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1203028879.95akelts"]','1203029906.03akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1203029906.03akelts"]','1203029906.05akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1203029906.05akelts"]','1203030236.98akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1203030236.98akelts"]','1203030255.86akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1203030255.86akelts"]','1203030322.05akelts': 
'["Objects"]["1196970080.56sdnaik"]["Objects"]["1203030322.05akelts"]','1203031365.84akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1203031365.84akelts"]','1203031365.84akelts0': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1203031365.84akelts"]','1203446211.75akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1203446211.75akelts"]','1203446214.03akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1203446214.03akelts"]','1203446215.28akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1203446215.28akelts"]','1203446268.36akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1203446268.36akelts"]','1203446290.13akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1203446290.13akelts"]','1203446376.64akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1203446376.64akelts"]','1203446441.06akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1203446441.06akelts"]','1203446504.67akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1203446504.67akelts"]','1203446683.59akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1203446683.59akelts"]','1203446729.5akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1203446729.5akelts"]','1203447159.59akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1203447159.59akelts"]','1203447249.77akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1203447249.77akelts"]','1203447289.66akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1203447289.66akelts"]','1203447492.48akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1203447492.48akelts"]','1203447521.73akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1203447521.73akelts"]','1203447554.77akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1203447554.77akelts"]','1203447596.81akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1203447596.81akelts"]','1203447611.5akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1203447611.5akelts"]','1203447627.69akelts': 
'["Objects"]["1196970080.56sdnaik"]["Objects"]["1203447627.69akelts"]','1203447638.27akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1203447638.27akelts"]','1203447680.0akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1203447680.0akelts"]','1203447779.02akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1203447779.02akelts"]','1203447839.47akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1203447839.47akelts"]','1203447866.33akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1203447866.33akelts"]','1203447889.61akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1203447889.61akelts"]','1203447955.42akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1203447955.42akelts"]','1203447972.22akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1203447972.22akelts"]','1203448602.5akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1203448602.5akelts"]','1203448974.69akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1203448974.69akelts"]','1203449100.89akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1203449100.89akelts"]','1203449450.05akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1203449450.05akelts"]','1203469222.88akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1203469222.88akelts"]','1203469248.22akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1203469248.22akelts"]','1203469277.86akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1203469277.86akelts"]','1203469364.98akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1203469364.98akelts"]','1203469432.73akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1203469432.73akelts"]','1203469480.03akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1203469480.03akelts"]','1203469532.7akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1203469532.7akelts"]','1203469661.41akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1203469661.41akelts"]','1203469718.22akelts': 
'["Objects"]["1196970080.56sdnaik"]["Objects"]["1203469718.22akelts"]','1203469953.31akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1203469953.31akelts"]','1203470019.06akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1203470019.06akelts"]','1203470074.14akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1203470074.14akelts"]','1203470326.73akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1203470326.73akelts"]','1203470396.59akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1203470396.59akelts"]','1203470427.94akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1203470427.94akelts"]','1203470693.56akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1203470693.56akelts"]','1203470872.34akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1203470872.34akelts"]','1203471038.22akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1203471038.22akelts"]','1203471069.7akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1203471069.7akelts"]','1203471110.84akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1203471110.84akelts"]','1203965063.7akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1203965063.7akelts"]','1203965125.84akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1203965125.84akelts"]','1203965293.91akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1203965293.91akelts"]','1203965371.95akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1203965371.95akelts"]','1203965503.47akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1203965503.47akelts"]','1203965538.14akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1203965538.14akelts"]','1203974170.3akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1203974170.3akelts"]','1203974235.06akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1203974235.06akelts"]','1204225487.34akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1204225487.34akelts"]','1204232954.13akelts': 
'["Objects"]["1196970080.56sdnaik"]["Objects"]["1204232954.13akelts"]','1204233112.66akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1204233112.66akelts"]','1204233142.94akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1204233142.94akelts"]','1204234158.86akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1204234158.86akelts"]','1204234736.47akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1204234736.47akelts"]','1204234767.58akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1204234767.58akelts"]','1204234795.08akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1204234795.08akelts"]','1204234960.88akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1204234960.88akelts"]','1204235002.48akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1204235002.48akelts"]','1204235062.94akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1204235062.94akelts"]','1204235175.16akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1204235175.16akelts"]','1208537531.58akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208537531.58akelts"]','1208537612.06akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208537612.06akelts"]','1208538744.23akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208538744.23akelts"]','1208538745.98akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208538745.98akelts"]','1208541269.28akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208541269.28akelts"]','1208541296.17akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208541296.17akelts"]','1208541345.56akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208541345.56akelts"]','1208541410.31akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208541410.31akelts"]','1208541454.5akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208541454.5akelts"]','1208541471.16akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208541471.16akelts"]','1208541481.64akelts': 
'["Objects"]["1196970080.56sdnaik"]["Objects"]["1208541481.64akelts"]','1208541557.61akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208541557.61akelts"]','1208541582.89akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208541582.89akelts"]','1208541687.83akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208541687.83akelts"]','1208541829.66akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208541829.66akelts"]','1208541881.3akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208541881.3akelts"]','1208541909.53akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208541909.53akelts"]','1208541918.48akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208541918.48akelts"]','1208541988.52akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208541988.52akelts"]','1208542009.2akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208542009.2akelts"]','1208542108.5akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208542108.5akelts"]','1208542139.22akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208542139.22akelts"]','1208542139.63akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208542139.63akelts"]','1208542140.41akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208542140.41akelts"]','1208542167.61akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208542167.61akelts"]','1208542176.02akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208542176.02akelts"]','1208542185.47akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208542185.47akelts"]','1208542191.47akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208542191.47akelts"]','1208542196.03akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208542196.03akelts"]','1208542200.48akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208542200.48akelts"]','1208542220.23akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208542220.23akelts"]','1208542597.31akelts': 
'["Objects"]["1196970080.56sdnaik"]["Objects"]["1208542597.31akelts"]','1208542717.61akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208542717.61akelts"]','1208542760.47akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208542760.47akelts"]','1208542802.39akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208542802.39akelts"]','1208542846.92akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208542846.92akelts"]','1208542897.36akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208542897.36akelts"]','1208542933.77akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208542933.77akelts"]','1208542952.97akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208542952.97akelts"]','1208542990.66akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208542990.66akelts"]','1208543133.05akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208543133.05akelts"]','1208543149.84akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208543149.84akelts"]','1208559537.25akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208559537.25akelts"]','1208559538.48akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208559538.48akelts"]','1208559538.5akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208559538.5akelts"]','1208559585.09akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208559585.09akelts"]','1208559993.3akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208559993.3akelts"]','1208560253.42akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208560253.42akelts"]','1208560331.47akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208560331.47akelts"]','1208560363.91akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208560363.91akelts"]','1208560474.95akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208560474.95akelts"]','1208560742.78akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208560742.78akelts"]','1208560837.39akelts': 
'["Objects"]["1196970080.56sdnaik"]["Objects"]["1208560837.39akelts"]','1208560860.78akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208560860.78akelts"]','1208560878.19akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208560878.19akelts"]','1208560915.7akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208560915.7akelts"]','1208561074.45akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208561074.45akelts"]','1208561128.33akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208561128.33akelts"]','1208561221.5akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208561221.5akelts"]','1208561255.91akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208561255.91akelts"]','1208564843.16akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208564843.16akelts"]','1208565012.22akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208565012.22akelts"]','1208565157.44akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208565157.44akelts"]','1208565470.34akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208565470.34akelts"]','1208797450.2akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208797450.2akelts"]','1208797943.19akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208797943.19akelts"]','1208797989.22akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208797989.22akelts"]','1208798071.03akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208798071.03akelts"]','1208798220.89akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208798220.89akelts"]','1208798241.98akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208798241.98akelts"]','1208798284.11akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208798284.11akelts"]','1208798332.25akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208798332.25akelts"]','1208798382.77akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208798382.77akelts"]','1208798460.47akelts': 
'["Objects"]["1196970080.56sdnaik"]["Objects"]["1208798460.47akelts"]','1208798474.59akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208798474.59akelts"]','1208798499.8akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208798499.8akelts"]','1208798528.09akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208798528.09akelts"]','1208799106.09akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208799106.09akelts"]','1208799145.66akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208799145.66akelts"]','1208799165.69akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208799165.69akelts"]','1208799215.61akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208799215.61akelts"]','1208801100.09akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208801100.09akelts"]','1208801182.16akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208801182.16akelts"]','1208801336.25akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208801336.25akelts"]','1208801439.14akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208801439.14akelts"]','1208801653.48akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208801653.48akelts"]','1208801677.42akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208801677.42akelts"]','1208801740.33akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208801740.33akelts"]','1208801858.2akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208801858.2akelts"]','1208801896.34akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208801896.34akelts"]','1208801950.42akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208801950.42akelts"]','1208802034.64akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208802034.64akelts"]','1208802151.95akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208802151.95akelts"]','1208802333.55akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208802333.55akelts"]','1208802346.91akelts': 
'["Objects"]["1196970080.56sdnaik"]["Objects"]["1208802346.91akelts"]','1208802361.61akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208802361.61akelts"]','1208802379.75akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208802379.75akelts"]','1208802426.88akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208802426.88akelts"]','1208802585.83akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208802585.83akelts"]','1208802593.55akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208802593.55akelts"]','1208802604.41akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208802604.41akelts"]','1208802673.66akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208802673.66akelts"]','1208802699.16akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208802699.16akelts"]','1208802757.09akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208802757.09akelts"]','1208803223.69akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208803223.69akelts"]','1208803320.33akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208803320.33akelts"]','1208803487.38akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208803487.38akelts"]','1208803525.8akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208803525.8akelts"]','1208803619.64akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208803619.64akelts"]','1208803829.39akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208803829.39akelts"]','1208803903.89akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208803903.89akelts"]','1208803961.48akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208803961.48akelts"]','1208803995.58akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208803995.58akelts"]','1208804069.98akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208804069.98akelts"]','1208823264.25akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208823264.25akelts"]','1208823266.13akelts': 
'["Objects"]["1196970080.56sdnaik"]["Objects"]["1208823266.13akelts"]','1208823266.14akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208823266.14akelts"]','1208824084.88akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208824084.88akelts"]','1208824151.67akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208824151.67akelts"]','1208824198.13akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208824198.13akelts"]','1208824267.17akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208824267.17akelts"]','1208824362.53akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208824362.53akelts"]','1208824405.69akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208824405.69akelts"]','1208824613.52akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208824613.52akelts"]','1208824790.17akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208824790.17akelts"]','1208824810.09akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208824810.09akelts"]','1208824829.36akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208824829.36akelts"]','1208886510.41akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208886510.41akelts"]','1208886578.03akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208886578.03akelts"]','1208890140.34akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208890140.34akelts"]','1208890226.42akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208890226.42akelts"]','1208890285.03akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208890285.03akelts"]','1208890307.03akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208890307.03akelts"]','1208890332.34akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208890332.34akelts"]','1208890370.53akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208890370.53akelts"]','1208890410.26akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208890410.26akelts"]','1208890537.5akelts': 
'["Objects"]["1196970080.56sdnaik"]["Objects"]["1208890537.5akelts"]','1208890571.39akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208890571.39akelts"]','1208890789.8akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208890789.8akelts"]','1208896939.3akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208896939.3akelts"]','1208896940.48akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208896940.48akelts"]','1208896940.5akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208896940.5akelts"]','1208896984.84akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208896984.84akelts"]','1208896984.84akelts0': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208896984.84akelts"]','1208898135.26akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208898135.26akelts"]','1208899163.01akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208899163.01akelts"]','1208899214.44akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208899214.44akelts"]','1208899379.06akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208899379.06akelts"]','1208899615.03akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208899615.03akelts"]','1208899664.2akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208899664.2akelts"]','1208899711.3akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208899711.3akelts"]','1208899895.08akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208899895.08akelts"]','1208899917.34akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208899917.34akelts"]','1208899957.64akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208899957.64akelts"]','1208900010.97akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208900010.97akelts"]','1208900045.91akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208900045.91akelts"]','1208900097.17akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208900097.17akelts"]','1208900115.67akelts': 
'["Objects"]["1196970080.56sdnaik"]["Objects"]["1208900115.67akelts"]','1208900165.75akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208900165.75akelts"]','1208900227.64akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208900227.64akelts"]','1208900324.75akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208900324.75akelts"]','1208900373.67akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208900373.67akelts"]','1208900380.39akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208900380.39akelts"]','1208900382.17akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208900382.17akelts"]','1208900438.95akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208900438.95akelts"]','1208900732.64akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208900732.64akelts"]','1208905499.84akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208905499.84akelts"]','1208905501.14akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208905501.14akelts"]','1208905501.16akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1208905501.16akelts"]','1209081343.05akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209081343.05akelts"]','1209081415.41akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209081415.41akelts"]','1209081604.39akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209081604.39akelts"]','1209142352.5akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209142352.5akelts"]','1209142420.69akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209142420.69akelts"]','1209142456.2akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209142456.2akelts"]','1209142659.83akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209142659.83akelts"]','1209142702.38akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209142702.38akelts"]','1209142753.88akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209142753.88akelts"]','1209142792.05akelts': 
'["Objects"]["1196970080.56sdnaik"]["Objects"]["1209142792.05akelts"]','1209142795.05akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209142795.05akelts"]','1209142800.44akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209142800.44akelts"]','1209142807.2akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209142807.2akelts"]','1209142816.58akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209142816.58akelts"]','1209142823.27akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209142823.27akelts"]','1209142841.72akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209142841.72akelts"]','1209142852.56akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209142852.56akelts"]','1209142858.83akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209142858.83akelts"]','1209142884.73akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209142884.73akelts"]','1209142891.28akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209142891.28akelts"]','1209142941.7akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209142941.7akelts"]','1209142955.89akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209142955.89akelts"]','1209142969.11akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209142969.11akelts"]','1209142984.83akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209142984.83akelts"]','1209143021.88akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209143021.88akelts"]','1209143109.45akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209143109.45akelts"]','1209143149.36akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209143149.36akelts"]','1209143725.8akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209143725.8akelts"]','1209143741.25akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209143741.25akelts"]','1209143836.75akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209143836.75akelts"]','1209143869.5akelts': 
'["Objects"]["1196970080.56sdnaik"]["Objects"]["1209143869.5akelts"]','1209144075.11akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209144075.11akelts"]','1209144120.39akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209144120.39akelts"]','1209144137.89akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209144137.89akelts"]','1209144226.33akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209144226.33akelts"]','1209144240.61akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209144240.61akelts"]','1209144260.67akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209144260.67akelts"]','1209145262.98akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209145262.98akelts"]','1209145315.78akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209145315.78akelts"]','1209145346.77akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209145346.77akelts"]','1209145511.8akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209145511.8akelts"]','1209145586.08akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209145586.08akelts"]','1209145608.59akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209145608.59akelts"]','1209158582.38akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209158582.38akelts"]','1209158803.08akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209158803.08akelts"]','1209158804.3akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209158804.3akelts"]','1209158884.19akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209158884.19akelts"]','1209158907.77akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209158907.77akelts"]','1209158922.67akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209158922.67akelts"]','1209158942.52akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209158942.52akelts"]','1209158955.7akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209158955.7akelts"]','1209486396.61akelts': 
'["Objects"]["1196970080.56sdnaik"]["Objects"]["1209486396.61akelts"]','1209486468.75akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209486468.75akelts"]','1209486534.94akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209486534.94akelts"]','1209486589.19akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209486589.19akelts"]','1209486614.53akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209486614.53akelts"]','1209486677.22akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209486677.22akelts"]','1209486694.28akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209486694.28akelts"]','1209486728.45akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209486728.45akelts"]','1209486790.02akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209486790.02akelts"]','1209486893.03akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209486893.03akelts"]','1209486909.75akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209486909.75akelts"]','1209486962.83akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209486962.83akelts"]','1209487002.83akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209487002.83akelts"]','1209487013.41akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209487013.41akelts"]','1209487028.42akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209487028.42akelts"]','1209487047.83akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209487047.83akelts"]','1209487103.2akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209487103.2akelts"]','1209487274.66akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209487274.66akelts"]','1209487333.36akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209487333.36akelts"]','1209487389.98akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209487389.98akelts"]','1209487424.16akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209487424.16akelts"]','1209487525.34akelts': 
'["Objects"]["1196970080.56sdnaik"]["Objects"]["1209487525.34akelts"]','1209487554.48akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209487554.48akelts"]','1209487594.08akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209487594.08akelts"]','1209487617.22akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209487617.22akelts"]','1209487642.13akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209487642.13akelts"]','1209487701.09akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209487701.09akelts"]','1209487714.66akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209487714.66akelts"]','1209491441.39akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209491441.39akelts"]','1209491444.86akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209491444.86akelts"]','1209491444.89akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209491444.89akelts"]','1209491975.06akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209491975.06akelts"]','1209492183.72akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209492183.72akelts"]','1209492200.92akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209492200.92akelts"]','1209492209.8akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209492209.8akelts"]','1209492216.61akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209492216.61akelts"]','1209492242.84akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209492242.84akelts"]','1209492262.25akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209492262.25akelts"]','1209492321.78akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209492321.78akelts"]','1209492330.23akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209492330.23akelts"]','1209492383.55akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209492383.55akelts"]','1209492437.27akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209492437.27akelts"]','1209492491.13akelts': 
'["Objects"]["1196970080.56sdnaik"]["Objects"]["1209492491.13akelts"]','1209492538.03akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209492538.03akelts"]','1209492558.39akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209492558.39akelts"]','1209492592.89akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209492592.89akelts"]','1209492625.06akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209492625.06akelts"]','1209492656.16akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209492656.16akelts"]','1209492696.38akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209492696.38akelts"]','1209492757.0akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209492757.0akelts"]','1209492798.52akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209492798.52akelts"]','1209492811.14akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209492811.14akelts"]','1209492982.39akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209492982.39akelts"]','1209493001.3akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209493001.3akelts"]','1209493034.36akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209493034.36akelts"]','1209493112.39akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209493112.39akelts"]','1209769069.2akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209769069.2akelts"]','1209776668.64akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209776668.64akelts"]','1209776753.91akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209776753.91akelts"]','1209776791.42akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209776791.42akelts"]','1209776864.33akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209776864.33akelts"]','1209776884.91akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209776884.91akelts"]','1210016877.48akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1210016877.48akelts"]','1210016904.36akelts': 
'["Objects"]["1196970080.56sdnaik"]["Objects"]["1210016904.36akelts"]','1210016931.33akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1210016931.33akelts"]','1210016954.08akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1210016954.08akelts"]','1210019310.33akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1210019310.33akelts"]','1210354645.64akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1210354645.64akelts"]','1210354650.75akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1210354650.75akelts"]','1210354650.84akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1210354650.84akelts"]','1210364025.09akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1210364025.09akelts"]','1210364105.42akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1210364105.42akelts"]','1210364212.52akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1210364212.52akelts"]','1210364242.95akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1210364242.95akelts"]','1210364306.98akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1210364306.98akelts"]','1210364333.72akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1210364333.72akelts"]','1210367481.94akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1210367481.94akelts"]','1210367542.7akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1210367542.7akelts"]','1210367636.27akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1210367636.27akelts"]','1210367763.34akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1210367763.34akelts"]','1210367878.2akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1210367878.2akelts"]','1210367939.16akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1210367939.16akelts"]','1210369671.58akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1210369671.58akelts"]','1210369772.67akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1210369772.67akelts"]','1210369842.94akelts': 
'["Objects"]["1196970080.56sdnaik"]["Objects"]["1210369842.94akelts"]','1210369880.25akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1210369880.25akelts"]','1210370053.67akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1210370053.67akelts"]','1210370151.59akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1210370151.59akelts"]','1210370175.25akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1210370175.25akelts"]','1210370213.61akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1210370213.61akelts"]','1210370296.83akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1210370296.83akelts"]','1210370345.66akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1210370345.66akelts"]','1210370417.3akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1210370417.3akelts"]','1210370501.63akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1210370501.63akelts"]','1210370515.27akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1210370515.27akelts"]','1210370516.02akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1210370516.02akelts"]','1210370516.34akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1210370516.34akelts"]','1210370517.23akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1210370517.23akelts"]','1210370606.14akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1210370606.14akelts"]','1210370644.22akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1210370644.22akelts"]','1210370650.47akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1210370650.47akelts"]','1210370734.7akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1210370734.7akelts"]','1210370791.75akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1210370791.75akelts"]','1210370813.42akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1210370813.42akelts"]','1210370834.7akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1210370834.7akelts"]','1210370878.17akelts': 
'["Objects"]["1196970080.56sdnaik"]["Objects"]["1210370878.17akelts"]','1210370944.19akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1210370944.19akelts"]','1210370972.41akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1210370972.41akelts"]','1210371019.55akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1210371019.55akelts"]','1210371098.14akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1210371098.14akelts"]','1210371142.19akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1210371142.19akelts"]','1210371224.86akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1210371224.86akelts"]','1210371243.42akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1210371243.42akelts"]','1210371257.77akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1210371257.77akelts"]','1210371260.97akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1210371260.97akelts"]','1210371327.56akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1210371327.56akelts"]','1210371404.84akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1210371404.84akelts"]','1210371434.41akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1210371434.41akelts"]','1210371447.73akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1210371447.73akelts"]','1210371638.52akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1210371638.52akelts"]','1210371640.69akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1210371640.69akelts"]','1210371640.7akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1210371640.7akelts"]','1210704930.45kmuller': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1202519353.83akelts"]["Objects"]["1210704930.45kmuller"]','1210704940.31kmuller': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1202846053.19akelts"]["Objects"]["1210704940.31kmuller"]','1210704940.34kmuller': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1202846053.19akelts"]["Objects"]["1210704940.34kmuller"]','1210710642.36kmuller': 
'["Objects"]["1196970080.56sdnaik"]["Objects"]["1210710642.36kmuller"]','1211486169.14akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1211486169.14akelts"]','1211486195.58akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1211486195.58akelts"]','1211486216.97akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1211486216.97akelts"]','1211486244.61akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1211486244.61akelts"]','1211486334.98akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1211486334.98akelts"]','1211486364.42akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1211486364.42akelts"]','1211486372.42akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1211486372.42akelts"]','1211487140.7akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1211487140.7akelts"]','1211487341.31akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1211487341.31akelts"]','1211487383.75akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1211487383.75akelts"]','1211487432.39akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1211487432.39akelts"]','1211487485.69akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1211487485.69akelts"]','1211487601.42akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1211487601.42akelts"]','1211487635.59akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1211487635.59akelts"]','1211925120.0WDIG': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1211925120.0WDIG"]','1211926144.0WDIG': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1211926144.0WDIG"]','1212021925.08akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1212021925.08akelts"]','1212021989.13akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1212021989.13akelts"]','1212022009.59akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1212022009.59akelts"]','1212022057.0akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1212022057.0akelts"]','1212022114.48akelts': 
'["Objects"]["1196970080.56sdnaik"]["Objects"]["1212022114.48akelts"]','1212022214.7akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1212022214.7akelts"]','1212022283.2akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1212022283.2akelts"]','1212022321.92akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1212022321.92akelts"]','1212022390.09akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1212022390.09akelts"]','1212022415.19akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1212022415.19akelts"]','1212022441.52akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1212022441.52akelts"]','1212022459.69akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1212022459.69akelts"]','1212022481.81akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1212022481.81akelts"]','1212022513.88akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1212022513.88akelts"]','1212022536.36akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1212022536.36akelts"]','1212022609.84akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1212022609.84akelts"]','1213033327.77akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1213033327.77akelts"]','1213033380.48akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1213033380.48akelts"]','1213033475.47akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1213033475.47akelts"]','1213033675.06akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1213033675.06akelts"]','1213033712.05akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1213033712.05akelts"]','1213033740.3akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1213033740.3akelts"]','1213041901.75akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1213041901.75akelts"]','1216686208.0akelts0': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1216686208.0akelts0"]','1216686336.0akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1216686336.0akelts"]','1216686336.0akelts1': 
'["Objects"]["1196970080.56sdnaik"]["Objects"]["1216686336.0akelts1"]','1216686464.0akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1216686464.0akelts"]','1216686464.0akelts0': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1216686464.0akelts0"]','1216686592.0akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1216686592.0akelts"]','1216686592.0akelts0': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1216686592.0akelts0"]','1216686592.0akelts1': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1216686592.0akelts1"]','1216686592.0akelts2': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1216686592.0akelts2"]','1216686720.0akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1216686720.0akelts"]','1216686848.0akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1216686848.0akelts"]','1216766113.92aapatel': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1216766113.92aapatel"]','1231795765.84akelts': '["Objects"]["1196970080.56sdnaik"]["Objects"]["1209142352.5akelts"]["Objects"]["1231795765.84akelts"]'}}
# Camera/scene snapshot for this world file: camera position, orientation
# (heading/pitch/roll), focal length, a sky-state preset index and a fog flag.
# NOTE(review): Point3/VBase3 presumably come from Panda3D — confirm against
# this file's imports (not visible in this chunk).
extraInfo = {'camPos': Point3(-124.693, -49.0333, 34.4189),'camHpr': VBase3(-27.0322, -30.8212, 0),'focalLength': 0.726165890694,'skyState': 2,'fog': 0}
| 65,880.666667
| 197,430
| 0.681879
| 27,645
| 197,642
| 4.821523
| 0.073359
| 0.01424
| 0.014089
| 0.12748
| 0.679791
| 0.515264
| 0.458831
| 0.381976
| 0.350414
| 0.339993
| 0
| 0.293284
| 0.048896
| 197,642
| 3
| 197,431
| 65,880.666667
| 0.415793
| 0
| 0
| 0
| 0
| 0
| 0.544795
| 0.267877
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
7cfc9052ff6faec547979606eb005fae6742348f
| 118,953
|
py
|
Python
|
api_tests/nodes/views/test_node_list.py
|
birdbrained/osf.io
|
ca70cf9fdacc2f3771038c8e5bc1c19e7126fd50
|
[
"Apache-2.0"
] | 1
|
2019-12-23T04:30:20.000Z
|
2019-12-23T04:30:20.000Z
|
api_tests/nodes/views/test_node_list.py
|
birdbrained/osf.io
|
ca70cf9fdacc2f3771038c8e5bc1c19e7126fd50
|
[
"Apache-2.0"
] | 20
|
2020-03-24T16:48:03.000Z
|
2022-03-08T22:38:38.000Z
|
api_tests/nodes/views/test_node_list.py
|
birdbrained/osf.io
|
ca70cf9fdacc2f3771038c8e5bc1c19e7126fd50
|
[
"Apache-2.0"
] | null | null | null |
import pytest
from api.base.settings.defaults import API_BASE, MAX_PAGE_SIZE
from api_tests.nodes.filters.test_filters import NodesListFilteringMixin, NodesListDateFilteringMixin
from framework.auth.core import Auth
from osf.models import AbstractNode, Node, NodeLog
from osf.utils.sanitize import strip_html
from osf.utils import permissions
from osf_tests.factories import (
CollectionFactory,
ProjectFactory,
NodeFactory,
RegistrationFactory,
AuthUserFactory,
UserFactory,
PreprintFactory,
InstitutionFactory,
RegionFactory
)
from addons.osfstorage.settings import DEFAULT_REGION_ID
from rest_framework import exceptions
from tests.utils import assert_items_equal
from website.views import find_bookmark_collection
@pytest.fixture()
def user():
    """Return a freshly created authenticated user (module-level fixture)."""
    return AuthUserFactory()
@pytest.fixture()
def non_contrib():
    """Return a user who is not a contributor on the projects under test."""
    return AuthUserFactory()
@pytest.mark.django_db
class TestNodeList:
    """Tests for listing nodes via ``GET /{API_BASE}nodes/``.

    Covers visibility rules (public vs. private vs. deleted projects),
    exclusion of registrations, the ``root`` relationship, sorting, and
    region embedding.
    """

    @pytest.fixture()
    def deleted_project(self):
        # Deleted projects must never be returned by the list endpoint.
        return ProjectFactory(is_deleted=True)

    @pytest.fixture()
    def private_project(self, user):
        return ProjectFactory(is_public=False, creator=user)

    @pytest.fixture()
    def public_project(self, user):
        return ProjectFactory(is_public=True, creator=user)

    @pytest.fixture()
    def url(self, user):
        # NOTE(review): the `user` fixture is requested but unused here; it
        # still creates a user as a side effect — confirm before removing.
        return '/{}nodes/'.format(API_BASE)

    def test_return(
            self, app, user, non_contrib, deleted_project,
            private_project, public_project, url):
        # test_only_returns_non_deleted_public_projects
        res = app.get(url)
        node_json = res.json['data']
        ids = [each['id'] for each in node_json]
        assert public_project._id in ids
        assert deleted_project._id not in ids
        assert private_project._id not in ids

        # test_return_public_node_list_logged_out_user
        res = app.get(url, expect_errors=True)
        assert res.status_code == 200
        assert res.content_type == 'application/vnd.api+json'
        ids = [each['id'] for each in res.json['data']]
        assert public_project._id in ids
        assert private_project._id not in ids

        # test_return_public_node_list_logged_in_user
        # BUGFIX: was `auth=non_contrib`, which passed the user object itself
        # instead of its credential tuple, so the request was not actually
        # authenticated. Every other authenticated call in this class uses
        # `<user>.auth`; do the same here.
        res = app.get(url, auth=non_contrib.auth)
        assert res.status_code == 200
        assert res.content_type == 'application/vnd.api+json'
        ids = [each['id'] for each in res.json['data']]
        assert public_project._id in ids
        assert private_project._id not in ids

        # test_return_private_node_list_logged_out_user
        res = app.get(url)
        ids = [each['id'] for each in res.json['data']]
        assert public_project._id in ids
        assert private_project._id not in ids

        # test_return_private_node_list_logged_in_contributor
        res = app.get(url, auth=user.auth)
        assert res.status_code == 200
        assert res.content_type == 'application/vnd.api+json'
        ids = [each['id'] for each in res.json['data']]
        assert public_project._id in ids
        assert private_project._id in ids

        # test_return_private_node_list_logged_in_non_contributor
        res = app.get(url, auth=non_contrib.auth)
        ids = [each['id'] for each in res.json['data']]
        assert public_project._id in ids
        assert private_project._id not in ids

    def test_node_list_does_not_returns_registrations(
            self, app, user, public_project, url):
        # Registrations have their own endpoint and must be filtered out here.
        registration = RegistrationFactory(
            project=public_project, creator=user)
        res = app.get(url, auth=user.auth)
        ids = [each['id'] for each in res.json['data']]
        assert registration._id not in ids

    def test_node_list_has_root(
            self, app, user, url, public_project, private_project,
            deleted_project):
        # Every serialized node must expose a `root` relationship.
        res = app.get(url, auth=user.auth)
        projects_with_root = sum(
            1 for project in res.json['data']
            if project['relationships'].get('root', None)
        )
        assert projects_with_root != 0
        assert all(
            each['relationships'].get('root') is not None
            for each in res.json['data']
        )

    def test_node_list_has_proper_root(self, app, user, url):
        # A child's embedded root must resolve to the actual root node.
        project_one = ProjectFactory(title='Project One', is_public=True)
        ProjectFactory(parent=project_one, is_public=True)
        res = app.get(url + '?embed=root&embed=parent', auth=user.auth)
        for project_json in res.json['data']:
            project = AbstractNode.load(project_json['id'])
            assert project_json['embeds']['root']['data']['id'] == project.root._id

    def test_node_list_sorting(self, app, url):
        # Both a date sort (descending) and a text sort must be accepted.
        res = app.get('{}?sort=-created'.format(url))
        assert res.status_code == 200
        res = app.get('{}?sort=title'.format(url))
        assert res.status_code == 200

    def test_node_list_embed_region(self, app, url, public_project):
        # Embedding `region` yields the default storage region for the node.
        res = app.get('{}?embed=region'.format(url))
        assert res.status_code == 200
        assert res.json['data'][0]['embeds']['region']['data']['id'] == DEFAULT_REGION_ID
@pytest.mark.django_db
@pytest.mark.enable_quickfiles_creation
@pytest.mark.enable_bookmark_creation
class TestNodeFiltering:

    @pytest.fixture()
    def user_one(self):
        return AuthUserFactory()

    @pytest.fixture()
    def user_two(self):
        return AuthUserFactory()

    @pytest.fixture()
    def tag_one(self):
        return 'tag_one'

    @pytest.fixture()
    def tag_two(self):
        return 'tag_two'

    @pytest.fixture()
    def public_project_one(self, tag_one, tag_two):
        """Public project carrying both tags; the two `save=False` adds are
        persisted by the single save() below."""
        public_project_one = ProjectFactory(
            title='Public Project One',
            description='One',
            is_public=True)
        public_project_one.add_tag(
            tag_one,
            Auth(public_project_one.creator),
            save=False)
        public_project_one.add_tag(
            tag_two,
            Auth(public_project_one.creator),
            save=False)
        public_project_one.save()
        return public_project_one

    @pytest.fixture()
    def public_project_two(self, tag_one):
        """Public project carrying only tag_one."""
        public_project_two = ProjectFactory(
            title='Public Project Two',
            description='One or Two',
            is_public=True)
        public_project_two.add_tag(
            tag_one,
            Auth(public_project_two.creator),
            save=True)
        return public_project_two

    @pytest.fixture()
    def public_project_three(self):
        """Public, untagged project with a title no filter below matches."""
        return ProjectFactory(title='Unique Test Title', description='three', is_public=True)

    @pytest.fixture()
    def user_one_private_project(self, user_one):
        return ProjectFactory(
            title='User One Private Project',
            is_public=False,
            creator=user_one)

    @pytest.fixture()
    def user_two_private_project(self, user_two):
        return ProjectFactory(
            title='User Two Private Project',
            is_public=False,
            creator=user_two)

    @pytest.fixture()
    def preprint(self, user_one):
        return PreprintFactory(creator=user_one)

    @pytest.fixture()
    def folder(self):
        """Collection -- collections must never show up in node results."""
        return CollectionFactory()

    @pytest.fixture()
    def bookmark_collection(self, user_one):
        return find_bookmark_collection(user_one)

    @pytest.fixture()
    def url(self):
        return '/{}nodes/'.format(API_BASE)

    def test_filtering(
            self, app, user_one, public_project_one,
            public_project_two, public_project_three,
            user_one_private_project, user_two_private_project,
            preprint):
        """Filtering by id (single, multiple, bracketed), title/description
        `ne`, and the boolean `preprint` filter."""
        # test_filtering_by_id
        url = '/{}nodes/?filter[id]={}'.format(
            API_BASE, public_project_one._id)
        res = app.get(url, auth=user_one.auth)
        assert res.status_code == 200
        ids = [each['id'] for each in res.json['data']]
        assert public_project_one._id in ids
        assert len(ids) == 1

        # test_filtering_by_multiple_ids
        url = '/{}nodes/?filter[id]={},{}'.format(
            API_BASE, public_project_one._id, public_project_two._id)
        res = app.get(url, auth=user_one.auth)
        assert res.status_code == 200
        ids = [each['id'] for each in res.json['data']]
        assert public_project_one._id in ids
        assert public_project_two._id in ids
        assert len(ids) == 2

        # test_filtering_by_multiple_ids_one_private
        # The other user's private project is silently dropped, not a 403.
        url = '/{}nodes/?filter[id]={},{}'.format(
            API_BASE, public_project_one._id, user_two_private_project._id)
        res = app.get(url, auth=user_one.auth)
        assert res.status_code == 200
        ids = [each['id'] for each in res.json['data']]
        assert public_project_one._id in ids
        assert user_two_private_project._id not in ids
        assert len(ids) == 1

        # test_filtering_by_multiple_ids_brackets_in_query_params
        url = '/{}nodes/?filter[id]=[{}, {}]'.format(
            API_BASE, public_project_one._id, public_project_two._id)
        res = app.get(url, auth=user_one.auth)
        assert res.status_code == 200
        ids = [each['id'] for each in res.json['data']]
        assert public_project_one._id in ids
        assert public_project_two._id in ids
        assert len(ids) == 2

        # test_filtering_on_title_not_equal
        url = '/{}nodes/?filter[title][ne]=Public%20Project%20One'.format(
            API_BASE)
        res = app.get(url, auth=user_one.auth)
        assert res.status_code == 200
        data = res.json['data']
        assert len(data) == 4
        titles = [each['attributes']['title'] for each in data]
        assert public_project_one.title not in titles
        assert public_project_two.title in titles
        assert public_project_three.title in titles
        assert user_one_private_project.title in titles

        # test_filtering_on_description_not_equal
        url = '/{}nodes/?filter[description][ne]=reason%20is%20shook'.format(
            API_BASE)
        res = app.get(url, auth=user_one.auth)
        assert res.status_code == 200
        data = res.json['data']
        assert len(data) == 5
        descriptions = [each['attributes']['description'] for each in data]
        assert public_project_one.description in descriptions
        assert public_project_three.description in descriptions
        assert user_one_private_project.description in descriptions

        # test_filtering_on_preprint
        url = '/{}nodes/?filter[preprint]=true'.format(API_BASE)
        res = app.get(url, auth=user_one.auth)
        assert res.status_code == 200
        data = res.json['data']
        ids = [each['id'] for each in data]
        preprints = Node.objects.filter(
            preprint_file__isnull=False
        ).exclude(_is_preprint_orphan=True)
        assert len(data) == len(preprints)
        assert preprint.node._id in ids
        assert public_project_one._id not in ids
        assert public_project_two._id not in ids
        assert public_project_three._id not in ids

        # test_filtering_out_preprint
        url = '/{}nodes/?filter[preprint]=false'.format(API_BASE)
        res = app.get(url, auth=user_one.auth)
        assert res.status_code == 200
        data = res.json['data']
        ids = [each['id'] for each in data]
        assert preprint.node._id not in ids
        assert public_project_one._id in ids
        assert public_project_two._id in ids
        assert public_project_three._id in ids

    def test_filtering_by_category(self, app, user_one):
        project_one = ProjectFactory(creator=user_one, category='hypothesis')
        project_two = ProjectFactory(creator=user_one, category='procedure')
        url = '/{}nodes/?filter[category]=hypothesis'.format(API_BASE)
        res = app.get(url, auth=user_one.auth)
        node_json = res.json['data']
        ids = [each['id'] for each in node_json]
        assert project_one._id in ids
        assert project_two._id not in ids

    def test_filtering_by_public(self, app, user_one):
        public_project = ProjectFactory(creator=user_one, is_public=True)
        private_project = ProjectFactory(creator=user_one, is_public=False)
        url = '/{}nodes/?filter[public]=false'.format(API_BASE)
        res = app.get(url, auth=user_one.auth)
        node_json = res.json['data']
        # No public projects returned
        assert not any([each['attributes']['public'] for each in node_json])
        ids = [each['id'] for each in node_json]
        assert public_project._id not in ids
        assert private_project._id in ids

        url = '/{}nodes/?filter[public]=true'.format(API_BASE)
        res = app.get(url, auth=user_one.auth)
        node_json = res.json['data']
        # No private projects returned
        assert all([each['attributes']['public'] for each in node_json])
        ids = [each['id'] for each in node_json]
        assert private_project._id not in ids
        assert public_project._id in ids

    def test_filtering_by_public_toplevel(self, app, user_one):
        """Same as above but combined with filter[parent]=null (roots only)."""
        public_project = ProjectFactory(creator=user_one, is_public=True)
        private_project = ProjectFactory(creator=user_one, is_public=False)
        url = '/{}nodes/?filter[public]=false&filter[parent]=null'.format(
            API_BASE)
        res = app.get(url, auth=user_one.auth)
        node_json = res.json['data']
        # No public projects returned
        assert not any([each['attributes']['public'] for each in node_json])
        ids = [each['id'] for each in node_json]
        assert public_project._id not in ids
        assert private_project._id in ids

        url = '/{}nodes/?filter[public]=true&filter[parent]=null'.format(
            API_BASE)
        res = app.get(url, auth=user_one.auth)
        node_json = res.json['data']
        # No private projects returned
        assert all([each['attributes']['public'] for each in node_json])
        ids = [each['id'] for each in node_json]
        assert private_project._id not in ids
        assert public_project._id in ids

    def test_filtering_tags(
            self, app, public_project_one, public_project_two,
            tag_one, tag_two):
        """Tag filters AND together; `null` matches untagged nodes."""
        url = '/{}nodes/?filter[tags]={}'.format(API_BASE, tag_one)
        res = app.get(url, auth=public_project_one.creator.auth)
        node_json = res.json['data']
        ids = [each['id'] for each in node_json]
        assert public_project_one._id in ids
        assert public_project_two._id in ids

        # test_filter_two_tags
        url = '/{}nodes/?filter[tags]={}&filter[tags]={}'.format(
            API_BASE, tag_one, tag_two)
        res = app.get(url, auth=public_project_one.creator.auth)
        node_json = res.json['data']
        ids = [each['id'] for each in node_json]
        assert public_project_one._id in ids
        assert public_project_two._id not in ids

        # test_filter_no_tags
        project_no_tag = ProjectFactory(
            title='Project No Tags', is_public=True)
        url = '/{}nodes/?filter[tags]=null'.format(API_BASE)
        res = app.get(url, auth=project_no_tag.creator.auth)
        node_json = res.json['data']
        ids = [each['id'] for each in node_json]
        assert public_project_one._id not in ids
        assert public_project_two._id not in ids
        assert project_no_tag._id in ids

    def test_filtering_multiple_fields(self, app, user_one):
        """Comma-separated field filters OR across fields, and combine with
        the date_created filter."""
        project_public_one = ProjectFactory(
            is_public=True, title='test', creator=user_one)
        project_private_one = ProjectFactory(
            is_public=False, title='test', creator=user_one)
        project_public_two = ProjectFactory(
            is_public=True,
            title='kitten',
            creator=user_one,
            description='test')
        project_private_two = ProjectFactory(
            is_public=False, title='kitten', creator=user_one)
        project_public_three = ProjectFactory(
            is_public=True, title='test', creator=user_one)
        project_public_four = ProjectFactory(
            is_public=True,
            title='test',
            creator=user_one,
            description='test')
        # Pin creation dates so the date_created filter below is exact.
        for project in [
                project_public_one, project_public_two,
                project_public_three, project_private_one,
                project_private_two]:
            project.created = '2016-10-25 00:00:00.000000+00:00'
            project.save()
        project_public_four.created = '2016-10-28 00:00:00.000000+00:00'
        project_public_four.save()
        expected = [
            project_public_one._id,
            project_public_two._id,
            project_public_three._id]
        url = '/{}nodes/?filter[public]=true&filter[title,description]=test&filter[date_created]=2016-10-25'.format(
            API_BASE)
        res = app.get(url, auth=user_one.auth)
        actual = [node['id'] for node in res.json['data']]
        assert len(expected) == len(actual)
        assert set(expected) == set(actual)

    def test_filtering_tags_exact(
            self, app, user_one,
            public_project_one,
            public_project_two):
        """A tag filter matches whole tags, not substrings of other tags."""
        public_project_one.add_tag('logic', Auth(user_one))
        public_project_two.add_tag('logic', Auth(user_one))
        public_project_one.add_tag('reason', Auth(user_one))
        res = app.get(
            '/{}nodes/?filter[tags]=reason'.format(
                API_BASE
            ),
            auth=user_one.auth
        )
        assert len(res.json.get('data')) == 1

    def test_filtering_tags_capitalized_query(
            self, app, user_one, public_project_one):
        """Tag filtering is case-insensitive on the query side."""
        public_project_one.add_tag('covfefe', Auth(user_one))
        res = app.get(
            '/{}nodes/?filter[tags]=COVFEFE'.format(
                API_BASE
            ),
            auth=user_one.auth
        )
        assert len(res.json.get('data')) == 1

    def test_filtering_tags_capitalized_tag(
            self, app, user_one, public_project_one):
        """Tag filtering is case-insensitive on the stored-tag side."""
        public_project_one.add_tag('COVFEFE', Auth(user_one))
        res = app.get(
            '/{}nodes/?filter[tags]=covfefe'.format(
                API_BASE
            ),
            auth=user_one.auth
        )
        assert len(res.json.get('data')) == 1

    def test_filtering_on_multiple_tags(
            self, app, user_one, public_project_one):
        public_project_one.add_tag('lovechild', Auth(user_one))
        public_project_one.add_tag('flowerchild', Auth(user_one))
        res = app.get(
            '/{}nodes/?filter[tags]=lovechild&filter[tags]=flowerchild'.format(
                API_BASE
            ),
            auth=user_one.auth
        )
        assert len(res.json.get('data')) == 1

    def test_filtering_on_multiple_tags_must_match_both(
            self, app, user_one, public_project_one):
        public_project_one.add_tag('lovechild', Auth(user_one))
        res = app.get(
            '/{}nodes/?filter[tags]=lovechild&filter[tags]=flowerchild'.format(
                API_BASE
            ),
            auth=user_one.auth
        )
        assert len(res.json.get('data')) == 0

    def test_filtering_tags_returns_distinct(
            self, app, user_one, public_project_one):
        # regression test for returning multiple of the same file
        public_project_one.add_tag('cat', Auth(user_one))
        public_project_one.add_tag('cAt', Auth(user_one))
        public_project_one.add_tag('caT', Auth(user_one))
        public_project_one.add_tag('CAT', Auth(user_one))
        res = app.get(
            '/{}nodes/?filter[tags]=cat'.format(
                API_BASE
            ),
            auth=user_one.auth
        )
        assert len(res.json.get('data')) == 1

    def test_filtering_contributors(
            self, app, user_one, user_one_private_project,
            preprint):
        """user_one contributes to the private project and the preprint
        node -- two results."""
        res = app.get(
            '/{}nodes/?filter[contributors]={}'.format(
                API_BASE, user_one._id
            ),
            auth=user_one.auth
        )
        assert len(res.json.get('data')) == 2

    def test_filtering_contributors_bad_id(self, app, user_one):
        """An unknown contributor guid yields an empty result, not an error."""
        res = app.get(
            '/{}nodes/?filter[contributors]=alovechilddresseduplikeaflowerchild'.format(
                API_BASE
            ),
            auth=user_one.auth
        )
        assert len(res.json.get('data')) == 0

    def test_get_projects(
            self, app, user_one, public_project_one,
            public_project_two, public_project_three,
            user_one_private_project, user_two_private_project,
            folder, bookmark_collection, url):
        """Title/description substring filtering, logged in and out;
        collections and bookmark collections are always excluded."""
        # test_get_all_projects_with_no_filter_logged_in
        res = app.get(url, auth=user_one.auth)
        node_json = res.json['data']
        ids = [each['id'] for each in node_json]
        assert public_project_one._id in ids
        assert public_project_two._id in ids
        assert public_project_three._id in ids
        assert user_one_private_project._id in ids
        assert user_two_private_project._id not in ids
        assert folder._id not in ids
        assert bookmark_collection._id not in ids

        # test_get_all_projects_with_no_filter_not_logged_in
        res = app.get(url)
        node_json = res.json['data']
        ids = [each['id'] for each in node_json]
        assert public_project_one._id in ids
        assert public_project_two._id in ids
        assert public_project_three._id in ids
        assert user_one_private_project._id not in ids
        assert user_two_private_project._id not in ids
        assert folder._id not in ids
        assert bookmark_collection._id not in ids

        # test_get_one_project_with_exact_filter_logged_in
        url = '/{}nodes/?filter[title]=Project%20One'.format(API_BASE)
        res = app.get(url, auth=user_one.auth)
        node_json = res.json['data']
        ids = [each['id'] for each in node_json]
        assert public_project_one._id in ids
        assert public_project_two._id not in ids
        assert public_project_three._id not in ids
        assert user_one_private_project._id not in ids
        assert user_two_private_project._id not in ids
        assert folder._id not in ids
        assert bookmark_collection._id not in ids

        # test_get_one_project_with_exact_filter_not_logged_in
        url = '/{}nodes/?filter[title]=Project%20One'.format(API_BASE)
        res = app.get(url)
        node_json = res.json['data']
        ids = [each['id'] for each in node_json]
        assert public_project_one._id in ids
        assert public_project_two._id not in ids
        assert public_project_three._id not in ids
        assert user_one_private_project._id not in ids
        assert user_two_private_project._id not in ids
        assert folder._id not in ids
        assert bookmark_collection._id not in ids

        # test_get_some_projects_with_substring_logged_in
        url = '/{}nodes/?filter[title]=Two'.format(API_BASE)
        res = app.get(url, auth=user_one.auth)
        node_json = res.json['data']
        ids = [each['id'] for each in node_json]
        assert public_project_one._id not in ids
        assert public_project_two._id in ids
        assert public_project_three._id not in ids
        assert user_one_private_project._id not in ids
        assert user_two_private_project._id not in ids
        assert folder._id not in ids
        assert bookmark_collection._id not in ids

        # test_get_some_projects_with_substring_not_logged_in
        url = '/{}nodes/?filter[title]=Two'.format(API_BASE)
        # Fix: this request previously passed `auth=user_one.auth`, so the
        # logged-out scenario was never actually exercised.
        res = app.get(url)
        node_json = res.json['data']
        ids = [each['id'] for each in node_json]
        assert public_project_one._id not in ids
        assert public_project_two._id in ids
        assert public_project_three._id not in ids
        assert user_one_private_project._id not in ids
        assert user_two_private_project._id not in ids
        assert folder._id not in ids
        assert bookmark_collection._id not in ids

        # test_get_only_public_or_my_projects_with_filter_logged_in
        url = '/{}nodes/?filter[title]=Project'.format(API_BASE)
        res = app.get(url, auth=user_one.auth)
        node_json = res.json['data']
        ids = [each['id'] for each in node_json]
        assert public_project_one._id in ids
        assert public_project_two._id in ids
        assert public_project_three._id not in ids
        assert user_one_private_project._id in ids
        assert user_two_private_project._id not in ids
        assert folder._id not in ids
        assert bookmark_collection._id not in ids

        # test_get_only_public_projects_with_filter_not_logged_in
        url = '/{}nodes/?filter[title]=Project'.format(API_BASE)
        res = app.get(url)
        node_json = res.json['data']
        ids = [each['id'] for each in node_json]
        assert public_project_one._id in ids
        assert public_project_two._id in ids
        assert public_project_three._id not in ids
        assert user_one_private_project._id not in ids
        assert user_two_private_project._id not in ids
        assert folder._id not in ids
        assert bookmark_collection._id not in ids

        # test_alternate_filtering_field_logged_in
        url = '/{}nodes/?filter[description]=One%20or%20Two'.format(API_BASE)
        res = app.get(url, auth=user_one.auth)
        node_json = res.json['data']
        ids = [each['id'] for each in node_json]
        assert public_project_one._id not in ids
        assert public_project_two._id in ids
        assert public_project_three._id not in ids
        assert user_one_private_project._id not in ids
        assert user_two_private_project._id not in ids
        assert folder._id not in ids
        assert bookmark_collection._id not in ids

        # test_alternate_filtering_field_not_logged_in
        url = '/{}nodes/?filter[description]=reason'.format(API_BASE)
        res = app.get(url)
        node_json = res.json['data']
        ids = [each['id'] for each in node_json]
        assert public_project_one._id not in ids
        assert public_project_three._id not in ids
        assert user_one_private_project._id not in ids
        assert user_two_private_project._id not in ids
        assert folder._id not in ids
        assert bookmark_collection._id not in ids

    def test_incorrect_filtering_field_not_logged_in(self, app):
        url = '/{}nodes/?filter[notafield]=bogus'.format(API_BASE)
        res = app.get(url, expect_errors=True)
        assert res.status_code == 400
        errors = res.json['errors']
        assert len(errors) == 1
        assert errors[0]['detail'] == '\'notafield\' is not a valid field for this endpoint.'

    def test_filtering_on_root(self, app, user_one):
        """filter[root] returns the whole tree under (and including) root."""
        root = ProjectFactory(is_public=True)
        child = ProjectFactory(parent=root, is_public=True)
        ProjectFactory(parent=root, is_public=True)
        ProjectFactory(parent=child, is_public=True)
        # create some unrelated projects
        ProjectFactory(
            title='A theory on why reason has a ridiculously large project',
            is_public=True)
        ProjectFactory(
            title='How one intern changed thousands of lines within a codebase',
            is_public=True)
        url = '/{}nodes/?filter[root]={}'.format(API_BASE, root._id)
        res = app.get(url, auth=user_one.auth)
        assert res.status_code == 200
        root_nodes = AbstractNode.objects.filter(root__guids___id=root._id)
        assert len(res.json['data']) == root_nodes.count()

    def test_filtering_on_parent(self, app):
        """filter[parent] returns direct children only."""
        root = ProjectFactory(is_public=True)
        parent_one = NodeFactory(parent=root, is_public=True)
        parent_two = NodeFactory(is_public=True, parent=root)
        child_one = NodeFactory(parent=parent_one, is_public=True)
        child_two = NodeFactory(parent=parent_one, is_public=True)
        url = '/{}nodes/?filter[parent]={}'.format(API_BASE, parent_one._id)
        res = app.get(url)
        assert res.status_code == 200
        guids = [each['id'] for each in res.json['data']]
        assert child_one._id in guids
        assert child_two._id in guids
        assert parent_one._id not in guids
        assert parent_two._id not in guids

    def test_filtering_on_null_parent(self, app):
        """filter[parent]=null returns only top-level (root) nodes."""
        # add some nodes to be included
        new_user = AuthUserFactory()
        root = ProjectFactory(is_public=True)
        root_two = ProjectFactory(is_public=True)
        # Build up some nodes not to be included
        child_one = ProjectFactory(parent=root, is_public=True)
        child_two = ProjectFactory(parent=root, is_public=True)
        grandchild = ProjectFactory(parent=child_one, is_public=True)
        url = '/{}nodes/?filter[parent]=null'.format(API_BASE)
        res = app.get(url, auth=new_user.auth)
        assert res.status_code == 200
        public_root_nodes = Node.objects.filter(is_public=True).get_roots()
        assert len(res.json['data']) == public_root_nodes.count()
        guids = [each['id'] for each in res.json['data']]
        assert root._id in guids
        assert root_two._id in guids
        assert child_one._id not in guids
        assert child_two._id not in guids
        assert grandchild._id not in guids

    def test_preprint_filter_excludes_orphans(
            self, app, user_one, preprint, public_project_one,
            public_project_two, public_project_three):
        orphan = PreprintFactory(creator=preprint.node.creator)
        # NOTE(review): the orphan flag is set on the preprint object while
        # the endpoint filters on Node._is_preprint_orphan -- confirm the
        # attribute actually propagates to the node.
        orphan._is_preprint_orphan = True
        orphan.save()
        url = '/{}nodes/?filter[preprint]=true'.format(API_BASE)
        res = app.get(url, auth=user_one.auth)
        assert res.status_code == 200
        data = res.json['data']
        ids = [each['id'] for each in data]
        assert preprint.node._id in ids
        # NOTE(review): `orphan._id` is a preprint guid, not a node guid, so
        # this assertion is vacuously true -- `orphan.node._id` was likely
        # intended (cf. the sibling tests below).
        assert orphan._id not in ids
        assert public_project_one._id not in ids
        assert public_project_two._id not in ids
        assert public_project_three._id not in ids

    def test_deleted_preprint_file_not_in_filtered_results(
            self, app, user_one, preprint):
        orphan = PreprintFactory(creator=preprint.node.creator)
        # orphan the preprint by deleting the file
        orphan.node.preprint_file = None
        orphan.node.save()
        url = '/{}nodes/?filter[preprint]=true'.format(API_BASE)
        res = app.get(url, auth=user_one.auth)
        assert res.status_code == 200
        data = res.json['data']
        ids = [each['id'] for each in data]
        assert preprint.node._id in ids
        assert orphan.node._id not in ids

    def test_deleted_preprint_file_in_preprint_false_filtered_results(
            self, app, user_one, preprint):
        orphan = PreprintFactory(creator=preprint.node.creator)
        # orphan the preprint by deleting the file
        orphan.node.preprint_file = None
        orphan.node.save()
        orphan.refresh_from_db()
        url = '/{}nodes/?filter[preprint]=false'.format(API_BASE)
        res = app.get(url, auth=user_one.auth)
        assert res.status_code == 200
        data = res.json['data']
        ids = [each['id'] for each in data]
        assert preprint.node._id not in ids
        assert orphan.node._id in ids

    def test_unpublished_preprint_not_in_preprint_true_filter_results(
            self, app, user_one, preprint):
        unpublished = PreprintFactory(
            creator=preprint.node.creator,
            is_published=False)
        assert not unpublished.is_published
        url = '/{}nodes/?filter[preprint]=true'.format(API_BASE)
        res = app.get(url, auth=user_one.auth)
        assert res.status_code == 200
        data = res.json['data']
        ids = [each['id'] for each in data]
        assert preprint.node._id in ids
        assert unpublished.node._id not in ids

    def test_unpublished_preprint_in_preprint_false_filter_results(
            self, app, user_one, preprint):
        unpublished = PreprintFactory(
            creator=preprint.node.creator,
            is_published=False)
        assert not unpublished.is_published
        url = '/{}nodes/?filter[preprint]=false'.format(API_BASE)
        res = app.get(url, auth=user_one.auth)
        assert res.status_code == 200
        data = res.json['data']
        ids = [each['id'] for each in data]
        assert preprint.node._id not in ids
        assert unpublished.node._id in ids

    def test_nodes_list_filter_multiple_field(
            self, app, public_project_one, public_project_two,
            public_project_three, user_one):
        """filter[title,description] matches if EITHER field contains the
        value."""
        url = '/{}nodes/?filter[title,description]=One'.format(API_BASE)
        res = app.get(url, auth=user_one.auth)
        node_json = res.json['data']
        ids = [each['id'] for each in node_json]
        assert public_project_one._id in ids
        assert 'One' in public_project_one.title
        assert public_project_two._id in ids
        assert 'One' in public_project_two.description
        assert public_project_three._id not in ids
@pytest.mark.django_db
@pytest.mark.enable_quickfiles_creation
@pytest.mark.enable_implicit_clean
class TestNodeCreate:
    @pytest.fixture()
    def institution_one(self):
        """Institution that user_one is affiliated with."""
        return InstitutionFactory()
@pytest.fixture()
def user_one(self, institution_one):
auth_user = AuthUserFactory()
auth_user.affiliated_institutions.add(institution_one)
return auth_user
    @pytest.fixture()
    def user_two(self):
        """Second user, unaffiliated with any institution."""
        return AuthUserFactory()
    @pytest.fixture()
    def url(self):
        """Node-creation endpoint URL."""
        return '/{}nodes/'.format(API_BASE)
    @pytest.fixture()
    def title(self):
        """Default project title used in creation payloads."""
        return 'Rheisen is bored'
    @pytest.fixture()
    def description(self):
        """Default project description used in creation payloads."""
        return 'Pytest conversions are tedious'
    @pytest.fixture()
    def category(self):
        """Default node category used in creation payloads."""
        return 'data'
    @pytest.fixture()
    def region(self):
        """Non-default storage region."""
        return RegionFactory(name='Frankfort', _id='eu-central-1')
@pytest.fixture()
def url_with_region_query_param(self, region, url):
return url + '?region={}'.format(region._id)
    @pytest.fixture()
    def public_project(self, title, description, category, institution_one):
        """JSON:API payload for a public node affiliated with
        institution_one."""
        return {
            'data': {
                'type': 'nodes',
                'attributes': {
                    'title': title,
                    'description': description,
                    'category': category,
                    'public': True,
                },
                'relationships': {
                    'affiliated_institutions': {
                        'data': [
                            {
                                'type': 'institutions',
                                'id': institution_one._id,
                            }
                        ]
                    }
                },
            }
        }
    @pytest.fixture()
    def private_project(self, title, description, category):
        """JSON:API payload for a private node with no relationships."""
        return {
            'data': {
                'type': 'nodes',
                'attributes': {
                    'title': title,
                    'description': description,
                    'category': category,
                    'public': False
                }
            }
        }
    def test_create_node_errors(
            self, app, user_one, public_project,
            private_project, url):
        """Malformed payloads yield 400; unauthenticated creation yields 401."""
        # test_node_create_invalid_data
        # A bare string is not a JSON:API document.
        res = app.post_json_api(
            url, 'Incorrect data',
            auth=user_one.auth,
            expect_errors=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == exceptions.ParseError.default_detail
        # A top-level list is equally invalid.
        res = app.post_json_api(
            url, ['Incorrect data'],
            auth=user_one.auth,
            expect_errors=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == exceptions.ParseError.default_detail
        # test_creates_public_project_logged_out
        res = app.post_json_api(url, public_project, expect_errors=True)
        assert res.status_code == 401
        assert 'detail' in res.json['errors'][0]
        # test_creates_private_project_logged_out
        res = app.post_json_api(url, private_project, expect_errors=True)
        assert res.status_code == 401
        assert 'detail' in res.json['errors'][0]
def test_creates_public_project_logged_in(
self, app, user_one, public_project, url, institution_one):
res = app.post_json_api(
url, public_project,
expect_errors=True,
auth=user_one.auth)
assert res.status_code == 201
self_link = res.json['data']['links']['self']
assert res.json['data']['attributes']['title'] == public_project['data']['attributes']['title']
assert res.json['data']['attributes']['description'] == public_project['data']['attributes']['description']
assert res.json['data']['attributes']['category'] == public_project['data']['attributes']['category']
assert res.json['data']['relationships']['affiliated_institutions']['links']['self']['href'] == \
'{}relationships/institutions/'.format(self_link)
assert res.content_type == 'application/vnd.api+json'
pid = res.json['data']['id']
project = AbstractNode.load(pid)
assert project.logs.latest().action == NodeLog.AFFILIATED_INSTITUTION_ADDED
assert institution_one in project.affiliated_institutions.all()
    def test_creates_private_project_logged_in_contributor(
            self, app, user_one, private_project, url):
        """An authenticated POST creates a private project and logs it."""
        res = app.post_json_api(url, private_project, auth=user_one.auth)
        assert res.status_code == 201
        assert res.content_type == 'application/vnd.api+json'
        assert res.json['data']['attributes']['title'] == private_project['data']['attributes']['title']
        assert res.json['data']['attributes']['description'] == private_project['data']['attributes']['description']
        assert res.json['data']['attributes']['category'] == private_project['data']['attributes']['category']
        pid = res.json['data']['id']
        project = AbstractNode.load(pid)
        assert project.logs.latest().action == NodeLog.PROJECT_CREATED
    def test_create_from_template_errors(self, app, user_one, user_two, url):
        """template_from with a bad guid yields 404; with another user's
        project (public but not contributed to) yields 403."""
        # test_404_on_create_from_template_of_nonexistent_project
        template_from_id = 'thisisnotavalidguid'
        templated_project_data = {
            'data': {
                'type': 'nodes',
                'attributes':
                    {
                        'title': 'No title',
                        'category': 'project',
                        'template_from': template_from_id,
                    }
            }
        }
        res = app.post_json_api(
            url, templated_project_data,
            auth=user_one.auth,
            expect_errors=True)
        assert res.status_code == 404
        # test_403_on_create_from_template_of_unauthorized_project
        template_from = ProjectFactory(creator=user_two, is_public=True)
        templated_project_data = {
            'data': {
                'type': 'nodes',
                'attributes':
                    {
                        'title': 'No permission',
                        'category': 'project',
                        'template_from': template_from._id,
                    }
            }
        }
        res = app.post_json_api(
            url, templated_project_data,
            auth=user_one.auth,
            expect_errors=True)
        assert res.status_code == 403
    def test_creates_project_from_template(self, app, user_one, category, url):
        """Templating copies the component structure but not description or
        public status."""
        template_from = ProjectFactory(creator=user_one, is_public=True)
        template_component = ProjectFactory(
            creator=user_one, is_public=True, parent=template_from)
        templated_project_title = 'Templated Project'
        templated_project_data = {
            'data': {
                'type': 'nodes',
                'attributes':
                    {
                        'title': templated_project_title,
                        'category': category,
                        'template_from': template_from._id,
                    }
            }
        }
        res = app.post_json_api(
            url, templated_project_data,
            auth=user_one.auth)
        assert res.status_code == 201
        json_data = res.json['data']
        new_project_id = json_data['id']
        new_project = AbstractNode.load(new_project_id)
        assert new_project.title == templated_project_title
        # Description is not inherited and the copy starts out private.
        assert new_project.description == ''
        assert not new_project.is_public
        assert len(new_project.nodes) == len(template_from.nodes)
        assert new_project.nodes[0].title == template_component.title
    def test_creates_project_creates_project_and_sanitizes_html(
            self, app, user_one, category, url):
        """HTML in title/description is stripped before storage."""
        title = '<em>Cool</em> <strong>Project</strong>'
        description = 'An <script>alert("even cooler")</script> project'
        res = app.post_json_api(url, {
            'data': {
                'attributes': {
                    'title': title,
                    'description': description,
                    'category': category,
                    'public': True
                },
                'type': 'nodes'
            }
        }, auth=user_one.auth)
        project_id = res.json['data']['id']
        assert res.status_code == 201
        assert res.content_type == 'application/vnd.api+json'
        url = '/{}nodes/{}/'.format(API_BASE, project_id)
        project = AbstractNode.load(project_id)
        assert project.logs.latest().action == NodeLog.PROJECT_CREATED
        # Re-fetch the node and compare against the strip_html expectation.
        res = app.get(url, auth=user_one.auth)
        assert res.json['data']['attributes']['title'] == strip_html(title)
        assert res.json['data']['attributes']['description'] == strip_html(
            description)
        assert res.json['data']['attributes']['category'] == category
    def test_create_component_inherit_contributors(
            self, app, user_one, user_two, title, category):
        """?inherit_contributors=true copies the parent's contributor list
        (creator + read-only user_two) onto the new component."""
        parent_project = ProjectFactory(creator=user_one)
        parent_project.add_contributor(
            user_two, permissions=[permissions.READ], save=True)
        url = '/{}nodes/{}/children/?inherit_contributors=true'.format(
            API_BASE, parent_project._id)
        component_data = {
            'data': {
                'type': 'nodes',
                'attributes': {
                    'title': title,
                    'category': category,
                }
            }
        }
        res = app.post_json_api(url, component_data, auth=user_one.auth)
        assert res.status_code == 201
        json_data = res.json['data']
        new_component_id = json_data['id']
        new_component = AbstractNode.load(new_component_id)
        assert len(new_component.contributors) == 2
        assert len(
            new_component.contributors
        ) == len(parent_project.contributors)
    def test_create_component_with_tags(self, app, user_one, title, category):
        """Tags supplied in the creation payload are persisted on the new
        component."""
        parent_project = ProjectFactory(creator=user_one)
        url = '/{}nodes/{}/children/'.format(API_BASE, parent_project._id)
        component_data = {
            'data': {
                'type': 'nodes',
                'attributes': {
                    'title': title,
                    'category': category,
                    'tags': ['test tag 1', 'test tag 2']
                }
            }
        }
        res = app.post_json_api(url, component_data, auth=user_one.auth)
        assert res.status_code == 201
        json_data = res.json['data']
        new_component_id = json_data['id']
        new_component = AbstractNode.load(new_component_id)
        assert len(new_component.tags.all()) == 2
        tag1, tag2 = new_component.tags.all()
        assert tag1.name == 'test tag 1'
        assert tag2.name == 'test tag 2'
def test_create_component_inherit_contributors_with_unregistered_contributor(
self, app, user_one, title, category):
parent_project = ProjectFactory(creator=user_one)
parent_project.add_unregistered_contributor(
fullname='far', email='foo@bar.baz',
permissions=[permissions.READ],
auth=Auth(user=user_one), save=True)
url = '/{}nodes/{}/children/?inherit_contributors=true'.format(
API_BASE, parent_project._id)
component_data = {
'data': {
'type': 'nodes',
'attributes': {
'title': title,
'category': category,
}
}
}
res = app.post_json_api(url, component_data, auth=user_one.auth)
assert res.status_code == 201
json_data = res.json['data']
new_component_id = json_data['id']
new_component = AbstractNode.load(new_component_id)
assert len(new_component.contributors) == 2
assert len(
new_component.contributors
) == len(parent_project.contributors)
def test_create_project_with_region_relationship(
self, app, user_one, region, institution_one, private_project, url):
private_project['data']['relationships'] = {
'region': {
'data': {
'type': 'region',
'id': region._id
}
}
}
res = app.post_json_api(
url, private_project, auth=user_one.auth
)
assert res.status_code == 201
region_id = res.json['data']['relationships']['region']['data']['id']
assert region_id == region._id
institution_two = InstitutionFactory()
user_one.affiliated_institutions.add(institution_two)
private_project['data']['relationships'] = {
'affiliated_institutions': {
'data': [
{
'type': 'institutions',
'id': institution_one._id
},
{
'type': 'institutions',
'id': institution_two._id
}
]
},
'region': {
'data': {
'type': 'region',
'id': region._id
}
}
}
res = app.post_json_api(
url, private_project, auth=user_one.auth
)
assert res.status_code == 201
region_id = res.json['data']['relationships']['region']['data']['id']
assert region_id == region._id
node_id = res.json['data']['id']
node = AbstractNode.load(node_id)
assert institution_one in node.affiliated_institutions.all()
assert institution_two in node.affiliated_institutions.all()
def test_create_project_with_region_query_param(
self, app, user_one, region, private_project, url_with_region_query_param):
res = app.post_json_api(
url_with_region_query_param, private_project, auth=user_one.auth
)
assert res.status_code == 201
pid = res.json['data']['id']
project = AbstractNode.load(pid)
node_settings = project.get_addon('osfstorage')
assert node_settings.region_id == region.id
def test_create_project_with_no_region_specified(self, app, user_one, private_project, url):
res = app.post_json_api(
url, private_project, auth=user_one.auth
)
assert res.status_code == 201
project = AbstractNode.load(res.json['data']['id'])
node_settings = project.get_addon('osfstorage')
# NodeSettings just left at default region on creation
assert node_settings.region_id == 1
def test_create_project_with_bad_region_query_param(
self, app, user_one, region, private_project, url):
bad_region_id = 'bad-region-1'
res = app.post_json_api(
url + '?region={}'.format(bad_region_id), private_project,
auth=user_one.auth, expect_errors=True
)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'Region {} is invalid.'.format(bad_region_id)
def test_create_project_errors(
self, app, user_one, title, description, category, url):
# test_creates_project_no_type
project = {
'data': {
'attributes': {
'title': title,
'description': description,
'category': category,
'public': False
}
}
}
res = app.post_json_api(
url, project, auth=user_one.auth,
expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'This field may not be null.'
assert res.json['errors'][0]['source']['pointer'] == '/data/type'
# test_creates_project_incorrect_type
project = {
'data': {
'attributes': {
'title': title,
'description': description,
'category': category,
'public': False
},
'type': 'Wrong type.'
}
}
res = app.post_json_api(
url, project, auth=user_one.auth,
expect_errors=True)
assert res.status_code == 409
assert res.json['errors'][0]['detail'] == 'This resource has a type of "nodes", but you set the json body\'s type field to "Wrong type.". You probably need to change the type field to match the resource\'s type.'
# test_creates_project_properties_not_nested
project = {
'data': {
'title': title,
'description': description,
'category': category,
'public': False,
'type': 'nodes'
}
}
res = app.post_json_api(
url, project, auth=user_one.auth,
expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'This field is required.'
assert res.json['errors'][0]['source']['pointer'] == '/data/attributes/category'
# test_create_project_invalid_title
project = {
'data': {
'type': 'nodes',
'attributes': {
'title': 'A' * 201,
'description': description,
'category': category,
'public': False,
}
}
}
res = app.post_json_api(
url, project, auth=user_one.auth,
expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'Title cannot exceed 200 characters.'
@pytest.mark.django_db
class TestNodeBulkCreate:
    """Bulk POST /nodes/: payload validation, auth requirements, limits,
    and the atomic happy-path creation of several nodes at once.

    Fix: test_bulk_create_logged_in previously compared the SECOND created
    node (the private project) against public_project's category/description
    — a copy-paste error that only passed because both fixtures share the
    same values. It now asserts against private_project.
    """

    @pytest.fixture()
    def user_one(self):
        return AuthUserFactory()

    @pytest.fixture()
    def user_two(self):
        return AuthUserFactory()

    @pytest.fixture()
    def url(self):
        return '/{}nodes/'.format(API_BASE)

    @pytest.fixture()
    def title(self):
        return 'Rheisen is bored'

    @pytest.fixture()
    def description(self):
        return 'Pytest conversions are tedious'

    @pytest.fixture()
    def category(self):
        return 'data'

    @pytest.fixture()
    def public_project(self, title, description, category):
        # Valid resource object for a public node.
        return {
            'type': 'nodes',
            'attributes': {
                'title': title,
                'description': description,
                'category': category,
                'public': True
            }
        }

    @pytest.fixture()
    def private_project(self, title, description, category):
        # Valid resource object for a private node.
        return {
            'type': 'nodes',
            'attributes': {
                'title': title,
                'description': description,
                'category': category,
                'public': False
            }
        }

    @pytest.fixture()
    def empty_project(self):
        # Invalid resource object: blank title/description/category.
        return {
            'type': 'nodes',
            'attributes': {
                'title': '',
                'description': '',
                'category': ''
            }
        }

    def test_bulk_create(
            self, app, user_one, public_project, private_project,
            empty_project, title, category, url):
        """Invalid bulk creations fail atomically: no nodes are created."""
        # test_bulk_create_nodes_blank_request
        res = app.post_json_api(
            url, auth=user_one.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 400
        # test_bulk_create_all_or_nothing
        res = app.post_json_api(
            url,
            {'data': [public_project, empty_project]},
            bulk=True, auth=user_one.auth,
            expect_errors=True)
        assert res.status_code == 400
        res = app.get(url, auth=user_one.auth)
        assert len(res.json['data']) == 0
        # test_bulk_create_logged_out
        res = app.post_json_api(
            url,
            {'data': [public_project, private_project]},
            bulk=True, expect_errors=True)
        assert res.status_code == 401
        res = app.get(url, auth=user_one.auth)
        assert len(res.json['data']) == 0
        # test_bulk_create_error_formatting
        res = app.post_json_api(
            url,
            {'data': [empty_project, empty_project]},
            bulk=True, auth=user_one.auth,
            expect_errors=True)
        assert res.status_code == 400
        assert len(res.json['errors']) == 2
        errors = res.json['errors']
        assert errors[0]['source'] == {'pointer': '/data/0/attributes/title'}
        assert errors[1]['source'] == {'pointer': '/data/1/attributes/title'}
        assert errors[0]['detail'] == 'This field may not be blank.'
        assert errors[1]['detail'] == 'This field may not be blank.'
        # test_bulk_create_limits
        node_create_list = {'data': [public_project] * 101}
        res = app.post_json_api(
            url, node_create_list,
            auth=user_one.auth,
            expect_errors=True, bulk=True)
        assert res.json['errors'][0]['detail'] == 'Bulk operation limit is 100, got 101.'
        assert res.json['errors'][0]['source']['pointer'] == '/data'
        res = app.get(url, auth=user_one.auth)
        assert len(res.json['data']) == 0
        # test_bulk_create_no_type
        payload = {
            'data': [{
                'attributes': {
                    'category': category,
                    'title': title
                }
            }]
        }
        res = app.post_json_api(
            url, payload, auth=user_one.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['source']['pointer'] == '/data/0/type'
        res = app.get(url, auth=user_one.auth)
        assert len(res.json['data']) == 0
        # test_bulk_create_incorrect_type
        payload = {
            'data': [
                public_project, {
                    'type': 'Incorrect type.',
                    'attributes': {
                        'category': category,
                        'title': title
                    }
                }
            ]
        }
        res = app.post_json_api(
            url, payload, auth=user_one.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 409
        res = app.get(url, auth=user_one.auth)
        assert len(res.json['data']) == 0
        # test_bulk_create_no_attributes
        payload = {'data': [public_project, {'type': 'nodes', }]}
        res = app.post_json_api(
            url, payload, auth=user_one.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['source']['pointer'] == '/data/1/attributes/category'
        res = app.get(url, auth=user_one.auth)
        assert len(res.json['data']) == 0
        # test_bulk_create_no_title
        payload = {
            'data': [
                public_project, {
                    'type': 'nodes',
                    'attributes': {
                        'category': category
                    }
                }
            ]
        }
        res = app.post_json_api(
            url, payload, auth=user_one.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['source']['pointer'] == '/data/1/attributes/title'
        res = app.get(url, auth=user_one.auth)
        assert len(res.json['data']) == 0
        # test_ugly_payload
        payload = 'sdf;jlasfd'
        res = app.post_json_api(
            url, payload, auth=user_one.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 400
        res = app.get(url, auth=user_one.auth)
        assert len(res.json['data']) == 0

    def test_bulk_create_logged_in(
            self, app, user_one, public_project,
            private_project, url):
        """An authenticated bulk POST creates both nodes and echoes their attributes."""
        res = app.post_json_api(
            url,
            {'data': [public_project, private_project]},
            auth=user_one.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 201
        assert len(res.json['data']) == 2
        assert res.json['data'][0]['attributes']['title'] == public_project['attributes']['title']
        assert res.json['data'][0]['attributes']['category'] == public_project['attributes']['category']
        assert res.json['data'][0]['attributes']['description'] == public_project['attributes']['description']
        # data[1] is the private project: compare against private_project
        # (was mistakenly compared against public_project before).
        assert res.json['data'][1]['attributes']['title'] == private_project['attributes']['title']
        assert res.json['data'][1]['attributes']['category'] == private_project['attributes']['category']
        assert res.json['data'][1]['attributes']['description'] == private_project['attributes']['description']
        assert res.content_type == 'application/vnd.api+json'
        res = app.get(url, auth=user_one.auth)
        assert len(res.json['data']) == 2
        id_one = res.json['data'][0]['id']
        id_two = res.json['data'][1]['id']
        # Clean up with a bulk delete so later assertions on node counts hold.
        res = app.delete_json_api(
            url,
            {
                'data': [
                    {'id': id_one, 'type': 'nodes'},
                    {'id': id_two, 'type': 'nodes'}
                ]
            },
            auth=user_one.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 204
@pytest.mark.django_db
class TestNodeBulkUpdate:
    """Bulk PUT /nodes/: full updates of several nodes at once.

    Covers atomic failure on invalid payloads, auth/permission checks,
    the 100-item bulk limit, and successful updates for contributors.
    """

    @pytest.fixture()
    def title(self):
        return 'Rheisen is bored'

    @pytest.fixture()
    def new_title(self):
        return 'Rheisen is very bored'

    @pytest.fixture()
    def description(self):
        return 'Pytest conversions are tedious'

    @pytest.fixture()
    def new_description(self):
        return 'Pytest conversions are death'

    @pytest.fixture()
    def category(self):
        return 'data'

    @pytest.fixture()
    def new_category(self):
        return 'project'

    @pytest.fixture()
    def public_project_one(self, user, title, description, category):
        return ProjectFactory(
            title=title,
            description=description,
            category=category,
            is_public=True,
            creator=user)

    @pytest.fixture()
    def public_project_two(self, user, title, description, category):
        return ProjectFactory(
            title=title,
            description=description,
            category=category,
            is_public=True,
            creator=user)

    @pytest.fixture()
    def public_payload(
            self, public_project_one, public_project_two,
            new_title, new_description, new_category):
        # Valid bulk-update body renaming both public projects.
        return {
            'data': [
                {
                    'id': public_project_one._id,
                    'type': 'nodes',
                    'attributes': {
                        'title': new_title,
                        'description': new_description,
                        'category': new_category,
                        'public': True
                    }
                },
                {
                    'id': public_project_two._id,
                    'type': 'nodes',
                    'attributes': {
                        'title': new_title,
                        'description': new_description,
                        'category': new_category,
                        'public': True
                    }
                }
            ]
        }

    @pytest.fixture()
    def url(self):
        return '/{}nodes/'.format(API_BASE)

    @pytest.fixture()
    def private_project_one(self, user, title, description, category):
        return ProjectFactory(
            title=title,
            description=description,
            category=category,
            is_public=False,
            creator=user)

    @pytest.fixture()
    def private_project_two(self, user, title, description, category):
        return ProjectFactory(
            title=title,
            description=description,
            category=category,
            is_public=False,
            creator=user)

    @pytest.fixture()
    def private_payload(
            self, private_project_one, private_project_two,
            new_title, new_description, new_category):
        # Valid bulk-update body renaming both private projects.
        return {
            'data': [
                {
                    'id': private_project_one._id,
                    'type': 'nodes',
                    'attributes': {
                        'title': new_title,
                        'description': new_description,
                        'category': new_category,
                        'public': False
                    }
                },
                {
                    'id': private_project_two._id,
                    'type': 'nodes',
                    'attributes': {
                        'title': new_title,
                        'description': new_description,
                        'category': new_category,
                        'public': False
                    }
                }
            ]
        }

    @pytest.fixture()
    def empty_payload(self, public_project_one, public_project_two):
        # Invalid body: blank title/description/category must be rejected.
        return {
            'data': [
                {
                    'id': public_project_one._id,
                    'type': 'nodes',
                    'attributes': {
                        'title': '',
                        'description': '',
                        'category': ''
                    }
                },
                {
                    'id': public_project_two._id,
                    'type': 'nodes',
                    'attributes': {
                        'title': '',
                        'description': '',
                        'category': ''
                    }
                }
            ]
        }

    def test_bulk_update_errors(
            self, app, user, public_project_one,
            public_project_two, private_project_one,
            private_project_two, public_payload,
            private_payload, empty_payload, title,
            new_title, new_category, url):
        """Every invalid bulk PUT fails atomically: titles stay unchanged."""
        # test_bulk_update_nodes_blank_request
        res = app.put_json_api(
            url, auth=user.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 400
        # test_bulk_update_blank_but_not_empty_title
        payload = {
            'data': [
                {
                    'id': public_project_one._id,
                    'type': 'nodes',
                    'attributes': {
                        'title': 'This shouldn\'t update.',
                        'category': 'instrumentation'
                    }
                },
                {
                    'id': public_project_two._id,
                    'type': 'nodes',
                    'attributes': {
                        'title': '',
                        'category': 'hypothesis'
                    }
                }
            ]
        }
        public_project_one_url = '/{}nodes/{}/'.format(
            API_BASE, public_project_one._id)
        res = app.put_json_api(
            url, payload, auth=user.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 400
        # The valid first item must NOT have been applied either.
        res = app.get(public_project_one_url)
        assert res.json['data']['attributes']['title'] == title
        # test_bulk_update_public_projects_one_not_found
        payload = {'data': [
            {
                'id': '12345',
                'type': 'nodes',
                'attributes': {
                    'title': new_title,
                    'category': new_category
                }
            }, public_payload['data'][0]
        ]}
        res = app.put_json_api(
            url, payload, auth=user.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == 'Could not find all objects to update.'
        public_project_one_url = '/{}nodes/{}/'.format(
            API_BASE, public_project_one._id)
        res = app.get(public_project_one_url)
        assert res.json['data']['attributes']['title'] == title
        # test_bulk_update_public_projects_logged_out
        res = app.put_json_api(
            url, public_payload,
            expect_errors=True, bulk=True)
        assert res.status_code == 401
        assert res.json['errors'][0]['detail'] == exceptions.NotAuthenticated.default_detail
        public_project_one_url = '/{}nodes/{}/'.format(
            API_BASE, public_project_one._id)
        public_project_two_url = '/{}nodes/{}/'.format(
            API_BASE, public_project_two._id)
        res = app.get(public_project_one_url)
        assert res.json['data']['attributes']['title'] == title
        res = app.get(public_project_two_url)
        assert res.json['data']['attributes']['title'] == title
        # test_bulk_update_private_projects_logged_out
        res = app.put_json_api(
            url, private_payload,
            expect_errors=True, bulk=True)
        assert res.status_code == 401
        assert res.json['errors'][0]['detail'] == exceptions.NotAuthenticated.default_detail
        private_project_one_url = '/{}nodes/{}/'.format(
            API_BASE, private_project_one._id)
        private_project_two_url = '/{}nodes/{}/'.format(
            API_BASE, private_project_two._id)
        res = app.get(private_project_one_url, auth=user.auth)
        assert res.json['data']['attributes']['title'] == title
        res = app.get(private_project_two_url, auth=user.auth)
        assert res.json['data']['attributes']['title'] == title
        # test_bulk_update_private_projects_logged_in_non_contrib
        non_contrib = AuthUserFactory()
        res = app.put_json_api(
            url, private_payload,
            auth=non_contrib.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 403
        assert res.json['errors'][0]['detail'] == exceptions.PermissionDenied.default_detail
        private_project_one_url = '/{}nodes/{}/'.format(
            API_BASE, private_project_one._id)
        private_project_two_url = '/{}nodes/{}/'.format(
            API_BASE, private_project_two._id)
        res = app.get(private_project_one_url, auth=user.auth)
        assert res.json['data']['attributes']['title'] == title
        res = app.get(private_project_two_url, auth=user.auth)
        assert res.json['data']['attributes']['title'] == title
        # test_bulk_update_projects_send_dictionary_not_list
        res = app.put_json_api(
            url,
            {'data': {
                'id': public_project_one._id,
                'type': 'nodes',
                'attributes': {
                    'title': new_title,
                    'category': 'project'
                }
            }},
            auth=user.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == 'Expected a list of items but got type "dict".'
        # test_bulk_update_error_formatting
        res = app.put_json_api(
            url, empty_payload, auth=user.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 400
        assert len(res.json['errors']) == 2
        errors = res.json['errors']
        assert errors[0]['source'] == {'pointer': '/data/0/attributes/title'}
        assert errors[1]['source'] == {'pointer': '/data/1/attributes/title'}
        assert errors[0]['detail'] == 'This field may not be blank.'
        assert errors[1]['detail'] == 'This field may not be blank.'
        # test_bulk_update_id_not_supplied
        res = app.put_json_api(
            url,
            {'data': [
                public_payload['data'][1],
                {
                    'type': 'nodes',
                    'attributes': {
                        'title': new_title,
                        'category': new_category
                    }
                }
            ]},
            auth=user.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 400
        assert len(res.json['errors']) == 1
        assert res.json['errors'][0]['source']['pointer'] == '/data/1/id'
        assert res.json['errors'][0]['detail'] == 'This field may not be null.'
        public_project_two_url = '/{}nodes/{}/'.format(
            API_BASE, public_project_two._id)
        res = app.get(public_project_two_url, auth=user.auth)
        assert res.json['data']['attributes']['title'] == title
        # test_bulk_update_type_not_supplied
        res = app.put_json_api(
            url,
            {'data': [
                public_payload['data'][1],
                {
                    'id': public_project_one._id,
                    'attributes': {
                        'title': new_title,
                        'category': new_category
                    }
                }
            ]},
            auth=user.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 400
        assert len(res.json['errors']) == 1
        assert res.json['errors'][0]['source']['pointer'] == '/data/1/type'
        assert res.json['errors'][0]['detail'] == 'This field may not be null.'
        public_project_two_url = '/{}nodes/{}/'.format(
            API_BASE, public_project_two._id)
        res = app.get(public_project_two_url, auth=user.auth)
        assert res.json['data']['attributes']['title'] == title
        # test_bulk_update_incorrect_type
        res = app.put_json_api(
            url,
            {
                'data': [
                    public_payload['data'][1],
                    {
                        'id': public_project_one._id,
                        'type': 'Incorrect',
                        'attributes': {
                            'title': new_title,
                            'category': new_category
                        }
                    }
                ]
            },
            auth=user.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 409
        public_project_two_url = '/{}nodes/{}/'.format(
            API_BASE, public_project_two._id)
        res = app.get(public_project_two_url, auth=user.auth)
        assert res.json['data']['attributes']['title'] == title
        # test_bulk_update_limits
        node_update_list = {'data': [public_payload['data'][0]] * 101}
        res = app.put_json_api(
            url, node_update_list, auth=user.auth,
            expect_errors=True, bulk=True)
        assert res.json['errors'][0]['detail'] == 'Bulk operation limit is 100, got 101.'
        assert res.json['errors'][0]['source']['pointer'] == '/data'
        # test_bulk_update_no_title_or_category
        new_payload = {
            'id': public_project_one._id,
            'type': 'nodes',
            'attributes': {}}
        res = app.put_json_api(
            url,
            {'data': [
                public_payload['data'][1],
                new_payload
            ]},
            auth=user.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 400
        public_project_two_url = '/{}nodes/{}/'.format(
            API_BASE, public_project_two._id)
        res = app.get(public_project_two_url, auth=user.auth)
        assert res.json['data']['attributes']['title'] == title

    def test_bulk_update_private_projects_logged_in_read_only_contrib(
            self, app, user, private_project_one, private_project_two,
            title, private_payload, url):
        """Read-only contributors get 403 and nothing is updated."""
        read_contrib = AuthUserFactory()
        private_project_one.add_contributor(
            read_contrib, permissions=[permissions.READ], save=True)
        private_project_two.add_contributor(
            read_contrib, permissions=[permissions.READ], save=True)
        res = app.put_json_api(
            url, private_payload,
            auth=read_contrib.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 403
        assert res.json['errors'][0]['detail'] == exceptions.PermissionDenied.default_detail
        private_project_one_url = '/{}nodes/{}/'.format(
            API_BASE, private_project_one._id)
        private_project_two_url = '/{}nodes/{}/'.format(
            API_BASE, private_project_two._id)
        res = app.get(private_project_one_url, auth=user.auth)
        assert res.json['data']['attributes']['title'] == title
        res = app.get(private_project_two_url, auth=user.auth)
        assert res.json['data']['attributes']['title'] == title

    def test_bulk_update_public_projects_logged_in(
            self, app, user, public_project_one,
            public_project_two, public_payload,
            new_title, url):
        """An admin contributor can bulk-update their public projects."""
        res = app.put_json_api(url, public_payload, auth=user.auth, bulk=True)
        assert res.status_code == 200
        assert ({public_project_one._id, public_project_two._id} ==
                {res.json['data'][0]['id'], res.json['data'][1]['id']})
        assert res.json['data'][0]['attributes']['title'] == new_title
        assert res.json['data'][1]['attributes']['title'] == new_title

    def test_bulk_update_with_tags(self, app, user, public_project_one, url):
        """Tags included in a bulk update are applied to the node."""
        new_payload = {
            'data': [{
                'id': public_project_one._id,
                'type': 'nodes',
                'attributes': {
                    'title': 'New title',
                    'category': 'project',
                    'tags': ['new tag']
                }
            }]
        }
        res = app.put_json_api(
            url, new_payload,
            auth=user.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 200
        assert res.json['data'][0]['attributes']['tags'] == ['new tag']

    def test_bulk_update_private_projects_logged_in_contrib(
            self, app, user, private_project_one,
            private_project_two, private_payload,
            new_title, url):
        """A write contributor can bulk-update their private projects."""
        res = app.put_json_api(url, private_payload, auth=user.auth, bulk=True)
        assert res.status_code == 200
        assert ({private_project_one._id, private_project_two._id} == {
            res.json['data'][0]['id'], res.json['data'][1]['id']})
        assert res.json['data'][0]['attributes']['title'] == new_title
        assert res.json['data'][1]['attributes']['title'] == new_title
@pytest.mark.django_db
class TestNodeBulkPartialUpdate:
    """Bulk PATCH /nodes/: partial updates (title only) of several nodes.

    Mirrors TestNodeBulkUpdate but with PATCH semantics — only supplied
    attributes change; omitted attributes are left alone.
    """

    @pytest.fixture()
    def title(self):
        return 'Rachel is great'

    @pytest.fixture()
    def new_title(self):
        return 'Rachel is awesome'

    @pytest.fixture()
    def description(self):
        return 'Such a cool person'

    @pytest.fixture()
    def new_description(self):
        return 'Such an amazing person'

    @pytest.fixture()
    def category(self):
        return 'data'

    @pytest.fixture()
    def new_category(self):
        return 'project'

    @pytest.fixture()
    def public_project_one(self, user, title, description, category):
        return ProjectFactory(
            title=title,
            description=description,
            category=category,
            is_public=True,
            creator=user)

    @pytest.fixture()
    def public_project_two(self, user, title, description, category):
        return ProjectFactory(
            title=title,
            description=description,
            category=category,
            is_public=True,
            creator=user)

    @pytest.fixture()
    def public_payload(
            self, public_project_one, public_project_two, new_title):
        # Valid partial-update body: retitle both public projects.
        return {
            'data': [
                {
                    'id': public_project_one._id,
                    'type': 'nodes',
                    'attributes': {
                        'title': new_title,
                    }
                },
                {
                    'id': public_project_two._id,
                    'type': 'nodes',
                    'attributes': {
                        'title': new_title,
                    }
                }
            ]
        }

    @pytest.fixture()
    def url(self):
        return '/{}nodes/'.format(API_BASE)

    @pytest.fixture()
    def private_project_one(self, user, title, description, category):
        return ProjectFactory(
            title=title,
            description=description,
            category=category,
            is_public=False,
            creator=user)

    @pytest.fixture()
    def private_project_two(self, user, title, description, category):
        return ProjectFactory(
            title=title,
            description=description,
            category=category,
            is_public=False,
            creator=user)

    @pytest.fixture()
    def private_payload(
            self, private_project_one, private_project_two, new_title):
        # Valid partial-update body: retitle both private projects.
        return {
            'data': [
                {
                    'id': private_project_one._id,
                    'type': 'nodes',
                    'attributes': {
                        'title': new_title
                    }
                },
                {
                    'id': private_project_two._id,
                    'type': 'nodes',
                    'attributes': {
                        'title': new_title
                    }
                }
            ]
        }

    @pytest.fixture()
    def empty_payload(self, public_project_one, public_project_two):
        # Invalid body: blank titles must be rejected.
        return {
            'data': [
                {
                    'id': public_project_one._id,
                    'type': 'nodes',
                    'attributes': {
                        'title': ''
                    }
                },
                {
                    'id': public_project_two._id,
                    'type': 'nodes',
                    'attributes': {
                        'title': ''
                    }
                }
            ]
        }

    def test_bulk_partial_update_errors(
            self, app, user, public_project_one,
            public_project_two, private_project_one,
            private_project_two, title, new_title,
            public_payload, private_payload,
            empty_payload, url):
        """Every invalid bulk PATCH fails atomically: titles stay unchanged."""
        # test_bulk_patch_nodes_blank_request
        res = app.patch_json_api(
            url, auth=user.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 400
        # test_bulk_partial_update_public_projects_one_not_found
        payload = {'data': [
            {
                'id': '12345',
                'type': 'nodes',
                'attributes': {
                    'title': new_title
                }
            },
            public_payload['data'][0]
        ]}
        res = app.patch_json_api(
            url, payload, auth=user.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == 'Could not find all objects to update.'
        public_project_one_url = '/{}nodes/{}/'.format(
            API_BASE, public_project_one._id)
        res = app.get(public_project_one_url)
        assert res.json['data']['attributes']['title'] == title
        # test_bulk_partial_update_public_projects_logged_out
        res = app.patch_json_api(
            url, public_payload,
            expect_errors=True, bulk=True)
        assert res.status_code == 401
        assert res.json['errors'][0]['detail'] == exceptions.NotAuthenticated.default_detail
        public_project_one_url = '/{}nodes/{}/'.format(
            API_BASE, public_project_one._id)
        public_project_two_url = '/{}nodes/{}/'.format(
            API_BASE, public_project_two._id)
        res = app.get(public_project_one_url)
        assert res.json['data']['attributes']['title'] == title
        res = app.get(public_project_two_url)
        assert res.json['data']['attributes']['title'] == title
        # test_bulk_partial_update_private_projects_logged_out
        res = app.patch_json_api(
            url, private_payload,
            expect_errors=True, bulk=True)
        assert res.status_code == 401
        assert res.json['errors'][0]['detail'] == exceptions.NotAuthenticated.default_detail
        private_project_one_url = '/{}nodes/{}/'.format(
            API_BASE, private_project_one._id)
        private_project_two_url = '/{}nodes/{}/'.format(
            API_BASE, private_project_two._id)
        res = app.get(private_project_one_url, auth=user.auth)
        assert res.json['data']['attributes']['title'] == title
        res = app.get(private_project_two_url, auth=user.auth)
        assert res.json['data']['attributes']['title'] == title
        # test_bulk_partial_update_private_projects_logged_in_non_contrib
        non_contrib = AuthUserFactory()
        res = app.patch_json_api(
            url, private_payload,
            auth=non_contrib.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 403
        assert res.json['errors'][0]['detail'] == exceptions.PermissionDenied.default_detail
        private_project_one_url = '/{}nodes/{}/'.format(
            API_BASE, private_project_one._id)
        private_project_two_url = '/{}nodes/{}/'.format(
            API_BASE, private_project_two._id)
        res = app.get(private_project_one_url, auth=user.auth)
        assert res.json['data']['attributes']['title'] == title
        res = app.get(private_project_two_url, auth=user.auth)
        assert res.json['data']['attributes']['title'] == title
        # test_bulk_partial_update_projects_send_dictionary_not_list
        res = app.patch_json_api(
            url,
            {'data': {
                'id': public_project_one._id,
                'attributes': {
                    'title': new_title,
                    'category': 'project'
                }
            }},
            auth=user.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == 'Expected a list of items but got type "dict".'
        # test_bulk_partial_update_error_formatting
        res = app.patch_json_api(
            url, empty_payload, auth=user.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 400
        assert len(res.json['errors']) == 2
        errors = res.json['errors']
        assert errors[0]['source'] == {'pointer': '/data/0/attributes/title'}
        assert errors[1]['source'] == {'pointer': '/data/1/attributes/title'}
        assert errors[0]['detail'] == 'This field may not be blank.'
        assert errors[1]['detail'] == 'This field may not be blank.'
        # test_bulk_partial_update_id_not_supplied
        res = app.patch_json_api(
            url,
            {
                'data': [{
                    'type': 'nodes',
                    'attributes': {'title': new_title}
                }]
            }, auth=user.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 400
        assert len(res.json['errors']) == 1
        assert res.json['errors'][0]['detail'] == 'This field may not be null.'
        # test_bulk_partial_update_limits
        node_update_list = {'data': [public_payload['data'][0]] * 101}
        res = app.patch_json_api(
            url, node_update_list, auth=user.auth,
            expect_errors=True, bulk=True)
        assert res.json['errors'][0]['detail'] == 'Bulk operation limit is 100, got 101.'
        assert res.json['errors'][0]['source']['pointer'] == '/data'

    def test_bulk_partial_update_public_projects_logged_in(
            self, app, user, public_project_one, public_project_two,
            new_title, public_payload, url):
        """An admin contributor can bulk-PATCH their public projects."""
        res = app.patch_json_api(
            url, public_payload, auth=user.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 200
        assert ({public_project_one._id, public_project_two._id} ==
                {res.json['data'][0]['id'], res.json['data'][1]['id']})
        assert res.json['data'][0]['attributes']['title'] == new_title
        assert res.json['data'][1]['attributes']['title'] == new_title

    def test_bulk_partial_update_private_projects_logged_in_contrib(
            self, app, user, private_project_one, private_project_two,
            new_title, private_payload, url):
        """A write contributor can bulk-PATCH their private projects."""
        res = app.patch_json_api(
            url, private_payload, auth=user.auth, bulk=True)
        assert res.status_code == 200
        assert ({private_project_one._id, private_project_two._id} ==
                {res.json['data'][0]['id'], res.json['data'][1]['id']})
        assert res.json['data'][0]['attributes']['title'] == new_title
        assert res.json['data'][1]['attributes']['title'] == new_title

    def test_bulk_partial_update_private_projects_logged_in_read_only_contrib(
            self, app, user, private_project_one, private_project_two,
            title, private_payload, url):
        """Read-only contributors get 403 and nothing is updated."""
        read_contrib = AuthUserFactory()
        private_project_one.add_contributor(
            read_contrib, permissions=[permissions.READ], save=True)
        private_project_two.add_contributor(
            read_contrib, permissions=[permissions.READ], save=True)
        res = app.patch_json_api(
            url, private_payload, auth=read_contrib.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 403
        assert res.json['errors'][0]['detail'] == exceptions.PermissionDenied.default_detail
        private_project_one_url = '/{}nodes/{}/'.format(
            API_BASE, private_project_one._id)
        private_project_two_url = '/{}nodes/{}/'.format(
            API_BASE, private_project_two._id)
        res = app.get(private_project_one_url, auth=user.auth)
        assert res.json['data']['attributes']['title'] == title
        res = app.get(private_project_two_url, auth=user.auth)
        assert res.json['data']['attributes']['title'] == title

    def test_bulk_partial_update_privacy_has_no_effect_on_tags(
            self, app, user, public_project_one, url):
        """Toggling privacy via PATCH leaves existing tags intact."""
        public_project_one.add_tag('tag1', Auth(
            public_project_one.creator), save=True)
        payload = {
            'id': public_project_one._id,
            'type': 'nodes',
            'attributes': {
                'public': False}}
        res = app.patch_json_api(
            url, {'data': [payload]},
            auth=user.auth, bulk=True)
        assert res.status_code == 200
        public_project_one.reload()
        assert list(
            public_project_one.tags.values_list('name', flat=True)
        ) == ['tag1']
        assert public_project_one.is_public is False
@pytest.mark.django_db
class TestNodeBulkUpdateSkipUneditable:
    """Bulk PUT/PATCH of nodes with the ``skip_uneditable`` query parameter.

    ``user_one`` owns two public projects and ``user_two`` owns two more;
    the shared payload targets all four, so ``user_one`` can edit only half
    of them.  With ``skip_uneditable=True`` the editable half is updated and
    the rest are reported under ``errors``; without it the whole request
    fails with 403.
    """

    @pytest.fixture()
    def user_one(self):
        return AuthUserFactory()

    @pytest.fixture()
    def user_two(self):
        return AuthUserFactory()

    @pytest.fixture()
    def title(self):
        return 'A painting of reason'

    @pytest.fixture()
    def new_title(self):
        return 'A reason for painting'

    @pytest.fixture()
    def description(self):
        return 'Truly a masterful work of reasoning'

    @pytest.fixture()
    def new_description(self):
        return 'An insight into the reason for art'

    @pytest.fixture()
    def category(self):
        return 'data'

    @pytest.fixture()
    def new_category(self):
        return 'project'

    @pytest.fixture()
    def user_one_public_project_one(
            self, user_one, title,
            description, category):
        return ProjectFactory(
            title=title,
            description=description,
            category=category,
            is_public=True,
            creator=user_one)

    @pytest.fixture()
    def user_one_public_project_two(
            self, user_one, title,
            description, category):
        return ProjectFactory(
            title=title,
            description=description,
            category=category,
            is_public=True,
            creator=user_one)

    @pytest.fixture()
    def user_two_public_project_one(
            self, user_two, title,
            description, category):
        return ProjectFactory(
            title=title,
            description=description,
            category=category,
            is_public=True,
            creator=user_two)

    @pytest.fixture()
    def user_two_public_project_two(
            self, user_two, title,
            description, category):
        return ProjectFactory(
            title=title,
            description=description,
            category=category,
            is_public=True,
            creator=user_two)

    @pytest.fixture()
    def public_payload(
            self, user_one_public_project_one,
            user_one_public_project_two,
            user_two_public_project_one,
            user_two_public_project_two,
            new_title, new_description,
            new_category):
        # Bulk JSON-API body updating all four projects with the same
        # attribute changes; only user_one's two are editable by user_one.
        return {
            'data': [
                {
                    'id': user_one_public_project_one._id,
                    'type': 'nodes',
                    'attributes': {
                        'title': new_title,
                        'description': new_description,
                        'category': new_category,
                        'public': True
                    }
                },
                {
                    'id': user_one_public_project_two._id,
                    'type': 'nodes',
                    'attributes': {
                        'title': new_title,
                        'description': new_description,
                        'category': new_category,
                        'public': True
                    }
                },
                {
                    'id': user_two_public_project_one._id,
                    'type': 'nodes',
                    'attributes': {
                        'title': new_title,
                        'description': new_description,
                        'category': new_category,
                        'public': True
                    }
                },
                {
                    'id': user_two_public_project_two._id,
                    'type': 'nodes',
                    'attributes': {
                        'title': new_title,
                        'description': new_description,
                        'category': new_category,
                        'public': True
                    }
                }
            ]
        }

    @pytest.fixture()
    def url(self):
        return '/{}nodes/?skip_uneditable=True'.format(API_BASE)

    def test_bulk_update_skips(
            self, app, user_one,
            user_one_public_project_one,
            user_one_public_project_two,
            user_two_public_project_one,
            user_two_public_project_two,
            title, public_payload):
        """Without skip_uneditable=True, mixed-permission bulk updates fail
        outright (403) and no project is modified."""
        # test_skip_uneditable_bulk_update_query_param_required
        nodes_url = '/{}nodes/'.format(API_BASE)
        res = app.put_json_api(
            nodes_url, public_payload,
            auth=user_one.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 403
        user_one_public_project_one.reload()
        user_one_public_project_two.reload()
        user_two_public_project_one.reload()
        user_two_public_project_two.reload()
        assert user_one_public_project_one.title == title
        assert user_one_public_project_two.title == title
        assert user_two_public_project_one.title == title
        assert user_two_public_project_two.title == title

        # test_skip_uneditable_equals_false_bulk_update
        skip_uneditable_url = '/{}nodes/?skip_uneditable=False'.format(
            API_BASE)
        res = app.put_json_api(
            skip_uneditable_url,
            public_payload,
            auth=user_one.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 403
        user_one_public_project_one.reload()
        user_one_public_project_two.reload()
        user_two_public_project_one.reload()
        user_two_public_project_two.reload()
        assert user_one_public_project_one.title == title
        assert user_one_public_project_two.title == title
        assert user_two_public_project_one.title == title
        assert user_two_public_project_two.title == title

        # test_skip_uneditable_bulk_partial_update_query_param_required
        url = '/{}nodes/'.format(API_BASE)
        res = app.patch_json_api(
            url, public_payload,
            auth=user_one.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 403
        user_one_public_project_one.reload()
        user_one_public_project_two.reload()
        user_two_public_project_one.reload()
        user_two_public_project_two.reload()
        assert user_one_public_project_one.title == title
        assert user_one_public_project_two.title == title
        assert user_two_public_project_one.title == title
        assert user_two_public_project_two.title == title

    def test_skip_uneditable_bulk_update(
            self, app, user_one,
            user_one_public_project_one,
            user_one_public_project_two,
            user_two_public_project_one,
            user_two_public_project_two,
            title, new_title, public_payload, url):
        """PUT with skip_uneditable=True: editable nodes appear in ``data``,
        the rest in ``errors``; only the editable nodes are renamed."""
        res = app.put_json_api(
            url, public_payload,
            auth=user_one.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 200
        edited = res.json['data']
        skipped = res.json['errors']
        assert_items_equal(
            [edited[0]['id'], edited[1]['id']],
            [user_one_public_project_one._id,
             user_one_public_project_two._id]
        )
        assert_items_equal(
            [skipped[0]['_id'], skipped[1]['_id']],
            [user_two_public_project_one._id,
             user_two_public_project_two._id]
        )
        user_one_public_project_one.reload()
        user_one_public_project_two.reload()
        user_two_public_project_one.reload()
        user_two_public_project_two.reload()
        assert user_one_public_project_one.title == new_title
        assert user_one_public_project_two.title == new_title
        assert user_two_public_project_one.title == title
        assert user_two_public_project_two.title == title

    def test_skip_uneditable_bulk_partial_update(
            self, app, user_one,
            user_one_public_project_one,
            user_one_public_project_two,
            user_two_public_project_one,
            user_two_public_project_two,
            title, new_title, public_payload, url):
        """Same as the PUT case above, but via PATCH."""
        res = app.patch_json_api(
            url, public_payload,
            auth=user_one.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 200
        edited = res.json['data']
        skipped = res.json['errors']
        assert_items_equal(
            [edited[0]['id'], edited[1]['id']],
            [user_one_public_project_one._id,
             user_one_public_project_two._id]
        )
        assert_items_equal(
            [skipped[0]['_id'], skipped[1]['_id']],
            [user_two_public_project_one._id,
             user_two_public_project_two._id]
        )
        user_one_public_project_one.reload()
        user_one_public_project_two.reload()
        user_two_public_project_one.reload()
        user_two_public_project_two.reload()
        assert user_one_public_project_one.title == new_title
        assert user_one_public_project_two.title == new_title
        assert user_two_public_project_one.title == title
        assert user_two_public_project_two.title == title
@pytest.mark.django_db
class TestNodeBulkDelete:
    """Bulk DELETE of nodes: payload/query-param validation, permissions,
    all-or-nothing semantics, and parent/component interactions."""

    @pytest.fixture()
    def user_one(self):
        return AuthUserFactory()

    @pytest.fixture()
    def user_two(self):
        return AuthUserFactory()

    @pytest.fixture()
    def public_project_one(self, user_one):
        return ProjectFactory(
            title='Project One',
            is_public=True,
            creator=user_one,
            category='project')

    @pytest.fixture()
    def public_project_two(self, user_one):
        return ProjectFactory(
            title='Project Two',
            description='One Three',
            is_public=True,
            creator=user_one)

    @pytest.fixture()
    def public_project_parent(self, user_one):
        return ProjectFactory(
            title='Project with Component',
            description='Project with component',
            is_public=True,
            creator=user_one)

    @pytest.fixture()
    def public_component(self, user_one, public_project_parent):
        # Child node of public_project_parent; deleting the parent without
        # it should be rejected in the tests below.
        return NodeFactory(parent=public_project_parent, creator=user_one)

    @pytest.fixture()
    def user_one_private_project(self, user_one):
        return ProjectFactory(
            title='User One Private Project',
            is_public=False,
            creator=user_one)

    @pytest.fixture()
    def user_two_private_project(self, user_two):
        return ProjectFactory(
            title='User Two Private Project',
            is_public=False,
            creator=user_two)

    @pytest.fixture()
    def url(self):
        return '/{}nodes/'.format(API_BASE)

    @pytest.fixture()
    def public_project_one_url(self, public_project_one):
        return '/{}nodes/{}/'.format(API_BASE, public_project_one._id)

    @pytest.fixture()
    def public_project_two_url(self, public_project_two):
        return '/{}nodes/{}/'.format(API_BASE, public_project_two._id)

    @pytest.fixture()
    def user_one_private_project_url(self, user_one_private_project):
        return '/{}nodes/{}/'.format(API_BASE, user_one_private_project._id)

    @pytest.fixture()
    def public_payload(self, public_project_one, public_project_two):
        # JSON-API bulk body targeting both of user_one's public projects.
        return {
            'data': [
                {
                    'id': public_project_one._id,
                    'type': 'nodes'
                },
                {
                    'id': public_project_two._id,
                    'type': 'nodes'
                }
            ]
        }

    @pytest.fixture()
    def public_query_params(self, public_project_one, public_project_two):
        # Query-string alternative to the bulk body (id=<id1>,<id2>).
        return 'id={},{}'.format(
            public_project_one._id,
            public_project_two._id)

    @pytest.fixture()
    def type_query_param(self):
        return 'type=nodes'

    @pytest.fixture()
    def private_payload(self, user_one_private_project):
        return {
            'data': [
                {
                    'id': user_one_private_project._id,
                    'type': 'nodes'
                }
            ]
        }

    @pytest.fixture()
    def private_query_params(self, user_one_private_project):
        return 'id={}'.format(user_one_private_project._id)

    def test_bulk_delete_errors(
            self, app, user_one, public_project_one,
            public_project_two, user_one_private_project,
            public_payload, private_payload,
            type_query_param, public_query_params, url):
        """Grab-bag of malformed bulk DELETE requests; each sub-case is
        labelled with the name of the original standalone test."""
        # test_bulk_delete_with_query_params_and_payload
        res_url = '{}?{}&{}'.format(url, type_query_param, public_query_params)
        res = app.delete_json_api(
            res_url, public_payload,
            auth=user_one.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 409
        assert res.json['errors'][0]['detail'] == u'A bulk DELETE can only have a body or query parameters, not both.'

        # test_bulk_delete_with_query_params_no_type
        res_url = '{}?{}'.format(url, public_query_params)
        res = app.delete_json_api(
            res_url, auth=user_one.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == u'Type query parameter is also required for a bulk DELETE using query parameters.'

        # test_bulk_delete_with_query_params_wrong_type
        res_url = '{}?{}&{}'.format(
            url, public_query_params, 'type=node_not_nodes')
        res = app.delete_json_api(
            res_url, auth=user_one.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 409
        assert res.json['errors'][0]['detail'] == u'Type needs to match type expected at this endpoint.'

        # test_bulk_delete_nodes_blank_request
        res = app.delete_json_api(
            url, auth=user_one.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 400

        # test_bulk_delete_no_type
        payload = {'data': [
            {'id': public_project_one._id},
            {'id': public_project_two._id}
        ]}
        res = app.delete_json_api(
            url, payload, auth=user_one.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == 'Request must include /type.'

        # test_bulk_delete_no_id
        payload = {'data': [
            {'type': 'nodes'},
            {'id': 'nodes'}
        ]}
        res = app.delete_json_api(
            url, payload, auth=user_one.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == 'Request must include /data/id.'

        # test_bulk_delete_dict_inside_data
        res = app.delete_json_api(
            url,
            {'data': {
                'id': public_project_one._id,
                'type': 'nodes'
            }},
            auth=user_one.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == 'Expected a list of items but got type "dict".'

        # test_bulk_delete_invalid_type
        res = app.delete_json_api(
            url,
            {'data': [{
                'type': 'Wrong type',
                'id': public_project_one._id
            }]},
            auth=user_one.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 409

        # test_bulk_delete_private_projects_logged_out
        res = app.delete_json_api(
            url, private_payload,
            expect_errors=True, bulk=True)
        assert res.status_code == 401
        assert res.json['errors'][0]['detail'] == exceptions.NotAuthenticated.default_detail

        # test_bulk_delete_limits
        # 101 items exceeds the bulk-operation cap of 100.
        new_payload = {
            'data': [{
                'id': user_one_private_project._id,
                'type': 'nodes'
            }] * 101
        }
        res = app.delete_json_api(
            url, new_payload, auth=user_one.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == 'Bulk operation limit is 100, got 101.'
        assert res.json['errors'][0]['source']['pointer'] == '/data'

        # test_bulk_delete_no_payload
        res = app.delete_json_api(
            url, auth=user_one.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 400

    def test_bulk_delete_with_query_params(
            self, app, user_one, url,
            type_query_param, public_query_params):
        """Bulk DELETE by query params (id=...&type=nodes) succeeds."""
        url = '{}?{}&{}'.format(url, type_query_param, public_query_params)
        res = app.delete_json_api(url, auth=user_one.auth, bulk=True)
        assert res.status_code == 204

    def test_bulk_delete_public_projects_logged_in(
            self, app, user_one,
            public_project_one,
            public_project_two,
            public_payload,
            url, public_project_one_url):
        """Owner can bulk-delete own public projects; they then return 410."""
        res = app.delete_json_api(
            url, public_payload,
            auth=user_one.auth, bulk=True)
        assert res.status_code == 204
        res = app.get(
            public_project_one_url,
            auth=user_one.auth,
            expect_errors=True)
        assert res.status_code == 410
        public_project_one.reload()
        public_project_two.reload()

    def test_bulk_delete_public_projects_logged_out(
            self, app, user_one, public_payload,
            url, public_project_one_url,
            public_project_two_url):
        """Unauthenticated bulk DELETE is rejected and nothing is deleted."""
        res = app.delete_json_api(
            url, public_payload,
            expect_errors=True, bulk=True)
        assert res.status_code == 401
        assert res.json['errors'][0]['detail'] == exceptions.NotAuthenticated.default_detail
        res = app.get(
            public_project_one_url,
            auth=user_one.auth,
            expect_errors=True)
        assert res.status_code == 200
        res = app.get(
            public_project_two_url,
            auth=user_one.auth,
            expect_errors=True)
        assert res.status_code == 200

    def test_bulk_delete_private_projects_logged_in_contributor(
            self, app, user_one,
            user_one_private_project,
            private_payload, url,
            user_one_private_project_url):
        """Admin contributor can bulk-delete own private project (410 after)."""
        res = app.delete_json_api(
            url, private_payload,
            auth=user_one.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 204
        res = app.get(
            user_one_private_project_url,
            auth=user_one.auth,
            expect_errors=True)
        assert res.status_code == 410
        user_one_private_project.reload()

    def test_bulk_delete_private_projects_logged_in_non_contributor(
            self, app, user_one, user_two,
            private_payload,
            url, user_one_private_project_url):
        """Non-contributor gets 403 and the project survives."""
        res = app.delete_json_api(
            url, private_payload,
            auth=user_two.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 403
        assert res.json['errors'][0]['detail'] == exceptions.PermissionDenied.default_detail
        res = app.get(user_one_private_project_url, auth=user_one.auth)
        assert res.status_code == 200

    def test_bulk_delete_private_projects_logged_in_read_only_contributor(
            self, app, user_one, user_two,
            user_one_private_project,
            private_payload, url,
            user_one_private_project_url):
        """Read-only contributor gets 403 and the project survives."""
        user_one_private_project.add_contributor(
            user_two, permissions=[permissions.READ], save=True)
        res = app.delete_json_api(
            url, private_payload,
            auth=user_two.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 403
        assert res.json['errors'][0]['detail'] == exceptions.PermissionDenied.default_detail
        res = app.get(user_one_private_project_url, auth=user_one.auth)
        assert res.status_code == 200

    def test_bulk_delete_all_or_nothing(
            self, app, user_one, user_two,
            user_one_private_project,
            user_two_private_project, url,
            user_one_private_project_url):
        """One forbidden node in the payload aborts the entire bulk delete."""
        new_payload = {'data': [
            {'id': user_one_private_project._id, 'type': 'nodes'},
            {'id': user_two_private_project._id, 'type': 'nodes'}
        ]}
        res = app.delete_json_api(
            url, new_payload, auth=user_one.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 403
        assert res.json['errors'][0]['detail'] == exceptions.PermissionDenied.default_detail
        res = app.get(user_one_private_project_url, auth=user_one.auth)
        assert res.status_code == 200
        url = '/{}nodes/{}/'.format(API_BASE, user_two_private_project._id)
        res = app.get(url, auth=user_two.auth)
        assert res.status_code == 200

    def test_bulk_delete_invalid_payload_one_not_found(
            self, app, user_one, public_payload, public_project_one_url, url):
        """A nonexistent id in the payload aborts the delete for all nodes."""
        new_payload = {
            'data': [
                public_payload['data'][0], {
                    'id': '12345', 'type': 'nodes'}
            ]
        }
        res = app.delete_json_api(
            url, new_payload, auth=user_one.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == 'Could not find all objects to delete.'
        res = app.get(public_project_one_url, auth=user_one.auth)
        assert res.status_code == 200

    def test_bulk_delete_project_with_component(
            self, app, user_one,
            public_project_parent,
            public_project_one,
            public_component, url):
        """Deleting a parent without its component fails (400); including the
        component in the same payload succeeds (204)."""
        new_payload = {'data': [
            {'id': public_project_parent._id, 'type': 'nodes'},
            {'id': public_project_one._id, 'type': 'nodes'}
        ]}
        res = app.delete_json_api(
            url, new_payload, auth=user_one.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 400
        new_payload = {'data': [
            {'id': public_project_parent._id, 'type': 'nodes'},
            {'id': public_component._id, 'type': 'nodes'}
        ]}
        res = app.delete_json_api(
            url, new_payload, auth=user_one.auth, bulk=True)
        assert res.status_code == 204

    # Regression test for PLAT-859
    def test_bulk_delete_project_with_already_deleted_component(
            self, app, user_one,
            public_project_parent,
            public_project_one,
            public_component, url):
        """An already-deleted component must not block deleting its parent."""
        public_component.is_deleted = True
        public_component.save()
        new_payload = {'data': [
            {'id': public_project_parent._id, 'type': 'nodes'},
            {'id': public_project_one._id, 'type': 'nodes'}
        ]}
        res = app.delete_json_api(
            url, new_payload, auth=user_one.auth, bulk=True)
        assert res.status_code == 204

    # Regression test for PLAT-889
    def test_bulk_delete_project_with_linked_node(
            self, app, user_one,
            public_project_parent,
            public_component, url):
        """A node link (pointer) must not block deleting the linking parent."""
        node_link = NodeFactory(is_public=True, creator=user_one)
        public_project_parent.add_pointer(node_link, auth=Auth(user_one))
        new_payload = {'data': [
            {'id': public_project_parent._id, 'type': 'nodes'},
            {'id': public_component._id, 'type': 'nodes'}
        ]}
        res = app.delete_json_api(
            url, new_payload, auth=user_one.auth, bulk=True)
        assert res.status_code == 204
@pytest.mark.django_db
class TestNodeBulkDeleteSkipUneditable:
    """Bulk DELETE with ``skip_uneditable=True``: nodes the requester can
    delete are removed; the others are listed under ``errors`` instead of
    aborting the request.  user_one owns two projects, user_two owns two.
    """

    @pytest.fixture()
    def user_one(self):
        return AuthUserFactory()

    @pytest.fixture()
    def user_two(self):
        return AuthUserFactory()

    @pytest.fixture()
    def public_project_one(self, user_one):
        return ProjectFactory(
            title='Project One',
            is_public=True,
            creator=user_one)

    @pytest.fixture()
    def public_project_two(self, user_one):
        return ProjectFactory(
            title='Project Two',
            is_public=True,
            creator=user_one)

    @pytest.fixture()
    def public_project_three(self, user_two):
        return ProjectFactory(
            title='Project Three',
            is_public=True,
            creator=user_two)

    @pytest.fixture()
    def public_project_four(self, user_two):
        return ProjectFactory(
            title='Project Four',
            is_public=True,
            creator=user_two)

    @pytest.fixture()
    def payload(
            self, public_project_one,
            public_project_two,
            public_project_three,
            public_project_four):
        # Targets all four projects; user_one may delete only the first two.
        return {
            'data': [
                {
                    'id': public_project_one._id,
                    'type': 'nodes',
                },
                {
                    'id': public_project_two._id,
                    'type': 'nodes',
                },
                {
                    'id': public_project_three._id,
                    'type': 'nodes',
                },
                {
                    'id': public_project_four._id,
                    'type': 'nodes',
                }
            ]
        }

    @pytest.fixture()
    def url(self):
        return '/{}nodes/?skip_uneditable=True'.format(API_BASE)

    def test_skip_uneditable_bulk_delete(
            self, app, user_one,
            public_project_three,
            public_project_four,
            payload, url):
        """user_two's projects are skipped (reported in ``errors``) and are
        the only nodes left in the list afterwards."""
        res = app.delete_json_api(url, payload, auth=user_one.auth, bulk=True)
        assert res.status_code == 200
        skipped = res.json['errors']
        assert_items_equal(
            [skipped[0]['id'], skipped[1]['id']],
            [public_project_three._id, public_project_four._id]
        )
        res = app.get('/{}nodes/'.format(API_BASE), auth=user_one.auth)
        assert_items_equal(
            [res.json['data'][0]['id'], res.json['data'][1]['id']],
            [public_project_three._id, public_project_four._id]
        )

    def test_skip_uneditable_bulk_delete_query_param_required(
            self, app, user_one, payload):
        """Without the query param the mixed-permission delete fails (403)
        and all four projects remain."""
        url = '/{}nodes/'.format(API_BASE)
        res = app.delete_json_api(
            url, payload, auth=user_one.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 403
        res = app.get('/{}nodes/'.format(API_BASE), auth=user_one.auth)
        assert res.status_code == 200
        assert len(res.json['data']) == 4

    def test_skip_uneditable_has_admin_permission_for_all_nodes(
            self, app, user_one, public_project_one, public_project_two, url):
        """If every node is editable, all are deleted (204, is_deleted set)."""
        payload = {
            'data': [
                {
                    'id': public_project_one._id,
                    'type': 'nodes',
                },
                {
                    'id': public_project_two._id,
                    'type': 'nodes',
                }
            ]
        }
        res = app.delete_json_api(url, payload, auth=user_one.auth, bulk=True)
        assert res.status_code == 204
        public_project_one.reload()
        public_project_two.reload()
        assert public_project_one.is_deleted is True
        assert public_project_two.is_deleted is True

    def test_skip_uneditable_does_not_have_admin_permission_for_any_nodes(
            self, app, user_one, public_project_three, public_project_four, url):
        """If no node is editable, the request fails with 403 even with
        skip_uneditable=True."""
        payload = {
            'data': [
                {
                    'id': public_project_three._id,
                    'type': 'nodes',
                },
                {
                    'id': public_project_four._id,
                    'type': 'nodes',
                }
            ]
        }
        res = app.delete_json_api(
            url, payload, auth=user_one.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 403
@pytest.mark.django_db
@pytest.mark.enable_quickfiles_creation
class TestNodeListPagination:
    """Pagination of the node list: default page size, the MAX_PAGE_SIZE
    cap, and that ``page[size]`` does not leak into embedded resources."""

    @pytest.fixture()
    def users(self):
        return [UserFactory() for _ in range(11)]

    @pytest.fixture()
    def projects(self, users):
        # 11 projects: one more than the default page size of 10.
        return [
            ProjectFactory(
                is_public=True, creator=users[0]
            ) for _ in range(11)
        ]

    @pytest.fixture()
    def url(self, users):
        return '/{}nodes/'.format(API_BASE)

    def test_default_pagination_size(self, app, users, projects, url):
        res = app.get(url, auth=Auth(users[0]))
        pids = [e['id'] for e in res.json['data']]
        # 10 of the 11 projects fit on the first page; the one left off is
        # projects[0] (results are ordered newest-first).
        for project in projects[1:]:
            assert project._id in pids
        assert projects[0]._id not in pids
        assert res.json['links']['meta']['per_page'] == 10

    def test_max_page_size_enforced(self, app, users, projects, url):
        # Requesting more than MAX_PAGE_SIZE is clamped down to the cap.
        res_url = '{}?page[size]={}'.format(url, MAX_PAGE_SIZE + 1)
        res = app.get(res_url, auth=Auth(users[0]))
        pids = [e['id'] for e in res.json['data']]
        for project in projects:
            assert project._id in pids
        assert res.json['links']['meta']['per_page'] == MAX_PAGE_SIZE

    def test_embed_page_size_not_affected(self, app, users, projects, url):
        """The top-level page[size] applies to nodes only; the embedded
        contributors list keeps the default per_page of 10."""
        for user in users[1:]:
            projects[-1].add_contributor(user, auth=Auth(users[0]), save=True)
        res_url = '{}?page[size]={}&embed=contributors'.format(
            url, MAX_PAGE_SIZE + 1)
        res = app.get(res_url, auth=Auth(users[0]))
        pids = [e['id'] for e in res.json['data']]
        for project in projects:
            assert project._id in pids
        assert res.json['links']['meta']['per_page'] == MAX_PAGE_SIZE
        # Contributor ids embed as '<node_id>-<user_id>'.
        uids = [
            e['id'] for e in res.json['data'][0]['embeds']['contributors']['data']
        ]
        for user in users[:9]:
            contrib_id = '{}-{}'.format(res.json['data'][0]['id'], user._id)
            assert contrib_id in uids
        # The 11th contributor falls off the embedded first page of 10.
        assert '{}-{}'.format(
            res.json['data'][0]['id'], users[10]._id
        ) not in uids
        assert res.json['data'][0]['embeds']['contributors']['links']['meta']['per_page'] == 10
@pytest.mark.django_db
class TestNodeListFiltering(NodesListFilteringMixin):
    """Run the shared node-list filtering mixin against the top-level
    nodes endpoint."""

    @pytest.fixture()
    def url(self):
        # Trailing '?' lets the mixin append filter params directly.
        return '/%snodes/?' % API_BASE
@pytest.mark.django_db
class TestNodeListDateFiltering(NodesListDateFilteringMixin):
    """Run the shared date-filtering mixin against the top-level
    nodes endpoint."""

    @pytest.fixture()
    def url(self):
        # Trailing '?' lets the mixin append filter params directly.
        return '/%snodes/?' % API_BASE
| 35.52957
| 220
| 0.577211
| 13,570
| 118,953
| 4.767133
| 0.032498
| 0.078776
| 0.043036
| 0.037007
| 0.852558
| 0.81586
| 0.771649
| 0.731968
| 0.700618
| 0.675035
| 0
| 0.008806
| 0.313611
| 118,953
| 3,347
| 221
| 35.540185
| 0.783497
| 0.03266
| 0
| 0.689369
| 0
| 0.00036
| 0.097407
| 0.020623
| 0
| 0
| 0
| 0
| 0.183063
| 1
| 0.068468
| false
| 0
| 0.004324
| 0.038919
| 0.117117
| 0.015496
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6b0268b4a3e7c231f648eddcf16ac5863dae6bd1
| 3,031
|
py
|
Python
|
users.py
|
LesyaLeontyeva/python_training
|
869a5e3f7e8113024752b36c911deb352ca9b98e
|
[
"Apache-2.0"
] | null | null | null |
users.py
|
LesyaLeontyeva/python_training
|
869a5e3f7e8113024752b36c911deb352ca9b98e
|
[
"Apache-2.0"
] | null | null | null |
users.py
|
LesyaLeontyeva/python_training
|
869a5e3f7e8113024752b36c911deb352ca9b98e
|
[
"Apache-2.0"
] | null | null | null |
class UserHelper:
    """Page-object helper for the addressbook application's user pages.

    Wraps the selenium WebDriver held by the application fixture
    (``app.wd``) with high-level actions: opening pages and creating,
    editing, deleting and counting address-book entries.
    """

    def __init__(self, app):
        self.app = app

    def open_webpage(self):
        """Navigate to the addressbook home page if not already there.

        BUG FIX: the original read ``self.wd``, an attribute that never
        exists on this class (every other method uses ``self.app.wd``),
        so this method always raised AttributeError.
        """
        wd = self.app.wd
        wd.find_element_by_link_text("home").click()
        if not (wd.current_url.endswith("/") and len(wd.find_elements_by_name("searchform")) > 0):
            wd.get("http://localhost/addressbook/")

    def open_user_creation(self):
        """Open the 'add new' user form from the home page."""
        wd = self.app.wd
        wd.find_element_by_link_text("home").click()
        wd.find_element_by_link_text("add new").click()

    def _fill_form(self, wd, values):
        """Type each (field_name, text) pair into the currently open form."""
        for field, text in values:
            wd.find_element_by_name(field).send_keys(text)

    def create(self, users):
        """Create a new address-book entry from the ``users`` model object."""
        wd = self.app.wd
        wd.find_element_by_link_text("home").click()
        self.open_user_creation()
        # NOTE(review): ``firs_name`` matches the model attribute's spelling;
        # keep in sync with the Users model — confirm before renaming.
        self._fill_form(wd, [
            ("firstname", users.firs_name),
            ("middlename", users.middle_name),
            ("lastname", users.last_name),
            ("nickname", users.nick_name),
            ("title", users.title),
            ("company", users.companyname),
            ("address", users.address),
            ("home", users.home),
            ("mobile", users.mobile),
            ("work", users.work),
            ("fax", users.fax),
            ("email", users.email),
        ])
        # Submit button of the creation form.
        wd.find_element_by_xpath("//div[@id='content']/form/input[21]").click()

    def delete_first_user(self):
        """Open the first user's edit page and press its delete button."""
        wd = self.app.wd
        wd.find_element_by_link_text("home").click()
        wd.find_element_by_xpath("//table[@id='maintable']/tbody/tr[2]/td[8]/a/img").click()
        wd.find_element_by_xpath("//div[@id='content']/form[2]/input[2]").click()

    def return_to_users_page(self):
        """Go back to the home (user list) page."""
        wd = self.app.wd
        wd.find_element_by_link_text("home").click()

    def edit_first_user(self):
        """Append a fixed character to every field of the first user, then save.

        Note: ``send_keys`` appends without clearing, matching the original
        behavior.
        """
        wd = self.app.wd
        wd.find_element_by_link_text("home").click()
        wd.find_element_by_xpath("//table[@id='maintable']/tbody/tr[2]/td[8]/a/img").click()
        self._fill_form(wd, [
            ("firstname", "1"), ("middlename", "2"), ("lastname", "3"),
            ("nickname", "4"), ("title", "5"), ("company", "6"),
            ("address", "7"), ("home", "8"), ("mobile", "9"),
            ("work", "10"), ("fax", "11"), ("email", "12"),
        ])
        wd.find_element_by_name("update").click()

    def count(self):
        """Return the number of user rows (selection checkboxes) on the page."""
        wd = self.app.wd
        wd.find_element_by_link_text("home").click()
        return len(wd.find_elements_by_name("selected[]"))
| 40.959459
| 98
| 0.665787
| 461
| 3,031
| 4.008677
| 0.195228
| 0.126623
| 0.260281
| 0.300325
| 0.767316
| 0.757035
| 0.719156
| 0.719156
| 0.294372
| 0.24513
| 0
| 0.009604
| 0.17552
| 3,031
| 74
| 99
| 40.959459
| 0.729892
| 0
| 0
| 0.258621
| 0
| 0
| 0.140501
| 0.055409
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6b23c3ec4ed48917c39b31820be03db71139a754
| 51
|
py
|
Python
|
multilingual_t5/r_ta_en/__init__.py
|
sumanthd17/mt5
|
c99b4e3ad1c69908c852c730a1323ccb52d48f58
|
[
"Apache-2.0"
] | null | null | null |
multilingual_t5/r_ta_en/__init__.py
|
sumanthd17/mt5
|
c99b4e3ad1c69908c852c730a1323ccb52d48f58
|
[
"Apache-2.0"
] | null | null | null |
multilingual_t5/r_ta_en/__init__.py
|
sumanthd17/mt5
|
c99b4e3ad1c69908c852c730a1323ccb52d48f58
|
[
"Apache-2.0"
] | null | null | null |
"""r_ta_en dataset."""
from .r_ta_en import RTaEn
| 12.75
| 26
| 0.705882
| 10
| 51
| 3.2
| 0.7
| 0.1875
| 0.3125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137255
| 51
| 3
| 27
| 17
| 0.727273
| 0.313725
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8689178b87193aa4fef9e8db2a15263ae5a5ab9b
| 203
|
py
|
Python
|
tccli/services/mdl/__init__.py
|
hapsyou/tencentcloud-cli-intl-en
|
fa8ba71164484f9a2be4b983080a1de08606c0b0
|
[
"Apache-2.0"
] | null | null | null |
tccli/services/mdl/__init__.py
|
hapsyou/tencentcloud-cli-intl-en
|
fa8ba71164484f9a2be4b983080a1de08606c0b0
|
[
"Apache-2.0"
] | null | null | null |
tccli/services/mdl/__init__.py
|
hapsyou/tencentcloud-cli-intl-en
|
fa8ba71164484f9a2be4b983080a1de08606c0b0
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from tccli.services.mdl.mdl_client import register_arg
from tccli.services.mdl.mdl_client import get_actions_info
from tccli.services.mdl.mdl_client import AVAILABLE_VERSION_LIST
| 40.6
| 64
| 0.827586
| 32
| 203
| 5
| 0.53125
| 0.16875
| 0.31875
| 0.375
| 0.65625
| 0.65625
| 0.65625
| 0
| 0
| 0
| 0
| 0.005376
| 0.083744
| 203
| 4
| 65
| 50.75
| 0.854839
| 0.103448
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8691ec0c75b139e2811d2cb1ab97ef8ff7c0c998
| 198
|
py
|
Python
|
tests/test-global-ptrs1/test_data.py
|
bogdanm/udynlink
|
4119954862d5d4c96212f29f282e5e2560158406
|
[
"Apache-2.0"
] | 96
|
2017-10-10T12:52:48.000Z
|
2022-03-05T02:28:08.000Z
|
tests/test-global-ptrs1/test_data.py
|
bogdanm/udynlink
|
4119954862d5d4c96212f29f282e5e2560158406
|
[
"Apache-2.0"
] | 5
|
2019-03-27T10:23:07.000Z
|
2022-01-11T03:49:17.000Z
|
tests/test-global-ptrs1/test_data.py
|
bogdanm/udynlink
|
4119954862d5d4c96212f29f282e5e2560158406
|
[
"Apache-2.0"
] | 20
|
2018-06-21T15:36:36.000Z
|
2022-01-10T00:47:22.000Z
|
# Test global pointers to data and code
test_data = dict(
    desc="Global pointers to data and code",
    modules=[["mod_global_ptrs1.c"]],
    required=["Running test 'mod_global_ptrs1'"],
)
| 24.75
| 51
| 0.656566
| 27
| 198
| 4.62963
| 0.518519
| 0.224
| 0.256
| 0.32
| 0.432
| 0.432
| 0
| 0
| 0
| 0
| 0
| 0.0125
| 0.191919
| 198
| 7
| 52
| 28.285714
| 0.76875
| 0.186869
| 0
| 0
| 0
| 0
| 0.628931
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
86c7d0832a68fc7b8196847fb8da7ca6c512e4f3
| 169
|
py
|
Python
|
todo/admin.py
|
beshrkayali/fbmtodo
|
a7e82edd8d59a83c92bcef412954f05ba2e8a34e
|
[
"MIT"
] | null | null | null |
todo/admin.py
|
beshrkayali/fbmtodo
|
a7e82edd8d59a83c92bcef412954f05ba2e8a34e
|
[
"MIT"
] | null | null | null |
todo/admin.py
|
beshrkayali/fbmtodo
|
a7e82edd8d59a83c92bcef412954f05ba2e8a34e
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from todo.models import TodoList, TodoItem, User
# Expose the todo models in the Django admin (same registration order).
for model in (User, TodoList, TodoItem):
    admin.site.register(model)
| 24.142857
| 48
| 0.822485
| 24
| 169
| 5.791667
| 0.5
| 0.194245
| 0.366906
| 0.302158
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.08284
| 169
| 6
| 49
| 28.166667
| 0.896774
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
8126cbba00df09e00de1720de057f2d4da85ee3b
| 25,746
|
py
|
Python
|
src/natcap/opal/tests/test_offsets.py
|
natcap/opal
|
7b960d51344483bae30d14ccfa6004bd550f3737
|
[
"BSD-3-Clause"
] | 1
|
2020-04-15T23:23:27.000Z
|
2020-04-15T23:23:27.000Z
|
src/natcap/opal/tests/test_offsets.py
|
natcap/opal
|
7b960d51344483bae30d14ccfa6004bd550f3737
|
[
"BSD-3-Clause"
] | null | null | null |
src/natcap/opal/tests/test_offsets.py
|
natcap/opal
|
7b960d51344483bae30d14ccfa6004bd550f3737
|
[
"BSD-3-Clause"
] | null | null | null |
import unittest
import os
import shutil
from shapely.geometry import Polygon
from natcap.opal.tests import vector, COLOMBIA_SRS
from natcap.opal.tests import test_smoke
from natcap.opal import offsets
class OffsetTest(unittest.TestCase):
    """Unit tests for the ``natcap.opal.offsets`` parcel-selection helpers.

    Most tests build a small in-memory parcel table and check which parcel
    IDs ``offsets.select_set_multifactor`` (or related helpers) selects under
    biodiversity, hydrological (ES) and global requirements.
    """

    def test_select_set_multifactor_bio(self):
        """Biodiversity-only requirements: largest parcel per ecosystem."""
        parcels_dict = {
            1: {'area': 400, 'carbon': 3500, 'sediment': 290, 'ecosystem': 'a'},
            2: {'area': 123, 'carbon': 382, 'sediment': 1348, 'ecosystem': 'a'},
            3: {'area': 8392, 'carbon': 1910, 'sediment': 18234, 'ecosystem': 'b'},
            4: {'area': 149, 'carbon': 192, 'sediment': 1019, 'ecosystem': 'b'},
        }
        bio_requirements = {
            'a': {
                'mitigation_area': 200,
            },
            'b': {
                'mitigation_area': 100,
            }
        }
        expected_results = [1, 3]  # largest parcels selected first
        selected_parcels = offsets.select_set_multifactor(parcels_dict,
            bio_requirements)
        self.assertEqual(selected_parcels, expected_results)

    def test_select_set_multifactor_bio_no_ecosystem(self):
        """A required ecosystem with no candidate parcels is skipped."""
        parcels_dict = {
            1: {'area': 400, 'carbon': 3500, 'sediment': 290, 'ecosystem': 'a'},
            2: {'area': 123, 'carbon': 382, 'sediment': 1348, 'ecosystem': 'a'},
            3: {'area': 8392, 'carbon': 1910, 'sediment': 18234, 'ecosystem': 'b'},
            4: {'area': 149, 'carbon': 192, 'sediment': 1019, 'ecosystem': 'b'},
        }
        bio_requirements = {
            'a': {
                'mitigation_area': 200,
            },
            'b': {
                'mitigation_area': 100,
            },
            'c': {  # no available parcels for this ecosystem.
                'mitigation_area': 50,
            }
        }
        expected_results = [1, 3]  # largest parcels selected first
        selected_parcels = offsets.select_set_multifactor(parcels_dict,
            bio_requirements)
        self.assertEqual(selected_parcels, expected_results)

    def test_select_set_multifactor_bio_mega(self):
        """proportion_offset scales up the required mitigation area."""
        parcels_dict = {
            1: {'area': 400, 'carbon': 3500, 'sediment': 290, 'ecosystem': 'a'},
            2: {'area': 123, 'carbon': 382, 'sediment': 1348, 'ecosystem': 'a'},
            3: {'area': 8392, 'carbon': 1910, 'sediment': 18234, 'ecosystem': 'b'},
            4: {'area': 149, 'carbon': 192, 'sediment': 1019, 'ecosystem': 'b'},
        }
        bio_requirements = {
            'a': {
                'mitigation_area': 100,
            },
            'b': {
                'mitigation_area': 100,
            }
        }
        prop_offset = 2.0
        expected_results = [1, 3]
        selected_parcels = offsets.select_set_multifactor(parcels_dict,
            bio_requirements, proportion_offset=prop_offset)
        self.assertEqual(selected_parcels, expected_results)

    def test_select_set_multifactor_bio_hydro(self):
        """Combined biodiversity and hydrological (ES) requirements."""
        parcels_dict = {
            1: {'area': 400, 'carbon': 3500, 'sediment': 290, 'ecosystem': 'a'},
            2: {'area': 123, 'carbon': 382, 'sediment': 1348, 'ecosystem': 'a'},
            3: {'area': 8392, 'carbon': 1910, 'sediment': 18234, 'ecosystem': 'b'},
            4: {'area': 149, 'carbon': 192, 'sediment': 1019, 'ecosystem': 'b'},
        }
        bio_requirements = {
            'a': {
                'mitigation_area': 200,
            },
            'b': {
                'mitigation_area': 100,
            }
        }
        es_requirements = {
            1: {
                'sediment': 2500,
                'parcels': [1, 2, 3],
            }
        }
        expected_results = [1, 2, 4]
        selected_parcels = offsets.select_set_multifactor(parcels_dict,
            bio_requirements, es_requirements)
        # BUGFIX: this assertion was missing, so the test previously passed
        # without verifying the selection at all.
        self.assertEqual(selected_parcels, expected_results)

    def test_select_set_multifactor_hydro(self):
        """Hydrological (ES) requirements alone restrict the candidate set."""
        parcels_dict = {
            1: {'area': 400, 'carbon': 3500, 'sediment': 290, 'ecosystem': 'a'},
            2: {'area': 123, 'carbon': 382, 'sediment': 1348, 'ecosystem': 'a'},
            3: {'area': 8392, 'carbon': 1910, 'sediment': 18234, 'ecosystem': 'b'},
            4: {'area': 149, 'carbon': 192, 'sediment': 1019, 'ecosystem': 'b'},
        }
        es_requirements = {
            1: {
                'sediment': 4000,
                'parcels': [1, 2, 3],
            }
        }
        expected_parcels = [3]
        selected_parcels = offsets.select_set_multifactor(parcels_dict,
            es_hydro_req=es_requirements)
        self.assertEqual(selected_parcels, expected_parcels)

    def test_select_set_multifactor_hydro_mega(self):
        """proportion_offset scales the hydrological sediment requirement."""
        parcels_dict = {
            1: {'area': 400, 'carbon': 3500, 'sediment': 290, 'ecosystem': 'a'},
            2: {'area': 123, 'carbon': 382, 'sediment': 1348, 'ecosystem': 'a'},
            3: {'area': 8392, 'carbon': 1910, 'sediment': 18234, 'ecosystem': 'b'},
            4: {'area': 149, 'carbon': 192, 'sediment': 1019, 'ecosystem': 'b'},
        }
        es_requirements = {
            1: {
                'sediment': 4000,
                'parcels': [1, 2, 3],
            }
        }
        prop_offset = 4.75
        expected_parcels = [2, 3]
        selected_parcels = offsets.select_set_multifactor(parcels_dict,
            es_hydro_req=es_requirements, proportion_offset=prop_offset)
        self.assertEqual(selected_parcels, expected_parcels)

    def test_select_set_multifactor_global(self):
        """Global ES requirements apply across all parcels."""
        parcels_dict = {
            1: {'area': 400, 'carbon': 3500, 'sediment': 290, 'ecosystem': 'a'},
            2: {'area': 123, 'carbon': 382, 'sediment': 1348, 'ecosystem': 'a'},
            3: {'area': 8392, 'carbon': 1910, 'sediment': 18234, 'ecosystem': 'b'},
            4: {'area': 149, 'carbon': 192, 'sediment': 1019, 'ecosystem': 'b'},
        }
        global_reqs = {
            'carbon': 4000,
            'sediment': 500,
        }
        expected_parcels = [1, 3]
        selected_parcels = offsets.select_set_multifactor(parcels_dict,
            es_global_req=global_reqs)
        self.assertEqual(expected_parcels, selected_parcels)

    def test_select_set_multifactor_bio_hydro_global(self):
        """All three requirement types at once select every parcel here."""
        parcels_dict = {
            1: {'area': 400, 'carbon': 3500, 'sediment': 290, 'ecosystem': 'a'},
            2: {'area': 123, 'carbon': 382, 'sediment': 1348, 'ecosystem': 'a'},
            3: {'area': 8392, 'carbon': 1910, 'sediment': 18234, 'ecosystem': 'b'},
            4: {'area': 149, 'carbon': 192, 'sediment': 1019, 'ecosystem': 'b'},
        }
        bio_requirements = {
            'a': {
                'mitigation_area': 200,
            },
            'b': {
                'mitigation_area': 100,
            }
        }
        es_requirements = {
            1: {
                'sediment': 2500,
                'parcels': [1, 2, 3],
            }
        }
        global_reqs = {
            'carbon': 4000,
            'sediment': 500,
        }
        expected_parcels = [1, 2, 3, 4]
        selected_parcels = offsets.select_set_multifactor(parcels_dict,
            bio_requirements, es_requirements, global_reqs)
        self.assertEqual(expected_parcels, selected_parcels)

    def test_select_set(self):
        """select_set picks the parcel(s) meeting all factor minimums."""
        parcels_dict = {
            1: {'area': 400, 'carbon': 3500, 'sediment': 290},
            2: {'area': 123, 'carbon': 382, 'sediment': 1348},
            3: {'area': 8392, 'carbon': 1910, 'sediment': 18234},
            4: {'area': 149, 'carbon': 192, 'sediment': 1019},
        }
        requirements = [
            ('area', 350),
            ('sediment', 2000),
            ('carbon', 100),
        ]
        selected_parcels = offsets.select_set(parcels_dict, requirements)
        # Parcel 3 meets all three requirements.
        self.assertEqual(selected_parcels, [3])

    def test_select_set_2(self):
        """Multiple parcels are accumulated until the totals are met."""
        parcels = {
            1: {'area': 101, 'carbon': 201, 'sediment': 301},
            2: {'area': 102, 'carbon': 202, 'sediment': 302},
            3: {'area': 103, 'carbon': 203, 'sediment': 303},
            4: {'area': 104, 'carbon': 204, 'sediment': 304},
        }
        requirements = [
            ('area', 100),
            ('carbon', 400),
            ('sediment', 900),
        ]
        self.assertEqual(offsets.select_set(parcels, requirements), [2, 3, 4])

    def test_locate_biodiversity_offsets(self):
        """Only ecosystems with known impacts are returned as candidates."""
        eco_a = test_smoke.square((20, 20), 10)
        eco_b = test_smoke.square((60, 20), 10)
        fields = {
            'ecosystem': str,
            'LCI': float,
        }
        field_values = [
            {'ecosystem': 'eco_a', 'LCI': 0.3},
            {'ecosystem': 'eco_b', 'LCI': 0.2},
        ]
        natural_ecosystems = vector([eco_a, eco_b], COLOMBIA_SRS, fields,
            field_values, format='ESRI Shapefile')
        known_biodiversity_impacts = {
            'eco_a': {
                'min_impacted_parcel_area': 50,
                'min_lci': 0.1,
            },
        }
        expected_parcels = {
            0: {
                'ecosystem': 'eco_a',
                'area': 100.0,
                'lci': 0.3,
            },
        }
        self.assertEqual(offsets.locate_biodiversity_offsets(natural_ecosystems,
            known_biodiversity_impacts), expected_parcels)

    def test_locate_biodiversity_offsets_all(self):
        """With no known impacts, every ecosystem parcel is returned."""
        eco_a = test_smoke.square((20, 20), 10)
        eco_b = test_smoke.square((60, 20), 10)
        fields = {
            'ecosystem': str,
            'LCI': float,
        }
        field_values = [
            {'ecosystem': 'eco_a', 'LCI': 0.3},
            {'ecosystem': 'eco_b', 'LCI': 0.2},
        ]
        natural_ecosystems = vector([eco_a, eco_b], COLOMBIA_SRS, fields,
            field_values, format='ESRI Shapefile')
        expected_parcels = {
            0: {
                'ecosystem': 'eco_a',
                'area': 100.0,
                'lci': 0.3,
            },
            1: {
                'ecosystem': 'eco_b',
                'area': 100.0,
                'lci': 0.2,
            },
        }
        self.assertEqual(offsets.locate_biodiversity_offsets(natural_ecosystems,
            {}), expected_parcels)

    def test_locate_biodiversity_offsets_bio_and_all(self):
        """include_all_ecosystems=True returns impacted and non-impacted."""
        eco_a = test_smoke.square((20, 20), 10)
        eco_b = test_smoke.square((60, 20), 10)
        fields = {
            'ecosystem': str,
            'LCI': float,
        }
        field_values = [
            {'ecosystem': 'eco_a', 'LCI': 0.3},
            {'ecosystem': 'eco_b', 'LCI': 0.2},
        ]
        natural_ecosystems = vector([eco_a, eco_b], COLOMBIA_SRS, fields,
            field_values, format='ESRI Shapefile')
        known_biodiversity_impacts = {
            'eco_a': {
                'min_impacted_parcel_area': 50,
                'min_lci': 0.1,
            },
        }
        expected_parcels = {
            0: {
                'ecosystem': 'eco_a',
                'area': 100.0,
                'lci': 0.3,
            },
            1: {
                'ecosystem': 'eco_b',
                'area': 100.0,
                'lci': 0.2,
            },
        }
        self.assertEqual(offsets.locate_biodiversity_offsets(natural_ecosystems,
            known_biodiversity_impacts, include_all_ecosystems=True),
            expected_parcels)

    def test_select_offsets(self):
        """End-to-end _select_offsets run; the 5th parcel is screened out."""
        ecosystems_polygons = [test_smoke.square((x, 20), 10)
            for x in [20, 60, 100, 140]]
        # Screening should screen out this parcel.
        ecosystems_polygons += [test_smoke.square((160, 20), 10)]
        ecosystems_fields = {
            'ecosystem': str,
            'LCI': float,
            'carbon': float,
            'nutrient': float,
            'sediment': float,
        }
        ecosystems_attributes = [
            {'ecosystem': 'eco_a', 'LCI': 0.1, 'carbon': 822.82,
                'nutrient': 357.12, 'sediment': 395.82},
            {'ecosystem': 'eco_b', 'LCI': 0.1, 'carbon': 396.34,
                'nutrient': 934.77, 'sediment': 121.27},
            {'ecosystem': 'eco_a', 'LCI': 0.1, 'carbon': 831.61,
                'nutrient': 136.92, 'sediment': 379.30},
            {'ecosystem': 'eco_b', 'LCI': 0.1, 'carbon': 402.12,
                'nutrient': 829.71, 'sediment': 571.09},
            {'ecosystem': 'eco_b', 'LCI': 0.1, 'carbon': 123.42,
                'nutrient': 14.34, 'sediment': 592.01},
        ]
        ecosystems_vector = vector(ecosystems_polygons, COLOMBIA_SRS,
            ecosystems_fields, ecosystems_attributes, format='ESRI Shapefile')
        biodiversity_impacts = {
            'eco_a': {
                'min_impacted_parcel_area': 50,
                'min_lci': 0.1,
                'max_threat': None,
                'min_richness': None,
                'mitigation_area': 125.0,
            }
        }
        impacts_polygons = map(lambda x: test_smoke.square((x, 50), 10), [40,
            80])
        impact_columns = {
            'carbon': float,
            'sediment': float,
            'nutrient': float,
        }
        impact_field_values = [
            {'carbon': 123.43, 'nutrient': 898.12, 'sediment': 901.23},
            {'carbon': 817.93, 'nutrient': 671.58, 'sediment': 173.61},
        ]
        impact_parcels_vector = vector(impacts_polygons, COLOMBIA_SRS,
            impact_columns, impact_field_values, format='ESRI Shapefile')
        output_workspace = os.path.join(os.getcwd(), 'test_select_offsets')
        output_vector = os.path.join(output_workspace, 'output_vector.shp')
        output_json = os.path.join(output_workspace, 'output_json.json')
        # Start from a clean workspace so stale outputs can't affect results.
        if os.path.exists(output_workspace):
            shutil.rmtree(output_workspace)
        os.makedirs(output_workspace)
        offset_tuple = offsets._select_offsets(ecosystems_vector,
            impact_parcels_vector, biodiversity_impacts, output_vector,
            output_json)
        self.assertEqual(offset_tuple, ([0, 2], {}))

    def test_select_offsets_no_impacts(self):
        """Zeroed requirements and zeroed impacts select every parcel."""
        ecosystems_polygons = map(lambda x: test_smoke.square((x, 20), 10), [20, 60,
            100, 140])
        ecosystems_fields = {
            'ecosystem': str,
            'LCI': float,
            'carbon': float,
            'nutrient': float,
            'sediment': float,
        }
        ecosystems_attributes = [
            {'ecosystem': 'eco_a', 'LCI': 0.1, 'carbon': 822.82,
                'nutrient': 357.12, 'sediment': 395.82},
            {'ecosystem': 'eco_b', 'LCI': 0.1, 'carbon': 396.34,
                'nutrient': 934.77, 'sediment': 121.27},
            {'ecosystem': 'eco_a', 'LCI': 0.1, 'carbon': 831.61,
                'nutrient': 136.92, 'sediment': 379.30},
            {'ecosystem': 'eco_b', 'LCI': 0.1, 'carbon': 402.12,
                'nutrient': 829.71, 'sediment': 571.09},
        ]
        ecosystems_vector = vector(ecosystems_polygons, COLOMBIA_SRS,
            ecosystems_fields, ecosystems_attributes, format='ESRI Shapefile')
        biodiversity_impacts = {
            'eco_a': {
                'min_impacted_parcel_area': 0,
                'min_lci': 0,
                'max_threat': None,
                'min_richness': None,
                'mitigation_area': 0,
            },
            'eco_b': {
                'min_impacted_parcel_area': 0,
                'min_lci': 0,
                'max_threat': None,
                'min_richness': None,
                'mitigation_area': 0,
            }
        }
        impacts_polygons = map(lambda x: test_smoke.square((x, 50), 10), [40,
            80])
        impact_columns = {
            'carbon': float,
            'sediment': float,
            'nutrient': float,
        }
        impact_field_values = [
            {'carbon': 0.0, 'nutrient': 0.0, 'sediment': 0.0},
            {'carbon': 0.0, 'nutrient': 0.0, 'sediment': 0.0},
        ]
        impact_parcels_vector = vector(impacts_polygons, COLOMBIA_SRS,
            impact_columns, impact_field_values, format='ESRI Shapefile')
        output_workspace = os.path.join(os.getcwd(), 'test_select_offsets')
        output_vector = os.path.join(output_workspace, 'output_vector.shp')
        output_json = os.path.join(output_workspace, 'output_json.json')
        if os.path.exists(output_workspace):
            shutil.rmtree(output_workspace)
        os.makedirs(output_workspace)
        offset_tuple = offsets._select_offsets(ecosystems_vector,
            impact_parcels_vector, biodiversity_impacts, output_vector,
            output_json)
        self.assertEqual(offset_tuple, ([0, 1, 2, 3], {}))

    def test_select_offsets_scheme_es_and_bio(self):
        # NOTE(review): this test is currently byte-identical to
        # test_select_offsets_no_impacts and does not exercise a distinct
        # ES+bio scheme as its name suggests — TODO confirm intent and
        # differentiate the fixture.
        ecosystems_polygons = map(lambda x: test_smoke.square((x, 20), 10), [20, 60,
            100, 140])
        ecosystems_fields = {
            'ecosystem': str,
            'LCI': float,
            'carbon': float,
            'nutrient': float,
            'sediment': float,
        }
        ecosystems_attributes = [
            {'ecosystem': 'eco_a', 'LCI': 0.1, 'carbon': 822.82,
                'nutrient': 357.12, 'sediment': 395.82},
            {'ecosystem': 'eco_b', 'LCI': 0.1, 'carbon': 396.34,
                'nutrient': 934.77, 'sediment': 121.27},
            {'ecosystem': 'eco_a', 'LCI': 0.1, 'carbon': 831.61,
                'nutrient': 136.92, 'sediment': 379.30},
            {'ecosystem': 'eco_b', 'LCI': 0.1, 'carbon': 402.12,
                'nutrient': 829.71, 'sediment': 571.09},
        ]
        ecosystems_vector = vector(ecosystems_polygons, COLOMBIA_SRS,
            ecosystems_fields, ecosystems_attributes, format='ESRI Shapefile')
        biodiversity_impacts = {
            'eco_a': {
                'min_impacted_parcel_area': 0,
                'min_lci': 0,
                'max_threat': None,
                'min_richness': None,
                'mitigation_area': 0,
            },
            'eco_b': {
                'min_impacted_parcel_area': 0,
                'min_lci': 0,
                'max_threat': None,
                'min_richness': None,
                'mitigation_area': 0,
            }
        }
        impacts_polygons = map(lambda x: test_smoke.square((x, 50), 10), [40,
            80])
        impact_columns = {
            'carbon': float,
            'sediment': float,
            'nutrient': float,
        }
        impact_field_values = [
            {'carbon': 0.0, 'nutrient': 0.0, 'sediment': 0.0},
            {'carbon': 0.0, 'nutrient': 0.0, 'sediment': 0.0},
        ]
        impact_parcels_vector = vector(impacts_polygons, COLOMBIA_SRS,
            impact_columns, impact_field_values, format='ESRI Shapefile')
        output_workspace = os.path.join(os.getcwd(), 'test_select_offsets')
        output_vector = os.path.join(output_workspace, 'output_vector.shp')
        output_json = os.path.join(output_workspace, 'output_json.json')
        if os.path.exists(output_workspace):
            shutil.rmtree(output_workspace)
        os.makedirs(output_workspace)
        offset_tuple = offsets._select_offsets(ecosystems_vector,
            impact_parcels_vector, biodiversity_impacts, output_vector,
            output_json)
        self.assertEqual(offset_tuple, ([0, 1, 2, 3], {}))

    def test_translate_percent_overlap_to_sshed_data(self):
        """translate_parcel_data lowercases keys and renames fields.

        BUGFIX: renamed from ``translate_percent_overlap_to_sshed_data`` —
        without the ``test_`` prefix unittest discovery never ran this test.
        """
        sample_dict = {
            0: {
                "Aoi": 0.0,
                "Area": 24769554.721130043,
                "Carbon": 660488.5930480957,
                "City": 0.0,
                "Distance": 105299.1312844463,
                "Ecosystem": "Bosques naturales",
                "Hydrozone": 0.0,
                "Lci": 0.030022655251212,
                "Nitrogen": 15000.409298000042,
                "Sediment": 52037.28566128289,
                "municipalities": {
                    "CASABE_05893": 1.0,
                    "CONCORDIA_47205": 1.0,
                    "SAN LUIS_47798": 1.0
                }
            },
            4: {
                "Aoi": 0.0,
                "Area": 39053053.690533355,
                "Carbon": 1046098.0106658936,
                "City": 0.0,
                "Distance": 111860.43849765629,
                "Ecosystem": "Bosques naturales",
                "Hydrozone": 0.0,
                "Lci": 0.017458507801602,
                "Nitrogen": 22452.06991350127,
                "Sediment": 60673.52671940386,
                "municipalities": {
                    "CASABE_05893": 0.998,
                    "CONCORDIA_47205": 1.0,
                    "SAN PABLO_13670": 0.001,
                }
            },
        }
        expected_parcels = {
            0: {
                'carbon': 660488.5930480957,
                'nutrient': 15000.409298000042,
                'sediment': 52037.28566128289,
                'area': 24769554.721130043,
                'ecosystem': "Bosques naturales",
                'overlap': {
                    'CASABE_05893': 1.0,
                    'CONCORDIA_47205': 1.0,
                    'SAN LUIS_47798': 1.0,
                }
            },
            4: {
                "area": 39053053.690533355,
                "carbon": 1046098.0106658936,
                "nutrient": 22452.06991350127,
                "sediment": 60673.52671940386,
                "ecosystem": "Bosques naturales",
                'overlap': {
                    'CASABE_05893': 0.998,
                    'CONCORDIA_47205': 1.0,
                    'SAN PABLO_13670': 0.001,
                }
            },
        }
        returned_parcels = offsets.translate_parcel_data(sample_dict)
        self.assertEqual(expected_parcels, returned_parcels)

    def test_translate_es_impacts(self):
        """ES impacts are apportioned to municipalities by overlap fraction."""
        sample_dict = {
            0: {
                "Aoi": 0.0,
                "Area": 24769554.721130043,
                "Carbon": 660488.5930480957,
                "City": 0.0,
                "Distance": 105299.1312844463,
                "Ecosystem": "Bosques naturales",
                "Hydrozone": 0.0,
                "Lci": 0.030022655251212,
                "Nitrogen": 15000.409298000042,
                "Sediment": 52037.28566128289,
                "municipalities": {
                    "CASABE_05893": 1.0,
                    "CONCORDIA_47205": 1.0,
                    "SAN LUIS_47798": 1.0
                }
            },
            4: {
                "Aoi": 0.0,
                "Area": 39053053.690533355,
                "Carbon": 1046098.0106658936,
                "City": 0.0,
                "Distance": 111860.43849765629,
                "Ecosystem": "Bosques naturales",
                "Hydrozone": 0.0,
                "Lci": 0.017458507801602,
                "Nitrogen": 22452.06991350127,
                "Sediment": 60673.52671940386,
                "municipalities": {
                    "CASABE_05893": 0.998,
                    "CONCORDIA_47205": 1.0,
                    "SAN PABLO_13670": 0.001,
                }
            },
        }
        expected_parcels = {
            'CASABE_05893': {
                'carbon': 1704494.4076926573,
                'sediment': 112589.46532724795,
                'nutrient': 37407.575071674306,
            },
            'CONCORDIA_47205': {
                'carbon': 1706586.6037139893,
                'sediment': 112710.81238068675,
                'nutrient': 37452.47921150131,
            },
            'SAN PABLO_13670': {
                'carbon': 1046.0980106658935,
                'sediment': 60.67352671940386,
                'nutrient': 22.45206991350127,
            },
            'SAN LUIS_47798': {
                'carbon': 660488.5930480957,
                'sediment': 52037.28566128289,
                'nutrient': 15000.409298000042,
            }
        }
        returned_parcels = offsets.translate_es_impacts(sample_dict)
        self.assertEqual(expected_parcels, returned_parcels)

    def test_group_offsets_by_sshed(self):
        """Parcels are grouped under every municipality they overlap."""
        sample_dict = {
            0: {
                "Aoi": 0.0,
                "Area": 24769554.721130043,
                "Carbon": 660488.5930480957,
                "City": 0.0,
                "Distance": 105299.1312844463,
                "Ecosystem": "Bosques naturales",
                "Hydrozone": 0.0,
                "Lci": 0.030022655251212,
                "Nitrogen": 15000.409298000042,
                "Sediment": 52037.28566128289,
                "municipalities": {
                    "CASABE_05893": 1.0,
                    "CONCORDIA_47205": 1.0,
                    "SAN LUIS_47798": 1.0
                }
            },
            4: {
                "Aoi": 0.0,
                "Area": 39053053.690533355,
                "Carbon": 1046098.0106658936,
                "City": 0.0,
                "Distance": 111860.43849765629,
                "Ecosystem": "Bosques naturales",
                "Hydrozone": 0.0,
                "Lci": 0.017458507801602,
                "Nitrogen": 22452.06991350127,
                "Sediment": 60673.52671940386,
                "municipalities": {
                    "CASABE_05893": 0.998,
                    "CONCORDIA_47205": 1.0,
                    "SAN PABLO_13670": 0.001,
                }
            },
        }
        expected_groupings = {
            "CASABE_05893": [0, 4],
            "CONCORDIA_47205": [0, 4],
            "SAN PABLO_13670": [4],
            "SAN LUIS_47798": [0],
        }
        returned_groupings = offsets.group_offset_parcels_by_sshed(sample_dict)
        self.assertEqual(returned_groupings, expected_groupings)
| 36.160112
| 84
| 0.491921
| 2,421
| 25,746
| 5.017761
| 0.108633
| 0.012183
| 0.026342
| 0.011771
| 0.867139
| 0.839891
| 0.824251
| 0.805565
| 0.802107
| 0.794452
| 0
| 0.139756
| 0.373844
| 25,746
| 711
| 85
| 36.21097
| 0.613796
| 0.007069
| 0
| 0.673981
| 0
| 0
| 0.168917
| 0.006574
| 0
| 0
| 0
| 0
| 0.028213
| 1
| 0.029781
| false
| 0
| 0.010972
| 0
| 0.04232
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d49e80f416526d3bc9a6714fe0affa18b6e2624e
| 5,500
|
py
|
Python
|
data/chempy/tinker/make99_simple.py
|
dualword/pymol-open-source
|
abc307745d7d231af4f77f984ebd64f1b428cef8
|
[
"CNRI-Python"
] | 636
|
2018-06-21T20:46:36.000Z
|
2022-03-30T13:07:47.000Z
|
data/chempy/tinker/make99_simple.py
|
dualword/pymol-open-source
|
abc307745d7d231af4f77f984ebd64f1b428cef8
|
[
"CNRI-Python"
] | 218
|
2018-06-25T00:10:59.000Z
|
2022-03-23T14:15:48.000Z
|
data/chempy/tinker/make99_simple.py
|
dualword/pymol-open-source
|
abc307745d7d231af4f77f984ebd64f1b428cef8
|
[
"CNRI-Python"
] | 192
|
2018-06-21T17:33:10.000Z
|
2022-03-31T17:53:03.000Z
|
import string
# Stitch parm99_wld.dat and parm_simple.dat together into parm99_simple.dat.
# The two inputs are read strictly in lockstep, section by section, with
# extra hand-curated WLD parameter lines appended to the BONDS / ANGLES /
# TORSIONS sections.  NOTE(review): relies on the Python 2 ``string.strip``
# function-style API.
f = open("parm99_wld.dat")
g = open("parm_simple.dat")
h = open("parm99_simple.dat", 'w')


def copy_until_blank(src):
    """Copy lines from src into h up to, but not including, the first
    blank (whitespace-only) line."""
    while 1:
        line = src.readline()
        if not string.strip(line):
            break
        h.write(line)


def write_extra(extra_lines):
    """Append hand-curated parameter lines, then a section-ending blank."""
    for extra in extra_lines:
        h.write(extra + "\n")
    h.write("\n")


def copy_tinker(src):
    """Copy every remaining 'TINKER'-prefixed line from src into h."""
    while 1:
        line = src.readline()
        if not line:
            break
        if line[0:6] == 'TINKER':
            h.write(line)


# Header/atom-type section from f (section-ending blank line is dropped).
copy_until_blank(f)

g.readline()  # skip first line
# Copy from g through (and including) the next blank line.
while 1:
    line = g.readline()
    h.write(line)
    if not string.strip(line):
        break

h.write(f.readline())
g.readline()  # skip this stuff for now

# BONDS
copy_until_blank(f)
copy_until_blank(g)
write_extra([
    'T2-S 166.0 2.038 WLD from S -S ',
    'DJ-N 490.0 1.335 WLD from C -N ',
    'C -J3 490.0 1.335 WLD from C -N ',
    'D4-H1 340.0 1.090 WLD from CT-H1',
    'C -D4 317.0 1.522 WLD from C -CT',
    'D4-N 337.0 1.449 WLD from CT-N',
])

# ANGLES
copy_until_blank(f)
copy_until_blank(g)
write_extra([
    'H -N -DJ 50.0 120.00 WLD from C -N -H ',
    'A -J3-C 50.0 120.00 WLD from C -N -H ',
    'C -J3-D4 50.0 121.90 WLD from C -N -CT',
    'CT-S -T2 68.0 103.70 WLD from CT-S -S ',
    'CT-N -DJ 50.0 121.90 WLD from C -N -CT',
    'CT-C -J3 70.0 116.60 WLD from CT-C -N ',
    'D4-T2-S 68.0 103.70 WLD from CT-S -S ',
    'D4-DJ-N 70.0 116.60 WLD from CT-C -N ',
    'DJ-N -CT 50.0 121.90 WLD from C -N -CT',
    'J3-C -O 80.0 122.90 WLD from N -C -O ',
    'N -DJ-Q1 80.0 122.90 WLD from N -C -O ',
    'N -DJ-DJ 70.0 120.00 WLD from CA-C -OH',
    'C -N -D4 50.0 121.90 WLD from C -N -CT',
    'D4-C -N 70.0 116.60 WLD from CT-C -N ',
    'D4-C -O 80.0 120.40 WLD from CT-C -O ',
    'C -D4-D4 63.0 111.10 WLD from C -CT-CT',
    'C -D4-H1 50.0 109.50 WLD from C -CT-H1',
    'D4-N -H 50.0 118.04 WLD from CT-N -H ',
    'H1-D4-N 50.0 109.50 WLD from H1-CT-N ',
    'C -D4-N 63.0 110.10 WLD from C -CT-N ',
    'D4-D4-N 80.0 109.70 WLD from CT-CT-N ',
    'D4-D4-H1 50.0 109.50 WLD from CT-CT-H1',
])

# TORSIONS
copy_until_blank(f)
copy_until_blank(g)
write_extra([
    'X -C -J3-X 4 10.00 180.0 2. WLD from X -C -N -X',
    'X -DJ-N -X 4 10.00 180.0 2. WLD from X -C -N -X',
    'DA-DJ-DJ-Q1 4 0.00 180.0 2. WLD on benzamide',
    'DJ-DJ-DJ-Q1 4 0.00 180.0 2. WLD on benzamide',
    'D4-T2-S -CT 1 3.50 0.0 -2. WLD from CT-S-S-CT',
    'D4-T2-S -CT 1 0.60 0.0 3. WLD from CT-S-S-CT',
    'N -D4-DJ-J3 1 2.000 180.000 2. WLD from N-CT-C -N ',
    'DJ-CT-C -N 1 2.000 180.000 2. WLD from N-CT-C -N ',
    'C -J3-D4-DJ 1 0.850 180.000 -2. WLD from C-N -CT-C ',
    'C -J3-D4-DJ 1 0.800 0.000 1. WLD from C-N -CT-C ',
    'DJ-N -CT-C 1 0.850 180.000 -2. WLD from C-N -CT-C ',
    'DJ-N -CT-C 1 0.800 0.000 1. WLD from C-N -CT-C ',
    'D4-D4-J3-C 1 0.50 180.0 -4. WLD from CT-CT-N -C',
    'D4-D4-J3-C 1 0.15 180.0 -3. WLD from CT-CT-N -C',
    'D4-D4-J3-C 1 0.53 0.0 1. WLD from CT-CT-N -C',
    'CT-CT-N -DJ 1 0.50 180.0 -4. WLD from CT-CT-N -C',
    'CT-CT-N -DJ 1 0.15 180.0 -3. WLD from CT-CT-N -C',
    'CT-CT-N -DJ 1 0.53 0.0 1. WLD from CT-CT-N -C',
    'D4-D4-DJ-N 1 0.100 0.0 -4. WLD from CT-CT-C -N',
    'D4-D4-DJ-N 1 0.07 0.0 2. WLD from CT-CT-C -N',
    'CT-CT-C -J3 1 0.100 0.0 -4. WLD from CT-CT-C -N',
    'CT-CT-C -J3 1 0.07 0.0 2. WLD from CT-CT-C -N',
    'H -N -DJ-Q1 1 2.50 180.0 -2. WLD from H -N -C -O',
    'H -N -DJ-Q1 1 2.00 0.0 1. WLD from H -N -C -O',
    'A -J3-C -O 1 2.50 180.0 -2. WLD from H -N -C -O',
    'A -J3-C -O 1 2.00 0.0 1. WLD from H -N -C -O',
])

copy_until_blank(f)
copy_until_blank(g)
h.write("\n")

# Copy from f up to and including its 'MOD4' marker line...
while 1:
    line = f.readline()
    h.write(line)
    if line[0:4] == 'MOD4':
        break
# ...and silently advance g past its own 'MOD4' marker.
while 1:
    line = g.readline()
    if line[0:4] == 'MOD4':
        break

copy_until_blank(f)
copy_until_blank(g)
h.write("\n")

# Three literal lines taken from f; the matching g lines are discarded.
for _ in range(3):
    h.write(f.readline())
    g.readline()

# Tail: keep only the 'TINKER' lines, first from f, then from g.
copy_tinker(f)
copy_tinker(g)
| 26.829268
| 82
| 0.438727
| 1,066
| 5,500
| 2.260788
| 0.090994
| 0.151037
| 0.085892
| 0.06971
| 0.848548
| 0.80249
| 0.749378
| 0.709544
| 0.668465
| 0.615353
| 0
| 0.166516
| 0.395091
| 5,500
| 204
| 83
| 26.960784
| 0.55786
| 0.011091
| 0
| 0.639752
| 0
| 0.149068
| 0.636598
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.006211
| 0
| 0.006211
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d4b4c0eca8183968d2474a76a5af6cb25943d7e6
| 49
|
py
|
Python
|
scan_models/proconos/__init__.py
|
ssdemajia/ids-backend
|
188af247befa44596f62c660c24b05474d1ba29f
|
[
"MIT"
] | 1
|
2020-05-22T09:52:33.000Z
|
2020-05-22T09:52:33.000Z
|
scan_models/proconos/__init__.py
|
ssdemajia/ids-backend
|
188af247befa44596f62c660c24b05474d1ba29f
|
[
"MIT"
] | 8
|
2021-03-18T21:22:40.000Z
|
2022-03-11T23:32:48.000Z
|
scan_models/proconos/__init__.py
|
ssdemajia/ids-backend
|
188af247befa44596f62c660c24b05474d1ba29f
|
[
"MIT"
] | null | null | null |
from .scan import proconos_resolve, proconos_scan
| 49
| 49
| 0.877551
| 7
| 49
| 5.857143
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.081633
| 49
| 1
| 49
| 49
| 0.911111
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d4cc668a815fe00c944470fe10e696f10b2136f2
| 262
|
py
|
Python
|
acoular/demo/__init__.py
|
ishine/acoular
|
4d790517adb38dc012b1f06966262b94f3625358
|
[
"BSD-3-Clause"
] | 294
|
2015-03-24T09:19:12.000Z
|
2022-03-11T02:59:11.000Z
|
acoular/demo/__init__.py
|
ishine/acoular
|
4d790517adb38dc012b1f06966262b94f3625358
|
[
"BSD-3-Clause"
] | 45
|
2015-11-06T15:15:22.000Z
|
2022-03-18T07:05:30.000Z
|
acoular/demo/__init__.py
|
ishine/acoular
|
4d790517adb38dc012b1f06966262b94f3625358
|
[
"BSD-3-Clause"
] | 100
|
2015-05-05T15:18:57.000Z
|
2022-03-21T09:48:05.000Z
|
# coding=UTF-8
#------------------------------------------------------------------------------
# Copyright (c) 2007-2021, Acoular Development Team.
#------------------------------------------------------------------------------
from . import acoular_demo
| 37.428571
| 80
| 0.274809
| 14
| 262
| 5.071429
| 0.928571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.0375
| 0.083969
| 262
| 7
| 81
| 37.428571
| 0.258333
| 0.835878
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
d4d80f876cbbd08e59366a453971f0927304035f
| 239
|
py
|
Python
|
semeval2020/preprocessor/tsne_preprocessor.py
|
DavidRother/semeval2020-task1
|
715f82afb8b282669d59ff610b63714d19db4618
|
[
"MIT"
] | 8
|
2020-12-02T23:18:59.000Z
|
2021-12-19T11:19:28.000Z
|
semeval2020/preprocessor/tsne_preprocessor.py
|
DavidRother/semeval2020-task1
|
715f82afb8b282669d59ff610b63714d19db4618
|
[
"MIT"
] | 1
|
2020-05-24T15:22:26.000Z
|
2020-05-25T08:08:07.000Z
|
semeval2020/preprocessor/tsne_preprocessor.py
|
DavidRother/semeval2020-task1
|
715f82afb8b282669d59ff610b63714d19db4618
|
[
"MIT"
] | null | null | null |
from semeval2020.factory_hub import preprocessor_factory
from sklearn.manifold import TSNE
# Register scikit-learn's TSNE with the project's preprocessor factory under
# each alias the pipeline configurations refer to (same class for all three).
for _alias in ("TSNE", "TSNE_AE", "TSNE_AE_Language"):
    preprocessor_factory.register(_alias, TSNE)
| 34.142857
| 56
| 0.853556
| 30
| 239
| 6.533333
| 0.4
| 0.387755
| 0.352041
| 0.47449
| 0.556122
| 0.377551
| 0
| 0
| 0
| 0
| 0
| 0.017857
| 0.062762
| 239
| 6
| 57
| 39.833333
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0.112971
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.4
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
d4d8d9a0446111d24169cb19abb7db2b7a4e29d1
| 29,954
|
py
|
Python
|
notifications/tests/test_notifications.py
|
konradko/directory-api
|
e9cd05b1deaf575e94352c46ddbd1857d8119fda
|
[
"MIT"
] | 1
|
2021-11-06T12:08:26.000Z
|
2021-11-06T12:08:26.000Z
|
notifications/tests/test_notifications.py
|
konradko/directory-api
|
e9cd05b1deaf575e94352c46ddbd1857d8119fda
|
[
"MIT"
] | null | null | null |
notifications/tests/test_notifications.py
|
konradko/directory-api
|
e9cd05b1deaf575e94352c46ddbd1857d8119fda
|
[
"MIT"
] | null | null | null |
from datetime import timedelta, datetime
from unittest.mock import patch, MagicMock, PropertyMock
import pytest
from freezegun import freeze_time
from django.core import mail
from django.utils import timezone
from buyer.tests.factories import BuyerFactory
from company.tests.factories import CompanyFactory
from notifications import email, notifications
from notifications.models import (
SupplierEmailNotification,
)
from notifications.tests.factories import (
AnonymousUnsubscribeFactory,
AnonymousEmailNotificationFactory,
SupplierEmailNotificationFactory,
)
from supplier.tests.factories import SupplierFactory
# Dotted path of the SSO API client method reporting a user's last login.
# Not referenced in the tests visible in this chunk — presumably used as a
# patch target elsewhere in the module; TODO confirm.
LAST_LOGIN_API_METHOD = (
    'directory_sso_api_client.user.UserAPIClient.get_last_login')
@pytest.mark.django_db
@patch('core.tasks.send_email')
def test_doesnt_send_ver_code_email_when_user_has_input_ver_code(mock_task):
    """No reminder email is queued for suppliers whose companies are already
    verified with a code, regardless of when the letter was sent."""
    eight_days_ago = timezone.now() - timedelta(days=8)
    sixteen_days_ago = timezone.now() - timedelta(days=16)
    # All three companies are verified, so none crosses a reminder threshold.
    SupplierFactory(
        company__verified_with_code=True,
        company__date_verification_letter_sent=eight_days_ago)
    SupplierFactory(
        company__verified_with_code=True,
        company__date_verification_letter_sent=sixteen_days_ago)
    supplier_with_reminder = SupplierFactory(
        company__verified_with_code=True,
        company__date_verification_letter_sent=sixteen_days_ago)
    # Pre-existing notification record: must not trigger another email either.
    SupplierEmailNotificationFactory(
        supplier=supplier_with_reminder,
        category='verification_code_not_given',
        date_sent=eight_days_ago
    )
    notifications.verification_code_not_given()
    assert mock_task.delay.called is False
    # just the one created in setup
    assert SupplierEmailNotification.objects.all().count() == 1
@freeze_time()
@pytest.mark.django_db
@patch('core.tasks.send_email')
def test_sends_ver_code_email_when_not_input_for_8_days(mock_task, settings):
    """Exactly one first-reminder email goes to the supplier whose letter was
    sent 8+ days ago and who has not yet been reminded; the 7-day supplier is
    too recent and the 9-day supplier was already reminded yesterday."""
    expected_subject = email.VerificationWaitingNotification.subject
    seven_days_ago = timezone.now() - timedelta(days=7)
    eight_days_ago = timezone.now() - timedelta(days=8)
    nine_days_ago = timezone.now() - timedelta(days=9)
    SupplierFactory(
        company__verified_with_code=False,
        company__date_verification_letter_sent=seven_days_ago)
    supplier = SupplierFactory(
        company__verified_with_code=False,
        company__date_verification_letter_sent=eight_days_ago)
    supplier_with_reminder = SupplierFactory(
        company__verified_with_code=False,
        company__date_verification_letter_sent=nine_days_ago)
    SupplierEmailNotificationFactory(
        supplier=supplier_with_reminder,
        category='verification_code_not_given',
        date_sent=(timezone.now() - timedelta(days=1))
    )
    notifications.verification_code_not_given()
    assert mock_task.delay.call_count == 1
    assert len(mock_task.delay.call_args_list) == 1
    call_args = mock_task.delay.call_args[1]
    assert call_args['recipient_email'] == supplier.company_email
    assert call_args['subject'] == expected_subject
    assert call_args['from_email'] == settings.FAB_FROM_EMAIL
    # One record pre-existed; the run adds one more.
    assert SupplierEmailNotification.objects.all().count() == 2
@freeze_time()
@pytest.mark.django_db
@patch('notifications.email.VerificationWaitingNotification.zendesk_url',
       PropertyMock(return_value='http://help.zendesk.com'))
@patch('core.tasks.send_email')
def test_ver_code_email_has_expected_vars_in_template(mock_task, settings):
    """The first-reminder email body (text and HTML) contains the supplier's
    name, the verification URL and the (patched) zendesk URL."""
    settings.VERIFICATION_CODE_URL = 'http://great.gov.uk/verrrrify'
    expected_url = 'http://great.gov.uk/verrrrify'
    eight_days_ago = timezone.now() - timedelta(days=8)
    supplier = SupplierFactory(
        company__date_verification_letter_sent=eight_days_ago,
        company__verified_with_code=False, date_joined=eight_days_ago)
    notifications.verification_code_not_given()
    assert len(mock_task.delay.call_args_list) == 1
    call_args = mock_task.delay.call_args[1]
    assert call_args['from_email'] == settings.FAB_FROM_EMAIL
    assert supplier.name in call_args['text_body']
    assert supplier.name in call_args['html_body']
    assert expected_url in call_args['text_body']
    assert expected_url in call_args['html_body']
    assert 'http://help.zendesk.com' in call_args['text_body']
    assert 'http://help.zendesk.com' in call_args['html_body']
@freeze_time()
@pytest.mark.django_db
@patch('core.tasks.send_email')
def test_sends_ver_code_email_when_not_input_for_16_days(mock_task, settings):
    """The second-reminder email goes only to the 16-day supplier: 15 days is
    too recent for the second reminder, and the 17-day supplier already
    received the second email yesterday."""
    expected_subject = email.VerificationStillWaitingNotification.subject
    fifteen_days_ago = timezone.now() - timedelta(days=15)
    sixteen_days_ago = timezone.now() - timedelta(days=16)
    seventeen_days_ago = timezone.now() - timedelta(days=17)
    supplier15 = SupplierFactory(
        company__verified_with_code=False,
        company__date_verification_letter_sent=fifteen_days_ago)
    supplier16 = SupplierFactory(
        company__verified_with_code=False,
        company__date_verification_letter_sent=sixteen_days_ago)
    supplier17 = SupplierFactory(
        company__verified_with_code=False,
        company__date_verification_letter_sent=seventeen_days_ago)
    # Every supplier already got the first (8-day) reminder.
    SupplierEmailNotificationFactory(
        supplier=supplier15,
        category='verification_code_not_given',
        date_sent=(timezone.now() - timedelta(days=7))
    )
    SupplierEmailNotificationFactory(
        supplier=supplier16,
        category='verification_code_not_given',
        date_sent=(timezone.now() - timedelta(days=8))
    )
    SupplierEmailNotificationFactory(
        supplier=supplier17,
        category='verification_code_not_given',
        date_sent=(timezone.now() - timedelta(days=9))
    )
    # supplier17 also already got the second (16-day) reminder.
    SupplierEmailNotificationFactory(
        supplier=supplier17,
        category='verification_code_2nd_email',
        date_sent=(timezone.now() - timedelta(days=1))
    )
    notifications.verification_code_not_given()
    assert mock_task.delay.call_count == 1
    assert len(mock_task.delay.call_args_list) == 1
    call_args = mock_task.delay.call_args[1]
    assert call_args['from_email'] == settings.FAB_FROM_EMAIL
    assert call_args['recipient_email'] == supplier16.company_email
    assert call_args['subject'] == expected_subject
    # Four records pre-existed; the run adds one for supplier16.
    assert SupplierEmailNotification.objects.all().count() == 5
@freeze_time()
@pytest.mark.django_db
@patch('notifications.email.VerificationStillWaitingNotification.zendesk_url',
       PropertyMock(return_value='http://help.zendesk.com'))
@patch('core.tasks.send_email')
def test_ver_code_email2_has_expected_vars_in_template(mock_task, settings):
    """Second verification email renders the supplier name, the
    verification URL and the zendesk URL in both text and HTML bodies."""
    settings.VERIFICATION_CODE_URL = 'http://great.gov.uk/verrrrify'
    letter_sent = timezone.now() - timedelta(days=16)
    supplier = SupplierFactory(
        company__date_verification_letter_sent=letter_sent,
        company__verified_with_code=False, date_joined=letter_sent)

    notifications.verification_code_not_given()

    assert len(mock_task.delay.call_args_list) == 1
    kwargs = mock_task.delay.call_args[1]
    assert kwargs['from_email'] == settings.FAB_FROM_EMAIL
    for body in (kwargs['text_body'], kwargs['html_body']):
        assert supplier.name in body
        assert 'http://great.gov.uk/verrrrify' in body
        assert 'http://help.zendesk.com' in body
@freeze_time('2016-12-16 19:11')
@pytest.mark.django_db
@patch('core.tasks.send_email')
def test_sends_ver_code_email_when_8_days_passed_but_not_to_the_minute(
    mock_task, settings
):
    """First reminder matches on whole days rather than to the minute.

    Both letters went out on 2016-12-08 (one just after midnight, one just
    before the following midnight); with "now" frozen 8 days later at
    19:11, both suppliers must still be picked up.

    NOTE(review): the two positional assertions rely on the job emailing
    suppliers in creation order — do not reorder the factory calls.
    """
    supplier2_verification_sent = datetime(2016, 12, 8, 23, 59, 59)
    supplier1 = SupplierFactory(
        company__verified_with_code=False,
        company__date_verification_letter_sent=datetime(2016, 12, 8, 0, 0, 1))
    supplier2 = SupplierFactory(
        company__verified_with_code=False,
        company__date_verification_letter_sent=supplier2_verification_sent)
    notifications.verification_code_not_given()
    assert len(mock_task.delay.call_args_list) == 2
    call_args = mock_task.delay.call_args_list
    assert call_args[0][1]['recipient_email'] == supplier1.company_email
    assert call_args[1][1]['recipient_email'] == supplier2.company_email
@freeze_time('2016-12-16 19:11')
@pytest.mark.django_db
@patch('core.tasks.send_email')
def test_sends_ver_code_email_when_16_days_passed_but_not_to_the_minute(
    mock_task, settings
):
    """Second reminder matches on whole days rather than to the minute.

    Both letters went out on 2016-11-30 (start and end of day); with
    "now" frozen 16 days later mid-evening, both suppliers — whose first
    reminder was long ago — must get the second email.

    NOTE(review): the two positional assertions rely on the job emailing
    suppliers in creation order — do not reorder the factory calls.
    """
    supplier2_verification_sent = datetime(2016, 11, 30, 23, 59, 59)
    supplier1 = SupplierFactory(
        company__verified_with_code=False,
        company__date_verification_letter_sent=datetime(2016, 11, 30, 0, 0, 1))
    supplier2 = SupplierFactory(
        company__verified_with_code=False,
        company__date_verification_letter_sent=supplier2_verification_sent)
    SupplierEmailNotificationFactory(
        supplier=supplier1,
        category='verification_code_not_given',
        date_sent=datetime(2016, 11, 8, 23, 59, 59)
    )
    SupplierEmailNotificationFactory(
        supplier=supplier2,
        category='verification_code_not_given',
        date_sent=datetime(2016, 11, 8, 23, 59, 59)
    )
    notifications.verification_code_not_given()
    assert mock_task.delay.call_count == 2
    assert len(mock_task.delay.call_args_list) == 2
    call_args = mock_task.delay.call_args_list
    assert call_args[0][1]['recipient_email'] == supplier1.company_email
    assert call_args[1][1]['recipient_email'] == supplier2.company_email
    # 2 notifications created in setup + 2 recorded by the job.
    assert SupplierEmailNotification.objects.all().count() == 4
@pytest.mark.django_db
@patch('core.tasks.send_email')
def test_doesnt_send_ver_code_email_if_email_already_sent(mock_task):
    """Neither reminder is re-sent once its notification record exists."""
    now = timezone.now()
    supplier_at_8_days = SupplierFactory(
        company__verified_with_code=False,
        company__date_verification_letter_sent=now - timedelta(days=8))
    supplier_at_16_days = SupplierFactory(
        company__verified_with_code=False,
        company__date_verification_letter_sent=now - timedelta(days=16))
    SupplierEmailNotificationFactory(
        supplier=supplier_at_8_days, category='verification_code_not_given')
    SupplierEmailNotificationFactory(
        supplier=supplier_at_16_days, category='verification_code_2nd_email')
    SupplierEmailNotificationFactory(
        supplier=supplier_at_16_days, category='verification_code_not_given',
        date_sent=now - timedelta(days=8))

    notifications.verification_code_not_given()

    assert mock_task.delay.called is False
    # what we created in data setup, no new obj created
    assert SupplierEmailNotification.objects.all().count() == 3
@pytest.mark.django_db
@patch('core.tasks.send_email')
def test_ver_code_email_uses_settings_for_no_of_days_and_subject_for_email1(
    mock_task, settings
):
    """First reminder honours VERIFICATION_CODE_NOT_GIVEN_DAYS."""
    expected_subject = email.VerificationWaitingNotification.subject
    settings.VERIFICATION_CODE_NOT_GIVEN_DAYS = 1
    settings.VERIFICATION_CODE_NOT_GIVEN_SUBJECT = 'bla bla'
    # With the threshold at 1 day only the 1-day-old letter qualifies;
    # the 8-day-old one falls outside the window.
    SupplierFactory(
        company__verified_with_code=False,
        company__date_verification_letter_sent=(
            timezone.now() - timedelta(days=8)))
    supplier = SupplierFactory(
        company__verified_with_code=False,
        company__date_verification_letter_sent=(
            timezone.now() - timedelta(days=1)))

    notifications.verification_code_not_given()

    assert len(mock_task.delay.call_args_list) == 1
    kwargs = mock_task.delay.call_args[1]
    assert kwargs['recipient_email'] == supplier.company_email
    assert kwargs['subject'] == expected_subject
@pytest.mark.django_db
@patch('core.tasks.send_email')
def test_ver_code_email_uses_settings_for_no_of_days_and_subject_for_email2(
    mock_task, settings
):
    """Second reminder honours VERIFICATION_CODE_NOT_GIVEN_DAYS_2ND_EMAIL."""
    expected_subject = email.VerificationStillWaitingNotification.subject
    settings.VERIFICATION_CODE_NOT_GIVEN_DAYS_2ND_EMAIL = 1
    settings.VERIFICATION_CODE_NOT_GIVEN_SUBJECT_2ND_EMAIL = 'bla bla'
    # With the threshold at 1 day the 16-day-old letter falls outside
    # the window; only the 1-day-old one qualifies.
    SupplierFactory(
        company__verified_with_code=False,
        company__date_verification_letter_sent=(
            timezone.now() - timedelta(days=16)))
    supplier = SupplierFactory(
        company__verified_with_code=False,
        company__date_verification_letter_sent=(
            timezone.now() - timedelta(days=1)))
    SupplierEmailNotificationFactory(
        supplier=supplier, category='verification_code_not_given',
        date_sent=timezone.now() - timedelta(days=8))
    mail.outbox = []  # reset after emails sent by signals

    notifications.verification_code_not_given()

    assert len(mock_task.delay.call_args_list) == 1
    kwargs = mock_task.delay.call_args[1]
    assert kwargs['recipient_email'] == supplier.company_email
    assert kwargs['subject'] == expected_subject
@pytest.mark.django_db
@patch('core.tasks.send_email')
def test_sends_ver_code_email_to_expected_users(mock_task):
    """Matrix test: verified suppliers and suppliers already sent the
    relevant reminder are skipped; the rest at the 8- or 16-day mark are
    emailed. Unrelated categories ('hasnt_logged_in') do not block.

    NOTE(review): the four positional assertions depend on the order in
    which the job iterates suppliers; the batches are created in a fixed
    order — do not reorder the factory calls.
    """
    eight_days_ago = timezone.now() - timedelta(days=8)
    twelve_days_ago = timezone.now() - timedelta(days=12)
    sixteen_days_ago = timezone.now() - timedelta(days=16)
    # Already verified: never emailed.
    SupplierFactory.create_batch(
        3, company__verified_with_code=True,
        company__date_verification_letter_sent=eight_days_ago)
    suppliers8 = SupplierFactory.create_batch(
        3, company__verified_with_code=False,
        company__date_verification_letter_sent=eight_days_ago)
    # 12 days is between the two reminder thresholds: no email.
    SupplierFactory.create_batch(
        3, company__verified_with_code=False,
        company__date_verification_letter_sent=twelve_days_ago)
    suppliers16 = SupplierFactory.create_batch(
        3, company__verified_with_code=False,
        company__date_verification_letter_sent=sixteen_days_ago)
    SupplierFactory.create_batch(
        3, company__verified_with_code=True,
        company__date_verification_letter_sent=sixteen_days_ago)
    # suppliers8[2] / suppliers16[2] already got the matching reminder.
    SupplierEmailNotificationFactory(
        supplier=suppliers8[2], category='verification_code_not_given')
    SupplierEmailNotificationFactory(
        supplier=suppliers16[2], category='verification_code_2nd_email')
    # All 16-day suppliers received the first reminder 8 days ago.
    for supplier in suppliers16:
        SupplierEmailNotificationFactory(
            supplier=supplier, category='verification_code_not_given',
            date_sent=eight_days_ago)
    # A different category must not suppress the verification reminder.
    SupplierEmailNotificationFactory(
        supplier=suppliers8[1], category='hasnt_logged_in')
    SupplierEmailNotificationFactory(
        supplier=suppliers16[1], category='hasnt_logged_in')
    notifications.verification_code_not_given()
    assert mock_task.delay.call_count == 4
    call_args = mock_task.delay.call_args_list
    assert len(call_args) == 4
    assert call_args[0][1]['recipient_email'] == suppliers8[1].company_email
    assert call_args[1][1]['recipient_email'] == suppliers8[0].company_email
    assert call_args[2][1]['recipient_email'] == suppliers16[1].company_email
    assert call_args[3][1]['recipient_email'] == suppliers16[0].company_email
    objs = SupplierEmailNotification.objects.all()
    # 7 notifications created in setup + 4 recorded by the job.
    assert objs.count() == 11
@freeze_time('2017-01-31 17:13:34')
@pytest.mark.django_db
@patch('core.tasks.send_email')
def test_sends_log_in_email_when_not_logged_in_for_30_days(mock_task):
    """Suppliers reported by the SSO API as having last logged in 30 days
    ago receive the "has not logged in recently" email."""
    expected_subject = email.HasNotLoggedInRecentlyNotification.subject
    suppliers = SupplierFactory.create_batch(3)
    api_response = MagicMock(
        json=MagicMock(return_value=[
            {'id': suppliers[1].sso_id, 'last_login': '2017-01-01T21:04:39Z'},
        ])
    )
    fake_api = MagicMock(return_value=api_response)
    with patch(LAST_LOGIN_API_METHOD, fake_api):
        notifications.hasnt_logged_in()

    # The API is queried for the whole day 30 days before the frozen now.
    fake_api.assert_called_once_with(
        start=datetime(2017, 1, 1, 0, 0, 0, 0),
        end=datetime(2017, 1, 1, 23, 59, 59, 999999)
    )
    assert len(mock_task.delay.call_args_list) == 1
    kwargs = mock_task.delay.call_args[1]
    assert kwargs['recipient_email'] == suppliers[1].company_email
    assert kwargs['subject'] == expected_subject
    assert suppliers[1].name in kwargs['text_body']
    assert suppliers[1].name in kwargs['html_body']
@freeze_time('2017-01-31 17:13:34')
@pytest.mark.django_db
@patch('core.tasks.send_email')
@patch('notifications.email.HasNotLoggedInRecentlyNotification.zendesk_url',
       PropertyMock(return_value='http://help.zendesk.com'))
def test_log_in_email_has_expected_vars_in_template(mock_task, settings):
    """Log-in reminder renders the supplier name, the login URL with the
    UTM parameters appended, and the zendesk URL in both bodies."""
    settings.HASNT_LOGGED_IN_URL = 'http://great.gov.uk/looooogin?next=a'
    settings.HASNT_LOGGED_IN_UTM = 'utm=1'
    expected_url = 'http://great.gov.uk/looooogin?next=a&utm=1'
    supplier = SupplierFactory()
    fake_api = MagicMock(
        return_value=MagicMock(
            json=MagicMock(return_value=[
                {'id': supplier.sso_id,
                 'last_login': '2017-01-01T21:04:39Z'},
            ])
        )
    )
    mail.outbox = []  # reset after emails sent by signals
    with patch(LAST_LOGIN_API_METHOD, fake_api):
        notifications.hasnt_logged_in()

    assert len(mock_task.delay.call_args_list) == 1
    kwargs = mock_task.delay.call_args[1]
    assert kwargs['from_email'] == settings.FAB_FROM_EMAIL
    for body in (kwargs['text_body'], kwargs['html_body']):
        assert supplier.name in body
        assert expected_url in body
        assert 'http://help.zendesk.com' in body
@freeze_time('2016-12-09 12:30:00')
@pytest.mark.django_db
@patch('core.tasks.send_email')
def test_doesnt_send_log_in_email_when_api_returns_no_users(mock_task):
    """Nothing is sent when the last-login API returns an empty list."""
    empty_api = MagicMock(
        return_value=MagicMock(json=MagicMock(return_value=[])))
    with patch(LAST_LOGIN_API_METHOD, empty_api):
        notifications.hasnt_logged_in()
    assert mock_task.delay.called is False
@freeze_time('2017-04-01 12:00:00')
@pytest.mark.django_db
@patch('core.tasks.send_email')
def test_log_in_email_uses_settings_for_no_of_days_and_subject(
    mock_task, settings):
    """HASNT_LOGGED_IN_DAYS controls which day's logins the API is asked
    for; the email keeps the class-level subject."""
    settings.HASNT_LOGGED_IN_DAYS = 1
    expected_subject = email.HasNotLoggedInRecentlyNotification.subject
    supplier = SupplierFactory()
    fake_api = MagicMock(
        return_value=MagicMock(
            json=MagicMock(return_value=[
                {'id': supplier.sso_id,
                 'last_login': '2017-03-31T01:54:15Z'},
            ])
        )
    )
    with patch(LAST_LOGIN_API_METHOD, fake_api):
        notifications.hasnt_logged_in()

    # With the window at 1 day the API is queried for all of yesterday.
    fake_api.assert_called_once_with(
        start=datetime(2017, 3, 31, 0, 0, 0, 0),
        end=datetime(2017, 3, 31, 23, 59, 59, 999999),
    )
    call_args = mock_task.delay.call_args_list
    assert len(call_args) == 1
    assert call_args[0][1]['subject'] == expected_subject
@freeze_time('2017-04-01 12:00:00')
@pytest.mark.django_db
@patch('core.tasks.send_email')
def test_doesnt_send_log_in_email_if_log_in_email_already_sent(mock_task):
    """A supplier who already received 'hasnt_logged_in' is skipped;
    other notification categories do not block the email."""
    suppliers = SupplierFactory.create_batch(2)
    SupplierEmailNotificationFactory(
        supplier=suppliers[0], category='no_case_studies')
    SupplierEmailNotificationFactory(
        supplier=suppliers[1], category='hasnt_logged_in')
    last_logins = [
        {'id': suppliers[0].sso_id, 'last_login': '2017-03-02T02:14:15Z'},
        {'id': suppliers[1].sso_id, 'last_login': '2017-03-02T13:18:15Z'},
    ]
    fake_api = MagicMock(
        return_value=MagicMock(json=MagicMock(return_value=last_logins)))
    with patch(LAST_LOGIN_API_METHOD, fake_api):
        notifications.hasnt_logged_in()

    assert mock_task.delay.call_count == 1
    call_args = mock_task.delay.call_args_list
    assert len(call_args) == 1
    assert call_args[0][1]['recipient_email'] == suppliers[0].company_email
    # 2 notifications created in setup + 1 recorded by the job.
    assert SupplierEmailNotification.objects.all().count() == 3
@freeze_time('2017-04-01 12:00:00')
@pytest.mark.django_db
@patch('core.tasks.send_email')
def test_sends_log_in_email_to_expected_users(mock_task, settings):
    """Only suppliers in the API response who have not already received
    'hasnt_logged_in' are emailed; unrelated categories don't block, and
    the unsubscribe URL appears in the text body.

    NOTE(review): the positional assertions depend on the order of
    entries in the mocked API response — keep it stable.
    """
    suppliers = SupplierFactory.create_batch(4)
    # suppliers[3] is absent from the API response, so never emailed.
    mocked_json = [
        {'id': suppliers[0].sso_id, 'last_login': '2017-03-02T02:14:15Z'},
        {'id': suppliers[1].sso_id, 'last_login': '2017-03-02T13:18:15Z'},
        {'id': suppliers[2].sso_id, 'last_login': '2017-03-02T15:43:15Z'},
    ]
    mocked_api = MagicMock(
        return_value=MagicMock(
            json=MagicMock(return_value=mocked_json)
        )
    )
    # suppliers[1]'s unrelated notification must not block the email;
    # suppliers[0] already got 'hasnt_logged_in' and is skipped.
    SupplierEmailNotificationFactory(
        supplier=suppliers[1], category='no_case_studies')
    SupplierEmailNotificationFactory(
        supplier=suppliers[0], category='hasnt_logged_in')
    with patch(LAST_LOGIN_API_METHOD, mocked_api):
        notifications.hasnt_logged_in()
    assert mock_task.delay.call_count == 2
    call_args = mock_task.delay.call_args_list
    assert len(call_args) == 2
    assert call_args[0][1]['recipient_email'] == suppliers[1].company_email
    assert call_args[1][1]['recipient_email'] == suppliers[2].company_email
    expected_url = settings.FAB_NOTIFICATIONS_UNSUBSCRIBE_URL
    assert expected_url in call_args[0][1]['text_body']
    objs = SupplierEmailNotification.objects.all()
    # 2 notifications created in setup + 2 recorded by the job.
    assert objs.count() == 4
@freeze_time()
@pytest.mark.django_db
@patch('core.tasks.send_email')
def test_new_companies_in_sector(mock_task, settings):
    """Each buyer is told about companies newly published in their own
    sector within the frequency window — not older companies and not
    companies from other sectors."""
    settings.NEW_COMPANIES_IN_SECTOR_FREQUENCY_DAYS = 3
    expected_subject = email.NewCompaniesInSectorNotification.subject
    published_in_window = datetime.utcnow() - timedelta(days=3)
    published_before_window = datetime.utcnow() - timedelta(days=4)
    buyer_one = BuyerFactory.create(sector='AEROSPACE')
    buyer_two = BuyerFactory.create(sector='AEROSPACE')
    buyer_three = BuyerFactory.create(sector='CONSTRUCTION')
    company_one = CompanyFactory(
        sectors=['AEROSPACE'], date_published=published_in_window,
    )
    company_two = CompanyFactory(
        sectors=['AEROSPACE'], date_published=published_before_window,
    )
    company_three = CompanyFactory(
        sectors=['CONSTRUCTION'], date_published=published_in_window,
    )

    notifications.new_companies_in_sector()

    call_args_list = mock_task.delay.call_args_list
    assert len(call_args_list) == 3

    def email_kwargs_for(buyer):
        # kwargs of the (single) email addressed to this buyer
        return next(
            call[1] for call in call_args_list
            if call[1]['recipient_email'] == buyer.email
        )

    email_one = email_kwargs_for(buyer_one)
    email_two = email_kwargs_for(buyer_two)
    email_three = email_kwargs_for(buyer_three)

    assert email_one['recipient_email'] == buyer_one.email
    assert email_one['subject'] == expected_subject
    assert company_one.name in email_one['text_body']
    assert company_two.name not in email_one['text_body']

    assert email_two['recipient_email'] == buyer_two.email
    assert email_two['subject'] == expected_subject
    assert company_one.name in email_two['text_body']
    assert company_two.name not in email_two['text_body']
    assert company_three.name not in email_two['text_body']

    assert email_three['recipient_email'] == buyer_three.email
    assert email_three['subject'] == expected_subject
    assert company_one.name not in email_three['text_body']
    assert company_two.name not in email_three['text_body']
    assert company_three.name in email_three['text_body']
@freeze_time()
@pytest.mark.django_db
@patch('core.tasks.send_email')
def test_new_companies_in_sector_exclude_unsubscribed(mock_task, settings):
    """Buyers with an anonymous unsubscribe record receive no sector email.

    Fixes the misspelled test name ("unsbscribed" -> "unsubscribed");
    nothing references tests by name, so pytest discovery is unaffected.
    """
    settings.NEW_COMPANIES_IN_SECTOR_FREQUENCY_DAYS = 3
    settings.NEW_COMPANIES_IN_SECTOR_SUBJECT = 'test subject'
    days_ago_three = datetime.utcnow() - timedelta(days=3)
    buyer_one = BuyerFactory.create(sector='AEROSPACE')
    buyer_two = BuyerFactory.create(sector='AEROSPACE')
    # buyer_two has opted out, so only buyer_one should be emailed.
    AnonymousUnsubscribeFactory(email=buyer_two.email)
    CompanyFactory(sectors=['AEROSPACE'], date_published=days_ago_three)
    notifications.new_companies_in_sector()
    assert len(mock_task.delay.call_args_list) == 1
    call_args = mock_task.delay.call_args[1]
    assert call_args['recipient_email'] == buyer_one.email
    assert call_args['from_email'] == settings.FAS_FROM_EMAIL
@freeze_time()
@pytest.mark.django_db
@patch('core.tasks.send_email')
def test_new_companies_in_sector_exclude_suppliers_without_companies(
    mock_task, settings
):
    """No email is sent when no company was published in the sector.

    NOTE(review): despite the name, this exercises "no new companies in
    the buyer's sector", not suppliers without companies.
    """
    settings.NEW_COMPANIES_IN_SECTOR_FREQUENCY_DAYS = 3
    settings.NEW_COMPANIES_IN_SECTOR_SUBJECT = 'test subject'
    BuyerFactory.create(sector='AEROSPACE')

    notifications.new_companies_in_sector()

    assert mock_task.delay.called is False
@freeze_time()
@pytest.mark.django_db
@patch('core.tasks.send_email')
def test_new_companies_in_sector_exclude_already_sent_recently(
    mock_task, settings):
    """A buyer notified within the frequency window is not emailed again."""
    settings.NEW_COMPANIES_IN_SECTOR_FREQUENCY_DAYS = 3
    settings.NEW_COMPANIES_IN_SECTOR_SUBJECT = 'test subject'
    three_days_ago = datetime.utcnow() - timedelta(days=3)
    buyer_one = BuyerFactory.create(sector='AEROSPACE')
    buyer_two = BuyerFactory.create(sector='AEROSPACE')
    # Backdate buyer_two's previous notification to the window boundary.
    recent_notification = AnonymousEmailNotificationFactory(
        email=buyer_two.email)
    recent_notification.date_sent = three_days_ago
    recent_notification.save()
    CompanyFactory(sectors=['AEROSPACE'], date_published=three_days_ago)

    notifications.new_companies_in_sector()

    assert len(mock_task.delay.call_args_list) == 1
    assert mock_task.delay.call_args[1]['recipient_email'] == buyer_one.email
@freeze_time()
@pytest.mark.django_db
@patch('core.tasks.send_email')
def test_new_companies_in_sector_include_already_sent_long_time_ago(
    mock_task, settings):
    """A notification older than the frequency window does not block."""
    settings.NEW_COMPANIES_IN_SECTOR_FREQUENCY_DAYS = 3
    settings.NEW_COMPANIES_IN_SECTOR_SUBJECT = 'test subject'
    three_days_ago = datetime.utcnow() - timedelta(days=3)
    four_days_ago = datetime.utcnow() - timedelta(days=4)
    buyer = BuyerFactory.create(sector='AEROSPACE')
    # Previous notification predates the 3-day window, so it is ignored.
    stale_notification = AnonymousEmailNotificationFactory(email=buyer.email)
    stale_notification.date_sent = four_days_ago
    stale_notification.save()
    CompanyFactory(sectors=['AEROSPACE'], date_published=three_days_ago)

    notifications.new_companies_in_sector()

    assert len(mock_task.delay.call_args_list) == 1
    assert mock_task.delay.call_args[1]['recipient_email'] == buyer.email
@freeze_time()
@pytest.mark.django_db
@patch('core.tasks.send_email')
def test_new_companies_in_sector_records_notification(mock_task, settings):
    """A matching buyer gets exactly one email addressed to them."""
    settings.NEW_COMPANIES_IN_SECTOR_FREQUENCY_DAYS = 3
    three_days_ago = datetime.utcnow() - timedelta(days=3)
    buyer = BuyerFactory.create(sector='AEROSPACE')
    CompanyFactory(sectors=['AEROSPACE'], date_published=three_days_ago)

    notifications.new_companies_in_sector()

    assert len(mock_task.delay.call_args_list) == 1
    kwargs = mock_task.delay.call_args[1]
    assert kwargs['recipient_email'] == buyer.email
@freeze_time()
@pytest.mark.django_db
@patch('core.tasks.send_email')
def test_new_companies_in_sector_single_email_per_buyer(mock_task, settings):
    """Two buyer records sharing one address get a single combined email
    listing the new companies from both sectors."""
    settings.NEW_COMPANIES_IN_SECTOR_FREQUENCY_DAYS = 3
    three_days_ago = datetime.utcnow() - timedelta(days=3)
    buyer = BuyerFactory.create(sector='AEROSPACE', email='jim@example.com')
    BuyerFactory.create(sector='AIRPORTS', email='jim@example.com')
    aerospace_company = CompanyFactory(
        sectors=['AEROSPACE'], date_published=three_days_ago
    )
    airports_company = CompanyFactory(
        sectors=['AIRPORTS'], date_published=three_days_ago
    )

    notifications.new_companies_in_sector()

    assert len(mock_task.delay.call_args_list) == 1
    kwargs = mock_task.delay.call_args[1]
    assert kwargs['recipient_email'] == buyer.email
    assert aerospace_company.name in kwargs['text_body']
    assert airports_company.name in kwargs['text_body']
@freeze_time()
@pytest.mark.django_db
@patch('core.tasks.send_email')
def test_new_companies_in_sector_company_multiple_sectors(mock_task, settings):
    """A company spanning both subscribed sectors still appears in the one
    combined email, together with the unsubscribe link."""
    settings.NEW_COMPANIES_IN_SECTOR_FREQUENCY_DAYS = 3
    three_days_ago = datetime.utcnow() - timedelta(days=3)
    BuyerFactory.create(sector='AEROSPACE', email='jim@example.com')
    BuyerFactory.create(sector='AIRPORTS', email='jim@example.com')
    multi_sector_company = CompanyFactory(
        sectors=['AEROSPACE', 'AIRPORTS'], date_published=three_days_ago
    )
    airports_company = CompanyFactory(
        sectors=['AIRPORTS'], date_published=three_days_ago
    )
    unsubscribe_url = (
        'http://supplier.trade.great:8005/unsubscribe?email='
        'jim%40example.com%3A2Kkc4EAEos2htrZXeLj73CSVBWA'
    )

    notifications.new_companies_in_sector()

    assert len(mock_task.delay.call_args_list) == 1
    text_body = mock_task.delay.call_args[1]['text_body']
    assert multi_sector_company.name in text_body
    assert airports_company.name in text_body
    assert unsubscribe_url in text_body
@pytest.mark.django_db
@patch('core.tasks.send_email')
def test_supplier_unsubscribed(mock_task):
    """Unsubscribe confirmation goes to the supplier and names them."""
    supplier = SupplierFactory()
    notifications.supplier_unsubscribed(supplier)
    assert len(mock_task.delay.call_args_list) == 1
    kwargs = mock_task.delay.call_args[1]
    assert kwargs['recipient_email'] == supplier.company_email
    assert supplier.name in kwargs['text_body']
@pytest.mark.django_db
@patch('core.tasks.send_email')
def test_anonymous_unsubscribed(mock_task):
    """Anonymous unsubscribe confirmation goes to the given address."""
    notifications.anonymous_unsubscribed(recipient_email='jim@example.com')
    assert len(mock_task.delay.call_args_list) == 1
    kwargs = mock_task.delay.call_args[1]
    assert kwargs['recipient_email'] == 'jim@example.com'
| 38.255428
| 79
| 0.745443
| 3,865
| 29,954
| 5.397671
| 0.065977
| 0.048318
| 0.034273
| 0.041559
| 0.881411
| 0.84618
| 0.808743
| 0.761384
| 0.712731
| 0.668201
| 0
| 0.024174
| 0.153435
| 29,954
| 782
| 80
| 38.304348
| 0.798525
| 0.004974
| 0
| 0.649311
| 0
| 0
| 0.117852
| 0.042752
| 0
| 0
| 0
| 0
| 0.182236
| 1
| 0.041348
| false
| 0.003063
| 0.018377
| 0
| 0.059724
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d4de281f3061eece69c0327324f7a63c2dd087c7
| 10,037
|
py
|
Python
|
understat/understat.py
|
arkjinli/understat
|
09d8d839f07d433e5eefde2592f570986661ac93
|
[
"MIT"
] | null | null | null |
understat/understat.py
|
arkjinli/understat
|
09d8d839f07d433e5eefde2592f570986661ac93
|
[
"MIT"
] | null | null | null |
understat/understat.py
|
arkjinli/understat
|
09d8d839f07d433e5eefde2592f570986661ac93
|
[
"MIT"
] | null | null | null |
from understat.constants import BASE_URL, LEAGUE_URL, PLAYER_URL, TEAM_URL
from understat.utils import (filter_by_positions, filter_data, get_data,
to_league_name)
class Understat():
    """Asynchronous client for understat.com data.

    Every ``get_*`` coroutine downloads a page via :func:`get_data` and
    optionally filters the extracted JSON. As before, a truthy ``options``
    dict takes precedence over individual keyword-argument filters.
    """

    def __init__(self, session):
        # HTTP session used for all page downloads.
        self.session = session

    @staticmethod
    def _select(data, options, kwargs):
        """Filter *data* by *options* when truthy, else by *kwargs*.

        Centralises the ``if options: kwargs = options`` idiom that was
        duplicated in every method.
        """
        return filter_data(data, options if options else kwargs)

    @staticmethod
    def _league_url(league_name, season):
        """Build the league overview URL for a (league, season) pair."""
        return LEAGUE_URL.format(to_league_name(league_name), season)

    @staticmethod
    def _team_url(team_name, season):
        """Build the team page URL; Understat replaces spaces with '_'."""
        return TEAM_URL.format(team_name.replace(" ", "_"), season)

    async def get_stats(self, options=None, **kwargs):
        """Return stats of every league, grouped by month.

        :param options: Options to filter the data by, defaults to None.
        :type options: dict, optional
        :return: List of dictionaries.
        :rtype: list
        """
        stats = await get_data(self.session, BASE_URL, "statData")
        return self._select(stats, options, kwargs)

    async def get_teams(self, league_name, season, options=None, **kwargs):
        """Return information about all teams in the given league/season.

        :param league_name: The league's name.
        :type league_name: str
        :param season: The season.
        :type season: str or int
        :param options: Options to filter the data by, defaults to None.
        :type options: dict, optional
        :return: The league's table as seen on Understat's league overview.
        :rtype: list
        """
        url = self._league_url(league_name, season)
        teams_data = await get_data(self.session, url, "teamsData")
        # teamsData is keyed by team id; callers expect a plain list.
        return self._select(list(teams_data.values()), options, kwargs)

    async def get_league_players(
            self, league_name, season, options=None, **kwargs):
        """Return information about all players in the given league/season.

        :param league_name: The league's name.
        :type league_name: str
        :param season: The season.
        :type season: str or int
        :param options: Options to filter the data by, defaults to None.
        :type options: dict, optional
        :return: Players as seen on Understat's league overview.
        :rtype: list
        """
        url = self._league_url(league_name, season)
        players_data = await get_data(self.session, url, "playersData")
        return self._select(players_data, options, kwargs)

    async def get_league_results(
            self, league_name, season, options=None, **kwargs):
        """Return all played matches in the given league/season.

        :param league_name: The league's name.
        :type league_name: str
        :param season: The season.
        :type season: str or int
        :param options: Options to filter the data by, defaults to None.
        :type options: dict, optional
        :return: Results as seen on Understat's league overview.
        :rtype: list
        """
        url = self._league_url(league_name, season)
        dates_data = await get_data(self.session, url, "datesData")
        results = [fixture for fixture in dates_data if fixture["isResult"]]
        return self._select(results, options, kwargs)

    async def get_league_fixtures(
            self, league_name, season, options=None, **kwargs):
        """Return all upcoming fixtures of the given league/season.

        :param league_name: The league's name.
        :type league_name: str
        :param season: The season.
        :type season: str or int
        :param options: Options to filter the data by, defaults to None.
        :type options: dict, optional
        :return: Fixtures as seen on Understat's league overview.
        :rtype: list
        """
        url = self._league_url(league_name, season)
        dates_data = await get_data(self.session, url, "datesData")
        upcoming = [
            fixture for fixture in dates_data if not fixture["isResult"]]
        return self._select(upcoming, options, kwargs)

    async def get_player_shots(self, player_id, options=None, **kwargs):
        """Return the shot data of the player with the given ID.

        :param player_id: The player's Understat ID.
        :type player_id: int or str
        :param options: Options to filter the data by, defaults to None.
        :type options: dict, optional
        :return: List of the player's shot data.
        :rtype: list
        """
        url = PLAYER_URL.format(player_id)
        shots_data = await get_data(self.session, url, "shotsData")
        return self._select(shots_data, options, kwargs)

    async def get_player_matches(self, player_id, options=None, **kwargs):
        """Return the matches data of the player with the given ID.

        :param player_id: The player's Understat ID.
        :type player_id: int or str
        :param options: Options to filter the data by, defaults to None.
        :type options: dict, optional
        :return: List of the player's matches data.
        :rtype: list
        """
        url = PLAYER_URL.format(player_id)
        matches_data = await get_data(self.session, url, "matchesData")
        return self._select(matches_data, options, kwargs)

    async def get_player_stats(self, player_id, positions=None):
        """Return the player's min / max stats, per position(s).

        :param player_id: The player's Understat ID.
        :type player_id: int or str
        :param positions: Positions to filter the data by, defaults to None.
        :type positions: list, optional
        :return: List of the player's stats per position.
        :rtype: list
        """
        url = PLAYER_URL.format(player_id)
        player_stats = await get_data(self.session, url, "minMaxPlayerStats")
        return filter_by_positions(player_stats, positions)

    async def get_player_grouped_stats(self, player_id):
        """Return the player's grouped stats (top of their player page).

        :param player_id: The player's Understat ID.
        :type player_id: int or str
        :return: Dictionary of the player's grouped stats.
        :rtype: dict
        """
        url = PLAYER_URL.format(player_id)
        return await get_data(self.session, url, "groupsData")

    async def get_team_stats(self, team_name, season):
        """Return a team's stats, as seen on their Understat page.

        :param team_name: A team's name, e.g. Manchester United.
        :type team_name: str
        :param season: A season / year, e.g. 2018.
        :type season: int or str
        :return: A dictionary containing a team's stats.
        :rtype: dict
        """
        url = self._team_url(team_name, season)
        return await get_data(self.session, url, "statisticsData")

    async def get_team_results(
            self, team_name, season, options=None, **kwargs):
        """Return a team's played matches in the given season.

        :param team_name: A team's name.
        :type team_name: str
        :param season: The season.
        :type season: int or str
        :param options: Options to filter the data by, defaults to None.
        :type options: dict, optional
        :return: List of the team's results in the given season.
        :rtype: list
        """
        url = self._team_url(team_name, season)
        dates_data = await get_data(self.session, url, "datesData")
        results = [fixture for fixture in dates_data if fixture["isResult"]]
        return self._select(results, options, kwargs)

    async def get_team_fixtures(
            self, team_name, season, options=None, **kwargs):
        """Return a team's upcoming fixtures in the given season.

        :param team_name: A team's name.
        :type team_name: str
        :param season: The season.
        :type season: int or str
        :param options: Options to filter the data by, defaults to None.
        :type options: dict, optional
        :return: List of the team's upcoming fixtures in the given season.
        :rtype: list
        """
        url = self._team_url(team_name, season)
        dates_data = await get_data(self.session, url, "datesData")
        upcoming = [
            fixture for fixture in dates_data if not fixture["isResult"]]
        return self._select(upcoming, options, kwargs)

    async def get_team_players(
            self, team_name, season, options=None, **kwargs):
        """Return a team's player statistics in the given season.

        :param team_name: A team's name.
        :type team_name: str
        :param season: The season.
        :type season: int or str
        :param options: Options to filter the data by, defaults to None.
        :type options: dict, optional
        :return: List of the team's players' statistics in the season.
        :rtype: list
        """
        url = self._team_url(team_name, season)
        players_data = await get_data(self.session, url, "playersData")
        return self._select(players_data, options, kwargs)
| 34.255973
| 79
| 0.627578
| 1,318
| 10,037
| 4.640364
| 0.081184
| 0.034336
| 0.022891
| 0.034009
| 0.836494
| 0.826357
| 0.808371
| 0.769294
| 0.704872
| 0.671517
| 0
| 0.000562
| 0.291422
| 10,037
| 292
| 80
| 34.373288
| 0.859393
| 0
| 0
| 0.673469
| 0
| 0
| 0.036229
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.010204
| false
| 0
| 0.020408
| 0
| 0.173469
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
078e2fd420e9fce3ec8b1d26c6ee8124b1fe1ab9
| 33
|
py
|
Python
|
cupy_alias/cudnn.py
|
fixstars/clpy
|
693485f85397cc110fa45803c36c30c24c297df0
|
[
"BSD-3-Clause"
] | 142
|
2018-06-07T07:43:10.000Z
|
2021-10-30T21:06:32.000Z
|
cupy_alias/cudnn.py
|
fixstars/clpy
|
693485f85397cc110fa45803c36c30c24c297df0
|
[
"BSD-3-Clause"
] | 282
|
2018-06-07T08:35:03.000Z
|
2021-03-31T03:14:32.000Z
|
cupy_alias/cudnn.py
|
fixstars/clpy
|
693485f85397cc110fa45803c36c30c24c297df0
|
[
"BSD-3-Clause"
] | 19
|
2018-06-19T11:07:53.000Z
|
2021-05-13T20:57:04.000Z
|
from clpy.cudnn import * # NOQA
| 16.5
| 32
| 0.69697
| 5
| 33
| 4.6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.212121
| 33
| 1
| 33
| 33
| 0.884615
| 0.121212
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
079660efc9c7224851ad1cac13a799845dafdbcf
| 189
|
py
|
Python
|
common/src/autogluon/common/__init__.py
|
daobook/autogluon
|
7309118f2ab1c9519f25acf61a283a95af95842b
|
[
"Apache-2.0"
] | 1
|
2022-02-19T13:22:26.000Z
|
2022-02-19T13:22:26.000Z
|
common/src/autogluon/common/__init__.py
|
daobook/autogluon
|
7309118f2ab1c9519f25acf61a283a95af95842b
|
[
"Apache-2.0"
] | 3
|
2021-12-30T20:28:01.000Z
|
2022-02-09T20:19:21.000Z
|
common/src/autogluon/common/__init__.py
|
engsarah365/autogluon
|
bdbaac2d13d14d075b7aa751561f0bbd39927789
|
[
"Apache-2.0"
] | null | null | null |
from .version import __version__
# Fixes logger in Kaggle to show logs in notebook.
from .utils.log_utils import fix_logging_if_kaggle as __fix_logging_if_kaggle
# Applied once at import time; the double-underscore alias keeps the helper
# out of the package's public namespace (and out of `from ... import *`).
__fix_logging_if_kaggle()
| 31.5
| 77
| 0.846561
| 31
| 189
| 4.580645
| 0.548387
| 0.211268
| 0.253521
| 0.380282
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.116402
| 189
| 5
| 78
| 37.8
| 0.850299
| 0.253968
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
07a42b5dff5707142dd908ead0681eb0d7c4227b
| 11,372
|
py
|
Python
|
tests/cupy_tests/statics_tests/test_meanvar.py
|
casheera/cupy
|
c670b6f6f1282665c9985180c10f498d8591f909
|
[
"MIT"
] | null | null | null |
tests/cupy_tests/statics_tests/test_meanvar.py
|
casheera/cupy
|
c670b6f6f1282665c9985180c10f498d8591f909
|
[
"MIT"
] | null | null | null |
tests/cupy_tests/statics_tests/test_meanvar.py
|
casheera/cupy
|
c670b6f6f1282665c9985180c10f498d8591f909
|
[
"MIT"
] | null | null | null |
import unittest
import pytest
import numpy
import cupy
from cupy import testing
# Reusable pytest mark that silences RuntimeWarning during a test
# (e.g. numpy's "Mean of empty slice" when a whole row/column is NaN).
ignore_runtime_warnings = pytest.mark.filterwarnings(
    "ignore", category=RuntimeWarning)
@testing.gpu
class TestAverage(unittest.TestCase):
    """Checks cupy.average against numpy.average via the testing helpers."""

    _multiprocess_can_split_ = True

    @testing.for_all_dtypes()
    @testing.numpy_cupy_allclose()
    def test_average_all(self, xp, dtype):
        arr = testing.shaped_arange((2, 3), xp, dtype)
        return xp.average(arr)

    @testing.for_all_dtypes()
    @testing.numpy_cupy_allclose()
    def test_average_axis(self, xp, dtype):
        arr = testing.shaped_arange((2, 3, 4), xp, dtype)
        return xp.average(arr, axis=1)

    @testing.for_all_dtypes()
    @testing.numpy_cupy_allclose()
    def test_average_weights(self, xp, dtype):
        arr = testing.shaped_arange((2, 3), xp, dtype)
        wgt = testing.shaped_arange((2, 3), xp, dtype)
        return xp.average(arr, weights=wgt)

    @testing.for_all_dtypes()
    @testing.numpy_cupy_allclose()
    def test_average_axis_weights(self, xp, dtype):
        arr = testing.shaped_arange((2, 3, 4), xp, dtype)
        wgt = testing.shaped_arange((2, 3, 4), xp, dtype)
        return xp.average(arr, axis=2, weights=wgt)

    def check_returned(self, a, axis, weights):
        # Reference result on CPU, candidate on GPU; both with returned=True,
        # which makes average() hand back (average, sum_of_weights).
        average_cpu, sum_weights_cpu = numpy.average(
            a, axis, weights, returned=True)
        result = cupy.average(
            cupy.asarray(a), axis, weights, returned=True)
        self.assertTrue(isinstance(result, tuple))
        self.assertEqual(len(result), 2)
        average_gpu, sum_weights_gpu = result
        testing.assert_allclose(average_cpu, average_gpu)
        testing.assert_allclose(sum_weights_cpu, sum_weights_gpu)

    @testing.for_all_dtypes()
    def test_returned(self, dtype):
        arr = testing.shaped_arange((2, 3), numpy, dtype)
        wgt = testing.shaped_arange((2, 3), numpy, dtype)
        # Cover axis-only, weights-only, and combined cases.
        for axis, weights in ((1, None), (None, wgt), (1, wgt)):
            self.check_returned(arr, axis=axis, weights=weights)
@testing.gpu
class TestMeanVar(unittest.TestCase):
    """mean/var/std, both as ndarray methods and module functions, vs numpy."""

    def _small(self, xp, dtype):
        # 2-D fixture shared by the whole-array reductions.
        return testing.shaped_arange((2, 3), xp, dtype)

    def _cube(self, xp, dtype):
        # 3-D fixture shared by the per-axis reductions.
        return testing.shaped_arange((2, 3, 4), xp, dtype)

    @testing.for_all_dtypes()
    @testing.numpy_cupy_allclose()
    def test_mean_all(self, xp, dtype):
        return self._small(xp, dtype).mean()

    @testing.for_all_dtypes()
    @testing.numpy_cupy_allclose()
    def test_external_mean_all(self, xp, dtype):
        return xp.mean(self._small(xp, dtype))

    @testing.for_all_dtypes()
    @testing.numpy_cupy_allclose()
    def test_mean_axis(self, xp, dtype):
        return self._cube(xp, dtype).mean(axis=1)

    @testing.for_all_dtypes()
    @testing.numpy_cupy_allclose()
    def test_external_mean_axis(self, xp, dtype):
        return xp.mean(self._cube(xp, dtype), axis=1)

    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose()
    def test_var_all(self, xp, dtype):
        return self._small(xp, dtype).var()

    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose()
    def test_external_var_all(self, xp, dtype):
        return xp.var(self._small(xp, dtype))

    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose()
    def test_var_all_ddof(self, xp, dtype):
        return self._small(xp, dtype).var(ddof=1)

    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose()
    def test_external_var_all_ddof(self, xp, dtype):
        return xp.var(self._small(xp, dtype), ddof=1)

    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose()
    def test_var_axis(self, xp, dtype):
        return self._cube(xp, dtype).var(axis=1)

    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose()
    def test_external_var_axis(self, xp, dtype):
        return xp.var(self._cube(xp, dtype), axis=1)

    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose()
    def test_var_axis_ddof(self, xp, dtype):
        return self._cube(xp, dtype).var(axis=1, ddof=1)

    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose()
    def test_external_var_axis_ddof(self, xp, dtype):
        return xp.var(self._cube(xp, dtype), axis=1, ddof=1)

    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose()
    def test_std_all(self, xp, dtype):
        return self._small(xp, dtype).std()

    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose()
    def test_external_std_all(self, xp, dtype):
        return xp.std(self._small(xp, dtype))

    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose()
    def test_std_all_ddof(self, xp, dtype):
        return self._small(xp, dtype).std(ddof=1)

    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose()
    def test_external_std_all_ddof(self, xp, dtype):
        return xp.std(self._small(xp, dtype), ddof=1)

    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose()
    def test_std_axis(self, xp, dtype):
        return self._cube(xp, dtype).std(axis=1)

    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose()
    def test_external_std_axis(self, xp, dtype):
        return xp.std(self._cube(xp, dtype), axis=1)

    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose()
    def test_std_axis_ddof(self, xp, dtype):
        return self._cube(xp, dtype).std(axis=1, ddof=1)

    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose()
    def test_external_std_axis_ddof(self, xp, dtype):
        return xp.std(self._cube(xp, dtype), axis=1, ddof=1)
@testing.parameterize(
    *testing.product({
        'shape': [(3, 4), (30, 40, 50)],
        'axis': [None, 0, 1],
        'keepdims': [True, False]
    })
)
@testing.gpu
class TestNanMean(unittest.TestCase):
    """nanmean over parameterized shape/axis/keepdims, with and without NaNs."""

    @testing.for_all_dtypes(no_float16=True)
    @testing.numpy_cupy_allclose(rtol=1e-6)
    def test_nanmean_without_nan(self, xp, dtype):
        data = testing.shaped_random(self.shape, xp, dtype)
        return xp.nanmean(data, axis=self.axis, keepdims=self.keepdims)

    @ignore_runtime_warnings
    @testing.for_all_dtypes(no_float16=True)
    @testing.numpy_cupy_allclose(rtol=1e-6)
    def test_nanmean_with_nan_float(self, xp, dtype):
        data = testing.shaped_random(self.shape, xp, dtype)
        # Integer-like dtypes (bool/int/uint) cannot represent NaN, so only
        # poison a row and a column for float/complex inputs.
        if data.dtype.kind not in 'biu':
            data[1, :] = xp.nan
            data[:, 3] = xp.nan
        return xp.nanmean(data, axis=self.axis, keepdims=self.keepdims)
@testing.gpu
class TestNanMeanAdditional(unittest.TestCase):
    """nanmean edge cases: out= argument, huge inputs, float16, all-NaN."""

    @ignore_runtime_warnings
    @testing.for_all_dtypes(no_float16=True)
    @testing.numpy_cupy_allclose(rtol=1e-6)
    def test_nanmean_out(self, xp, dtype):
        data = testing.shaped_random((10, 20, 30), xp, dtype)
        out = xp.zeros((20, 30), dtype=dtype)
        if data.dtype.kind not in 'biu':
            data[1, :] = xp.nan
            data[:, 3] = xp.nan
        xp.nanmean(data, axis=0, out=out)
        return out

    @testing.slow
    @testing.for_all_dtypes(no_float16=True)
    @testing.numpy_cupy_allclose(rtol=1e-6)
    def test_nanmean_huge(self, xp, dtype):
        data = testing.shaped_random((1024, 512), xp, dtype)
        if data.dtype.kind not in 'biu':
            data[:512, :256] = xp.nan
        return xp.nanmean(data, axis=1)

    @testing.numpy_cupy_allclose(rtol=1e-4)
    def test_nanmean_float16(self, xp):
        data = testing.shaped_arange((2, 3), xp, numpy.float16)
        data[0][0] = xp.nan
        return xp.nanmean(data)

    @ignore_runtime_warnings
    @testing.numpy_cupy_allclose(rtol=1e-6)
    def test_nanmean_all_nan(self, xp):
        data = xp.zeros((3, 4))
        data[:] = xp.nan
        return xp.nanmean(data)
@testing.parameterize(
    *testing.product({
        'shape': [(3, 4), (4, 3, 5)],
        'axis': [None, 0, 1],
        'keepdims': [True, False],
        'ddof': [0, 1]
    }))
@testing.gpu
class TestNanVarStd(unittest.TestCase):
    """nanvar/nanstd over parameterized shape/axis/keepdims/ddof."""

    def _nan_poisoned(self, xp, dtype):
        # Random input with the first row set to NaN for float dtypes;
        # integer-like dtypes (bool/int/uint) cannot represent NaN.
        data = testing.shaped_random(self.shape, xp, dtype=dtype)
        if data.dtype.kind not in 'biu':
            data[0, :] = xp.nan
        return data

    @ignore_runtime_warnings
    @testing.for_all_dtypes(no_float16=True, no_complex=True)
    @testing.numpy_cupy_allclose(rtol=1e-6)
    def test_nanvar(self, xp, dtype):
        data = self._nan_poisoned(xp, dtype)
        return xp.nanvar(
            data, axis=self.axis, ddof=self.ddof, keepdims=self.keepdims)

    @ignore_runtime_warnings
    @testing.for_all_dtypes(no_float16=True, no_complex=True)
    @testing.numpy_cupy_allclose(rtol=1e-6)
    def test_nanstd(self, xp, dtype):
        data = self._nan_poisoned(xp, dtype)
        return xp.nanstd(
            data, axis=self.axis, ddof=self.ddof, keepdims=self.keepdims)
@testing.gpu
class TestNanVarStdAdditional(unittest.TestCase):
    """nanvar/nanstd edge cases: out= argument, huge inputs, float16."""

    @ignore_runtime_warnings
    @testing.for_all_dtypes(no_float16=True, no_complex=True)
    @testing.numpy_cupy_allclose(rtol=1e-6)
    def test_nanvar_out(self, xp, dtype):
        data = testing.shaped_random((10, 20, 30), xp, dtype)
        out = xp.zeros((20, 30))
        if data.dtype.kind not in 'biu':
            data[1, :] = xp.nan
            data[:, 3] = xp.nan
        xp.nanvar(data, axis=0, out=out)
        return out

    @testing.slow
    @testing.for_all_dtypes(no_float16=True, no_complex=True)
    @testing.numpy_cupy_allclose(rtol=1e-6)
    def test_nanvar_huge(self, xp, dtype):
        data = testing.shaped_random((1024, 512), xp, dtype)
        if data.dtype.kind not in 'biu':
            data[:512, :256] = xp.nan
        return xp.nanvar(data, axis=1)

    @testing.numpy_cupy_allclose(rtol=1e-4)
    def test_nanvar_float16(self, xp):
        data = testing.shaped_arange((4, 5), xp, numpy.float16)
        data[0][0] = xp.nan
        return xp.nanvar(data, axis=0)

    @ignore_runtime_warnings
    @testing.for_all_dtypes(no_float16=True, no_complex=True)
    @testing.numpy_cupy_allclose(rtol=1e-6)
    def test_nanstd_out(self, xp, dtype):
        data = testing.shaped_random((10, 20, 30), xp, dtype)
        out = xp.zeros((20, 30))
        if data.dtype.kind not in 'biu':
            data[1, :] = xp.nan
            data[:, 3] = xp.nan
        xp.nanstd(data, axis=0, out=out)
        return out

    @testing.slow
    @testing.for_all_dtypes(no_float16=True, no_complex=True)
    @testing.numpy_cupy_allclose(rtol=1e-6)
    def test_nanstd_huge(self, xp, dtype):
        data = testing.shaped_random((1024, 512), xp, dtype)
        if data.dtype.kind not in 'biu':
            data[:512, :256] = xp.nan
        return xp.nanstd(data, axis=1)

    @testing.numpy_cupy_allclose(rtol=1e-4)
    def test_nanstd_float16(self, xp):
        data = testing.shaped_arange((4, 5), xp, numpy.float16)
        data[0][0] = xp.nan
        return xp.nanstd(data, axis=1)
| 32.124294
| 70
| 0.639729
| 1,676
| 11,372
| 4.128878
| 0.062649
| 0.070809
| 0.087861
| 0.131792
| 0.886127
| 0.873844
| 0.858815
| 0.813873
| 0.807659
| 0.807659
| 0
| 0.030916
| 0.226345
| 11,372
| 353
| 71
| 32.215297
| 0.755626
| 0
| 0
| 0.597173
| 0
| 0
| 0.006243
| 0
| 0
| 0
| 0
| 0
| 0.014134
| 1
| 0.141343
| false
| 0
| 0.017668
| 0
| 0.318021
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
07c0a8343acea07f85f14615bb56b8fa7617c907
| 9,391
|
py
|
Python
|
CA-project/combined_logger_rainbow_critical.py
|
simewu/openssl
|
b4b95c47e8efc25630bd3db91aa1977b591317a1
|
[
"OpenSSL"
] | null | null | null |
CA-project/combined_logger_rainbow_critical.py
|
simewu/openssl
|
b4b95c47e8efc25630bd3db91aa1977b591317a1
|
[
"OpenSSL"
] | null | null | null |
CA-project/combined_logger_rainbow_critical.py
|
simewu/openssl
|
b4b95c47e8efc25630bd3db91aa1977b591317a1
|
[
"OpenSSL"
] | null | null | null |
# A separate file is saved for every individual algorithm, but samples themselves are overwritten
import os
import time
import datetime
# Number of repetitions for each timed openssl operation, read interactively.
num_samples = int(input("Enter number of times to loop: \n"))
# Root of the local openssl checkout; its apps/openssl binary is invoked below.
openssl_dir = os.path.expanduser('~/openssl')
def initCert(algorithm, bits=''):
    """Create a self-signed CA key and certificate for *algorithm*.

    Shells out to the locally built openssl binary; output is written to
    key_CA_<algorithm><bits>.key / .pem and all command output is discarded.

    :param algorithm: 'rsa', 'secp', or any algorithm name openssl accepts
    :param bits: key size / curve suffix ('' when encoded in the name)
    """
    if algorithm == 'rsa':
        myCmd = f'{openssl_dir}/apps/openssl req -x509 -new -newkey rsa:{bits} -keyout key_CA_{algorithm}{bits}.key -out key_CA_{algorithm}{bits}.pem -pkeyopt rsa_keygen_bits:{bits} -nodes -subj "/CN=oqstest CA" -days 365 -config {openssl_dir}/apps/openssl.cnf > /dev/null 2>&1'
        os.system(myCmd)
    # BUG FIX: this was a second bare `if`, so for 'rsa' the generic `else`
    # branch below also ran and overwrote the freshly generated CA files.
    elif algorithm == 'secp':
        myCmd = f'{openssl_dir}/apps/openssl ecparam -out key_CA_{algorithm}{bits}.key -name {algorithm}{bits} -genkey > /dev/null 2>&1'
        os.system(myCmd)
        myCmd = f'{openssl_dir}/apps/openssl req -new -key key_CA_{algorithm}{bits}.key -x509 -nodes -days 365 -out key_CA_{algorithm}{bits}.pem -nodes -subj "/CN=oqstest CA" -days 365 -config {openssl_dir}/apps/openssl.cnf > /dev/null 2>&1'
        os.system(myCmd)
    else:
        myCmd = f'{openssl_dir}/apps/openssl req -x509 -new -newkey {algorithm} -keyout key_CA_{algorithm}{bits}.key -out key_CA_{algorithm}{bits}.pem -nodes -subj "/CN=oqstest CA" -days 365 -config {openssl_dir}/apps/openssl.cnf > /dev/null 2>&1'
        os.system(myCmd)
def genKey(algorithm, num_samples, bits=''):
    """Generate a server private key num_samples times (timing workload).

    The same output file is overwritten every iteration; callers time this
    function and only care about the elapsed wall-clock time.

    :param algorithm: 'rsa', 'secp', or any algorithm name openssl accepts
    :param num_samples: number of repetitions
    :param bits: key size / curve suffix ('' when encoded in the name)
    """
    if algorithm == 'rsa':
        myCmd = f'{openssl_dir}/apps/openssl genpkey -algorithm rsa -out {openssl_dir}/CA-project/csr/key_srv_{algorithm}{bits}.key -pkeyopt rsa_keygen_bits:{bits} > /dev/null 2>&1'
    # BUG FIX: this was a second bare `if`, so for 'rsa' the generic `else`
    # command also ran num_samples extra times, skewing the measured timings.
    elif algorithm == 'secp':
        myCmd = f'{openssl_dir}/apps/openssl ecparam -out key_srv_{algorithm}{bits}.key -name secp{bits} -genkey > /dev/null 2>&1'
    else:
        myCmd = f'{openssl_dir}/apps/openssl genpkey -algorithm {algorithm} -out {openssl_dir}/CA-project/csr/key_srv_{algorithm}{bits}.key > /dev/null 2>&1'
    for i in range(num_samples):
        os.system(myCmd)
def genCSR(algorithm, num_samples, bits=''):
    """Generate a certificate signing request num_samples times (timing workload).

    :param algorithm: 'rsa', 'secp', or any algorithm name openssl accepts
    :param num_samples: number of repetitions
    :param bits: key size / curve suffix ('' when encoded in the name)
    """
    if algorithm == 'rsa':
        myCmd = f'{openssl_dir}/apps/openssl req -new -key {openssl_dir}/CA-project/csr/key_srv_{algorithm}{bits}.key -out {openssl_dir}/CA-project/csr/key_srv_{algorithm}{bits}.csr -nodes -pkeyopt rsa_keygen_bits:{bits} -subj \'/CN=oqstest server\' -config {openssl_dir}/apps/openssl.cnf > /dev/null 2>&1'
    # BUG FIX: this was a second bare `if`, so for 'rsa' the generic `else`
    # command also ran num_samples extra times, skewing the measured timings.
    elif algorithm == 'secp':
        myCmd = f'{openssl_dir}/apps/openssl req -newkey ec:key_CA_{algorithm}{bits}.key -keyout ec_PRIVATEKEY.key -out key_srv_{algorithm}{bits}.csr -nodes -subj \'/CN=oqstest server\' -config {openssl_dir}/apps/openssl.cnf > /dev/null 2>&1'
    else:
        myCmd = f'{openssl_dir}/apps/openssl req -new -key {openssl_dir}/CA-project/csr/key_srv_{algorithm}{bits}.key -out {openssl_dir}/CA-project/csr/key_srv_{algorithm}{bits}.csr -nodes -subj \'/CN=oqstest server\' -config {openssl_dir}/apps/openssl.cnf > /dev/null 2>&1'
    for i in range(num_samples):
        os.system(myCmd)
def genCert(algorithm, num_samples, bits=''):
    """Sign the server CSR with the CA num_samples times (timing workload)."""
    if algorithm == 'secp':
        command = f'{openssl_dir}/apps/openssl x509 -req -in key_srv_{algorithm}{bits}.csr -out key_crt_{algorithm}{bits}.pem -CA key_CA_{algorithm}{bits}.pem -CAkey key_CA_{algorithm}{bits}.key -CAcreateserial -days 365 > /dev/null 2>&1'
    else:
        command = f'{openssl_dir}/apps/openssl x509 -req -in {openssl_dir}/CA-project/csr/key_srv_{algorithm}{bits}.csr -out {openssl_dir}/CA-project/crt/key_crt_{algorithm}{bits}.pem -CA key_CA_{algorithm}{bits}.pem -CAkey key_CA_{algorithm}{bits}.key -CAcreateserial -days 365 > /dev/null 2>&1'
    for _ in range(num_samples):
        os.system(command)
def certVerify(algorithm, num_samples, bits=''):
    """Verify the generated certificate against the CA num_samples times."""
    if algorithm == 'secp':
        command = f'{openssl_dir}/apps/openssl verify -CAfile key_CA_{algorithm}{bits}.pem key_crt_{algorithm}{bits}.pem key_crt_{algorithm}{bits}.pem > /dev/null 2>&1'
    else:
        command = f'{openssl_dir}/apps/openssl verify -CAfile {openssl_dir}/CA-project/key_CA_{algorithm}{bits}.pem {openssl_dir}/CA-project/crt/key_crt_{algorithm}{bits}.pem {openssl_dir}/CA-project/crt/key_crt_{algorithm}{bits}.pem > /dev/null 2>&1'
    for _ in range(num_samples):
        os.system(command)
# Signature algorithms as passed to openssl, plus index-aligned
# human-readable labels (the two lists are consumed pairwise by run()).
algorithms = [
    'rainbowVclassic','p521_rainbowVcompressed'
]
algorithms_in_english = [
    'Rainbow V Classic','P521 + Rainbow V Compressed'
]
# Alternative algorithm set kept for reference; swap in by uncommenting.
#algorithms = [
#    'rsa','rsa3072_rainbowIcompressed','rsa3072_dilithium2'
#]
#algorithms_in_english = [
#    'RSA','RSA 3072 + Rainbow I Compressed','RSA 3072 + Dilithium2'
#]
def header():
    """Return the CSV header row.

    The trailing comma is intentional: every data row written by run() also
    ends with a comma, so the header matches the column layout exactly.
    """
    columns = (
        'Timestamp',
        'Timestamp (Seconds)',
        'Algorithm',
        'Algorithm (human readable)',
        'Avg Key Gen Time (ms)',
        'Avg Cert Signing Request Time (ms)',
        'Avg Cert Gen Time (ms)',
        'Avg Cert Verifying Time (ms)',
    )
    return ','.join(columns) + ','
def _time_step(step, algorithm, bits=''):
    """Time one benchmark step (the step loops num_samples times internally)
    and return the average duration per sample in milliseconds."""
    t1 = time.time()
    step(algorithm, num_samples, bits)
    t2 = time.time()
    time.sleep(0.1)  # brief pause between steps, as in the original script
    return (t2 - t1) / num_samples * 1000


def _write_row(file, algorithm_label, english_label, averages):
    """Append one CSV row: timestamps, labels, then the four averages (ms)."""
    now = datetime.datetime.now()
    time_end = (now - datetime.datetime(1970, 1, 1)).total_seconds()
    avg_key, avg_csr, avg_cert, avg_verify = averages
    line = (f'{now},{time_end},{algorithm_label},{english_label},'
            f'{avg_key},{avg_csr},{avg_cert},{avg_verify},')
    file.write(line + '\n')


def _benchmark(file, algorithm, algorithm_label, english_label, bits=''):
    """Create a CA, then time key gen, CSR, cert gen and verification for one
    (algorithm, bits) combination, writing a CSV row to *file*."""
    initCert(algorithm, bits)
    averages = [_time_step(step, algorithm, bits)
                for step in (genKey, genCSR, genCert, certVerify)]
    _write_row(file, algorithm_label, english_label, averages)


def run(file):
    """Benchmark every entry in `algorithms`, appending one CSV row per
    (algorithm, bits) combination to the open *file* handle.

    Refactored from ~110 lines of triplicated timing code into the helpers
    above; the timing sequence, sleeps and CSV column order are unchanged.

    :param file: writable text file the CSV rows are appended to
    """
    rsa_bits_array = [2048, 3072, 4096]
    ecdsa_bits_array = ['256k1', '384r1', '521r1']
    for algorithm, in_english in zip(algorithms, algorithms_in_english):
        print(f'Starting {algorithm}...')
        time.sleep(0.1)
        if algorithm == 'rsa':
            for bits in rsa_bits_array:
                _benchmark(file, algorithm, f'{algorithm} {bits}',
                           f'{in_english} {bits}', bits)
        elif algorithm == 'secp':
            for bits in ecdsa_bits_array:
                # NOTE: no space between the English name and {bits} —
                # preserved from the original CSV output format.
                _benchmark(file, algorithm, f'{algorithm} {bits}',
                           f'{in_english}{bits}', bits)
        else:
            _benchmark(file, algorithm, algorithm, in_english)
# Results CSV; `with` guarantees the handle is flushed and closed even if a
# benchmark step raises (the original opened it and never closed it).
fileName = 'critical_rainbow_NEW_LOGGED_OPENSSL_FINAL1.csv'
with open(fileName, 'w') as output_file:
    output_file.write(header() + '\n')
    run(output_file)
| 38.805785
| 300
| 0.694175
| 1,409
| 9,391
| 4.418027
| 0.10291
| 0.062651
| 0.04498
| 0.06747
| 0.833092
| 0.797108
| 0.782972
| 0.773333
| 0.758233
| 0.749558
| 0
| 0.031344
| 0.150676
| 9,391
| 241
| 301
| 38.966805
| 0.749122
| 0.115856
| 0
| 0.64881
| 0
| 0.095238
| 0.44775
| 0.279995
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041667
| false
| 0
| 0.017857
| 0
| 0.065476
| 0.005952
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ed088d64233e5d09e774c763017730b0726c8575
| 36
|
py
|
Python
|
nogfog/snapshot/__init__.py
|
rgozi/nogfog
|
87b18322f95a6782ea077ba367531a90b8bcf35c
|
[
"MIT"
] | null | null | null |
nogfog/snapshot/__init__.py
|
rgozi/nogfog
|
87b18322f95a6782ea077ba367531a90b8bcf35c
|
[
"MIT"
] | null | null | null |
nogfog/snapshot/__init__.py
|
rgozi/nogfog
|
87b18322f95a6782ea077ba367531a90b8bcf35c
|
[
"MIT"
] | null | null | null |
# Created by wangmeng at 2020/12/23
| 18
| 35
| 0.75
| 7
| 36
| 3.857143
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.266667
| 0.166667
| 36
| 1
| 36
| 36
| 0.633333
| 0.916667
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
9c3ee95711c52c0e2b98aeb4c70466340ee5a2a1
| 42
|
py
|
Python
|
src/my_module.py
|
igorp-lopes/Desafio2-Syngenta-Digital-2021-s1
|
58eeb24793e31555545bb55976f3fab87cd9035d
|
[
"Apache-2.0"
] | null | null | null |
src/my_module.py
|
igorp-lopes/Desafio2-Syngenta-Digital-2021-s1
|
58eeb24793e31555545bb55976f3fab87cd9035d
|
[
"Apache-2.0"
] | null | null | null |
src/my_module.py
|
igorp-lopes/Desafio2-Syngenta-Digital-2021-s1
|
58eeb24793e31555545bb55976f3fab87cd9035d
|
[
"Apache-2.0"
] | null | null | null |
def addOne(number):
    """Return *number* incremented by one."""
    successor = 1 + number
    return successor
| 14
| 21
| 0.666667
| 6
| 42
| 4.666667
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.03125
| 0.238095
| 42
| 2
| 22
| 21
| 0.84375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
9c4c5c5596e0eb7ba4f01f0762f43f918367e27e
| 239
|
py
|
Python
|
cloudmersive_virus_api_client/api/__init__.py
|
Cloudmersive/Cloudmersive.APIClient.Python.Virus
|
959abe2e58e1b86b3191f1dc6aa6d7373c13a87f
|
[
"Apache-2.0"
] | 3
|
2019-05-04T21:08:14.000Z
|
2021-06-15T07:08:49.000Z
|
cloudmersive_virus_api_client/api/__init__.py
|
Cloudmersive/Cloudmersive.APIClient.Python.Virus
|
959abe2e58e1b86b3191f1dc6aa6d7373c13a87f
|
[
"Apache-2.0"
] | null | null | null |
cloudmersive_virus_api_client/api/__init__.py
|
Cloudmersive/Cloudmersive.APIClient.Python.Virus
|
959abe2e58e1b86b3191f1dc6aa6d7373c13a87f
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import
# flake8: noqa
# import apis into api package
from cloudmersive_virus_api_client.api.scan_api import ScanApi
from cloudmersive_virus_api_client.api.scan_cloud_storage_api import ScanCloudStorageApi
| 29.875
| 88
| 0.874477
| 34
| 239
| 5.705882
| 0.529412
| 0.164948
| 0.216495
| 0.247423
| 0.381443
| 0.381443
| 0.381443
| 0
| 0
| 0
| 0
| 0.00463
| 0.096234
| 239
| 7
| 89
| 34.142857
| 0.893519
| 0.171548
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
9c59fe87ec906db25361bdbf225cca3197a78288
| 2,076
|
py
|
Python
|
harbor/models/__init__.py
|
angeiv/python-harbor
|
98e1c096f031da4974dc5a5717272f55813c391c
|
[
"MIT"
] | 1
|
2022-01-26T18:08:56.000Z
|
2022-01-26T18:08:56.000Z
|
harbor/models/__init__.py
|
angeiv/python-harbor
|
98e1c096f031da4974dc5a5717272f55813c391c
|
[
"MIT"
] | null | null | null |
harbor/models/__init__.py
|
angeiv/python-harbor
|
98e1c096f031da4974dc5a5717272f55813c391c
|
[
"MIT"
] | 1
|
2022-01-25T18:18:45.000Z
|
2022-01-25T18:18:45.000Z
|
# coding: utf-8
# flake8: noqa
"""
Harbor API
These APIs provide services for manipulating Harbor project. # noqa: E501
OpenAPI spec version: 2.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
# import models into model package
from harbor.models.addition_link import AdditionLink
from harbor.models.addition_links import AdditionLinks
from harbor.models.annotations import Annotations
from harbor.models.artifact import Artifact
from harbor.models.audit_log import AuditLog
from harbor.models.cve_allowlist import CVEAllowlist
from harbor.models.cve_allowlist_item import CVEAllowlistItem
from harbor.models.error import Error
from harbor.models.errors import Errors
from harbor.models.execution import Execution
from harbor.models.extra_attrs import ExtraAttrs
from harbor.models.icon import Icon
from harbor.models.instance import Instance
from harbor.models.label import Label
from harbor.models.metadata import Metadata
from harbor.models.metrics import Metrics
from harbor.models.native_report_summary import NativeReportSummary
from harbor.models.platform import Platform
from harbor.models.preheat_policy import PreheatPolicy
from harbor.models.project import Project
from harbor.models.project_deletable import ProjectDeletable
from harbor.models.project_metadata import ProjectMetadata
from harbor.models.project_req import ProjectReq
from harbor.models.project_summary import ProjectSummary
from harbor.models.project_summary_quota import ProjectSummaryQuota
from harbor.models.provider_under_project import ProviderUnderProject
from harbor.models.reference import Reference
from harbor.models.registry import Registry
from harbor.models.registry_credential import RegistryCredential
from harbor.models.repository import Repository
from harbor.models.resource_list import ResourceList
from harbor.models.scan_overview import ScanOverview
from harbor.models.tag import Tag
from harbor.models.task import Task
from harbor.models.vulnerability_summary import VulnerabilitySummary
| 39.923077
| 78
| 0.854528
| 274
| 2,076
| 6.375912
| 0.350365
| 0.200343
| 0.32055
| 0.078993
| 0.0664
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003741
| 0.098748
| 2,076
| 51
| 79
| 40.705882
| 0.929984
| 0.115607
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
9c7d987d3369df6ac9b19d9232de13cbd1726719
| 370
|
py
|
Python
|
terrascript/resource/tls.py
|
mjuenema/python-terrascript
|
6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d
|
[
"BSD-2-Clause"
] | 507
|
2017-07-26T02:58:38.000Z
|
2022-01-21T12:35:13.000Z
|
terrascript/resource/tls.py
|
mjuenema/python-terrascript
|
6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d
|
[
"BSD-2-Clause"
] | 135
|
2017-07-20T12:01:59.000Z
|
2021-10-04T22:25:40.000Z
|
terrascript/resource/tls.py
|
mjuenema/python-terrascript
|
6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d
|
[
"BSD-2-Clause"
] | 81
|
2018-02-20T17:55:28.000Z
|
2022-01-31T07:08:40.000Z
|
# terrascript/resource/tls.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:28:45 UTC)
#
# For imports without namespace, e.g.
#
# >>> import terrascript.resource.tls
#
# instead of
#
# >>> import terrascript.resource.hashicorp.tls
#
# This is only available for 'official' and 'partner' providers.
from terrascript.resource.hashicorp.tls import *
| 24.666667
| 73
| 0.737838
| 49
| 370
| 5.571429
| 0.714286
| 0.278388
| 0.161172
| 0.227106
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.037618
| 0.137838
| 370
| 14
| 74
| 26.428571
| 0.818182
| 0.797297
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
92c8107afd3d1f31f233edf0e9313de433483ece
| 12,351
|
py
|
Python
|
mtl/embedders/pretrained.py
|
vandurme/TFMTL
|
5958187900bdf67089a237c523b6caa899f63ac1
|
[
"Apache-2.0"
] | 10
|
2019-05-18T22:23:44.000Z
|
2022-01-25T15:24:45.000Z
|
mtl/embedders/pretrained.py
|
vandurme/TFMTL
|
5958187900bdf67089a237c523b6caa899f63ac1
|
[
"Apache-2.0"
] | 1
|
2020-01-07T15:24:16.000Z
|
2020-01-15T00:39:01.000Z
|
mtl/embedders/pretrained.py
|
vandurme/TFMTL
|
5958187900bdf67089a237c523b6caa899f63ac1
|
[
"Apache-2.0"
] | 1
|
2021-12-02T02:24:06.000Z
|
2021-12-02T02:24:06.000Z
|
# Copyright 2018 Johns Hopkins University. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import codecs
import json
import numpy as np
import tensorflow as tf
# TODO other word embeddings
from tqdm import tqdm
from mtl.embedders.embed_sequence import get_weighted_embeddings
from mtl.util.load_embeds import (load_pretrained_matrix,
load_pretrianed_vocab_dict)
# TODO refactor
def only_pretrained(word_ids,
                    vocab_size,
                    embed_dim,
                    pretrained_path,
                    trainable,
                    **kwargs):
    """Embedding lookup layer backed exclusively by pre-trained vectors.

    :param word_ids: list of word ids
    :param vocab_size: size of the vocabulary given in the config file
    :param embed_dim: dimension of the embeddings given in the config file
    :param pretrained_path: path to the pre-trained word embedding file
    :param trainable: whether to train the pred-trained word embeddings
    :return: embed lookup layer
    """
    if kwargs['is_training']:
        tf.logging.info('Loading pretrained embeddings from %s' %
                        pretrained_path)
        matrix = load_pretrained_matrix(pretrained_path)
        # The file's vocab/dim must agree with the configured sizes exactly.
        assert matrix.shape[0] == vocab_size, \
            "Given vocab size (%d) not equal to than that " \
            "of the pre-trained embedding (%d)!" % (
                vocab_size, matrix.shape[0])
        assert matrix.shape[1] == embed_dim, \
            "Given embed dim (%d) and that of the " \
            "pre-trained embedding (%d) don't match!" % (
                embed_dim, matrix.shape[1])
        tf.logging.info(
            'Generating embedding lookup layer from %s and the words '
            'from the training set' %
            pretrained_path)
        # Initialize the variable from the pre-trained matrix.
        word_embedding = tf.get_variable(
            name='embedding_pretrained',
            initializer=tf.constant_initializer(np.float32(matrix)),
            dtype=tf.float32,
            shape=[matrix.shape[0], embed_dim],
            trainable=trainable)
    else:
        # Not training: only declare a variable of the right shape under the
        # same name (values zero-initialized rather than re-read from disk).
        word_embedding = tf.get_variable(
            name='embedding_pretrained',
            initializer=tf.zeros(shape=[vocab_size, embed_dim],
                                 dtype=tf.float32),
            dtype=tf.float32,
            trainable=trainable)
    assert word_embedding.shape.as_list() == [vocab_size, embed_dim]
    looked_up = tf.contrib.layers.embedding_lookup_unique(word_embedding,
                                                          word_ids)
    # Optional dense projection of the looked-up embeddings.
    if 'proj_dim' in kwargs:
        looked_up = tf.layers.dense(looked_up,
                                    kwargs['proj_dim'],
                                    name='projected_embeddings',
                                    activation=None)
    # Optional per-token weighting of the embeddings.
    if 'weights' in kwargs:
        looked_up = get_weighted_embeddings(looked_up,
                                            weights=kwargs['weights'])
    return looked_up
def expand_pretrained(word_ids,
                      vocab_size,
                      embed_dim,
                      pretrained_path,
                      trainable,
                      **kwargs):
    """Expand the training vocabulary with pre-trained word embeddings.

    Builds a ``[vocab_size, embed_dim]`` embedding table whose first
    ``vocab_size - #pretrained`` rows are randomly initialized (words seen
    in training but absent from the pre-trained file) and whose remaining
    rows come from the pre-trained matrix, then looks ``word_ids`` up in it.

    :param word_ids: tensor/list of word ids to look up
    :param vocab_size: size of the vocabulary given in the config file
    :param embed_dim: dimension of the embeddings given in the config file
    :param pretrained_path: path to the pre-trained word embedding file
    :param trainable: whether to fine-tune the pre-trained part of the table
        (the randomly initialized part is always trainable)
    :param kwargs: must contain 'is_training'; may contain 'proj_dim'
        (adds a dense projection) and 'weights' (re-weights the lookups)
    :return: embed lookup layer
    """
    # pretrained file name - .txt
    # word_embedding_name = os.path.basename(pretrained_path)[:-4]
    tf.logging.info('Loading pretrained embeddings from %s' %
                    pretrained_path)
    pretrained_matrix = load_pretrained_matrix(pretrained_path)
    # The pre-trained vocab must fit inside the configured vocab; the extra
    # rows for training-only words are created below.
    assert pretrained_matrix.shape[
               0] <= vocab_size, "Given vocab size (%d) is less than that of " \
                                 "the " \
                                 "pre-trained embedding (%d)!" % (
                                     vocab_size, pretrained_matrix.shape[0])
    assert pretrained_matrix.shape[
               1] == embed_dim, "Given embed dim (%d) and that of the " \
                                "pre-trained embedding (%d) don't match!" % (
                                    embed_dim, pretrained_matrix.shape[1])
    if kwargs['is_training']:
        tf.logging.info(
            'Generating embedding lookup layer from %s and the words '
            'from the training set' %
            pretrained_path)
        loaded_embedding = tf.get_variable(
            name='embedding_pretrained',
            initializer=tf.constant_initializer(np.float32(pretrained_matrix)),
            dtype=tf.float32,
            shape=[pretrained_matrix.shape[0], embed_dim],
            trainable=trainable)
        # randomly initialize word embeddings for words that appear in the
        # training set but not in pre-trained word embeddings
        extra_vocab_num = vocab_size - pretrained_matrix.shape[0]
        print(
            'There are %d words in the training set(s) that are not found in the '
            'pre-trained word embedding dictionary. '
            'Randomly initializing word embeddings for them...' % extra_vocab_num)
        random_embedding = tf.get_variable(
            name='embedding_training',
            initializer=tf.random_uniform(shape=[extra_vocab_num, embed_dim],
                                          dtype=tf.float32),
            dtype=tf.float32,
            trainable=True
        )
    else:
        # not initializing again but only define placeholders with same names
        # (values are presumably restored from a checkpoint -- TODO confirm)
        loaded_embedding = tf.get_variable(
            name='embedding_pretrained',
            initializer=tf.zeros(shape=[pretrained_matrix.shape[0], embed_dim],
                                 dtype=tf.float32),
            dtype=tf.float32,
            trainable=trainable
        )
        extra_vocab_num = vocab_size - pretrained_matrix.shape[0]
        random_embedding = tf.get_variable(
            name='embedding_training',
            initializer=tf.zeros(shape=[extra_vocab_num, embed_dim],
                                 dtype=tf.float32),
            dtype=tf.float32,
            trainable=True
        )
    # Random (training-only) rows come first, so ids 0..extra_vocab_num-1 are
    # assumed to be the training-only words -- verify against the vocab
    # building code.
    word_embedding = tf.concat([random_embedding, loaded_embedding],
                               axis=0,
                               name='embedding_combined')
    assert word_embedding.shape.as_list() == [vocab_size, embed_dim]
    embeddings = tf.contrib.layers.embedding_lookup_unique(word_embedding,
                                                           word_ids)
    # add a projection layer
    if 'proj_dim' in kwargs:
        embeddings = tf.layers.dense(embeddings,
                                     kwargs['proj_dim'],
                                     name='projected_embeddings',
                                     activation=None)
    if 'weights' in kwargs:
        embeddings = get_weighted_embeddings(embeddings,
                                             weights=kwargs['weights'])
    return embeddings
def init_pretrained(word_ids,
                    vocab_size,
                    embed_dim,
                    pretrained_path,
                    reverse_vocab_path,
                    random_size,
                    trainable,
                    **kwargs):
    """Initialize the training vocab's embeddings from pre-trained vectors.

    Ids ``0..random_size-1`` get randomly initialized (always trainable)
    rows; ids from ``random_size`` up are filled row-by-row from the
    pre-trained matrix using the id->word mapping in ``reverse_vocab_path``.

    :param word_ids: tensor/list of word ids to look up
    :param vocab_size: size of the vocabulary given in the config file
    :param embed_dim: dimension of the embeddings given in the config file
    :param pretrained_path: path to the pre-trained word embedding file
    :param reverse_vocab_path: path to the vocab file (id-to-word mapping of
        the whole dictionary: extra train words + pretrained words)
    :param random_size: number of word embeddings to be randomly
        initialized (those not in pretrained)
    :param trainable: whether to fine-tune the part of word embeddings
        initialized from pretrained
    :param kwargs: must contain 'is_training'; may contain 'proj_dim' and
        'weights'
    :return: embed lookup layer
    """
    tf.logging.info('Randomly initializing word embeddings for %s words not '
                    'in pretrained...' % random_size)
    if kwargs['is_training']:
        random_embedding = tf.get_variable(
            name='embedding_training',
            initializer=tf.random_uniform(shape=[random_size, embed_dim],
                                          dtype=tf.float32),
            dtype=tf.float32,
            trainable=True
        )
    else:
        # not initializing again but only define placeholders with same names
        random_embedding = tf.get_variable(
            name='embedding_training',
            initializer=tf.zeros(shape=[random_size, embed_dim],
                                 dtype=tf.float32),
            dtype=tf.float32,
            trainable=True
        )
    # load the id->word mapping of the full vocabulary
    with codecs.open(reverse_vocab_path) as file:
        reverse_vocab = json.load(file)
    tf.logging.info('Loading pretrained embeddings from %s' % pretrained_path)
    pretrained_vocab = load_pretrianed_vocab_dict(pretrained_path)
    pretrained_matrix = load_pretrained_matrix(pretrained_path)
    assert pretrained_matrix.shape[
               1] == embed_dim, "Given embed dim (%d) and that of the " \
                                "pre-trained embedding (%d) don't match!" % (
                                    embed_dim, pretrained_matrix.shape[1])
    # pretrained file name - .txt
    # word_embedding_name = os.path.basename(pretrained_path)[:-4]
    # Gather the pre-trained row for every id >= random_size.
    # NOTE(review): pretrained_vocab.get(v) returns None for words missing
    # from the pre-trained vocab, which would break this indexing -- assumes
    # every id >= random_size exists in pretrained; confirm upstream.
    loaded_matrix = np.zeros([vocab_size - random_size, embed_dim])
    for i in tqdm(range(random_size, len(reverse_vocab))):
        v = reverse_vocab[str(i)]
        loaded_matrix[i - random_size] = pretrained_matrix[
            pretrained_vocab.get(v)]
    loaded_embedding = tf.get_variable(
        name='embedding_pretrained',
        initializer=tf.constant_initializer(np.float32(loaded_matrix)),
        dtype=tf.float32,
        shape=[vocab_size - random_size, embed_dim],
        trainable=trainable)
    tf.logging.info('Generating embedding lookup layer from %s and the words '
                    'from the training set' %
                    pretrained_path)
    # random rows first, pre-trained rows after, matching the id layout above
    word_embedding = tf.concat([random_embedding, loaded_embedding],
                               axis=0,
                               name='embedding_combined')
    assert word_embedding.shape.as_list() == [vocab_size, embed_dim]
    embeddings = tf.contrib.layers.embedding_lookup_unique(word_embedding,
                                                           word_ids)
    # add a projection layer
    if 'proj_dim' in kwargs:
        embeddings = tf.layers.dense(embeddings,
                                     kwargs['proj_dim'],
                                     name='projected_embeddings',
                                     activation=None)
    if 'weights' in kwargs:
        embeddings = get_weighted_embeddings(
            embeddings, weights=kwargs['weights'])
    return embeddings
| 41.033223
| 86
| 0.585135
| 1,326
| 12,351
| 5.269231
| 0.156863
| 0.03206
| 0.045084
| 0.028338
| 0.738514
| 0.720624
| 0.712895
| 0.704594
| 0.704594
| 0.68513
| 0
| 0.007802
| 0.335843
| 12,351
| 300
| 87
| 41.17
| 0.84396
| 0.225731
| 0
| 0.719388
| 0
| 0
| 0.14266
| 0
| 0
| 0
| 0
| 0.003333
| 0.040816
| 1
| 0.015306
| false
| 0
| 0.05102
| 0
| 0.081633
| 0.010204
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
1315e4accc3506256b2d97595c125ea0cf029945
| 1,878
|
py
|
Python
|
datasets.py
|
leekunpeng/MAGIC
|
f0b3d0838de15d83b97b58e3e54ef6075c1358b2
|
[
"MIT"
] | 11
|
2020-08-20T13:06:27.000Z
|
2022-02-16T05:39:53.000Z
|
datasets.py
|
leekunpeng/MAGIC
|
f0b3d0838de15d83b97b58e3e54ef6075c1358b2
|
[
"MIT"
] | null | null | null |
datasets.py
|
leekunpeng/MAGIC
|
f0b3d0838de15d83b97b58e3e54ef6075c1358b2
|
[
"MIT"
] | 6
|
2021-03-02T20:47:24.000Z
|
2021-12-17T08:49:00.000Z
|
import glob
import numpy as np
import os
import scipy.io as scio
import torch
from torch.utils.data import Dataset
class trainset_loader(Dataset):
    """Training split loader: yields (input, label, projection) FloatTensors,
    each with a leading channel dimension of 1."""

    def __init__(self, root, dose):
        self.file_path = 'input_' + dose
        # all .mat files named data*.mat under <root>/train/input_<dose>/
        pattern = os.path.join(root, 'train', self.file_path, 'data') + '*.mat'
        self.files_A = sorted(glob.glob(pattern))

    def __getitem__(self, index):
        input_file = self.files_A[index]
        # companion files live in sibling directories derived from the path
        label_file = input_file.replace(self.file_path, 'label_single')
        prj_file = input_file.replace('input', 'projection')
        tensors = []
        for path in (input_file, label_file, prj_file):
            array = scio.loadmat(path)['data']
            tensors.append(torch.FloatTensor(array).unsqueeze_(0))
        return tensors[0], tensors[1], tensors[2]

    def __len__(self):
        return len(self.files_A)
class testset_loader(Dataset):
    """Test split loader: yields (input, label, projection, result_name),
    where result_name is the Windows-style output path for the sample."""

    def __init__(self, root, dose):
        self.file_path = 'input_' + dose
        # all .mat files named data*.mat under <root>/test/input_<dose>/
        pattern = os.path.join(root, 'test', self.file_path, 'data') + '*.mat'
        self.files_A = sorted(glob.glob(pattern))

    def __getitem__(self, index):
        file_A = self.files_A[index]
        # companion files live in sibling directories derived from the path
        file_B = file_A.replace(self.file_path, 'label_single')
        file_C = file_A.replace('input', 'projection')
        res_name = 'result\\' + file_A[-13:]
        tensors = [torch.FloatTensor(scio.loadmat(path)['data']).unsqueeze_(0)
                   for path in (file_A, file_B, file_C)]
        return tensors[0], tensors[1], tensors[2], res_name

    def __len__(self):
        return len(self.files_A)
| 39.125
| 103
| 0.657082
| 263
| 1,878
| 4.346008
| 0.197719
| 0.03937
| 0.062992
| 0.099738
| 0.864392
| 0.864392
| 0.864392
| 0.864392
| 0.813648
| 0.813648
| 0
| 0.005413
| 0.212993
| 1,878
| 47
| 104
| 39.957447
| 0.76793
| 0
| 0
| 0.682927
| 0
| 0
| 0.06656
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.146341
| false
| 0
| 0.146341
| 0.04878
| 0.439024
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
132ad4298368a948d0317c0e6f5bb1fed9388019
| 21,038
|
py
|
Python
|
coronet/bingrad_common_updated.py
|
sabuj7177/CovidProject
|
b4b7bcfa5ace165520507f489dc74da7b695e2f0
|
[
"Apache-2.0"
] | null | null | null |
coronet/bingrad_common_updated.py
|
sabuj7177/CovidProject
|
b4b7bcfa5ace165520507f489dc74da7b695e2f0
|
[
"Apache-2.0"
] | null | null | null |
coronet/bingrad_common_updated.py
|
sabuj7177/CovidProject
|
b4b7bcfa5ace165520507f489dc74da7b695e2f0
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
# FLAGS = tf.app.flags.FLAGS
# tf.app.flags.DEFINE_integer('max_steps',370000,
# """Number of batches to run.""")
# tf.app.flags.DEFINE_string('net', 'alexnet',
# """The net to train (inception_v3, alexnet, vgg_16, vgg_a).""")
# tf.app.flags.DEFINE_integer('size_to_binarize', 1,
# """The min number of parameters to enable binarizing.""")
def ternary_encoder(input_data):
    """Pack the signs of ``input_data`` into one byte per four values.

    Each sign (-1/0/+1) is shifted to a 2-bit code (0/1/2); four codes are
    combined base-4 into a single uint8.
    """
    shifted = tf.add(tf.sign(input_data), 1)  # -1,0,1 -> 0,1,2
    flat = tf.reshape(shifted, [-1])
    # pad so the length is divisible by 4 (the decoder trims by real shape)
    pad_size = 4 - tf.compat.v1.mod(tf.size(flat), 4)
    padded = tf.concat([flat, tf.range(0.0, pad_size)], 0)
    q0, q1, q2, q3 = tf.split(padded, 4)
    # encode 4 grads into 1 Byte: q0 + 4*q1 + 16*q2 + 64*q3
    packed = tf.add(tf.add(q0, q1 * 4), tf.add(q2 * 16, q3 * 64))
    return tf.cast(packed, tf.uint8)
def ternary_decoder(encoded_data, scaler, shape):
    """Unpack bytes produced by ``ternary_encoder`` back to scaled floats."""
    codes = tf.cast(encoded_data, tf.int32)
    # recover the four base-4 digits stored in each byte
    parts = [tf.compat.v1.mod(codes, 4),
             tf.compat.v1.to_int32(tf.compat.v1.mod(codes / 4, 4)),
             tf.compat.v1.to_int32(tf.compat.v1.mod(codes / 16, 4)),
             tf.compat.v1.to_int32(tf.compat.v1.mod(codes / 64, 4))]
    digits = tf.concat(parts, 0)
    # drop encoder padding, restore shape, shift 0,1,2 back to -1,0,1
    real_size = tf.reduce_prod(shape)
    trimmed = tf.gather(tf.compat.v1.to_float(digits), tf.range(0, real_size))
    signs = tf.subtract(tf.reshape(trimmed, shape), 1)
    return signs * scaler
def encode_to_ternary_gradients(grads_and_vars, get_shape=False):
    """Encode each gradient tensor into packed ternary bytes.

    :param grads_and_vars: list of (gradient, variable) pairs; gradients may
        be None
    :param get_shape: if True, also return each gradient's dense shape so
        the decoder can restore it
    :return: list of (encoded gradient, variable) pairs, plus the list of
        shapes when ``get_shape`` is True
    """
    with tf.name_scope('ternary_encoder'):
        gradients, variables = zip(*grads_and_vars)
        ternary_gradients = []
        gradient_shapes = []
        for gradient in gradients:
            if gradient is None:
                # keep a placeholder so outputs stay aligned with variables
                ternary_gradients.append(None)
                if get_shape:
                    gradient_shapes.append(None)
                continue
            if get_shape:
                if isinstance(gradient, tf.IndexedSlices):
                    gradient_shape = gradient.dense_shape
                else:
                    gradient_shape = gradient.get_shape()
                gradient_shapes.append(gradient_shape)
            # empty tensors can't be ternary-encoded; bitcast them instead
            # (tf.cond builds both branches here; the choice is at run time)
            ternary_gradient = tf.cond(tf.size(gradient) < 1,
                                       lambda: tf.bitcast(gradient,
                                                          type=tf.uint8),
                                       lambda: ternary_encoder(gradient))
            ternary_gradients.append(ternary_gradient)
        if get_shape:
            return list(zip(ternary_gradients, variables)), gradient_shapes
        else:
            return list(zip(ternary_gradients, variables))
def encode_to_ternary_gradients_2(gradients, get_shape=False):
    """Encode each gradient tensor into packed ternary bytes (list variant).

    Unlike ``encode_to_ternary_gradients`` this takes and returns plain
    gradient lists without the paired variables.

    :param gradients: list of gradient tensors; entries may be None
    :param get_shape: if True, also return each gradient's dense shape
        (as a Python list) so the decoder can restore it
    :return: list of encoded gradients, plus the list of shapes when
        ``get_shape`` is True
    """
    with tf.name_scope('ternary_encoder'):
        ternary_gradients = []
        gradient_shapes = []
        for gradient in gradients:
            if gradient is None:
                # keep a placeholder so outputs stay aligned with the input
                ternary_gradients.append(None)
                if get_shape:
                    gradient_shapes.append(None)
                continue
            if get_shape:
                if isinstance(gradient, tf.IndexedSlices):
                    gradient_shape = gradient.dense_shape
                else:
                    gradient_shape = gradient.get_shape().as_list()
                gradient_shapes.append(gradient_shape)
            # empty tensors can't be ternary-encoded; bitcast them instead
            ternary_gradient = tf.cond(tf.size(gradient) < 1,
                                       lambda: tf.bitcast(gradient,
                                                          type=tf.uint8),
                                       lambda: ternary_encoder(gradient))
            ternary_gradients.append(ternary_gradient)
        if get_shape:
            return list(ternary_gradients), gradient_shapes
        else:
            return list(ternary_gradients)
def decode_from_ternary_gradients(grads_and_vars, scalers, shapes):
    """Decode each encoded gradient tensor back to floats.

    :param grads_and_vars: list of (encoded gradient, variable) pairs;
        gradients may be None
    :param scalers: per-gradient scaling factors, aligned with the pairs
    :param shapes: original dense shapes of the gradients, same alignment
    :return: list of (float gradient, variable) pairs (None preserved)
    """
    with tf.name_scope('ternary_decoder'):
        gradients, variables = zip(*grads_and_vars)
        floating_gradients = []
        for gradient, variable, scaler, shape in zip(gradients, variables,
                                                     scalers, shapes):
            if gradient is None:
                floating_gradients.append(None)
                # BUG FIX: skip decoding -- every sibling function has this
                # `continue`; without it tf.size/decoding runs on None and a
                # second entry is appended for the same variable.
                continue
            # gradient is encoded, so we use variable to check its size
            # We also assume dtype of variable and gradient is the same
            floating_gradient = tf.cond(tf.size(variable) < 1,
                                        lambda: tf.bitcast(gradient,
                                                           variable.dtype),
                                        lambda: ternary_decoder(gradient,
                                                                scaler,
                                                                shape))
            floating_gradients.append(floating_gradient)
        return list(zip(floating_gradients, variables))
def decode_from_ternary_gradients_2(gradients, scalers, shapes):
    """Decode each encoded gradient tensor back to floats (list variant).

    :param gradients: list of encoded gradients; entries may be None
    :param scalers: per-gradient scaling factors, aligned with ``gradients``
    :param shapes: original dense shapes, aligned with ``gradients``
    :return: list of float gradients (None entries preserved)
    """
    with tf.name_scope('ternary_decoder'):
        floating_gradients = []
        i = 0
        for gradient in gradients:
            if gradient is None:
                floating_gradients.append(None)
                # BUG FIX: skip decoding of a missing gradient (previously
                # ternary_decoder(None, ...) was attempted); still advance i
                # so scalers/shapes stay aligned with remaining entries.
                i += 1
                continue
            floating_gradient = ternary_decoder(gradient, scalers[i],
                                                shapes[i])
            floating_gradients.append(floating_gradient)
            i += 1
        return list(floating_gradients)
def clip_gradients_by_stddev(grads_and_vars, clip_factor=2.5):
    """Clip each gradient to [-clip_factor*stddev, clip_factor*stddev]."""
    gradients, variables = zip(*grads_and_vars)
    clipped = []
    for grad in gradients:
        if grad is None:
            clipped.append(None)
            continue
        # per-tensor standard deviation of the gradient entries
        mean = tf.reduce_mean(grad)
        stddev = tf.sqrt(tf.reduce_mean(tf.square(grad - mean)))
        bound = clip_factor * stddev
        # empty tensors pass through untouched
        clipped.append(tf.cond(tf.size(grad) < 1,
                               lambda: grad,
                               lambda: tf.clip_by_value(grad, -bound, bound)))
    return list(zip(clipped, variables))
def clip_gradients_by_stddev_2(gradients, clip_factor=2.5):
    """Clip gradients to [-clip_factor*stddev, clip_factor*stddev]
    (plain-list variant without paired variables)."""
    clipped = []
    for grad in gradients:
        if grad is None:
            clipped.append(None)
            continue
        # per-tensor standard deviation of the gradient entries
        mean = tf.reduce_mean(grad)
        stddev = tf.sqrt(tf.reduce_mean(tf.square(grad - mean)))
        bound = clip_factor * stddev
        # empty tensors pass through untouched
        clipped.append(tf.cond(tf.size(grad) < 1,
                               lambda: grad,
                               lambda: tf.clip_by_value(grad, -bound, bound)))
    return list(clipped)
def clip_gradients_by_thresholds(grads_and_vars, thresholds):
    """Clip each gradient to [-threshold, threshold] with its own threshold."""
    gradients, variables = zip(*grads_and_vars)
    clipped = []
    for grad, limit in zip(gradients, thresholds):
        if grad is None:
            clipped.append(None)
            continue
        # empty tensors pass through untouched
        clipped.append(tf.cond(tf.size(grad) < 1,
                               lambda: grad,
                               lambda: tf.clip_by_value(grad, -limit, limit)))
    return list(zip(clipped, variables))
def stochastical_binarize_gradients(grads_and_vars, scalers):
    """Stochastically binarize gradients.

    Each entry g becomes sign(g) * scaler where a uniform sample in
    [0, scaler) falls below |g|, and 0 elsewhere (presumably making the
    result unbiased in expectation when |g| <= scaler -- TODO confirm the
    scalers are computed that way).

    :param grads_and_vars: list of (gradient, variable) pairs
    :param scalers: per-gradient scaling factors
    :return: list of (binarized gradient, variable) pairs
    """
    gradients, variables = zip(*grads_and_vars)
    binarized_gradients = []
    for gradient, scaler in zip(gradients, scalers):
        if gradient is None:
            # keep a placeholder so outputs stay aligned with variables
            binarized_gradients.append(None)
            continue
        if isinstance(gradient, tf.IndexedSlices):
            gradient_shape = gradient.dense_shape
        else:
            gradient_shape = gradient.get_shape()
        zeros = tf.zeros(gradient_shape)
        abs_gradient = tf.abs(gradient)
        sign_gradient = tf.sign(gradient)
        # one uniform sample per element decides keep-vs-zero
        rnd_sample = tf.random.uniform(gradient_shape, 0, scaler)
        where_cond = tf.less(rnd_sample, abs_gradient)
        # empty tensors pass through untouched
        binarized_gradient = tf.cond(tf.size(gradient) < 1,
                                     lambda: gradient,
                                     lambda: tf.where(where_cond,
                                                      sign_gradient * scaler,
                                                      zeros))
        binarized_gradients.append(binarized_gradient)
    return list(zip(binarized_gradients, variables))
def stochastical_binarize_gradients_2(gradients, scalers):
    """Stochastically binarize gradients (list variant).

    Each entry g becomes sign(g) * scaler where a uniform sample in
    [0, scaler) falls below |g|, and 0 elsewhere; None entries and empty
    tensors pass through unchanged.

    :param gradients: list of gradient tensors; entries may be None
    :param scalers: per-gradient scaling factors, aligned with ``gradients``
    :return: list of binarized gradients
    """
    binarized_gradients = []
    # enumerate replaces the original's redundant manual index bookkeeping
    # (an `i = 0` before the loop and a dead `i += 1` at its end, both
    # shadowed by the `for i in range(...)` header).
    for i, gradient in enumerate(gradients):
        if gradient is None:
            binarized_gradients.append(None)
            continue
        if isinstance(gradient, tf.IndexedSlices):
            gradient_shape = gradient.dense_shape
        else:
            gradient_shape = gradient.get_shape()
        zeros = tf.zeros(gradient_shape)
        abs_gradient = tf.abs(gradient)
        sign_gradient = tf.sign(gradient)
        # one uniform sample per element decides keep-vs-zero
        rnd_sample = tf.random.uniform(gradient_shape, 0, scalers[i])
        where_cond = tf.less(rnd_sample, abs_gradient)
        # empty tensors pass through untouched
        binarized_gradient = tf.cond(tf.size(gradient) < 1,
                                     lambda: gradient,
                                     lambda: tf.where(where_cond,
                                                      sign_gradient * scalers[i],
                                                      zeros))
        binarized_gradients.append(binarized_gradient)
    return list(binarized_gradients)
def gradient_binarizing_scalers(grads_and_vars, clip_factor):
    """Compute one binarization scaler per gradient.

    Uses clip_factor standard deviations of the gradient, or the max
    absolute entry when clip_factor is effectively zero.
    """
    gradients, variables = zip(*grads_and_vars)
    scalers = []
    for grad in gradients:
        if grad is None:
            scalers.append(None)
            continue
        if clip_factor > 1.0e-5:
            mean = tf.reduce_mean(grad)
            stddev = tf.sqrt(tf.reduce_mean(tf.square(grad - mean)))
            scalers.append(clip_factor * stddev)
        else:
            scalers.append(tf.reduce_max(tf.abs(grad)))
    return list(zip(scalers, variables))
def gradient_binarizing_scalers_2(gradients, clip_factor):
    """Compute one binarization scaler per gradient (plain-list variant).

    Uses clip_factor standard deviations of the gradient, or the max
    absolute entry when clip_factor is effectively zero.
    """
    scalers = []
    for grad in gradients:
        if grad is None:
            scalers.append(None)
            continue
        if clip_factor > 1.0e-5:
            mean = tf.reduce_mean(grad)
            stddev = tf.sqrt(tf.reduce_mean(tf.square(grad - mean)))
            scalers.append(clip_factor * stddev)
        else:
            scalers.append(tf.reduce_max(tf.abs(grad)))
    return list(scalers)
def average_gradients(tower_grads):
    """Calculate the average gradient for each shared variable across towers.

    Note that this function provides a synchronization point across all
    towers.

    Args:
      tower_grads: List of lists of (gradient, variable) tuples. The outer
        list is over individual gradients. The inner list is over the
        gradient calculation for each tower.
    Returns:
      List of (gradient, variable) pairs where the gradient has been
      averaged across all towers; the variable is taken from the first
      tower since the variables are shared.
    """
    averaged = []
    for grad_and_vars in zip(*tower_grads):
        # grad_and_vars is ((grad0_gpu0, var0_gpu0), ..., (grad0_gpuN, var0_gpuN))
        stacked = tf.stack([g for g, _ in grad_and_vars], axis=0)
        mean_grad = tf.reduce_mean(stacked, 0)
        averaged.append((mean_grad, grad_and_vars[0][1]))
    return averaged
def average_gradients_v2(tower_grads):
    """Average a flat list of gradient tensors (one per tower).

    Unlike ``average_gradients`` this takes plain gradients with no paired
    variables, and returns a single-element list holding their element-wise
    mean.
    """
    # stack along a new leading 'tower' axis, then average it away
    stacked = tf.stack(list(tower_grads), axis=0)
    return [tf.reduce_mean(stacked, 0)]
def average_gradients2(tower_grads):
    """Pair the cross-tower averaged gradients with each tower's variables.

    Identical to ``average_gradients`` except that the result keeps one
    list per tower, pairing the shared averaged gradient with that tower's
    own (unshared) variable.

    Args:
      tower_grads: List of lists of (gradient, variable) tuples, one inner
        list per tower.
    Returns:
      List of lists of (averaged gradient, tower variable) pairs.
    """
    shared_means = average_gradients(tower_grads)
    return [[(mean_pair[0], local_pair[1])
             for mean_pair, local_pair in zip(shared_means, tower_pairs)]
            for tower_pairs in tower_grads]
def average_scalers(tower_scalers):
    """Average each gradient's scaler across all towers.

    Note that this function provides a synchronization point across all
    towers.

    Args:
      tower_scalers: List of lists of (scaler, variable) tuples. The outer
        list is over individual scalers. The inner list is over the scaler
        calculation for each tower.
    Returns:
      List of scalers averaged across all towers.
    """
    averaged = []
    for scale_and_vars in zip(*tower_scalers):
        # scale_and_vars is ((scale0_gpu0, var0_gpu0), ..., (scale0_gpuN, var0_gpuN))
        stacked = tf.stack([s for s, _ in scale_and_vars], axis=0)
        averaged.append(tf.reduce_mean(stacked, 0))
    return averaged
def average_scalers_2(tower_scalers):
    """Average a flat list of scalers (one per tower).

    Plain-list variant of ``average_scalers``: takes scalers with no paired
    variables and returns a single-element list holding their element-wise
    mean.
    """
    # stack along a new leading 'tower' axis, then average it away
    stacked = tf.stack(list(tower_scalers), axis=0)
    return [tf.reduce_mean(stacked, 0)]
def max_scalers(tower_scalers):
    """Take the maximum of each gradient's scaler across all towers.

    Note that this function provides a synchronization point across all
    towers.

    Args:
      tower_scalers: List of lists of (scaler, variable) tuples. The outer
        list is over individual scalers. The inner list is over the scaler
        calculation for each tower.
    Returns:
      List of scalers, each the element-wise max across all towers.
    """
    maxed = []
    for scale_and_vars in zip(*tower_scalers):
        # scale_and_vars is ((scale0_gpu0, var0_gpu0), ..., (scale0_gpuN, var0_gpuN))
        stacked = tf.stack([s for s, _ in scale_and_vars], axis=0)
        maxed.append(tf.reduce_max(stacked, 0))
    return maxed
def max_scalers_2(tower_scalers):
    """Take the element-wise max over a flat list of scalers.

    Plain-list variant of ``max_scalers``: takes scalers with no paired
    variables and returns a single-element list holding their element-wise
    maximum.
    """
    # stack along a new leading 'tower' axis, then reduce it away
    stacked = tf.stack(list(tower_scalers), axis=0)
    return [tf.reduce_max(stacked, 0)]
| 38.320583
| 124
| 0.639842
| 2,653
| 21,038
| 4.889936
| 0.087071
| 0.019425
| 0.024281
| 0.022046
| 0.850998
| 0.807446
| 0.7854
| 0.783705
| 0.774994
| 0.768982
| 0
| 0.0133
| 0.274503
| 21,038
| 548
| 125
| 38.390511
| 0.836664
| 0.359778
| 0
| 0.688581
| 0
| 0
| 0.004563
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.069204
| false
| 0
| 0.013841
| 0
| 0.15917
| 0.00346
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
13495de762d74dd836659966c1b770a9f723c748
| 27
|
py
|
Python
|
recipes/core/util/recipe/__init__.py
|
bschnitz/recipes
|
8af348774a1edc11ccab3da9753bc456c19f2000
|
[
"MIT"
] | null | null | null |
recipes/core/util/recipe/__init__.py
|
bschnitz/recipes
|
8af348774a1edc11ccab3da9753bc456c19f2000
|
[
"MIT"
] | null | null | null |
recipes/core/util/recipe/__init__.py
|
bschnitz/recipes
|
8af348774a1edc11ccab3da9753bc456c19f2000
|
[
"MIT"
] | null | null | null |
from .recipe import Recipe
| 13.5
| 26
| 0.814815
| 4
| 27
| 5.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148148
| 27
| 1
| 27
| 27
| 0.956522
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
135ed97212594fcf515ec0e81f9d0e7827762bd2
| 64
|
py
|
Python
|
python/gateway_api/__init__.py
|
Koisell/SmartCoffeeMachine
|
40844039970d177b20b9d3c6d3e7eedf7352885e
|
[
"MIT"
] | null | null | null |
python/gateway_api/__init__.py
|
Koisell/SmartCoffeeMachine
|
40844039970d177b20b9d3c6d3e7eedf7352885e
|
[
"MIT"
] | null | null | null |
python/gateway_api/__init__.py
|
Koisell/SmartCoffeeMachine
|
40844039970d177b20b9d3c6d3e7eedf7352885e
|
[
"MIT"
] | null | null | null |
from .flask_app import add_route
from .doc_sender import add_doc
| 32
| 32
| 0.859375
| 12
| 64
| 4.25
| 0.666667
| 0.352941
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.109375
| 64
| 2
| 33
| 32
| 0.894737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
1360852c9802efa3694fa221e49bbb3a11243789
| 3,986
|
py
|
Python
|
scripts/post_checks.py
|
flufpuf/ieee_fp_comp_pkg
|
a0c36d0648bf11e56c9f5f2989e462f5ea81ba52
|
[
"MIT"
] | null | null | null |
scripts/post_checks.py
|
flufpuf/ieee_fp_comp_pkg
|
a0c36d0648bf11e56c9f5f2989e462f5ea81ba52
|
[
"MIT"
] | null | null | null |
scripts/post_checks.py
|
flufpuf/ieee_fp_comp_pkg
|
a0c36d0648bf11e56c9f5f2989e462f5ea81ba52
|
[
"MIT"
] | null | null | null |
import numpy as np
import struct
import os
import csv
def hex_to_ieee_fp(hex_str):
    """Interpret a hex string (optionally '0x'/'0X'-prefixed) as a
    big-endian IEEE-754 single-precision float."""
    cleaned = hex_str.strip()
    if cleaned.startswith(("0x", "0X")):
        cleaned = cleaned[2:]
    (value,) = struct.unpack(">f", bytes.fromhex(cleaned))
    return value
def str_to_bool(bool_str):
    """Map a textual truth value to a bool.

    Returns True for {"true", "True", "1", "0x1"}, False for
    {"false", "False", "0", "0x0"}, and None for anything else
    (the original fell through implicitly; made explicit here).
    """
    token = bool_str.strip()
    if token in ("true", "True", "1", "0x1"):
        return True
    if token in ("false", "False", "0", "0x0"):
        return False
    return None
def check_ieee_fp_a_gt_b(output_path):
    """Verify the recorded 'a > b' results in <output_path>/results.dat.

    Each CSV row (after the header) holds: index, hex a, hex b, the DUT's
    a>b result, and its a>a result (self-comparison path). Returns True
    iff every row matches Python's own float comparison; mismatches are
    printed.
    """
    ok = True
    with open(os.path.join(output_path, "results.dat"), 'r') as res_file:
        rows = csv.reader(res_file)
        next(rows, None)  # skip header
        for row in rows:
            idx = int(row[0].strip())
            fp_a = hex_to_ieee_fp(row[1])
            fp_b = hex_to_ieee_fp(row[2])
            got = str_to_bool(row[3])
            if got != (fp_a > fp_b):
                ok = False
                print("index={}: {} > {} = {} not correct!".format(
                    idx, fp_a, fp_b, got))
            got = str_to_bool(row[4])
            if got != (fp_a > fp_a):
                ok = False
                print("index={}: {} > {} = {} not correct!".format(
                    idx, fp_a, fp_a, got))
    return ok
def check_ieee_fp_a_lt_b(output_path):
    """Verify the recorded 'a < b' results in <output_path>/results.dat.

    Each CSV row (after the header) holds: index, hex a, hex b, the DUT's
    a<b result, and its a<a result (self-comparison path). Returns True
    iff every row matches Python's own float comparison; mismatches are
    printed.
    """
    ok = True
    with open(os.path.join(output_path, "results.dat"), 'r') as res_file:
        rows = csv.reader(res_file)
        next(rows, None)  # skip header
        for row in rows:
            idx = int(row[0].strip())
            fp_a = hex_to_ieee_fp(row[1])
            fp_b = hex_to_ieee_fp(row[2])
            got = str_to_bool(row[3])
            if got != (fp_a < fp_b):
                ok = False
                print("index={}: {} < {} = {} not correct!".format(
                    idx, fp_a, fp_b, got))
            got = str_to_bool(row[4])
            if got != (fp_a < fp_a):
                ok = False
                print("index={}: {} < {} = {} not correct!".format(
                    idx, fp_a, fp_a, got))
    return ok
def check_ieee_fp_a_ge_b(output_path):
    """Verify the recorded 'a >= b' results in <output_path>/results.dat.

    Each CSV row (after the header) holds: index, hex a, hex b, the DUT's
    a>=b result, and its a>=a result (self-comparison path). Returns True
    iff every row matches Python's own float comparison; mismatches are
    printed.
    """
    ok = True
    with open(os.path.join(output_path, "results.dat"), 'r') as res_file:
        rows = csv.reader(res_file)
        next(rows, None)  # skip header
        for row in rows:
            idx = int(row[0].strip())
            fp_a = hex_to_ieee_fp(row[1])
            fp_b = hex_to_ieee_fp(row[2])
            got = str_to_bool(row[3])
            if got != (fp_a >= fp_b):
                ok = False
                print("index={}: {} >= {} = {} not correct!".format(
                    idx, fp_a, fp_b, got))
            got = str_to_bool(row[4])
            if got != (fp_a >= fp_a):
                ok = False
                print("index={}: {} >= {} = {} not correct!".format(
                    idx, fp_a, fp_a, got))
    return ok
def check_ieee_fp_a_le_b(output_path):
    """Validate the 'a <= b' results file produced by the FP comparison test.

    Opens ``results.dat`` in *output_path*, skips the header, then verifies
    the reported a <= b and a <= a columns row by row.  Prints every mismatch
    and returns True only when the whole file checks out.
    """
    success = True
    with open(os.path.join(output_path, "results.dat"), 'r') as results:
        parsed = csv.reader(results)
        next(parsed, None)  # header line carries no data
        for cols in parsed:
            n = int(cols[0].strip())
            val_a = hex_to_ieee_fp(cols[1])
            val_b = hex_to_ieee_fp(cols[2])
            claim = str_to_bool(cols[3])
            if claim != (val_a <= val_b):
                success = False
                print("index={}: {} <= {} = {} not correct!".format(n, val_a, val_b, claim))
            claim = str_to_bool(cols[4])
            # a <= a column probes self-comparison (NaN) behaviour
            if claim != (val_a <= val_a):
                success = False
                print("index={}: {} <= {} = {} not correct!".format(n, val_a, val_a, claim))
    return success
| 29.094891
| 93
| 0.529604
| 544
| 3,986
| 3.588235
| 0.125
| 0.04918
| 0.040984
| 0.050717
| 0.868852
| 0.861168
| 0.861168
| 0.861168
| 0.861168
| 0.861168
| 0
| 0.012075
| 0.335173
| 3,986
| 137
| 94
| 29.094891
| 0.724528
| 0.011791
| 0
| 0.604651
| 0
| 0
| 0.092503
| 0
| 0
| 0
| 0.001525
| 0
| 0
| 1
| 0.069767
| false
| 0
| 0.046512
| 0
| 0.197674
| 0.093023
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
1393ad31e8974b2358a3a84e4354d882fa3e9117
| 27,885
|
py
|
Python
|
tests/agent/test_docker_agent.py
|
mattalhonte/prefect
|
aa4f0a42d721c9d926265f761a47ccfd8e4e7393
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
tests/agent/test_docker_agent.py
|
mattalhonte/prefect
|
aa4f0a42d721c9d926265f761a47ccfd8e4e7393
|
[
"ECL-2.0",
"Apache-2.0"
] | 3
|
2022-02-14T11:25:57.000Z
|
2022-02-27T16:25:14.000Z
|
tests/agent/test_docker_agent.py
|
mattalhonte/prefect
|
aa4f0a42d721c9d926265f761a47ccfd8e4e7393
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2020-05-31T04:42:56.000Z
|
2020-05-31T04:42:56.000Z
|
from unittest.mock import MagicMock
import pytest
from prefect import context
from prefect.agent.docker import DockerAgent
from prefect.environments.storage import Docker, Local
from prefect.utilities.configuration import set_temporary_config
from prefect.utilities.graphql import GraphQLResult
def test_docker_agent_init(monkeypatch, runner_token):
    """A default DockerAgent constructs with empty labels and the name 'agent'."""
    # Mock out the docker client so no daemon is required.
    api = MagicMock()
    monkeypatch.setattr(
        "prefect.agent.docker.agent.DockerAgent._get_docker_client",
        MagicMock(return_value=api),
    )
    agent = DockerAgent()
    assert agent
    assert agent.labels == []
    assert agent.name == "agent"
def test_docker_agent_config_options(monkeypatch, runner_token):
    """On 'osx' the agent defaults to the unix Docker socket and honors config."""
    import docker  # DockerAgent imports docker within the constructor
    api = MagicMock()
    monkeypatch.setattr(
        "docker.APIClient", api,
    )
    # Force the platform so the base_url default is deterministic.
    monkeypatch.setattr("prefect.agent.docker.agent.platform", "osx")
    with set_temporary_config({"cloud.agent.auth_token": "TEST_TOKEN"}):
        agent = DockerAgent(name="test")
        assert agent.name == "test"
        assert agent.client.get_auth_token() == "TEST_TOKEN"
        assert agent.logger
        assert not agent.no_pull
        assert api.call_args[1]["base_url"] == "unix://var/run/docker.sock"
def test_docker_agent_daemon_url_responds_to_system(monkeypatch, runner_token):
    """On 'win32' the agent defaults to the Windows named-pipe Docker endpoint."""
    import docker  # DockerAgent imports docker within the constructor
    api = MagicMock()
    monkeypatch.setattr(
        "docker.APIClient", api,
    )
    # Pretend we are on Windows to exercise the npipe default.
    monkeypatch.setattr("prefect.agent.docker.agent.platform", "win32")
    with set_temporary_config({"cloud.agent.auth_token": "TEST_TOKEN"}):
        agent = DockerAgent()
        assert agent.client.get_auth_token() == "TEST_TOKEN"
        assert agent.logger
        assert not agent.no_pull
        assert api.call_args[1]["base_url"] == "npipe:////./pipe/docker_engine"
def test_docker_agent_config_options_populated(monkeypatch, runner_token):
    """Explicit base_url and no_pull constructor arguments are passed through."""
    import docker  # DockerAgent imports docker within the constructor
    api = MagicMock()
    monkeypatch.setattr(
        "docker.APIClient", api,
    )
    with set_temporary_config({"cloud.agent.auth_token": "TEST_TOKEN"}):
        agent = DockerAgent(base_url="url", no_pull=True)
        assert agent.client.get_auth_token() == "TEST_TOKEN"
        assert agent.logger
        assert agent.no_pull
        assert api.call_args[1]["base_url"] == "url"
def test_docker_agent_no_pull(monkeypatch, runner_token):
    """no_pull resolves from the constructor argument OR the prefect context.

    The kwarg and the context are OR-ed: either one set truthy makes the
    agent skip image pulls.
    """
    api = MagicMock()
    monkeypatch.setattr(
        "prefect.agent.docker.agent.DockerAgent._get_docker_client",
        MagicMock(return_value=api),
    )
    agent = DockerAgent()
    assert not agent.no_pull
    agent = DockerAgent(no_pull=True)
    assert agent.no_pull
    # Context alone can enable no_pull...
    with context(no_pull=True):
        agent = DockerAgent()
        assert agent.no_pull
    # ...and the kwarg wins over a falsy context value...
    with context(no_pull=False):
        agent = DockerAgent(no_pull=True)
        assert agent.no_pull
    # ...but both falsy leaves pulling enabled.
    with context(no_pull=False):
        agent = DockerAgent(no_pull=False)
        assert not agent.no_pull
def test_docker_agent_ping(monkeypatch, runner_token):
    """Constructing a DockerAgent pings the Docker daemon to verify connectivity."""
    api = MagicMock()
    api.ping.return_value = True
    monkeypatch.setattr(
        "prefect.agent.docker.agent.DockerAgent._get_docker_client",
        MagicMock(return_value=api),
    )
    # The ping happens inside the constructor; the instance itself is not
    # needed, so drop the previously-unused local binding (flake8 F841).
    DockerAgent()
    assert api.ping.called
def test_docker_agent_ping_exception(monkeypatch, runner_token):
    """If the daemon ping raises, DockerAgent construction propagates the error."""
    api = MagicMock()
    api.ping.return_value = True
    api.ping.side_effect = Exception()
    monkeypatch.setattr(
        "prefect.agent.docker.agent.DockerAgent._get_docker_client",
        MagicMock(return_value=api),
    )
    with pytest.raises(Exception):
        # Construction itself should raise; the unused local binding
        # (flake8 F841) is removed.
        DockerAgent()
def test_populate_env_vars_uses_user_provided_env_vars(monkeypatch, runner_token):
    """User-supplied env_vars are included in the container environment."""
    api = MagicMock()
    monkeypatch.setattr(
        "prefect.agent.docker.agent.DockerAgent._get_docker_client",
        MagicMock(return_value=api),
    )
    with set_temporary_config(
        {
            "cloud.agent.auth_token": "token",
            "cloud.api": "api",
            "logging.log_to_cloud": True,
        }
    ):
        agent = DockerAgent(env_vars=dict(AUTH_THING="foo"))
    env_vars = agent.populate_env_vars(
        GraphQLResult({"id": "id", "name": "name", "flow": {"id": "foo"}})
    )
    assert env_vars["AUTH_THING"] == "foo"
def test_populate_env_vars(monkeypatch, runner_token):
    """populate_env_vars emits the full expected set of PREFECT__* variables."""
    api = MagicMock()
    monkeypatch.setattr(
        "prefect.agent.docker.agent.DockerAgent._get_docker_client",
        MagicMock(return_value=api),
    )
    with set_temporary_config(
        {
            "cloud.agent.auth_token": "token",
            "cloud.api": "api",
            "logging.log_to_cloud": True,
        }
    ):
        agent = DockerAgent()
    env_vars = agent.populate_env_vars(
        GraphQLResult({"id": "id", "name": "name", "flow": {"id": "foo"}})
    )
    # Exact-equality check: any extra or missing variable fails the test.
    expected_vars = {
        "PREFECT__CLOUD__API": "api",
        "PREFECT__CLOUD__AUTH_TOKEN": "token",
        "PREFECT__CLOUD__AGENT__LABELS": "[]",
        "PREFECT__CONTEXT__FLOW_RUN_ID": "id",
        "PREFECT__CONTEXT__FLOW_ID": "foo",
        "PREFECT__CLOUD__USE_LOCAL_SECRETS": "false",
        "PREFECT__LOGGING__LOG_TO_CLOUD": "true",
        "PREFECT__LOGGING__LEVEL": "DEBUG",
        "PREFECT__ENGINE__FLOW_RUNNER__DEFAULT_CLASS": "prefect.engine.cloud.CloudFlowRunner",
        "PREFECT__ENGINE__TASK_RUNNER__DEFAULT_CLASS": "prefect.engine.cloud.CloudTaskRunner",
    }
    assert env_vars == expected_vars
def test_populate_env_vars_includes_agent_labels(monkeypatch, runner_token):
    """Agent labels are serialized into PREFECT__CLOUD__AGENT__LABELS."""
    api = MagicMock()
    monkeypatch.setattr(
        "prefect.agent.docker.agent.DockerAgent._get_docker_client",
        MagicMock(return_value=api),
    )
    with set_temporary_config(
        {
            "cloud.agent.auth_token": "token",
            "cloud.api": "api",
            "logging.log_to_cloud": True,
        }
    ):
        agent = DockerAgent(labels=["42", "marvin"])
    env_vars = agent.populate_env_vars(
        GraphQLResult({"id": "id", "name": "name", "flow": {"id": "foo"}})
    )
    expected_vars = {
        "PREFECT__CLOUD__API": "api",
        # Labels appear as the str() of the Python list.
        "PREFECT__CLOUD__AGENT__LABELS": "['42', 'marvin']",
        "PREFECT__CLOUD__AUTH_TOKEN": "token",
        "PREFECT__CONTEXT__FLOW_RUN_ID": "id",
        "PREFECT__CONTEXT__FLOW_ID": "foo",
        "PREFECT__CLOUD__USE_LOCAL_SECRETS": "false",
        "PREFECT__LOGGING__LOG_TO_CLOUD": "true",
        "PREFECT__LOGGING__LEVEL": "DEBUG",
        "PREFECT__ENGINE__FLOW_RUNNER__DEFAULT_CLASS": "prefect.engine.cloud.CloudFlowRunner",
        "PREFECT__ENGINE__TASK_RUNNER__DEFAULT_CLASS": "prefect.engine.cloud.CloudTaskRunner",
    }
    assert env_vars == expected_vars
@pytest.mark.parametrize("flag", [True, False])
def test_populate_env_vars_is_responsive_to_logging_config(
    monkeypatch, runner_token, flag
):
    """no_cloud_logs=flag inverts the LOG_TO_CLOUD env var (as a lowercase string)."""
    api = MagicMock()
    monkeypatch.setattr(
        "prefect.agent.docker.agent.DockerAgent._get_docker_client",
        MagicMock(return_value=api),
    )
    with set_temporary_config(
        {"cloud.agent.auth_token": "token", "cloud.api": "api",}
    ):
        agent = DockerAgent(labels=["42", "marvin"], no_cloud_logs=flag)
    env_vars = agent.populate_env_vars(
        GraphQLResult({"id": "id", "name": "name", "flow": {"id": "foo"}})
    )
    assert env_vars["PREFECT__LOGGING__LOG_TO_CLOUD"] == str(not flag).lower()
def test_docker_agent_deploy_flow(monkeypatch, runner_token):
    """deploy_flow pulls the image, creates the container and starts it."""
    api = MagicMock()
    api.ping.return_value = True
    api.create_container.return_value = {"Id": "container_id"}
    api.create_host_config.return_value = {"AutoRemove": True}
    monkeypatch.setattr(
        "prefect.agent.docker.agent.DockerAgent._get_docker_client",
        MagicMock(return_value=api),
    )
    agent = DockerAgent()
    agent.deploy_flow(
        flow_run=GraphQLResult(
            {
                "flow": GraphQLResult(
                    {
                        "id": "foo",
                        "storage": Docker(
                            registry_url="test", image_name="name", image_tag="tag"
                        ).serialize(),
                    }
                ),
                "id": "id",
                "name": "name",
            }
        )
    )
    # A registry_url is set, so the agent pulls before running.
    assert api.pull.called
    assert api.create_container.called
    assert api.start.called
    assert api.create_host_config.call_args[1]["auto_remove"] is True
    assert api.create_container.call_args[1]["command"] == "prefect execute cloud-flow"
    assert api.create_container.call_args[1]["host_config"]["AutoRemove"] is True
    assert api.start.call_args[1]["container"] == "container_id"
def test_docker_agent_deploy_flow_storage_raises(monkeypatch, runner_token):
    """deploy_flow rejects non-Docker storage with ValueError and never pulls."""
    monkeypatch.setattr("prefect.agent.agent.Client", MagicMock())
    api = MagicMock()
    api.ping.return_value = True
    api.create_container.return_value = {"Id": "container_id"}
    monkeypatch.setattr(
        "prefect.agent.docker.agent.DockerAgent._get_docker_client",
        MagicMock(return_value=api),
    )
    agent = DockerAgent()
    with pytest.raises(ValueError):
        agent.deploy_flow(
            flow_run=GraphQLResult(
                {
                    # Local storage is not runnable by the Docker agent.
                    "flow": GraphQLResult(
                        {"storage": Local().serialize(), "id": "foo"}
                    ),
                    "id": "id",
                    "name": "name",
                    "version": "version",
                }
            )
        )
    assert not api.pull.called
def test_docker_agent_deploy_flow_no_pull(monkeypatch, runner_token):
    """With no_pull=True the deploy skips the image pull but still runs."""
    api = MagicMock()
    api.ping.return_value = True
    api.create_container.return_value = {"Id": "container_id"}
    monkeypatch.setattr(
        "prefect.agent.docker.agent.DockerAgent._get_docker_client",
        MagicMock(return_value=api),
    )
    agent = DockerAgent(no_pull=True)
    agent.deploy_flow(
        flow_run=GraphQLResult(
            {
                "flow": GraphQLResult(
                    {
                        "id": "foo",
                        "storage": Docker(
                            registry_url="test", image_name="name", image_tag="tag"
                        ).serialize(),
                    }
                ),
                "id": "id",
                "name": "name",
            }
        )
    )
    assert not api.pull.called
    assert api.create_container.called
    assert api.start.called
def test_docker_agent_deploy_flow_show_flow_logs(monkeypatch, runner_token):
    """show_flow_logs=True spawns a log-streaming process for the container."""
    process = MagicMock()
    # Intercept process creation so no real subprocess is started.
    monkeypatch.setattr("multiprocessing.Process", process)
    api = MagicMock()
    api.ping.return_value = True
    api.create_container.return_value = {"Id": "container_id"}
    monkeypatch.setattr(
        "prefect.agent.docker.agent.DockerAgent._get_docker_client",
        MagicMock(return_value=api),
    )
    agent = DockerAgent(show_flow_logs=True)
    agent.deploy_flow(
        flow_run=GraphQLResult(
            {
                "flow": GraphQLResult(
                    {
                        "id": "foo",
                        "storage": Docker(
                            registry_url="test", image_name="name", image_tag="tag"
                        ).serialize(),
                    }
                ),
                "id": "id",
                "name": "name",
            }
        )
    )
    # The streamer targets the container that was just created.
    process.assert_called_with(
        target=agent.stream_container_logs, kwargs={"container_id": "container_id"}
    )
    assert len(agent.processes) == 1
    assert api.create_container.called
    assert api.start.called
def test_docker_agent_shutdown_terminates_child_processes(monkeypatch, runner_token):
    """on_shutdown terminates any still-alive log-streaming child processes."""
    monkeypatch.setattr("prefect.agent.agent.Client", MagicMock())
    api = MagicMock()
    api.ping.return_value = True
    api.create_container.return_value = {"Id": "container_id"}
    monkeypatch.setattr(
        "prefect.agent.docker.agent.DockerAgent._get_docker_client",
        MagicMock(return_value=api),
    )
    # Fake child process that reports itself as alive.
    proc = MagicMock(is_alive=MagicMock(return_value=True))
    agent = DockerAgent(show_flow_logs=True)
    agent.processes = [proc]
    agent.on_shutdown()
    assert proc.is_alive.called
    assert proc.terminate.called
def test_docker_agent_deploy_flow_no_registry_does_not_pull(monkeypatch, runner_token):
    """An empty registry_url means the image is local-only: no pull is attempted."""
    api = MagicMock()
    api.ping.return_value = True
    api.create_container.return_value = {"Id": "container_id"}
    monkeypatch.setattr(
        "prefect.agent.docker.agent.DockerAgent._get_docker_client",
        MagicMock(return_value=api),
    )
    agent = DockerAgent()
    agent.deploy_flow(
        flow_run=GraphQLResult(
            {
                "flow": GraphQLResult(
                    {
                        "id": "foo",
                        "storage": Docker(
                            registry_url="", image_name="name", image_tag="tag"
                        ).serialize(),
                    }
                ),
                "id": "id",
                "name": "name",
            }
        )
    )
    assert not api.pull.called
    assert api.create_container.called
    assert api.start.called
def test_docker_agent_heartbeat_gocase(monkeypatch, runner_token):
    """heartbeat pings the daemon (once at init, once per heartbeat call)."""
    api = MagicMock()
    api.ping.return_value = True
    monkeypatch.setattr(
        "prefect.agent.docker.agent.DockerAgent._get_docker_client",
        MagicMock(return_value=api),
    )
    agent = DockerAgent()
    agent.heartbeat()
    # 2 = constructor ping + heartbeat ping.
    assert api.ping.call_count == 2
def test_docker_agent_heartbeat_exits_on_failure(monkeypatch, runner_token, caplog):
    """Six consecutive failed heartbeats make the agent exit the process."""
    api = MagicMock()
    api.ping.return_value = True
    monkeypatch.setattr(
        "prefect.agent.docker.agent.DockerAgent._get_docker_client",
        MagicMock(return_value=api),
    )
    agent = DockerAgent()
    # Simulate the daemon going away after construction.
    api.ping.return_value = False
    agent.heartbeat()
    agent.heartbeat()
    agent.heartbeat()
    agent.heartbeat()
    agent.heartbeat()
    # The sixth failure crosses the threshold and raises SystemExit.
    with pytest.raises(SystemExit):
        agent.heartbeat()
    assert "Cannot reconnect to Docker daemon. Agent is shutting down." in caplog.text
    # 7 = constructor ping + six heartbeat pings.
    assert api.ping.call_count == 7
def test_docker_agent_heartbeat_logs_reconnect(monkeypatch, runner_token, caplog):
    """A successful ping after failures logs a reconnect message."""
    api = MagicMock()
    api.ping.return_value = True
    monkeypatch.setattr(
        "prefect.agent.docker.agent.DockerAgent._get_docker_client",
        MagicMock(return_value=api),
    )
    agent = DockerAgent()
    api.ping.return_value = False
    agent.heartbeat()
    agent.heartbeat()
    # Daemon comes back; the next heartbeat should log the recovery.
    api.ping.return_value = True
    agent.heartbeat()
    assert api.ping.call_count == 4
    assert "Reconnected to Docker daemon" in caplog.text
def test_docker_agent_heartbeat_resets_fail_count(monkeypatch, runner_token, caplog):
    """A successful heartbeat resets the consecutive-failure counter to zero."""
    api = MagicMock()
    api.ping.return_value = True
    monkeypatch.setattr(
        "prefect.agent.docker.agent.DockerAgent._get_docker_client",
        MagicMock(return_value=api),
    )
    agent = DockerAgent()
    api.ping.return_value = False
    agent.heartbeat()
    agent.heartbeat()
    assert agent.failed_connections == 2
    api.ping.return_value = True
    agent.heartbeat()
    # Recovery clears the failure streak entirely.
    assert agent.failed_connections == 0
    assert api.ping.call_count == 4
def test_docker_agent_init_volume_empty_options(monkeypatch, runner_token):
    """Without volume options the agent's volume bookkeeping starts empty."""
    api = MagicMock()
    monkeypatch.setattr(
        "prefect.agent.docker.agent.DockerAgent._get_docker_client",
        MagicMock(return_value=api),
    )
    agent = DockerAgent()
    assert agent
    assert agent.named_volumes == []
    assert agent.container_mount_paths == []
    assert agent.host_spec == {}
@pytest.mark.parametrize(
    "path,result",
    [
        ("name", True),
        ("/some/path", False),
        ("./some/path", False),
        ("~/some/path", False),
        ("../some/path", False),
        (" ../some/path", True),  # it is up to the caller to strip the string
        ("\n../some/path", True),  # it is up to the caller to strip the string
    ],
)
def test_docker_agent_is_named_volume_unix(monkeypatch, runner_token, path, result):
    """Unix volume specs: bare names are named volumes; path-like specs are not."""
    api = MagicMock()
    monkeypatch.setattr(
        "prefect.agent.docker.agent.DockerAgent._get_docker_client",
        MagicMock(return_value=api),
    )
    monkeypatch.setattr("prefect.agent.docker.agent.platform", "osx")
    agent = DockerAgent()
    assert agent._is_named_volume_unix(path) == result
@pytest.mark.parametrize(
    "path,result",
    [
        ("name", True),
        ("C:\\\\some\\path", False),
        ("c:\\\\some\\path", False),
        ("\\\\some\\path", False),
        ("\\\\\\some\\path", False),
    ],
)
def test_docker_agent_is_named_volume_win32(monkeypatch, runner_token, path, result):
    """Windows volume specs: drive letters and UNC-style paths are not named volumes."""
    api = MagicMock()
    monkeypatch.setattr(
        "prefect.agent.docker.agent.DockerAgent._get_docker_client",
        MagicMock(return_value=api),
    )
    monkeypatch.setattr("prefect.agent.docker.agent.platform", "win32")
    agent = DockerAgent()
    assert agent._is_named_volume_win32(path) == result
@pytest.mark.parametrize(
    "candidate,named_volumes,container_mount_paths,host_spec",
    [
        (
            # handle no volume spec
            [],
            [],
            [],
            {},
        ),
        (
            # no external path given (assume same as host path)
            ["/some/path"],
            [],
            ["/some/path"],
            {"/some/path": {"bind": "/some/path", "mode": "rw",}},
        ),
        (
            # internal & external paths
            ["/some/path:/ctr/path"],
            [],
            ["/ctr/path"],
            {"/some/path": {"bind": "/ctr/path", "mode": "rw",}},
        ),
        (
            # internal & external paths with mode
            ["/some/path:/ctr/path:ro"],
            [],
            ["/ctr/path"],
            {"/some/path": {"bind": "/ctr/path", "mode": "ro",}},
        ),
        (
            # named volume
            ["some-name:/ctr/path"],
            ["some-name"],
            ["/ctr/path"],
            {},
        ),
        (
            # multiple volumes
            [
                "some-name:/ctr/path3",
                "/some/path:/ctr/path1",
                "/another/path:/ctr/path2:ro",
            ],
            ["some-name"],
            ["/ctr/path3", "/ctr/path1", "/ctr/path2"],
            {
                "/another/path": {"bind": "/ctr/path2", "mode": "ro"},
                "/some/path": {"bind": "/ctr/path1", "mode": "rw",},
            },
        ),
    ],
)
def test_docker_agent_parse_volume_spec_unix(
    monkeypatch,
    runner_token,
    candidate,
    named_volumes,
    container_mount_paths,
    host_spec,
):
    """_parse_volume_spec_unix splits specs into named volumes, mounts and binds."""
    api = MagicMock()
    monkeypatch.setattr(
        "prefect.agent.docker.agent.DockerAgent._get_docker_client",
        MagicMock(return_value=api),
    )
    agent = DockerAgent()
    # The parser returns a 3-tuple mirroring the parametrized expectations.
    (
        actual_named_volumes,
        actual_container_mount_paths,
        actual_host_spec,
    ) = agent._parse_volume_spec_unix(candidate)
    assert actual_named_volumes == named_volumes
    assert actual_container_mount_paths == container_mount_paths
    assert actual_host_spec == host_spec
@pytest.mark.parametrize(
    "candidate,named_volumes,container_mount_paths,host_spec",
    [
        (
            # windows host --> linux container
            ["C:\\some\\path"],
            [],
            ["/c/some/path"],
            {"C:\\some\\path": {"bind": "/c/some/path", "mode": "rw",}},
        ),
        (
            # internal & external paths
            ["C:\\some\\path:/ctr/path"],
            [],
            ["/ctr/path"],
            {"C:\\some\\path": {"bind": "/ctr/path", "mode": "rw",}},
        ),
        (
            # internal & external paths with mode
            ["C:\\some\\path:/ctr/path:ro"],
            [],
            ["/ctr/path"],
            {"C:\\some\\path": {"bind": "/ctr/path", "mode": "ro",}},
        ),
        (
            # named volume
            ["some-name:/ctr/path"],
            ["some-name"],
            ["/ctr/path"],
            {},
        ),
        (
            # multiple volumes
            [
                "some-name:/ctr/path3",
                "C:\\some\\path:/ctr/path1",
                "D:\\another\\path:/ctr/path2:ro",
            ],
            ["some-name"],
            ["/ctr/path3", "/ctr/path1", "/ctr/path2"],
            {
                "D:\\another\\path": {"bind": "/ctr/path2", "mode": "ro"},
                "C:\\some\\path": {"bind": "/ctr/path1", "mode": "rw",},
            },
        ),
    ],
)
def test_docker_agent_parse_volume_spec_win(
    monkeypatch,
    runner_token,
    candidate,
    named_volumes,
    container_mount_paths,
    host_spec,
):
    """_parse_volume_spec_win32 maps Windows drive paths to container binds."""
    api = MagicMock()
    monkeypatch.setattr(
        "prefect.agent.docker.agent.DockerAgent._get_docker_client",
        MagicMock(return_value=api),
    )
    agent = DockerAgent()
    # Same 3-tuple contract as the unix parser.
    (
        actual_named_volumes,
        actual_container_mount_paths,
        actual_host_spec,
    ) = agent._parse_volume_spec_win32(candidate)
    assert actual_named_volumes == named_volumes
    assert actual_container_mount_paths == container_mount_paths
    assert actual_host_spec == host_spec
@pytest.mark.parametrize(
    "candidate,exception_type",
    [
        # named volumes cannot be read only
        ("some-name:/ctr/path:ro", ValueError),
        # dont attempt to parse too many fields
        ("/some/path:/ctr/path:rw:something-else", ValueError),
    ],
)
def test_docker_agent_parse_volume_spec_raises_on_invalid_spec(
    monkeypatch, runner_token, candidate, exception_type,
):
    """Malformed volume specs raise rather than being silently misparsed."""
    api = MagicMock()
    monkeypatch.setattr(
        "prefect.agent.docker.agent.DockerAgent._get_docker_client",
        MagicMock(return_value=api),
    )
    agent = DockerAgent()
    with pytest.raises(exception_type):
        agent._parse_volume_spec([candidate])
def test_docker_agent_start_max_polls(monkeypatch, runner_token):
    """start() with max_polls=1 runs one poll cycle (process + heartbeat)."""
    api = MagicMock()
    monkeypatch.setattr(
        "prefect.agent.docker.agent.DockerAgent._get_docker_client",
        MagicMock(return_value=api),
    )
    # Stub out the full agent loop machinery so start() returns quickly.
    on_shutdown = MagicMock()
    monkeypatch.setattr(
        "prefect.agent.docker.agent.DockerAgent.on_shutdown", on_shutdown
    )
    agent_process = MagicMock()
    monkeypatch.setattr("prefect.agent.agent.Agent.agent_process", agent_process)
    agent_connect = MagicMock(return_value="id")
    monkeypatch.setattr("prefect.agent.agent.Agent.agent_connect", agent_connect)
    heartbeat = MagicMock()
    monkeypatch.setattr("prefect.agent.docker.agent.DockerAgent.heartbeat", heartbeat)
    agent = DockerAgent(max_polls=1)
    agent.start()
    assert agent_process.called
    assert heartbeat.called
def test_docker_agent_start_max_polls_count(monkeypatch, runner_token):
    """start() polls exactly max_polls times, then shuts down once."""
    api = MagicMock()
    monkeypatch.setattr(
        "prefect.agent.docker.agent.DockerAgent._get_docker_client",
        MagicMock(return_value=api),
    )
    # Stub out the full agent loop machinery so start() returns quickly.
    on_shutdown = MagicMock()
    monkeypatch.setattr(
        "prefect.agent.docker.agent.DockerAgent.on_shutdown", on_shutdown
    )
    agent_process = MagicMock()
    monkeypatch.setattr("prefect.agent.agent.Agent.agent_process", agent_process)
    agent_connect = MagicMock(return_value="id")
    monkeypatch.setattr("prefect.agent.agent.Agent.agent_connect", agent_connect)
    heartbeat = MagicMock()
    monkeypatch.setattr("prefect.agent.docker.agent.DockerAgent.heartbeat", heartbeat)
    agent = DockerAgent(max_polls=2)
    agent.start()
    assert on_shutdown.call_count == 1
    assert agent_process.call_count == 2
    assert heartbeat.call_count == 2
def test_docker_agent_start_max_polls_zero(monkeypatch, runner_token):
    """start() with max_polls=0 does no polling at all but still shuts down."""
    api = MagicMock()
    monkeypatch.setattr(
        "prefect.agent.docker.agent.DockerAgent._get_docker_client",
        MagicMock(return_value=api),
    )
    # Stub out the full agent loop machinery so start() returns quickly.
    on_shutdown = MagicMock()
    monkeypatch.setattr(
        "prefect.agent.docker.agent.DockerAgent.on_shutdown", on_shutdown
    )
    agent_process = MagicMock()
    monkeypatch.setattr("prefect.agent.agent.Agent.agent_process", agent_process)
    agent_connect = MagicMock(return_value="id")
    monkeypatch.setattr("prefect.agent.agent.Agent.agent_connect", agent_connect)
    heartbeat = MagicMock()
    monkeypatch.setattr("prefect.agent.docker.agent.DockerAgent.heartbeat", heartbeat)
    agent = DockerAgent(max_polls=0)
    agent.start()
    assert on_shutdown.call_count == 1
    assert agent_process.call_count == 0
    assert heartbeat.call_count == 0
def test_docker_agent_network(monkeypatch, runner_token):
    """A configured network is stored and forwarded to create_container."""
    api = MagicMock()
    api.ping.return_value = True
    api.create_container.return_value = {"Id": "container_id"}
    api.create_networking_config.return_value = {"test-network": "config"}
    monkeypatch.setattr(
        "prefect.agent.docker.agent.DockerAgent._get_docker_client",
        MagicMock(return_value=api),
    )
    agent = DockerAgent(network="test-network")
    agent.deploy_flow(
        flow_run=GraphQLResult(
            {
                "flow": GraphQLResult(
                    {
                        "id": "foo",
                        "storage": Docker(
                            registry_url="test", image_name="name", image_tag="tag"
                        ).serialize(),
                    }
                ),
                "id": "id",
                "name": "name",
            }
        )
    )
    assert agent.network == "test-network"
    args, kwargs = api.create_container.call_args
    # The networking config built from the network name reaches the container.
    assert kwargs["networking_config"] == {"test-network": "config"}
def test_docker_agent_deploy_with_interface_check_linux(
    monkeypatch, runner_token, linux_platform
):
    """On Linux, deploy_flow looks up the docker interface IP by default."""
    api = MagicMock()
    api.ping.return_value = True
    api.create_container.return_value = {"Id": "container_id"}
    monkeypatch.setattr(
        "prefect.agent.docker.agent.DockerAgent._get_docker_client",
        MagicMock(return_value=api),
    )
    get_ip = MagicMock()
    monkeypatch.setattr("prefect.agent.docker.agent.get_docker_ip", get_ip)
    agent = DockerAgent()
    agent.deploy_flow(
        flow_run=GraphQLResult(
            {
                "flow": GraphQLResult(
                    {
                        "id": "foo",
                        "storage": Docker(
                            registry_url="", image_name="name", image_tag="tag"
                        ).serialize(),
                    }
                ),
                "id": "id",
                "name": "name",
            }
        )
    )
    assert get_ip.called
def test_docker_agent_deploy_with_no_interface_check_linux(
    monkeypatch, runner_token, linux_platform
):
    """docker_interface=False suppresses the docker interface IP lookup on Linux."""
    api = MagicMock()
    api.ping.return_value = True
    api.create_container.return_value = {"Id": "container_id"}
    monkeypatch.setattr(
        "prefect.agent.docker.agent.DockerAgent._get_docker_client",
        MagicMock(return_value=api),
    )
    get_ip = MagicMock()
    monkeypatch.setattr("prefect.agent.docker.agent.get_docker_ip", get_ip)
    agent = DockerAgent(docker_interface=False)
    agent.deploy_flow(
        flow_run=GraphQLResult(
            {
                "flow": GraphQLResult(
                    {
                        "id": "foo",
                        "storage": Docker(
                            registry_url="", image_name="name", image_tag="tag"
                        ).serialize(),
                    }
                ),
                "id": "id",
                "name": "name",
            }
        )
    )
    assert not get_ip.called
| 29.759872
| 98
| 0.602008
| 2,916
| 27,885
| 5.47668
| 0.077846
| 0.073137
| 0.078272
| 0.093926
| 0.854101
| 0.82129
| 0.798873
| 0.754101
| 0.73757
| 0.733125
| 0
| 0.00272
| 0.274736
| 27,885
| 936
| 99
| 29.791667
| 0.786936
| 0.021374
| 0
| 0.642202
| 0
| 0
| 0.215438
| 0.141217
| 0
| 0
| 0
| 0
| 0.102228
| 1
| 0.04325
| false
| 0
| 0.013106
| 0
| 0.056356
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
13cd7c4da098facf1a6158a62737e976e427b9d8
| 256,588
|
py
|
Python
|
test/test_autopep8.py
|
bryanwills/autopep8
|
577774fa57619ea87682c0999d9e91cd1cdb7425
|
[
"MIT"
] | null | null | null |
test/test_autopep8.py
|
bryanwills/autopep8
|
577774fa57619ea87682c0999d9e91cd1cdb7425
|
[
"MIT"
] | null | null | null |
test/test_autopep8.py
|
bryanwills/autopep8
|
577774fa57619ea87682c0999d9e91cd1cdb7425
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
"""Test suite for autopep8.
Unit tests go in "UnitTests". System tests go in "SystemTests".
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import re
import sys
import time
import contextlib
import io
import shutil
import stat
from subprocess import Popen, PIPE
from tempfile import mkstemp, mkdtemp
import tokenize
import unittest
import warnings
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
ROOT_DIR = os.path.split(os.path.abspath(os.path.dirname(__file__)))[0]
sys.path.insert(0, ROOT_DIR)
import autopep8 # NOQA: E402
from autopep8 import get_module_imports_on_top_of_file # NOQA: E402
FAKE_CONFIGURATION = os.path.join(ROOT_DIR, 'test', 'fake_configuration')
FAKE_PYCODESTYLE_CONFIGURATION = os.path.join(
ROOT_DIR, 'test', 'fake_pycodestyle_configuration'
)
# Command prefix used by the system tests to invoke autopep8 as a subprocess.
if 'AUTOPEP8_COVERAGE' in os.environ and int(os.environ['AUTOPEP8_COVERAGE']):
    # Coverage mode: run autopep8.py under coverage.py so subprocess
    # invocations from the tests are measured too.
    AUTOPEP8_CMD_TUPLE = ('coverage', 'run', '--branch', '--parallel',
                          '--omit=*/site-packages/*',
                          os.path.join(ROOT_DIR, 'autopep8.py'),)
else:
    # We need to specify the executable to make sure the correct Python
    # interpreter gets used.
    AUTOPEP8_CMD_TUPLE = (sys.executable,
                          os.path.join(ROOT_DIR,
                                       'autopep8.py'),)  # pragma: no cover
class UnitTests(unittest.TestCase):
maxDiff = None
def test_compile_value_error(self):
source = '"\\xhh" \\'
self.assertFalse(autopep8.check_syntax(source))
def test_find_newline_only_cr(self):
source = ['print 1\r', 'print 2\r', 'print3\r']
self.assertEqual(autopep8.CR, autopep8.find_newline(source))
def test_find_newline_only_lf(self):
source = ['print 1\n', 'print 2\n', 'print3\n']
self.assertEqual(autopep8.LF, autopep8.find_newline(source))
def test_find_newline_only_crlf(self):
source = ['print 1\r\n', 'print 2\r\n', 'print3\r\n']
self.assertEqual(autopep8.CRLF, autopep8.find_newline(source))
def test_find_newline_cr1_and_lf2(self):
source = ['print 1\n', 'print 2\r', 'print3\n']
self.assertEqual(autopep8.LF, autopep8.find_newline(source))
def test_find_newline_cr1_and_crlf2(self):
source = ['print 1\r\n', 'print 2\r', 'print3\r\n']
self.assertEqual(autopep8.CRLF, autopep8.find_newline(source))
def test_find_newline_should_default_to_lf(self):
self.assertEqual(autopep8.LF, autopep8.find_newline([]))
self.assertEqual(autopep8.LF, autopep8.find_newline(['', '']))
def test_detect_encoding(self):
self.assertEqual(
'utf-8',
autopep8.detect_encoding(
os.path.join(ROOT_DIR, 'setup.py')))
def test_detect_encoding_with_cookie(self):
self.assertEqual(
'iso-8859-1',
autopep8.detect_encoding(
os.path.join(ROOT_DIR, 'test', 'iso_8859_1.py')))
def test_readlines_from_file_with_bad_encoding(self):
"""Bad encoding should not cause an exception."""
self.assertEqual(
['# -*- coding: zlatin-1 -*-\n'],
autopep8.readlines_from_file(
os.path.join(ROOT_DIR, 'test', 'bad_encoding.py')))
def test_readlines_from_file_with_bad_encoding2(self):
"""Bad encoding should not cause an exception."""
# This causes a warning on Python 3.
with warnings.catch_warnings(record=True):
self.assertTrue(autopep8.readlines_from_file(
os.path.join(ROOT_DIR, 'test', 'bad_encoding2.py')))
def test_fix_whitespace(self):
self.assertEqual(
'a b',
autopep8.fix_whitespace('a b', offset=1, replacement=' '))
def test_fix_whitespace_with_tabs(self):
self.assertEqual(
'a b',
autopep8.fix_whitespace('a\t \t b', offset=1, replacement=' '))
def test_multiline_string_lines(self):
self.assertEqual(
{2},
autopep8.multiline_string_lines(
"""\
'''
'''
"""))
def test_multiline_string_lines_with_many(self):
self.assertEqual(
{2, 7, 10, 11, 12},
autopep8.multiline_string_lines(
"""\
'''
'''
''''''
''''''
''''''
'''
'''
'''
'''
"""))
def test_multiline_string_should_not_report_single_line(self):
self.assertEqual(
set(),
autopep8.multiline_string_lines(
"""\
'''abc'''
"""))
def test_multiline_string_should_not_report_docstrings(self):
self.assertEqual(
{5},
autopep8.multiline_string_lines(
"""\
def foo():
'''Foo.
Bar.'''
hello = '''
'''
"""))
def test_supported_fixes(self):
self.assertIn('E121', [f[0] for f in autopep8.supported_fixes()])
def test_shorten_comment(self):
self.assertEqual('# ' + '=' * 72 + '\n',
autopep8.shorten_comment('# ' + '=' * 100 + '\n',
max_line_length=79))
def test_shorten_comment_should_not_split_numbers(self):
line = '# ' + '0' * 100 + '\n'
self.assertEqual(line,
autopep8.shorten_comment(line,
max_line_length=79))
def test_shorten_comment_should_not_split_words(self):
line = '# ' + 'a' * 100 + '\n'
self.assertEqual(line,
autopep8.shorten_comment(line,
max_line_length=79))
def test_shorten_comment_should_not_split_urls(self):
line = '# http://foo.bar/' + 'abc-' * 100 + '\n'
self.assertEqual(line,
autopep8.shorten_comment(line,
max_line_length=79))
def test_shorten_comment_should_not_modify_special_comments(self):
line = '#!/bin/blah ' + ' x' * 90 + '\n'
self.assertEqual(line,
autopep8.shorten_comment(line,
max_line_length=79))
def test_format_block_comments(self):
self.assertEqual(
'# abc',
autopep8.fix_e265('#abc'))
self.assertEqual(
'# abc',
autopep8.fix_e265('####abc'))
self.assertEqual(
'# abc',
autopep8.fix_e265('## # ##abc'))
self.assertEqual(
'# abc "# noqa"',
autopep8.fix_e265('# abc "# noqa"'))
self.assertEqual(
'# *abc',
autopep8.fix_e265('#*abc'))
def test_format_block_comments_should_leave_outline_alone(self):
line = """\
###################################################################
## Some people like these crazy things. So leave them alone. ##
###################################################################
"""
self.assertEqual(line, autopep8.fix_e265(line))
line = """\
#################################################################
# Some people like these crazy things. So leave them alone. #
#################################################################
"""
self.assertEqual(line, autopep8.fix_e265(line))
def test_format_block_comments_with_multiple_lines(self):
self.assertEqual(
"""\
# abc
# blah blah
# four space indentation
''' #do not modify strings
#do not modify strings
#do not modify strings
#do not modify strings'''
#
""",
autopep8.fix_e265("""\
# abc
#blah blah
#four space indentation
''' #do not modify strings
#do not modify strings
#do not modify strings
#do not modify strings'''
#
"""))
def test_format_block_comments_should_not_corrupt_special_comments(self):
self.assertEqual(
'#: abc',
autopep8.fix_e265('#: abc'))
self.assertEqual(
'#!/bin/bash\n',
autopep8.fix_e265('#!/bin/bash\n'))
def test_format_block_comments_should_only_touch_real_comments(self):
commented_out_code = '#x = 1'
self.assertEqual(
commented_out_code,
autopep8.fix_e265(commented_out_code))
def test_fix_file(self):
self.assertIn(
'import ',
autopep8.fix_file(
filename=os.path.join(ROOT_DIR, 'test', 'example.py')))
def test_fix_file_with_diff(self):
filename = os.path.join(ROOT_DIR, 'test', 'example.py')
self.assertIn(
'@@',
autopep8.fix_file(
filename=filename,
options=autopep8.parse_args(['--diff', filename])))
def test_fix_lines(self):
self.assertEqual(
'print(123)\n',
autopep8.fix_lines(['print( 123 )\n'],
options=autopep8.parse_args([''])))
# fix_code is the simple string-in/string-out API.
def test_fix_code(self):
self.assertEqual(
'print(123)\n',
autopep8.fix_code('print( 123 )\n'))
# Empty input stays empty (no spurious trailing newline is added).
def test_fix_code_with_empty_string(self):
self.assertEqual(
'',
autopep8.fix_code(''))
# Multiple statements are fixed and a final newline is appended.
def test_fix_code_with_multiple_lines(self):
self.assertEqual(
'print(123)\nx = 4\n',
autopep8.fix_code('print( 123 )\nx =4'))
def test_fix_code_byte_string(self):
"""This feature is here for friendliness to Python 2."""
# Bytes input is accepted and decoded; the result is still str.
self.assertEqual(
'print(123)\n',
autopep8.fix_code(b'print( 123 )\n'))
# The options dict mirrors the CLI: ignoring 'E' leaves E-class issues in
# place, and aggressive mode enables the has_key() rewrite.
def test_fix_code_with_options(self):
self.assertEqual(
'print(123)\n',
autopep8.fix_code('print( 123 )\n', options={'ignore': ['W']}))
self.assertEqual(
'print( 123 )\n',
autopep8.fix_code('print( 123 )\n', options={'ignore': ['E']}))
self.assertEqual(
'y in x\n',
autopep8.fix_code('x.has_key(y)\n', options={'aggressive': True}))
# Unknown option names and non-list values must raise ValueError.
def test_fix_code_with_bad_options(self):
with self.assertRaises(ValueError):
autopep8.fix_code('print( 123 )\n', options={'ignor': ['W']})
with self.assertRaises(ValueError):
autopep8.fix_code('print( 123 )\n', options={'ignore': 'W'})
# normalize_line_endings rewrites every line to use the given terminator.
def test_normalize_line_endings(self):
self.assertEqual(
['abc\n', 'def\n', '123\n', 'hello\n', 'world\n'],
autopep8.normalize_line_endings(
['abc\n', 'def\n', '123\n', 'hello\r\n', 'world\r'],
'\n'))
# Same, but normalizing toward CRLF.
def test_normalize_line_endings_with_crlf(self):
self.assertEqual(
['abc\r\n', 'def\r\n', '123\r\n', 'hello\r\n', 'world\r\n'],
autopep8.normalize_line_endings(
['abc\n', 'def\r\n', '123\r\n', 'hello\r\n', 'world\r'],
'\r\n'))
# normalize_multiline wraps fragments so they parse as standalone code:
# headers get 'pass', bare returns/decorators get a dummy 'def _()'.
def test_normalize_multiline(self):
self.assertEqual('def foo(): pass',
autopep8.normalize_multiline('def foo():'))
self.assertEqual('def _(): return 1',
autopep8.normalize_multiline('return 1'))
self.assertEqual('@decorator\ndef _(): pass',
autopep8.normalize_multiline('@decorator\n'))
self.assertEqual('class A: pass',
autopep8.normalize_multiline('class A:'))
# code_match: prefix-based select/ignore matching; ignore wins over select.
def test_code_match(self):
self.assertTrue(autopep8.code_match('E2', select=['E2', 'E3'],
ignore=[]))
self.assertTrue(autopep8.code_match('E26', select=['E2', 'E3'],
ignore=[]))
self.assertFalse(autopep8.code_match('E26', select=[], ignore=['E']))
self.assertFalse(autopep8.code_match('E2', select=['E2', 'E3'],
ignore=['E2']))
self.assertFalse(autopep8.code_match('E26', select=['W'], ignore=['']))
self.assertFalse(autopep8.code_match('E26', select=['W'],
ignore=['E1']))
# split_at_offsets cuts a string at the given character offsets.
def test_split_at_offsets(self):
self.assertEqual([''], autopep8.split_at_offsets('', [0]))
self.assertEqual(['1234'], autopep8.split_at_offsets('1234', [0]))
self.assertEqual(['1', '234'], autopep8.split_at_offsets('1234', [1]))
self.assertEqual(['12', '34'], autopep8.split_at_offsets('1234', [2]))
self.assertEqual(['12', '3', '4'],
autopep8.split_at_offsets('1234', [2, 3]))
# Offsets given out of order must still produce in-order splits.
def test_split_at_offsets_with_out_of_order(self):
self.assertEqual(['12', '3', '4'],
autopep8.split_at_offsets('1234', [3, 2]))
# fix_2to3 applies lib2to3-based fixes: except syntax, while 1, maxint.
def test_fix_2to3(self):
self.assertEqual(
'try: pass\nexcept ValueError as e: pass\n',
autopep8.fix_2to3('try: pass\nexcept ValueError, e: pass\n'))
self.assertEqual(
'while True: pass\n',
autopep8.fix_2to3('while 1: pass\n'))
self.assertEqual(
"""\
import sys
sys.maxsize
""",
autopep8.fix_2to3("""\
import sys
sys.maxint
"""))
# select/ignore narrow which 2to3 fixes run (E721 maps to the type
# comparison fixer; 'W' / 'E999' / ignoring E721 all leave input alone).
def test_fix_2to3_subset(self):
line = 'type(res) == type(42)\n'
fixed = 'isinstance(res, type(42))\n'
self.assertEqual(fixed, autopep8.fix_2to3(line))
self.assertEqual(fixed, autopep8.fix_2to3(line, select=['E721']))
self.assertEqual(fixed, autopep8.fix_2to3(line, select=['E7']))
self.assertEqual(line, autopep8.fix_2to3(line, select=['W']))
self.assertEqual(line, autopep8.fix_2to3(line, select=['E999']))
self.assertEqual(line, autopep8.fix_2to3(line, ignore=['E721']))
# is_python_file: accepts .py files and python shebangs; rejects
# near-miss shebangs ('pythonic', '###!'), os.devnull and /bin/bash.
def test_is_python_file(self):
self.assertTrue(autopep8.is_python_file(
os.path.join(ROOT_DIR, 'autopep8.py')))
with temporary_file_context('#!/usr/bin/env python') as filename:
self.assertTrue(autopep8.is_python_file(filename))
with temporary_file_context('#!/usr/bin/python') as filename:
self.assertTrue(autopep8.is_python_file(filename))
with temporary_file_context('#!/usr/bin/python3') as filename:
self.assertTrue(autopep8.is_python_file(filename))
with temporary_file_context('#!/usr/bin/pythonic') as filename:
self.assertFalse(autopep8.is_python_file(filename))
with temporary_file_context('###!/usr/bin/python') as filename:
self.assertFalse(autopep8.is_python_file(filename))
self.assertFalse(autopep8.is_python_file(os.devnull))
self.assertFalse(autopep8.is_python_file('/bin/bash'))
# match_file: hidden files (leading '.') and non-Python files are skipped.
def test_match_file(self):
with temporary_file_context('', suffix='.py', prefix='.') as filename:
self.assertFalse(autopep8.match_file(filename, exclude=[]),
msg=filename)
self.assertFalse(autopep8.match_file(os.devnull, exclude=[]))
with temporary_file_context('', suffix='.py', prefix='') as filename:
self.assertTrue(autopep8.match_file(filename, exclude=[]),
msg=filename)
# find_files must honor --exclude recursively: files under the excluded
# directory (and its subdirectories) are not yielded.
def test_find_files(self):
temp_directory = mkdtemp()
try:
target = os.path.join(temp_directory, 'dir')
os.mkdir(target)
with open(os.path.join(target, 'a.py'), 'w'):
pass
exclude = os.path.join(target, 'ex')
os.mkdir(exclude)
with open(os.path.join(exclude, 'b.py'), 'w'):
pass
sub = os.path.join(exclude, 'sub')
os.mkdir(sub)
with open(os.path.join(sub, 'c.py'), 'w'):
pass
# FIXME: Avoid changing directory. This may interfere with parallel
# test runs.
cwd = os.getcwd()
os.chdir(temp_directory)
try:
files = list(autopep8.find_files(
['dir'], True, [os.path.join('dir', 'ex')]))
finally:
os.chdir(cwd)
file_names = [os.path.basename(f) for f in files]
self.assertIn('a.py', file_names)
self.assertNotIn('b.py', file_names)
self.assertNotIn('c.py', file_names)
finally:
shutil.rmtree(temp_directory)
# line_shortening_rank orders candidate reformattings: higher rank is worse.
# Breaking before an operator should rank better than breaking after it, and
# a comprehension broken before 'for' should beat one broken inside a call.
def test_line_shortening_rank(self):
self.assertGreater(
autopep8.line_shortening_rank('(1\n+1)\n',
indent_word=' ',
max_line_length=79),
autopep8.line_shortening_rank('(1+\n1)\n',
indent_word=' ',
max_line_length=79))
self.assertGreaterEqual(
autopep8.line_shortening_rank('(1+\n1)\n',
indent_word=' ',
max_line_length=79),
autopep8.line_shortening_rank('(1+1)\n',
indent_word=' ',
max_line_length=79))
# Do not crash.
autopep8.line_shortening_rank('\n',
indent_word=' ',
max_line_length=79)
self.assertGreater(
autopep8.line_shortening_rank('[foo(\nx) for x in y]\n',
indent_word=' ',
max_line_length=79),
autopep8.line_shortening_rank('[foo(x)\nfor x in y]\n',
indent_word=' ',
max_line_length=79))
# extract_code_from_function maps a 'fix_<code>' function name to its pep8
# code; anything not matching 'fix_' + alphanumeric code yields None.
def test_extract_code_from_function(self):
def fix_e123():
pass # pragma: no cover
self.assertEqual('e123', autopep8.extract_code_from_function(fix_e123))
def foo():
pass # pragma: no cover
self.assertEqual(None, autopep8.extract_code_from_function(foo))
def fix_foo():
pass # pragma: no cover
self.assertEqual(None, autopep8.extract_code_from_function(fix_foo))
def e123():
pass # pragma: no cover
self.assertEqual(None, autopep8.extract_code_from_function(e123))
def fix_():
pass # pragma: no cover
self.assertEqual(None, autopep8.extract_code_from_function(fix_))
# Reindenter.run() normalizes indentation to 4 spaces by default.
def test_reindenter(self):
reindenter = autopep8.Reindenter('if True:\n pass\n')
self.assertEqual('if True:\n pass\n',
reindenter.run())
# run(3) reindents with a non-standard 3-space unit.
def test_reindenter_with_non_standard_indent_size(self):
reindenter = autopep8.Reindenter('if True:\n pass\n')
self.assertEqual('if True:\n pass\n',
reindenter.run(3))
# Already-correct input must round-trip unchanged.
def test_reindenter_with_good_input(self):
lines = 'if True:\n pass\n'
reindenter = autopep8.Reindenter(lines)
self.assertEqual(lines,
reindenter.run())
# A stray indented comment before top-level code is preserved as-is.
def test_reindenter_should_leave_stray_comment_alone(self):
lines = ' #\nif True:\n pass\n'
reindenter = autopep8.Reindenter(lines)
self.assertEqual(' #\nif True:\n pass\n',
reindenter.run())
# Form-feed between statements must not trigger reindentation. Skipped
# under coverage because the coverage harness mangles the form-feed.
@unittest.skipIf('AUTOPEP8_COVERAGE' in os.environ, 'exists form-feed')
def test_reindenter_not_affect_with_formfeed(self):
lines = """print('hello')
print('python')
"""
reindenter = autopep8.Reindenter(lines)
self.assertEqual(lines,
reindenter.run())
# The fix_eNNN methods are fed pep8 results whose line/column do not match
# a fixable pattern; each must fail gracefully ([] or None), never raise.
def test_fix_e225_avoid_failure(self):
fix_pep8 = autopep8.FixPEP8(filename='',
options=autopep8.parse_args(['']),
contents=' 1\n')
self.assertEqual(
[],
fix_pep8.fix_e225({'line': 1,
'column': 5}))
# E271 on a line with no redundant whitespace is a no-op.
def test_fix_e271_ignore_redundant(self):
fix_pep8 = autopep8.FixPEP8(filename='',
options=autopep8.parse_args(['']),
contents='x = 1\n')
self.assertEqual(
[],
fix_pep8.fix_e271({'line': 1,
'column': 2}))
# E401 fix only applies to import lines.
def test_fix_e401_avoid_non_import(self):
fix_pep8 = autopep8.FixPEP8(filename='',
options=autopep8.parse_args(['']),
contents=' 1\n')
self.assertEqual(
[],
fix_pep8.fix_e401({'line': 1,
'column': 5}))
# E711: None-comparison fix; returns None for an unexpected token at the
# reported column and [] for an out-of-range column or '<>' operator.
def test_fix_e711_avoid_failure(self):
fix_pep8 = autopep8.FixPEP8(filename='',
options=autopep8.parse_args(['']),
contents='None == x\n')
self.assertEqual(
None,
fix_pep8.fix_e711({'line': 1,
'column': 6}))
self.assertEqual(
[],
fix_pep8.fix_e711({'line': 1,
'column': 700}))
fix_pep8 = autopep8.FixPEP8(filename='',
options=autopep8.parse_args(['']),
contents='x <> None\n')
self.assertEqual(
[],
fix_pep8.fix_e711({'line': 1,
'column': 3}))
# E712: True/False-comparison fix; same defensive behavior as E711.
def test_fix_e712_avoid_failure(self):
fix_pep8 = autopep8.FixPEP8(filename='',
options=autopep8.parse_args(['']),
contents='True == x\n')
self.assertEqual(
[],
fix_pep8.fix_e712({'line': 1,
'column': 5}))
self.assertEqual(
[],
fix_pep8.fix_e712({'line': 1,
'column': 700}))
fix_pep8 = autopep8.FixPEP8(filename='',
options=autopep8.parse_args(['']),
contents='x != True\n')
self.assertEqual(
[],
fix_pep8.fix_e712({'line': 1,
'column': 3}))
fix_pep8 = autopep8.FixPEP8(filename='',
options=autopep8.parse_args(['']),
contents='x == False\n')
self.assertEqual(
[],
fix_pep8.fix_e712({'line': 1,
'column': 3}))
# get_diff_text produces a unified diff; the header lines are stripped
# before comparison since their format differs across Python versions.
def test_get_diff_text(self):
# We ignore the first two lines since it differs on Python 2.6.
self.assertEqual(
"""\
-foo
+bar
""",
'\n'.join(autopep8.get_diff_text(['foo\n'],
['bar\n'],
'').split('\n')[3:]))
# A missing trailing newline yields the standard diff marker line.
def test_get_diff_text_without_newline(self):
# We ignore the first two lines since it differs on Python 2.6.
self.assertEqual(
"""\
-foo
\\ No newline at end of file
+foo
""",
'\n'.join(autopep8.get_diff_text(['foo'],
['foo\n'],
'').split('\n')[3:]))
# count_unbalanced_brackets counts net unmatched open/close brackets of any
# kind; brackets inside string quotes still count (last case).
def test_count_unbalanced_brackets(self):
self.assertEqual(
0,
autopep8.count_unbalanced_brackets('()'))
self.assertEqual(
1,
autopep8.count_unbalanced_brackets('('))
self.assertEqual(
2,
autopep8.count_unbalanced_brackets('(['))
self.assertEqual(
1,
autopep8.count_unbalanced_brackets('[])'))
self.assertEqual(
1,
autopep8.count_unbalanced_brackets(
"'','.join(['%s=%s' % (col, col)')"))
# refactor_with_2to3 runs the named lib2to3 fixer.
def test_refactor_with_2to3(self):
self.assertEqual(
'1 in {}\n',
autopep8.refactor_with_2to3('{}.has_key(1)\n', ['has_key']))
# On unparsable input it must return the input unchanged, not raise.
def test_refactor_with_2to3_should_handle_syntax_error_gracefully(self):
self.assertEqual(
'{}.has_key(1\n',
autopep8.refactor_with_2to3('{}.has_key(1\n', ['has_key']))
# commented_out_code_lines flags comments that look like code (lines 1, 4)
# but not prose comments (lines 2, 3).
def test_commented_out_code_lines(self):
self.assertEqual(
[1, 4],
autopep8.commented_out_code_lines("""\
#x = 1
#Hello
#Hello world.
#html_use_index = True
"""))
# Population standard deviation; empty and single-element inputs give 0.
def test_standard_deviation(self):
self.assertAlmostEqual(
2, autopep8.standard_deviation([2, 4, 4, 4, 5, 5, 7, 9]))
self.assertAlmostEqual(0, autopep8.standard_deviation([]))
self.assertAlmostEqual(0, autopep8.standard_deviation([1]))
self.assertAlmostEqual(.5, autopep8.standard_deviation([1, 2]))
# Unknown pep8 codes must sort after the known priority keys.
def test_priority_key_with_non_existent_key(self):
pep8_result = {'id': 'foobar'}
self.assertGreater(autopep8._priority_key(pep8_result), 1)
# decode_filename converts a bytes filename to str.
def test_decode_filename(self):
self.assertEqual('foo.py', autopep8.decode_filename(b'foo.py'))
# code_almost_equal: identical and whitespace-only differences compare
# equal; redistributing tokens across lines does not.
def test_almost_equal(self):
self.assertTrue(autopep8.code_almost_equal(
"""\
[1, 2, 3
4, 5]
""",
"""\
[1, 2, 3
4, 5]
"""))
self.assertTrue(autopep8.code_almost_equal(
"""\
[1,2,3
4,5]
""",
"""\
[1, 2, 3
4,5]
"""))
self.assertFalse(autopep8.code_almost_equal(
"""\
[1, 2, 3
4, 5]
""",
"""\
[1, 2, 3, 4,
5]
"""))
# token_offsets yields (token-type, text, start-offset, end-offset) with
# offsets counted over the whole source, not per line.
def test_token_offsets(self):
text = """\
1
"""
string_io = io.StringIO(text)
self.assertEqual(
[(tokenize.NUMBER, '1', 0, 1),
(tokenize.NEWLINE, '\n', 1, 2),
(tokenize.ENDMARKER, '', 2, 2)],
list(autopep8.token_offsets(
tokenize.generate_tokens(string_io.readline))))
# A multi-line string is a single STRING token spanning its full extent.
def test_token_offsets_with_multiline(self):
text = """\
x = '''
1
2
'''
"""
string_io = io.StringIO(text)
self.assertEqual(
[(tokenize.NAME, 'x', 0, 1),
(tokenize.OP, '=', 2, 3),
(tokenize.STRING, "'''\n1\n2\n'''", 4, 15),
(tokenize.NEWLINE, '\n', 15, 16),
(tokenize.ENDMARKER, '', 16, 16)],
list(autopep8.token_offsets(
tokenize.generate_tokens(string_io.readline))))
# An escaped newline consumes offsets but emits no token of its own.
def test_token_offsets_with_escaped_newline(self):
text = """\
True or \\
False
"""
string_io = io.StringIO(text)
self.assertEqual(
[(tokenize.NAME, 'True', 0, 4),
(tokenize.NAME, 'or', 5, 7),
(tokenize.NAME, 'False', 11, 16),
(tokenize.NEWLINE, '\n', 16, 17),
(tokenize.ENDMARKER, '', 17, 17)],
list(autopep8.token_offsets(
tokenize.generate_tokens(string_io.readline))))
# Every candidate produced by shorten_line must be token-equivalent to the
# input (identical after stripping all whitespace).
def test_shorten_line_candidates_are_valid(self):
for text in [
"""\
[xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx, y] = [1, 2]
""",
"""\
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx, y = [1, 2]
""",
"""\
lambda xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: line_shortening_rank(x,
indent_word,
max_line_length)
""",
]:
indent = autopep8._get_indentation(text)
source = text[len(indent):]
assert source.lstrip() == source
tokens = list(autopep8.generate_tokens(source))
for candidate in autopep8.shorten_line(
tokens, source, indent,
indent_word=' ',
max_line_length=79,
aggressive=10,
experimental=True,
previous_line=''):
self.assertEqual(
re.sub(r'\s', '', text),
re.sub(r'\s', '', candidate))
# Empty input must pass through get_fixed_long_line unchanged.
def test_get_fixed_long_line_empty(self):
line = ''
self.assertEqual(line, autopep8.get_fixed_long_line(line, line, line))
class SystemTests(unittest.TestCase):
maxDiff = None
# E101/W191: tab indentation is converted to spaces.
def test_e101(self):
line = """\
while True:
if True:
\t1
"""
fixed = """\
while True:
if True:
1
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
# Same conversion under --indent-size=1.
def test_e101_with_indent_size_1(self):
line = """\
while True:
if True:
\t1
"""
fixed = """\
while True:
if True:
1
"""
with autopep8_context(line, options=['--indent-size=1']) as result:
self.assertEqual(fixed, result)
# Same conversion under --indent-size=2.
def test_e101_with_indent_size_2(self):
line = """\
while True:
if True:
\t1
"""
fixed = """\
while True:
if True:
1
"""
with autopep8_context(line, options=['--indent-size=2']) as result:
self.assertEqual(fixed, result)
# Same conversion under --indent-size=3.
def test_e101_with_indent_size_3(self):
line = """\
while True:
if True:
\t1
"""
fixed = """\
while True:
if True:
1
"""
with autopep8_context(line, options=['--indent-size=3']) as result:
self.assertEqual(fixed, result)
# A tab inside a string literal is data, not indentation; it must survive.
def test_e101_should_not_expand_non_indentation_tabs(self):
line = """\
while True:
if True:
\t1 == '\t'
"""
fixed = """\
while True:
if True:
1 == '\t'
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
# Tabs inside multiline strings are content and must be preserved.
def test_e101_should_ignore_multiline_strings(self):
line = """\
x = '''
while True:
if True:
\t1
'''
"""
fixed = """\
x = '''
while True:
if True:
\t1
'''
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
# Docstrings are an exception: their indentation tabs are normalized.
def test_e101_should_fix_docstrings(self):
line = """\
class Bar(object):
def foo():
'''
\tdocstring
'''
"""
fixed = """\
class Bar(object):
def foo():
'''
docstring
'''
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e101_when_pep8_mistakes_first_tab_in_string(self):
# pep8 will complain about this even if the tab indentation found
# elsewhere is in a multiline string.
line = """\
x = '''
\tHello.
'''
if True:
123
"""
fixed = """\
x = '''
\tHello.
'''
if True:
123
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
# Aggressive mode fixes '<>' outside the string but leaves tabs inside it.
def test_e101_should_ignore_multiline_strings_complex(self):
line = """\
print(3 <> 4, '''
while True:
if True:
\t1
\t''', 4 <> 5)
"""
fixed = """\
print(3 != 4, '''
while True:
if True:
\t1
\t''', 4 != 5)
"""
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
# Comment lines follow the same tab-to-space conversion as code lines.
def test_e101_with_comments(self):
line = """\
while True: # My inline comment
# with a hanging
# comment.
# Hello
if True:
\t# My comment
\t1
\t# My other comment
"""
fixed = """\
while True: # My inline comment
# with a hanging
# comment.
# Hello
if True:
# My comment
1
# My other comment
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
# Mixed tab+space indentation is ambiguous; the fixer must leave it alone.
def test_e101_skip_if_bad_indentation(self):
line = """\
try:
\t pass
except:
pass
"""
with autopep8_context(line) as result:
self.assertEqual(line, result)
def test_e101_skip_innocuous(self):
# pep8 will complain about this even if the tab indentation found
# elsewhere is in a multiline string. If we don't filter the innocuous
# report properly, the below command will take a long time.
p = Popen(list(AUTOPEP8_CMD_TUPLE) +
['-vvv', '--select=E101', '--diff',
'--global-config={}'.format(os.devnull),
os.path.join(ROOT_DIR, 'test', 'e101_example.py')],
stdout=PIPE, stderr=PIPE)
output = [x.decode('utf-8') for x in p.communicate()][0]
self.assertEqual('', output)
# E111: indentation is not a multiple of four — too-small indent widened.
def test_e111_short(self):
line = 'class Dummy:\n\n def __init__(self):\n pass\n'
fixed = 'class Dummy:\n\n def __init__(self):\n pass\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
# Over-wide indentation is narrowed to the standard unit.
def test_e111_long(self):
line = 'class Dummy:\n\n def __init__(self):\n pass\n'
fixed = 'class Dummy:\n\n def __init__(self):\n pass\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
# Multiple nesting levels are each normalized.
def test_e111_longer(self):
line = """\
while True:
if True:
1
elif True:
2
"""
fixed = """\
while True:
if True:
1
elif True:
2
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
# Comments ride along with the block they belong to.
def test_e111_multiple_levels(self):
line = """\
while True:
if True:
1
# My comment
print('abc')
"""
fixed = """\
while True:
if True:
1
# My comment
print('abc')
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
# Dedent back to the enclosing level is preserved while fixing widths.
def test_e111_with_dedent(self):
line = """\
def foo():
if True:
2
1
"""
fixed = """\
def foo():
if True:
2
1
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
# E111 combined with other fixes (E203 comma spacing, W291 trailing
# whitespace) in the same pass.
def test_e111_with_other_errors(self):
line = """\
def foo():
if True:
(2 , 1)
1
if True:
print('hello')\t
2
"""
fixed = """\
def foo():
if True:
(2, 1)
1
if True:
print('hello')
2
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
# Indentation inside a multiline string is data and must not change.
def test_e111_should_not_modify_string_contents(self):
line = """\
if True:
x = '''
1
'''
"""
fixed = """\
if True:
x = '''
1
'''
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
# E112 (expected an indented block): broken syntax is left untouched.
def test_e112_should_leave_bad_syntax_alone(self):
line = """\
if True:
pass
"""
with autopep8_context(line) as result:
self.assertEqual(line, result)
# E113: unexpectedly indented line is realigned.
def test_e113(self):
line = """\
a = 1
b = 2
"""
fixed = """\
a = 1
b = 2
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
# E113 on otherwise-broken code still applies safely.
def test_e113_bad_syntax(self):
line = """\
pass
"""
fixed = """\
pass
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
# E114: indentation of a comment line is not a multiple of four.
def test_e114(self):
line = """\
# a = 1
"""
fixed = """\
# a = 1
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
# E115: expected an indented block (comment).
def test_e115(self):
line = """\
if True:
# A comment.
pass
"""
fixed = """\
if True:
# A comment.
pass
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
# E116: unexpected indentation for a comment.
def test_e116(self):
line = """\
a = 1
# b = 2
"""
fixed = """\
a = 1
# b = 2
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
# E117: over-indented block is brought back to one indent unit.
def test_e117(self):
line = """\
for a in [1, 2, 3]:
print('hello world')
for b in [1, 2, 3]:
print(a, b)
"""
fixed = """\
for a in [1, 2, 3]:
print('hello world')
for b in [1, 2, 3]:
print(a, b)
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
# Continuation-line reindentation across the whole E12x family; the '#:'
# markers label which sub-code each snippet exercises.
def test_e12_reindent(self):
line = """\
def foo_bar(baz, frop,
fizz, bang): # E128
pass
if True:
x = {
} # E123
#: E121
print "E121", (
"dent")
#: E122
print "E122", (
"dent")
#: E124
print "E124", ("visual",
"indent_two"
)
#: E125
if (row < 0 or self.moduleCount <= row or
col < 0 or self.moduleCount <= col):
raise Exception("%s,%s - %s" % (row, col, self.moduleCount))
#: E126
print "E126", (
"dent")
#: E127
print "E127", ("over-",
"over-indent")
#: E128
print "E128", ("under-",
"under-indent")
"""
fixed = """\
def foo_bar(baz, frop,
fizz, bang): # E128
pass
if True:
x = {
} # E123
#: E121
print "E121", (
"dent")
#: E122
print "E122", (
"dent")
#: E124
print "E124", ("visual",
"indent_two"
)
#: E125
if (row < 0 or self.moduleCount <= row or
col < 0 or self.moduleCount <= col):
raise Exception("%s,%s - %s" % (row, col, self.moduleCount))
#: E126
print "E126", (
"dent")
#: E127
print "E127", ("over-",
"over-indent")
#: E128
print "E128", ("under-",
"under-indent")
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
# Reindent interacts with line-shortening: the long join() argument is
# wrapped before 'for' and realigned.
def test_e12_reindent_with_multiple_fixes(self):
line = """\
sql = 'update %s set %s %s' % (from_table,
','.join(['%s=%s' % (col, col) for col in cols]),
where_clause)
"""
fixed = """\
sql = 'update %s set %s %s' % (from_table,
','.join(['%s=%s' % (col, col)
for col in cols]),
where_clause)
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
# Deeply nested parenthesized condition: continuation indents must be
# normalized without changing the logical structure.
def test_e12_tricky(self):
line = """\
#: E126
if (
x == (
3
) or
x == (
3
) or
y == 4):
pass
"""
fixed = """\
#: E126
if (
x == (
3
) or
x == (
3
) or
y == 4):
pass
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
# A large realistic file body; exercises reindent at scale.
def test_e12_large(self):
line = """\
class BogusController(controller.CementBaseController):
class Meta:
pass
class BogusController2(controller.CementBaseController):
class Meta:
pass
class BogusController3(controller.CementBaseController):
class Meta:
pass
class BogusController4(controller.CementBaseController):
class Meta:
pass
class TestBaseController(controller.CementBaseController):
class Meta:
pass
class TestBaseController2(controller.CementBaseController):
class Meta:
pass
class TestStackedController(controller.CementBaseController):
class Meta:
arguments = [
]
class TestDuplicateController(controller.CementBaseController):
class Meta:
config_defaults = dict(
foo='bar',
)
arguments = [
(['-f2', '--foo2'], dict(action='store'))
]
def my_command(self):
pass
"""
fixed = """\
class BogusController(controller.CementBaseController):
class Meta:
pass
class BogusController2(controller.CementBaseController):
class Meta:
pass
class BogusController3(controller.CementBaseController):
class Meta:
pass
class BogusController4(controller.CementBaseController):
class Meta:
pass
class TestBaseController(controller.CementBaseController):
class Meta:
pass
class TestBaseController2(controller.CementBaseController):
class Meta:
pass
class TestStackedController(controller.CementBaseController):
class Meta:
arguments = [
]
class TestDuplicateController(controller.CementBaseController):
class Meta:
config_defaults = dict(
foo='bar',
)
arguments = [
(['-f2', '--foo2'], dict(action='store'))
]
def my_command(self):
pass
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
# --select=E12 restricted run on code whose other indentation is broken.
def test_e12_with_bad_indentation(self):
line = r"""
def bar():
foo(1,
2)
def baz():
pass
pass
"""
fixed = r"""
def bar():
foo(1,
2)
def baz():
pass
pass
"""
with autopep8_context(line, options=['--select=E12']) as result:
self.assertEqual(fixed, result)
# E121 where the continuation is a multiline string after a backslash.
def test_e121_with_multiline_string(self):
line = """\
testing = \\
'''inputs: d c b a
'''
"""
fixed = """\
testing = \\
'''inputs: d c b a
'''
"""
with autopep8_context(line, options=['--select=E12']) as result:
self.assertEqual(fixed, result)
# E122 falls back to hanging-indent alignment for the nested list.
def test_e122_with_fallback(self):
line = """\
foooo('',
scripts=[''],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
])
"""
fixed = """\
foooo('',
scripts=[''],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
])
"""
with autopep8_context(line, options=[]) as result:
self.assertEqual(fixed, result)
# E123: closing bracket does not match indentation of opening line.
def test_e123(self):
line = """\
if True:
foo = (
)
"""
fixed = """\
if True:
foo = (
)
"""
with autopep8_context(line, options=['--select=E12']) as result:
self.assertEqual(fixed, result)
# E123 where the open bracket follows an escaped newline.
def test_e123_with_escaped_newline(self):
line = r"""
x = \
(
)
"""
fixed = r"""
x = \
(
)
"""
with autopep8_context(line, options=['--select=E12']) as result:
self.assertEqual(fixed, result)
# -aaa (triple aggressive) may aggressively re-wrap the long expression.
def test_e128_with_aaa_option(self):
line = """\
def extractBlocks(self):
addLine = (self.matchMultiple(linesIncludePatterns, line)
and not self.matchMultiple(linesExcludePatterns, line)) or emptyLine
"""
fixed = """\
def extractBlocks(self):
addLine = (
self.matchMultiple(
linesIncludePatterns,
line) and not self.matchMultiple(
linesExcludePatterns,
line)) or emptyLine
"""
with autopep8_context(line, options=['-aaa']) as result:
self.assertEqual(fixed, result)
# E129: visually indented line with same indent as next logical line.
def test_e129(self):
line = """\
if (a and
b in [
'foo',
] or
c):
pass
"""
fixed = """\
if (a and
b in [
'foo',
] or
c):
pass
"""
with autopep8_context(line, options=['--select=E129']) as result:
self.assertEqual(fixed, result)
# E125 must not touch a for-target built from a multiline string.
def test_e125_with_multiline_string(self):
line = """\
for foo in '''
abc
123
'''.strip().split():
print(foo)
"""
with autopep8_context(line, options=['--select=E12']) as result:
self.assertEqual(line, result)
# E125 with a multiline-string default argument; fix is still applied.
def test_e125_with_multiline_string_okay(self):
line = """\
def bar(
a='''a'''):
print(foo)
"""
fixed = """\
def bar(
a='''a'''):
print(foo)
"""
with autopep8_context(line, options=['--select=E12']) as result:
self.assertEqual(fixed, result)
# E126: continuation line over-indented for hanging indent.
def test_e126(self):
line = """\
if True:
posted = models.DateField(
default=datetime.date.today,
help_text="help"
)
"""
fixed = """\
if True:
posted = models.DateField(
default=datetime.date.today,
help_text="help"
)
"""
with autopep8_context(line, options=['--select=E12']) as result:
self.assertEqual(fixed, result)
# The E126 fix must not disturb already-correct chained-call alignment.
def test_e126_should_not_interfere_with_other_fixes(self):
line = """\
self.assertEqual('bottom 1',
SimpleNamedNode.objects.filter(id__gt=1).exclude(
name='bottom 3').filter(
name__in=['bottom 3', 'bottom 1'])[0].name)
"""
fixed = """\
self.assertEqual('bottom 1',
SimpleNamedNode.objects.filter(id__gt=1).exclude(
name='bottom 3').filter(
name__in=['bottom 3', 'bottom 1'])[0].name)
"""
with autopep8_context(line, options=['--select=E12']) as result:
self.assertEqual(fixed, result)
# E127: continuation line over-indented for visual indent.
def test_e127(self):
line = """\
if True:
if True:
chksum = (sum([int(value[i]) for i in xrange(0, 9, 2)]) * 7 -
sum([int(value[i]) for i in xrange(1, 9, 2)])) % 10
"""
fixed = """\
if True:
if True:
chksum = (sum([int(value[i]) for i in xrange(0, 9, 2)]) * 7 -
sum([int(value[i]) for i in xrange(1, 9, 2)])) % 10
"""
with autopep8_context(line, options=['--select=E12']) as result:
self.assertEqual(fixed, result)
# Visual-indent realignment also cleans E202/E211-style spaces here.
def test_e127_align_visual_indent(self):
line = """\
def draw(self):
color = [([0.2, 0.1, 0.3], [0.2, 0.1, 0.3], [0.2, 0.1, 0.3]),
([0.9, 0.3, 0.5], [0.5, 1.0, 0.5], [0.3, 0.3, 0.9]) ][self._p._colored ]
self.draw_background(color)
"""
fixed = """\
def draw(self):
color = [([0.2, 0.1, 0.3], [0.2, 0.1, 0.3], [0.2, 0.1, 0.3]),
([0.9, 0.3, 0.5], [0.5, 1.0, 0.5], [0.3, 0.3, 0.9])][self._p._colored]
self.draw_background(color)
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e127_align_visual_indent_okay(self):
"""This is for code coverage."""
line = """\
want = (have + _leading_space_count(
after[jline - 1]) -
_leading_space_count(lines[jline]))
"""
with autopep8_context(line) as result:
self.assertEqual(line, result)
# E127 where the continuation uses a backslash-continued method chain.
def test_e127_with_backslash(self):
line = r"""
if True:
if True:
self.date = meta.session.query(schedule.Appointment)\
.filter(schedule.Appointment.id ==
appointment_id).one().agenda.endtime
"""
fixed = r"""
if True:
if True:
self.date = meta.session.query(schedule.Appointment)\
.filter(schedule.Appointment.id ==
appointment_id).one().agenda.endtime
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
# Comprehension continuation: bracket followed by a call's parenthesis.
def test_e127_with_bracket_then_parenthesis(self):
line = r"""
if True:
foo = [food(1)
for bar in bars]
"""
fixed = r"""
if True:
foo = [food(1)
for bar in bars]
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
# E12 with an assert continued by backslash; message string is untouched.
def test_e12_with_backslash(self):
line = r"""
if True:
assert reeval == parsed, \
'Repr gives different object:\n %r !=\n %r' % (parsed, reeval)
"""
fixed = r"""
if True:
assert reeval == parsed, \
'Repr gives different object:\n %r !=\n %r' % (parsed, reeval)
"""
with autopep8_context(line, options=['--select=E12']) as result:
self.assertEqual(fixed, result)
# E133 is the --hang-closing counterpart of E123: the closing bracket
# should line up with the last item's indent.
def test_e133(self):
line = """\
if True:
e = [
1, 2
]
"""
fixed = """\
if True:
e = [
1, 2
]
"""
with autopep8_context(line, options=['--hang-closing']) as result:
self.assertEqual(fixed, result)
# Same at top level (no enclosing indentation).
def test_e133_no_indentation_line(self):
line = """\
e = [
1, 2
]
"""
fixed = """\
e = [
1, 2
]
"""
with autopep8_context(line, options=['--hang-closing']) as result:
self.assertEqual(fixed, result)
# Already-conforming code is left unchanged under --hang-closing.
def test_e133_not_effected(self):
line = """\
if True:
e = [
1, 2
]
"""
with autopep8_context(line, options=['--hang-closing']) as result:
self.assertEqual(line, result)
# W191 (tab indentation) is fixed under --aggressive.
def test_w191(self):
line = """\
while True:
\tif True:
\t\t1
"""
fixed = """\
while True:
if True:
1
"""
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
# E131: continuation line unaligned for hanging indent (--select run).
def test_e131_with_select_option(self):
line = 'd = f(\n a="hello"\n "world",\n b=1)\n'
fixed = 'd = f(\n a="hello"\n "world",\n b=1)\n'
with autopep8_context(line, options=['--select=E131']) as result:
self.assertEqual(fixed, result)
# E131 with invalid initial indent still realigns safely.
def test_e131_invalid_indent_with_select_option(self):
line = 'd = (\n "hello"\n "world")\n'
fixed = 'd = (\n "hello"\n "world")\n'
with autopep8_context(line, options=['--select=E131']) as result:
self.assertEqual(fixed, result)
def test_e201(self):
    """E201: whitespace just after an opening bracket is removed."""
    source = '( 1)\n'
    expected = '(1)\n'
    with autopep8_context(source) as result:
        self.assertEqual(expected, result)
# E202: whitespace before a closing bracket, for all bracket kinds.
def test_e202(self):
line = '(1 )\n[2 ]\n{3 }\n'
fixed = '(1)\n[2]\n{3}\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
# E202 where the closing paren follows a multiline string.
def test_e202_multiline(self):
line = """\
('''
a
b
c
''' )
"""
fixed = """\
('''
a
b
c
''')
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
# E202 after an escaped newline inside a string literal.
def test_e202_skip_multiline_with_escaped_newline(self):
line = r"""
('c\
' )
"""
fixed = r"""
('c\
')
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
# E203: whitespace before ':'.
def test_e203_colon(self):
line = '{4 : 3}\n'
fixed = '{4: 3}\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e203_comma(self):
    """E203: whitespace before a comma is removed."""
    with autopep8_context('[1 , 2 , 3]\n') as result:
        self.assertEqual('[1, 2, 3]\n', result)
# E203: whitespace before ';' (selected explicitly).
def test_e203_semicolon(self):
line = "print(a, end=' ') ; nl = 0\n"
fixed = "print(a, end=' '); nl = 0\n"
with autopep8_context(line, options=['--select=E203']) as result:
self.assertEqual(fixed, result)
# E203 where the stray whitespace is a newline before the comma.
def test_e203_with_newline(self):
line = "print(a\n, end=' ')\n"
fixed = "print(a, end=' ')\n"
with autopep8_context(line, options=['--select=E203']) as result:
self.assertEqual(fixed, result)
def test_e211(self):
    """E211: whitespace before an indexing bracket is removed."""
    source = 'd = [1, 2, 3]\nprint d [0]\n'
    expected = 'd = [1, 2, 3]\nprint d[0]\n'
    with autopep8_context(source) as result:
        self.assertEqual(expected, result)
# E221: multiple spaces before operator are collapsed.
def test_e221(self):
line = 'a = 1 + 1\n'
fixed = 'a = 1 + 1\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
# E221 fix must still run on a file containing a huge multiline template;
# the template body (including its tab) is preserved byte-for-byte.
def test_e221_do_not_skip_multiline(self):
line = '''\
def javascript(self):
return u"""
<script type="text/javascript" src="++resource++ptg.shufflegallery/jquery.promptu-menu.js"></script>
<script type="text/javascript">
$(function(){
$('ul.promptu-menu').promptumenu({width: %(width)i, height: %(height)i, rows: %(rows)i, columns: %(columns)i, direction: '%(direction)s', intertia: %(inertia)i, pages: %(pages)i});
\t$('ul.promptu-menu a').click(function(e) {
e.preventDefault();
});
$('ul.promptu-menu a').dblclick(function(e) {
window.location.replace($(this).attr("href"));
});
});
</script>
""" % {
}
'''
fixed = '''\
def javascript(self):
return u"""
<script type="text/javascript" src="++resource++ptg.shufflegallery/jquery.promptu-menu.js"></script>
<script type="text/javascript">
$(function(){
$('ul.promptu-menu').promptumenu({width: %(width)i, height: %(height)i, rows: %(rows)i, columns: %(columns)i, direction: '%(direction)s', intertia: %(inertia)i, pages: %(pages)i});
\t$('ul.promptu-menu a').click(function(e) {
e.preventDefault();
});
$('ul.promptu-menu a').dblclick(function(e) {
window.location.replace($(this).attr("href"));
});
});
</script>
""" % {
}
'''
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e222(self):
# E222: multiple spaces after operator collapse to one.
# NOTE(review): fixture appears identical to `fixed` in this copy —
# extra spaces look collapsed; confirm against upstream.
line = 'a = 1 + 1\n'
fixed = 'a = 1 + 1\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e222_with_multiline(self):
# E222 with a multiline string on the right-hand side: nothing to change.
line = 'a = \"\"\"bar\nbaz\"\"\"\n'
fixed = 'a = \"\"\"bar\nbaz\"\"\"\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e223(self):
# E223: tab before operator is replaced by a single space.
# NOTE(review): the inline comment says the fixture contains a TAB, but
# only spaces are visible in this copy — confirm against upstream.
line = 'a = 1 + 1\n' # include TAB
fixed = 'a = 1 + 1\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e223_double(self):
# E223 with two consecutive tabs before the operator.
# NOTE(review): TAB characters not visible in this copy — confirm upstream.
line = 'a = 1 + 1\n' # include TAB
fixed = 'a = 1 + 1\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e223_with_tab_indentation(self):
# E223 inside tab-indented code: only the operator spacing is fixed; the
# tab indentation is preserved (E1/W191 explicitly ignored).
line = """\
class Foo():
\tdef __init__(self):
\t\tx= 1\t+ 3
"""
fixed = """\
class Foo():
\tdef __init__(self):
\t\tx = 1 + 3
"""
with autopep8_context(line, options=['--ignore=E1,W191']) as result:
self.assertEqual(fixed, result)
def test_e224(self):
# E224: tab after operator is replaced by a single space.
# NOTE(review): TAB not visible in this copy — confirm against upstream.
line = 'a = 11 + 1\n' # include TAB
fixed = 'a = 11 + 1\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e224_double(self):
# E224 with two consecutive tabs after the operator.
# NOTE(review): TAB not visible in this copy — confirm against upstream.
line = 'a = 11 + 1\n' # include TAB
fixed = 'a = 11 + 1\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e224_with_tab_indentation(self):
# E224 inside tab-indented code: tab after '=' becomes a space while the
# indentation tabs survive (E1/W191 ignored).
line = """\
class Foo():
\tdef __init__(self):
\t\tx= \t3
"""
fixed = """\
class Foo():
\tdef __init__(self):
\t\tx = 3
"""
with autopep8_context(line, options=['--ignore=E1,W191']) as result:
self.assertEqual(fixed, result)
def test_e225(self):
# E225: missing whitespace around operator, covering missing-on-both-sides,
# missing-before, and missing-after variants.
line = '1+1\n2 +2\n3+ 3\n'
fixed = '1 + 1\n2 + 2\n3 + 3\n'
with autopep8_context(line, options=['--select=E,W']) as result:
self.assertEqual(fixed, result)
def test_e225_with_indentation_fix(self):
# E225 on '!=' applied to an indented method body.
line = """\
class Foo(object):
def bar(self):
return self.elephant!='test'
"""
fixed = """\
class Foo(object):
def bar(self):
return self.elephant != 'test'
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e226(self):
# E226: whitespace around arithmetic '*' — fixed when E22 is selected.
line = '1*1\n2*2\n3*3\n'
fixed = '1 * 1\n2 * 2\n3 * 3\n'
with autopep8_context(line, options=['--select=E22']) as result:
self.assertEqual(fixed, result)
def test_e227(self):
# E227: whitespace around bitwise operator '&'.
line = '1&1\n2&2\n3&3\n'
fixed = '1 & 1\n2 & 2\n3 & 3\n'
with autopep8_context(line, options=['--select=E22']) as result:
self.assertEqual(fixed, result)
def test_e228(self):
# E228: whitespace around modulo operator '%'.
line = '1%1\n2%2\n3%3\n'
fixed = '1 % 1\n2 % 2\n3 % 3\n'
with autopep8_context(line, options=['--select=E22']) as result:
self.assertEqual(fixed, result)
def test_e231(self):
# E231: missing whitespace after ','.
line = '[1,2,3]\n'
fixed = '[1, 2, 3]\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e231_with_many_commas(self):
# E231 stress test: 200 commas on one line; built by stripping the spaces
# out of the expected output so the two are mechanically consistent.
fixed = str(list(range(200))) + '\n'
line = re.sub(', ', ',', fixed)
with autopep8_context(line, options=['--select=E231']) as result:
self.assertEqual(fixed, result)
def test_e231_with_colon_after_comma(self):
"""ws_comma fixer ignores this case."""
# E231 must still insert the space before the slice ':' that follows a comma.
line = 'a[b1,:]\n'
fixed = 'a[b1, :]\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e231_should_only_do_ws_comma_once(self):
"""If we don't check appropriately, we end up doing ws_comma multiple
times and skipping all other fixes."""
# Regression test: the E211-style print fix must still happen alongside
# the comma fixes.
line = """\
print( 1 )
foo[0,:]
bar[zap[0][0]:zig[0][0],:]
"""
fixed = """\
print(1)
foo[0, :]
bar[zap[0][0]:zig[0][0], :]
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e241(self):
# E241 (multiple spaces after comma) is NOT fixed by default ('--select=E').
# NOTE(review): the extra spaces in `line` look collapsed in this copy —
# confirm against upstream.
line = 'l = (1, 2)\n'
fixed = 'l = (1, 2)\n'
with autopep8_context(line, options=['--select=E']) as result:
self.assertEqual(fixed, result)
def test_e241_should_be_enabled_by_aggressive(self):
# E241 is fixed when '--aggressive' is given.
# NOTE(review): the extra spaces in `line` look collapsed in this copy.
line = 'l = (1, 2)\n'
fixed = 'l = (1, 2)\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_e241_double(self):
# E241 with a double run of spaces; still untouched under '--select=E'.
# NOTE(review): the extra spaces in `line` look collapsed in this copy.
line = 'l = (1, 2)\n'
fixed = 'l = (1, 2)\n'
with autopep8_context(line, options=['--select=E']) as result:
self.assertEqual(fixed, result)
def test_e242(self):
# E242: tab after comma is replaced with a single space.
line = 'l = (1,\t2)\n'
fixed = 'l = (1, 2)\n'
with autopep8_context(line, options=['--select=E']) as result:
self.assertEqual(fixed, result)
def test_e242_double(self):
# E242 with two consecutive tabs after the comma.
line = 'l = (1,\t\t2)\n'
fixed = 'l = (1, 2)\n'
with autopep8_context(line, options=['--select=E']) as result:
self.assertEqual(fixed, result)
def test_e251(self):
# E251: no spaces around '=' for a default parameter value.
line = 'def a(arg = 1):\n print arg\n'
fixed = 'def a(arg=1):\n print arg\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e251_with_escaped_newline(self):
# E251 where the default value continues after a backslash-newline: the
# continuation is collapsed so 'arg=1' ends up on one line.
line = '1\n\n\ndef a(arg=\\\n1):\n print(arg)\n'
fixed = '1\n\n\ndef a(arg=1):\n print(arg)\n'
with autopep8_context(line, options=['--select=E251']) as result:
self.assertEqual(fixed, result)
def test_e251_with_calling(self):
# E251 applies to keyword arguments at call sites too.
line = 'foo(bar= True)\n'
fixed = 'foo(bar=True)\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e251_with_argument_on_next_line(self):
# E251 where '=' starts the next physical line: lines are joined.
line = 'foo(bar\n=None)\n'
fixed = 'foo(bar=None)\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e252(self):
# E252: annotated defaults need spaces around '=' — covers missing-both,
# missing-after, and missing-before variants.
line = 'def a(arg1: int=1, arg2: int =1, arg3: int= 1):\n print arg\n'
fixed = 'def a(arg1: int = 1, arg2: int = 1, arg3: int = 1):\n print arg\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e252_with_argument_on_next_line(self):
# E252 where '=' starts the next line: only the space after '=' is added;
# the line break itself is left in place (only E252 selected).
line = 'def a(arg: int\n=1):\n print arg\n'
fixed = 'def a(arg: int\n= 1):\n print arg\n'
with autopep8_context(line, options=['--select=E252']) as result:
self.assertEqual(fixed, result)
def test_e252_with_escaped_newline(self):
# E252 across a backslash continuation: space added after '=', backslash kept.
line = 'def a(arg: int\\\n=1):\n print arg\n'
fixed = 'def a(arg: int\\\n= 1):\n print arg\n'
with autopep8_context(line, options=['--select=E252']) as result:
self.assertEqual(fixed, result)
def test_e261(self):
# E261: at least two spaces required before an inline comment.
line = "print 'a b '# comment\n"
fixed = "print 'a b ' # comment\n"
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e261_with_inline_commented_out_code(self):
# E261 with commented-out code after the '#': spacing fixed, comment text kept.
line = '1 # 0 + 0\n'
fixed = '1 # 0 + 0\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e261_with_dictionary(self):
# E261 on a dict literal opener; the continuation line is also re-indented.
line = 'd = {# comment\n1: 2}\n'
fixed = 'd = { # comment\n 1: 2}\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e261_with_dictionary_no_space(self):
# As above, but the comment also lacks the space after '#' (E262 aspect).
line = 'd = {#comment\n1: 2}\n'
fixed = 'd = { # comment\n 1: 2}\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e261_with_comma(self):
# E261 where the comma continues on the next line: output is unchanged
# apart from the (already-correct) comment spacing.
line = '{1: 2 # comment\n , }\n'
fixed = '{1: 2 # comment\n , }\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e262_more_space(self):
# E262: inline comment formatting normalized.
# NOTE(review): `line` and `fixed` look identical in this copy — the
# fixture's extra '#'/spaces may be collapsed; confirm upstream.
line = "print 'a b ' # comment\n"
fixed = "print 'a b ' # comment\n"
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e262_none_space(self):
# E262: inline comment must start with '# ' — space inserted after '#'.
line = "print 'a b ' #comment\n"
fixed = "print 'a b ' # comment\n"
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e262_hash_in_string(self):
# E262: a '#' inside a string literal must not be mistaken for the comment.
line = "print 'a b #string' #comment\n"
fixed = "print 'a b #string' # comment\n"
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e262_hash_in_string_and_multiple_hashes(self):
# E262: only the first '#' outside the string starts the comment; later
# '#' characters are comment text and stay as-is.
line = "print 'a b #string' #comment #comment\n"
fixed = "print 'a b #string' # comment #comment\n"
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e262_more_complex(self):
# E262 fix must not disturb the following line.
line = "print 'a b ' #comment\n123\n"
fixed = "print 'a b ' # comment\n123\n"
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e265(self):
# E265: block comment '##' normalized to a single '# '.
line = "## comment\n123\n"
fixed = "# comment\n123\n"
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e266(self):
# E266: block comment gets a space after '#'.
line = "#1 comment\n123\n"
fixed = "# 1 comment\n123\n"
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e271(self):
# E271: multiple spaces after keyword collapse to one.
# NOTE(review): fixture looks identical to `fixed` in this copy —
# extra spaces appear collapsed; confirm against upstream.
line = 'True and False\n'
fixed = 'True and False\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e271_with_multiline(self):
# E271 across a backslash continuation line.
# NOTE(review): the offending extra spaces are not visible in this copy.
line = 'if True and False \\\n True:\n pass\n'
fixed = 'if True and False \\\n True:\n pass\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e272(self):
# E272: multiple spaces before keyword collapse to one.
# NOTE(review): extra spaces appear collapsed in this copy — confirm upstream.
line = 'True and False\n'
fixed = 'True and False\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e273(self):
# E273: tab after keyword replaced with a space.
line = 'True and\tFalse\n'
fixed = 'True and False\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e274(self):
# E274: tab before keyword replaced with a space.
line = 'True\tand False\n'
fixed = 'True and False\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e306(self):
# E306: expected a blank line before each nested definition.
# NOTE(review): in this copy `line` and `fixed` look identical; presumably
# the original `line` lacks the blank lines between the inner defs —
# confirm against upstream.
line = """
def test_descriptors(self):
class descriptor(object):
def __init__(self, fn):
self.fn = fn
def __get__(self, obj, owner):
if obj is not None:
return self.fn(obj, obj)
else:
return self
def method(self):
return 'method'
"""
fixed = """
def test_descriptors(self):
class descriptor(object):
def __init__(self, fn):
self.fn = fn
def __get__(self, obj, owner):
if obj is not None:
return self.fn(obj, obj)
else:
return self
def method(self):
return 'method'
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e301(self):
# E301: one blank line inserted before the method following a class attribute.
line = 'class k:\n s = 0\n def f():\n print 1\n'
fixed = 'class k:\n s = 0\n\n def f():\n print 1\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e301_extended_with_docstring(self):
# E301 after a class docstring: a blank line is expected before the first
# method. NOTE(review): `line` and `fixed` look identical in this copy —
# presumably `fixed` gains a blank line after the class docstring; confirm
# against upstream.
line = '''\
class Foo(object):
"""Test."""
def foo(self):
"""Test."""
def bar():
pass
'''
fixed = '''\
class Foo(object):
"""Test."""
def foo(self):
"""Test."""
def bar():
pass
'''
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_not_e301_extended_with_comment(self):
# Negative case: a comment between the docstring and the first method
# means no E301 blank line is forced — input passes through unchanged.
line = '''\
class Foo(object):
"""Test."""
# A comment.
def foo(self):
pass
'''
with autopep8_context(line) as result:
self.assertEqual(line, result)
def test_e302(self):
# E302: two blank lines expected between top-level functions.
line = 'def f():\n print 1\n\ndef ff():\n print 2\n'
fixed = 'def f():\n print 1\n\n\ndef ff():\n print 2\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e302_bug(self):
"""Avoid creating bad syntax."""
# Only checks that the output still parses — a backslash continuation plus
# a commented-out def previously tripped the E302 fixer into bad syntax.
line = r"""def repeatable_expr(): return [bracketed_choice, simple_match, rule_ref],\
Optional(repeat_operator)
# def match(): return [simple_match , mixin_rule_match] TODO
def simple_match(): return [str_match, re_match]
"""
self.assertTrue(autopep8.check_syntax(line))
with autopep8_context(line) as result:
self.assertTrue(autopep8.check_syntax(result))
def test_e303(self):
# E303: too many blank lines (3) reduced to 2.
line = '\n\n\n# alpha\n\n1\n'
fixed = '\n\n# alpha\n\n1\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e303_extended(self):
# E303 inside a function body before its docstring.
# NOTE(review): `line` and `fixed` look identical in this copy — the
# original `line` presumably has extra blank lines before the docstring;
# confirm against upstream.
line = '''\
def foo():
"""Document."""
'''
fixed = '''\
def foo():
"""Document."""
'''
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e303_with_e305(self):
# Combined E303 (too many blank lines) and E305 (blank lines after a
# function) in one fixture. NOTE(review): blank-line runs are not visible
# in this copy — confirm against upstream.
line = """\
def foo():
pass
# comment (E303)
a = 1 # (E305)
"""
fixed = """\
def foo():
pass
# comment (E303)
a = 1 # (E305)
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e304(self):
# E304: blank line after decorator removed.
line = '@contextmanager\n\ndef f():\n print 1\n'
fixed = '@contextmanager\ndef f():\n print 1\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e304_with_comment(self):
# E304 with an intervening comment: the blank line goes, the comment stays.
line = '@contextmanager\n# comment\n\ndef f():\n print 1\n'
fixed = '@contextmanager\n# comment\ndef f():\n print 1\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e305(self):
# E305: two blank lines expected after the end of a function.
line = 'def a():\n pass\na()\n'
fixed = 'def a():\n pass\n\n\na()\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e401(self):
# E401: multiple imports on one line are split.
line = 'import os, sys\n'
fixed = 'import os\nimport sys\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e401_with_indentation(self):
# E401 inside a function body: the split imports keep the indentation.
line = 'def a():\n import os, sys\n'
fixed = 'def a():\n import os\n import sys\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e401_should_ignore_commented_comma(self):
# E401: commas inside the trailing comment must not trigger extra splits.
line = 'import bdist_egg, egg # , not a module, neither is this\n'
fixed = 'import bdist_egg\nimport egg # , not a module, neither is this\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e401_should_ignore_commented_comma_with_indentation(self):
# Same as above, but the import is indented under an 'if'.
line = 'if True:\n import bdist_egg, egg # , not a module, neither is this\n'
fixed = 'if True:\n import bdist_egg\n import egg # , not a module, neither is this\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e401_should_ignore_false_positive(self):
# 'import x; x.f(...)' is a semicolon statement, not a multi-import:
# E401 must leave it alone.
line = 'import bdist_egg; bdist_egg.write_safety_flag(cmd.egg_info, safe)\n'
with autopep8_context(line, options=['--select=E401']) as result:
self.assertEqual(line, result)
def test_e401_with_escaped_newline_case(self):
# E401 with a backslash continuation: split at the comma, keep the
# continuation on the second import.
line = 'import foo, \\\n bar\n'
fixed = 'import foo\nimport \\\n bar\n'
with autopep8_context(line, options=['--select=E401']) as result:
self.assertEqual(fixed, result)
def test_e402(self):
# E402: module-level import moved above the preceding code.
line = 'a = 1\nimport os\n'
fixed = 'import os\na = 1\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e402_duplicate_module(self):
# E402: moving the import deduplicates the second 'import os'.
line = 'a = 1\nimport os\nprint(os)\nimport os\n'
fixed = 'import os\na = 1\nprint(os)\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e402_with_future_import(self):
# E402: the moved import lands after the __future__ import, which must
# stay first.
line = 'from __future__ import print_function\na = 1\nimport os\n'
fixed = 'from __future__ import print_function\nimport os\na = 1\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e401_with_multiline_from_import(self):
# E402 with a parenthesized multiline 'from ... import': late imports are
# hoisted above the function while the multiline import stays intact.
line = """\
from os import (
chroot
)
def f():
pass
from a import b
from b import c
from c import d
"""
fixed = """\
from a import b
from c import d
from b import c
from os import (
chroot
)
def f():
pass
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e402_with_multiline_from_future_import(self):
# E402: the hoisted 'import os' must land after the parenthesized
# multiline __future__ import, not inside or before it.
line = """\
from __future__ import (
absolute_import,
print_function
)
def f():
pass
import os
"""
fixed = """\
from __future__ import (
absolute_import,
print_function
)
import os
def f():
pass
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e402_with_module_doc(self):
# E402: the hoisted import must land after the module docstring in each of
# four docstring shapes (plain, comment+raw, one-line unicode, embedded quotes).
line1 = '"""\nmodule doc\n"""\na = 1\nimport os\n'
fixed1 = '"""\nmodule doc\n"""\nimport os\na = 1\n'
line2 = '# comment\nr"""\nmodule doc\n"""\na = 1\nimport os\n'
fixed2 = '# comment\nr"""\nmodule doc\n"""\nimport os\na = 1\n'
line3 = "u'''one line module doc'''\na = 1\nimport os\n"
fixed3 = "u'''one line module doc'''\nimport os\na = 1\n"
line4 = "'''\n\"\"\"\ndoc'''\na = 1\nimport os\n"
fixed4 = "'''\n\"\"\"\ndoc'''\nimport os\na = 1\n"
for line, fixed in [(line1, fixed1), (line2, fixed2),
(line3, fixed3), (line4, fixed4)]:
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e402_import_some_modules(self):
# E402: several late imports (including a multiline 'from csv import')
# are hoisted to the top, with the duplicate 'import os' dropped.
line = """\
a = 1
from csv import (
reader,
writer,
)
import os
print(os, reader, writer)
import os
"""
fixed = """\
import os
from csv import (
reader,
writer,
)
a = 1
print(os, reader, writer)
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e402_with_dunder(self):
# E402: the import is hoisted above a module dunder (__all__) assignment.
line = """\
__all__ = ["a", "b"]
def f():
pass
import os
"""
fixed = """\
import os
__all__ = ["a", "b"]
def f():
pass
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e402_with_dunder_lines(self):
# E402: same as above but __all__ spans multiple lines; the hoist must not
# split the list literal.
line = """\
__all__ = [
"a",
"b",
]
def f():
pass
import os
"""
fixed = """\
import os
__all__ = [
"a",
"b",
]
def f():
pass
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e501_basic(self):
# E501: a long call is wrapped at a comma with a hanging aligned continuation.
line = """\
print(111, 111, 111, 111, 222, 222, 222, 222, 222, 222, 222, 222, 222, 333, 333, 333, 333)
"""
fixed = """\
print(111, 111, 111, 111, 222, 222, 222, 222,
222, 222, 222, 222, 222, 333, 333, 333, 333)
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e501_with_dictionary(self):
# E501 (aggressive): a long dict literal is exploded one item per line,
# and the E201-style space after '{' is removed as well.
line = """\
myDict = { 'kg': 1, 'tonnes': tonne, 't/y': tonne / year, 'Mt/y': 1e6 * tonne / year}
"""
fixed = """\
myDict = {
'kg': 1,
'tonnes': tonne,
't/y': tonne / year,
'Mt/y': 1e6 * tonne / year}
"""
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_e501_with_in(self):
# E501 on a deeply nested 'in' condition.
# NOTE(review): `line` and `fixed` look identical in this copy — the
# original presumably wraps the long condition; confirm against upstream.
line = """\
if True:
if True:
if True:
if True:
if True:
if True:
if True:
if True:
if k_left in ('any', k_curr) and k_right in ('any', k_curr):
pass
"""
fixed = """\
if True:
if True:
if True:
if True:
if True:
if True:
if True:
if True:
if k_left in ('any', k_curr) and k_right in ('any', k_curr):
pass
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e501_with_commas_and_colons(self):
# E501: a long dict is wrapped at a comma boundary, never at a ':'.
line = """\
foobar = {'aaaaaaaaaaaa': 'bbbbbbbbbbbbbbbb', 'dddddd': 'eeeeeeeeeeeeeeee', 'ffffffffffff': 'gggggggg'}
"""
fixed = """\
foobar = {'aaaaaaaaaaaa': 'bbbbbbbbbbbbbbbb',
'dddddd': 'eeeeeeeeeeeeeeee', 'ffffffffffff': 'gggggggg'}
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e501_with_inline_comments(self):
# E501 (aggressive): a long inline comment is moved to its own line above
# the statement, preserving indentation.
line = """\
' ' # Long inline comments should be moved above.
if True:
' ' # Long inline comments should be moved above.
"""
fixed = """\
# Long inline comments should be moved above.
' '
if True:
# Long inline comments should be moved above.
' '
"""
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_e501_with_inline_comments_should_skip_multiline(self):
# E501 comment relocation must skip comments attached to multiline strings
# (including ones reached via backslash continuations) — input unchanged.
line = """\
'''This should be left alone. -----------------------------------------------------
''' # foo
'''This should be left alone. -----------------------------------------------------
''' \\
# foo
'''This should be left alone. -----------------------------------------------------
''' \\
\\
# foo
"""
with autopep8_context(line, options=['-aa']) as result:
self.assertEqual(line, result)
def test_e501_with_inline_comments_should_skip_keywords(self):
# E501 comment relocation must skip tool directives ('noqa', 'pylint:',
# 'pragma:') even in aggressive mode — input unchanged.
line = """\
' ' # noqa Long inline comments should be moved above.
if True:
' ' # pylint: disable-msgs=E0001
' ' # pragma: no cover
' ' # pragma: no cover
"""
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(line, result)
def test_e501_with_inline_comments_should_skip_keywords_without_aggressive(
self):
# Same directive-skipping behavior without the aggressive flag.
line = """\
' ' # noqa Long inline comments should be moved above.
if True:
' ' # pylint: disable-msgs=E0001
' ' # pragma: no cover
' ' # pragma: no cover
"""
with autopep8_context(line) as result:
self.assertEqual(line, result)
def test_e501_with_inline_comments_should_skip_edge_cases(self):
# E501: an inline comment on a backslash-continued assignment is left
# alone (moving it could change the continuation) — input unchanged.
line = """\
if True:
x = \\
' ' # Long inline comments should be moved above.
"""
with autopep8_context(line) as result:
self.assertEqual(line, result)
def test_e501_basic_should_prefer_balanced_brackets(self):
# E501: the wrap point is chosen so brackets stay balanced per line
# (break after the second argument, not inside radon(...)).
line = """\
if True:
reconstructed = iradon(radon(image), filter="ramp", interpolation="nearest")
"""
fixed = """\
if True:
reconstructed = iradon(radon(image), filter="ramp",
interpolation="nearest")
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e501_with_very_long_line(self):
# E501 (aggressive): a list too long for comma wrapping is exploded one
# element per line.
line = """\
x = [3244234243234, 234234234324, 234234324, 23424234, 234234234, 234234, 234243, 234243, 234234234324, 234234324, 23424234, 234234234, 234234, 234243, 234243]
"""
fixed = """\
x = [
3244234243234,
234234234324,
234234324,
23424234,
234234234,
234234,
234243,
234243,
234234234324,
234234324,
23424234,
234234234,
234234,
234243,
234243]
"""
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_e501_with_lambda(self):
# E501: the break goes inside the lambda's call parentheses, not at the
# lambda itself.
line = """\
self.mock_group.modify_state.side_effect = lambda *_: defer.fail(NoSuchScalingGroupError(1, 2))
"""
fixed = """\
self.mock_group.modify_state.side_effect = lambda *_: defer.fail(
NoSuchScalingGroupError(1, 2))
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e501_shorten_with_backslash(self):
# E501 (aggressive): when no bracket break fits, the long condition is
# split with a backslash continuation.
line = """\
class Bar(object):
def bar(self, position):
if 0 <= position <= self._blocks[-1].position + len(self._blocks[-1].text):
pass
"""
fixed = """\
class Bar(object):
def bar(self, position):
if 0 <= position <= self._blocks[-1].position + \\
len(self._blocks[-1].text):
pass
"""
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_e501_shorten_at_commas_skip(self):
# E501 (aggressive): each call is exploded one argument per line rather
# than wrapped at the single comma.
line = """\
parser.add_argument('source_corpus', help='corpus name/path relative to an nltk_data directory')
parser.add_argument('target_corpus', help='corpus name/path relative to an nltk_data directory')
"""
fixed = """\
parser.add_argument(
'source_corpus',
help='corpus name/path relative to an nltk_data directory')
parser.add_argument(
'target_corpus',
help='corpus name/path relative to an nltk_data directory')
"""
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_e501_with_shorter_length(self):
# E501 respects a custom --max-line-length (40 here).
line = "foooooooooooooooooo('abcdefghijklmnopqrstuvwxyz')\n"
fixed = "foooooooooooooooooo(\n 'abcdefghijklmnopqrstuvwxyz')\n"
with autopep8_context(line,
options=['--max-line-length=40']) as result:
self.assertEqual(fixed, result)
def test_e501_with_indent(self):
# E501: comma wrap of a long call inside a function body.
line = """\
def d():
print(111, 111, 111, 111, 222, 222, 222, 222, 222, 222, 222, 222, 222, 333, 333, 333, 333)
"""
fixed = """\
def d():
print(111, 111, 111, 111, 222, 222, 222, 222,
222, 222, 222, 222, 222, 333, 333, 333, 333)
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e501_alone_with_indentation(self):
# E501 selected alone still wraps correctly inside an indented block.
line = """\
if True:
print(111, 111, 111, 111, 222, 222, 222, 222, 222, 222, 222, 222, 222, 333, 333, 333, 333)
"""
fixed = """\
if True:
print(111, 111, 111, 111, 222, 222, 222, 222,
222, 222, 222, 222, 222, 333, 333, 333, 333)
"""
with autopep8_context(line, options=['--select=E501']) as result:
self.assertEqual(fixed, result)
def test_e501_alone_with_tuple(self):
# E501 selected alone: the nested tuple is wrapped internally when the
# aligned continuation is still too long.
line = """\
fooooooooooooooooooooooooooooooo000000000000000000000000 = [1,
('TransferTime', 'FLOAT')
]
"""
fixed = """\
fooooooooooooooooooooooooooooooo000000000000000000000000 = [1,
('TransferTime',
'FLOAT')
]
"""
with autopep8_context(line, options=['--select=E501']) as result:
self.assertEqual(fixed, result)
def test_e501_should_not_try_to_break_at_every_paren_in_arithmetic(self):
# E501 (aggressive): arithmetic with many parens gets one sensible break,
# while the short tuple below is still exploded per element.
line = """\
term3 = w6 * c5 * (8.0 * psi4 * (11.0 - 24.0 * t2) - 28 * psi3 * (1 - 6.0 * t2) + psi2 * (1 - 32 * t2) - psi * (2.0 * t2) + t4) / 720.0
this_should_be_shortened = (' ', ' ')
"""
fixed = """\
term3 = w6 * c5 * (8.0 * psi4 * (11.0 - 24.0 * t2) - 28 * psi3 *
(1 - 6.0 * t2) + psi2 * (1 - 32 * t2) - psi * (2.0 * t2) + t4) / 720.0
this_should_be_shortened = (
' ',
' ')
"""
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_e501_arithmetic_operator_with_indent(self):
# E501: a bare arithmetic expression is split with a backslash after '+'.
line = """\
def d():
111 + 111 + 111 + 111 + 111 + 222 + 222 + 222 + 222 + 222 + 222 + 222 + 222 + 222 + 333 + 333 + 333 + 333
"""
fixed = r"""def d():
111 + 111 + 111 + 111 + 111 + 222 + 222 + 222 + 222 + \
222 + 222 + 222 + 222 + 222 + 333 + 333 + 333 + 333
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e501_more_complicated(self):
# E501: chained 'or' of calls is broken inside a call's parentheses.
line = """\
blahblah = os.environ.get('blahblah') or os.environ.get('blahblahblah') or os.environ.get('blahblahblahblah')
"""
fixed = """\
blahblah = os.environ.get('blahblah') or os.environ.get(
'blahblahblah') or os.environ.get('blahblahblahblah')
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e501_skip_even_more_complicated(self):
# E501: an already-wrapped deeply nested call is left unchanged rather
# than re-wrapped badly.
line = """\
if True:
if True:
if True:
blah = blah.blah_blah_blah_bla_bl(blahb.blah, blah.blah,
blah=blah.label, blah_blah=blah_blah,
blah_blah2=blah_blah)
"""
with autopep8_context(line) as result:
self.assertEqual(line, result)
def test_e501_avoid_breaking_at_empty_parentheses_if_possible(self):
# E501: break inside the non-empty .baz(...) parens, never inside '()'.
line = """\
someverylongindenttionwhatnot().foo().bar().baz("and here is a long string 123456789012345678901234567890")
"""
fixed = """\
someverylongindenttionwhatnot().foo().bar().baz(
"and here is a long string 123456789012345678901234567890")
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e501_with_logical_fix(self):
# E501 (-aa): logical-line refactoring explodes the call one argument
# per line.
line = """\
xxxxxxxxxxxxxxxxxxxxxxxxxxxx(aaaaaaaaaaaaaaaaaaaaaaa,
bbbbbbbbbbbbbbbbbbbbbbbbbbbb, cccccccccccccccccccccccccccc, dddddddddddddddddddddddd)
"""
fixed = """\
xxxxxxxxxxxxxxxxxxxxxxxxxxxx(
aaaaaaaaaaaaaaaaaaaaaaa,
bbbbbbbbbbbbbbbbbbbbbbbbbbbb,
cccccccccccccccccccccccccccc,
dddddddddddddddddddddddd)
"""
with autopep8_context(line, options=['-aa']) as result:
self.assertEqual(fixed, result)
def test_e501_with_logical_fix_and_physical_fix(self):
# E501 (-aa): the comment gets the physical (truncation) fix while the
# call gets the logical argument-per-line fix.
line = """\
# ------------------------------------ ------------------------------------------
xxxxxxxxxxxxxxxxxxxxxxxxxxxx(aaaaaaaaaaaaaaaaaaaaaaa,
bbbbbbbbbbbbbbbbbbbbbbbbbbbb, cccccccccccccccccccccccccccc, dddddddddddddddddddddddd)
"""
fixed = """\
# ------------------------------------ -----------------------------------
xxxxxxxxxxxxxxxxxxxxxxxxxxxx(
aaaaaaaaaaaaaaaaaaaaaaa,
bbbbbbbbbbbbbbbbbbbbbbbbbbbb,
cccccccccccccccccccccccccccc,
dddddddddddddddddddddddd)
"""
with autopep8_context(line, options=['-aa']) as result:
self.assertEqual(fixed, result)
def test_e501_with_logical_fix_and_adjacent_strings(self):
# E501 (-aa): implicitly concatenated string literals are each put on
# their own line, preserving string prefixes (r"g").
line = """\
print('a-----------------------' 'b-----------------------' 'c-----------------------'
'd-----------------------''e'"f"r"g")
"""
fixed = """\
print(
'a-----------------------'
'b-----------------------'
'c-----------------------'
'd-----------------------'
'e'
"f"
r"g")
"""
with autopep8_context(line, options=['-aa']) as result:
self.assertEqual(fixed, result)
def test_e501_with_multiple_lines(self):
# E501 (-aa): a two-line call is collapsed and exploded one argument
# per line.
line = """\
foo_bar_zap_bing_bang_boom(111, 111, 111, 111, 222, 222, 222, 222, 222, 222, 222, 222, 222, 333, 333,
111, 111, 111, 111, 222, 222, 222, 222, 222, 222, 222, 222, 222, 333, 333)
"""
fixed = """\
foo_bar_zap_bing_bang_boom(
111,
111,
111,
111,
222,
222,
222,
222,
222,
222,
222,
222,
222,
333,
333,
111,
111,
111,
111,
222,
222,
222,
222,
222,
222,
222,
222,
222,
333,
333)
"""
with autopep8_context(line, options=['-aa']) as result:
self.assertEqual(fixed, result)
def test_e501_with_multiple_lines_and_quotes(self):
# E501 (-aa): an aligned dict keyword argument is re-wrapped with hanging
# indentation, one entry per line.
line = """\
if True:
xxxxxxxxxxx = xxxxxxxxxxxxxxxxx(xxxxxxxxxxx, xxxxxxxxxxxxxxxx={'xxxxxxxxxxxx': 'xxxxx',
'xxxxxxxxxxx': xx,
'xxxxxxxx': False,
})
"""
fixed = """\
if True:
xxxxxxxxxxx = xxxxxxxxxxxxxxxxx(
xxxxxxxxxxx,
xxxxxxxxxxxxxxxx={
'xxxxxxxxxxxx': 'xxxxx',
'xxxxxxxxxxx': xx,
'xxxxxxxx': False,
})
"""
with autopep8_context(line, options=['-aa']) as result:
self.assertEqual(fixed, result)
def test_e501_do_not_break_on_keyword(self):
# We don't want to put a newline after equals for keywords as this
# violates PEP 8.
line = """\
if True:
long_variable_name = tempfile.mkstemp(prefix='abcdefghijklmnopqrstuvwxyz0123456789')
"""
fixed = """\
if True:
long_variable_name = tempfile.mkstemp(
prefix='abcdefghijklmnopqrstuvwxyz0123456789')
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e501_do_not_begin_line_with_comma(self):
# This fix is incomplete. (The line is still too long.) But it is here
# just to confirm that we do not put a comma at the beginning of a
# line.
line = """\
def dummy():
if True:
if True:
if True:
object = ModifyAction( [MODIFY70.text, OBJECTBINDING71.text, COLON72.text], MODIFY70.getLine(), MODIFY70.getCharPositionInLine() )
"""
fixed = """\
def dummy():
if True:
if True:
if True:
object = ModifyAction([MODIFY70.text, OBJECTBINDING71.text, COLON72.text], MODIFY70.getLine(
), MODIFY70.getCharPositionInLine())
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e501_should_not_break_on_dot(self):
# E501: the break goes at the call's '(' — never between '.format' and
# its attribute dot.
line = """\
if True:
if True:
raise xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx('xxxxxxxxxxxxxxxxx "{d}" xxxxxxxxxxxxxx'.format(d='xxxxxxxxxxxxxxx'))
"""
fixed = """\
if True:
if True:
raise xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx(
'xxxxxxxxxxxxxxxxx "{d}" xxxxxxxxxxxxxx'.format(d='xxxxxxxxxxxxxxx'))
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e501_with_comment(self):
# E501 (aggressive): a long comment is re-wrapped with textwrap; long URLs
# and commented-out code are deliberately left untouched.
line = """123
if True:
if True:
if True:
if True:
if True:
if True:
# This is a long comment that should be wrapped. I will wrap it using textwrap to be within 72 characters.
pass
# http://foo.bar/abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-
# The following is ugly commented-out code and should not be touched.
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx = 1
"""
fixed = """123
if True:
if True:
if True:
if True:
if True:
if True:
# This is a long comment that should be wrapped. I will
# wrap it using textwrap to be within 72 characters.
pass
# http://foo.bar/abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-
# The following is ugly commented-out code and should not be touched.
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx = 1
"""
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_e501_with_comment_should_not_modify_docstring(self):
# E501: a long '#'-looking line INSIDE a docstring must not be re-wrapped.
line = '''\
def foo():
"""
# This is a long comment that should be wrapped. I will wrap it using textwrap to be within 72 characters.
"""
'''
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(line, result)
def test_e501_should_only_modify_last_comment(self):
# E501 (aggressive): in a run of long comments only the last one is
# re-wrapped; the earlier ones are left as-is.
line = """123
if True:
if True:
if True:
if True:
if True:
if True:
# This is a long comment that should be wrapped. I will wrap it using textwrap to be within 72 characters.
# 1. This is a long comment that should be wrapped. I will wrap it using textwrap to be within 72 characters.
# 2. This is a long comment that should be wrapped. I will wrap it using textwrap to be within 72 characters.
# 3. This is a long comment that should be wrapped. I will wrap it using textwrap to be within 72 characters.
"""
fixed = """123
if True:
if True:
if True:
if True:
if True:
if True:
# This is a long comment that should be wrapped. I will wrap it using textwrap to be within 72 characters.
# 1. This is a long comment that should be wrapped. I will wrap it using textwrap to be within 72 characters.
# 2. This is a long comment that should be wrapped. I will wrap it using textwrap to be within 72 characters.
# 3. This is a long comment that should be wrapped. I
# will wrap it using textwrap to be within 72
# characters.
"""
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_e501_should_not_interfere_with_non_comment(self):
# E501: a '#' line inside a string used with '%' formatting is data, not a
# comment — input unchanged.
line = '''
"""
# not actually a comment %d. 12345678901234567890, 12345678901234567890, 12345678901234567890.
""" % (0,)
'''
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(line, result)
def test_e501_should_cut_comment_pattern(self):
line = """123
# -- Useless lines ----------------------------------------------------------------------
321
"""
fixed = """123
# -- Useless lines -------------------------------------------------------
321
"""
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_e501_with_function_should_not_break_on_colon(self):
line = r"""
class Useless(object):
def _table_field_is_plain_widget(self, widget):
if widget.__class__ == Widget or\
(widget.__class__ == WidgetMeta and Widget in widget.__bases__):
return True
return False
"""
with autopep8_context(line) as result:
self.assertEqual(line, result)
def test_e501_should_break_before_tuple_start(self):
line = """\
xxxxxxxxxxxxx(aaaaaaaaaaaaa, bbbbbbbbbbbbbbbbbb, cccccccccc, (dddddddddddddddddddddd, eeeeeeeeeeee, fffffffffff, gggggggggg))
"""
fixed = """\
xxxxxxxxxxxxx(aaaaaaaaaaaaa, bbbbbbbbbbbbbbbbbb, cccccccccc,
(dddddddddddddddddddddd, eeeeeeeeeeee, fffffffffff, gggggggggg))
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
# E501 with -aa: deeply nested dict/tuple literals are reflowed with one
# element per line at increasing indentation.
def test_e501_with_aggressive(self):
line = """\
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
}
"""
fixed = """\
models = {
'auth.group': {
'Meta': {
'object_name': 'Group'},
'permissions': (
'django.db.models.fields.related.ManyToManyField',
[],
{
'to': "orm['auth.Permission']",
'symmetrical': 'False',
'blank': 'True'})},
'auth.permission': {
'Meta': {
'ordering': "('content_type__app_label', 'content_type__model', 'codename')",
'unique_together': "(('content_type', 'codename'),)",
'object_name': 'Permission'},
'name': (
'django.db.models.fields.CharField',
[],
{
'max_length': '50'})},
}
"""
with autopep8_context(line, options=['-aa']) as result:
self.assertEqual(fixed, result)
# E501 with -aa: each of several independent logical lines is reflowed on
# its own, one argument per line.
def test_e501_with_aggressive_and_multiple_logical_lines(self):
line = """\
xxxxxxxxxxxxxxxxxxxxxxxxxxxx(aaaaaaaaaaaaaaaaaaaaaaa,
bbbbbbbbbbbbbbbbbbbbbbbbbbbb, cccccccccccccccccccccccccccc, dddddddddddddddddddddddd)
xxxxxxxxxxxxxxxxxxxxxxxxxxxx(aaaaaaaaaaaaaaaaaaaaaaa,
bbbbbbbbbbbbbbbbbbbbbbbbbbbb, cccccccccccccccccccccccccccc, dddddddddddddddddddddddd)
"""
fixed = """\
xxxxxxxxxxxxxxxxxxxxxxxxxxxx(
aaaaaaaaaaaaaaaaaaaaaaa,
bbbbbbbbbbbbbbbbbbbbbbbbbbbb,
cccccccccccccccccccccccccccc,
dddddddddddddddddddddddd)
xxxxxxxxxxxxxxxxxxxxxxxxxxxx(
aaaaaaaaaaaaaaaaaaaaaaa,
bbbbbbbbbbbbbbbbbbbbbbbbbbbb,
cccccccccccccccccccccccccccc,
dddddddddddddddddddddddd)
"""
with autopep8_context(line, options=['-aa']) as result:
self.assertEqual(fixed, result)
# E501 with -aa: arithmetic expressions inside a list argument are kept
# intact while the call itself is split after the open parenthesis.
def test_e501_with_aggressive_and_multiple_logical_lines_with_math(self):
line = """\
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx([-1 + 5 / 10,
100,
-3 - 4])
"""
fixed = """\
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx(
[-1 + 5 / 10, 100, -3 - 4])
"""
with autopep8_context(line, options=['-aa']) as result:
self.assertEqual(fixed, result)
# E501 with -aa: a long parenthesized import is split right after the
# opening parenthesis, one imported name per line.
def test_e501_with_aggressive_and_import(self):
line = """\
from . import (xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,
yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy)
"""
fixed = """\
from . import (
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,
yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy)
"""
with autopep8_context(line, options=['-aa']) as result:
self.assertEqual(fixed, result)
# Performance smoke test: a huge Django/South migration module is run through
# -aa. Exact output is irrelevant (the assertion compares whitespace-stripped
# text); the point is that line-shortening caching keeps runtime sane.
def test_e501_with_aggressive_and_massive_number_of_logical_lines(self):
"""We do not care about results here.
We just want to know that it doesn't take a ridiculous amount of
time. Caching is currently required to avoid repeately trying
the same line.
"""
line = """\
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from provider.compat import user_model_label
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Client'
db.create_table('oauth2_client', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm[user_model_label])),
('url', self.gf('django.db.models.fields.URLField')(max_length=200)),
('redirect_uri', self.gf('django.db.models.fields.URLField')(max_length=200)),
('client_id', self.gf('django.db.models.fields.CharField')(default='37b581bdc702c732aa65', max_length=255)),
('client_secret', self.gf('django.db.models.fields.CharField')(default='5cf90561f7566aa81457f8a32187dcb8147c7b73', max_length=255)),
('client_type', self.gf('django.db.models.fields.IntegerField')()),
))
db.send_create_signal('oauth2', ['Client'])
# Adding model 'Grant'
db.create_table('oauth2_grant', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm[user_model_label])),
('client', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['oauth2.Client'])),
('code', self.gf('django.db.models.fields.CharField')(default='f0cda1a5f4ae915431ff93f477c012b38e2429c4', max_length=255)),
('expires', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2012, 2, 8, 10, 43, 45, 620301))),
('redirect_uri', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('scope', self.gf('django.db.models.fields.IntegerField')(default=0)),
))
db.send_create_signal('oauth2', ['Grant'])
# Adding model 'AccessToken'
db.create_table('oauth2_accesstoken', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm[user_model_label])),
('token', self.gf('django.db.models.fields.CharField')(default='b10b8f721e95117cb13c', max_length=255)),
('client', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['oauth2.Client'])),
('expires', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2013, 2, 7, 10, 33, 45, 618854))),
('scope', self.gf('django.db.models.fields.IntegerField')(default=0)),
))
db.send_create_signal('oauth2', ['AccessToken'])
# Adding model 'RefreshToken'
db.create_table('oauth2_refreshtoken', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm[user_model_label])),
('token', self.gf('django.db.models.fields.CharField')(default='84035a870dab7c820c2c501fb0b10f86fdf7a3fe', max_length=255)),
('access_token', self.gf('django.db.models.fields.related.OneToOneField')(related_name='refresh_token', unique=True, to=orm['oauth2.AccessToken'])),
('client', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['oauth2.Client'])),
('expired', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal('oauth2', ['RefreshToken'])
def backwards(self, orm):
# Deleting model 'Client'
db.delete_table('oauth2_client')
# Deleting model 'Grant'
db.delete_table('oauth2_grant')
# Deleting model 'AccessToken'
db.delete_table('oauth2_accesstoken')
# Deleting model 'RefreshToken'
db.delete_table('oauth2_refreshtoken')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
user_model_label: {
'Meta': {'object_name': user_model_label.split('.')[-1]},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'oauth2.accesstoken': {
'Meta': {'object_name': 'AccessToken'},
'client': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oauth2.Client']"}),
'expires': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 2, 7, 10, 33, 45, 624553)'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'scope': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'token': ('django.db.models.fields.CharField', [], {'default': "'d5c1f65020ebdc89f20c'", 'max_length': '255'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_model_label})
},
'oauth2.client': {
'Meta': {'object_name': 'Client'},
'client_id': ('django.db.models.fields.CharField', [], {'default': "'306fb26cbcc87dd33cdb'", 'max_length': '255'}),
'client_secret': ('django.db.models.fields.CharField', [], {'default': "'7e5785add4898448d53767f15373636b918cf0e3'", 'max_length': '255'}),
'client_type': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'redirect_uri': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_model_label})
},
'oauth2.grant': {
'Meta': {'object_name': 'Grant'},
'client': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oauth2.Client']"}),
'code': ('django.db.models.fields.CharField', [], {'default': "'310b2c63e27306ecf5307569dd62340cc4994b73'", 'max_length': '255'}),
'expires': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 2, 8, 10, 43, 45, 625956)'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'redirect_uri': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'scope': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_model_label})
},
'oauth2.refreshtoken': {
'Meta': {'object_name': 'RefreshToken'},
'access_token': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'refresh_token'", 'unique': 'True', 'to': "orm['oauth2.AccessToken']"}),
'client': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oauth2.Client']"}),
'expired': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'token': ('django.db.models.fields.CharField', [], {'default': "'ef0ab76037f17769ab2975a816e8f41a1c11d25e'", 'max_length': '255'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_model_label})
}
}
complete_apps = ['oauth2']
"""
with autopep8_context(line, options=['-aa']) as result:
self.assertEqual(''.join(line.split()),
''.join(result.split()))
# E501 with -aa: a dash-rule comment is truncated to the line limit.
def test_e501_shorten_comment_with_aggressive(self):
line = """\
# --------- ----------------------------------------------------------------------
"""
fixed = """\
# --------- --------------------------------------------------------------
"""
with autopep8_context(line, options=['-aa']) as result:
self.assertEqual(fixed, result)
# Without any aggressive level, long comments are not shortened at all.
def test_e501_shorten_comment_without_aggressive(self):
"""Do nothing without aggressive."""
line = """\
def foo():
pass
# --------- ----------------------------------------------------------------------
"""
with autopep8_context(line) as result:
self.assertEqual(line, result)
# E501 with -aa: a backslash-continued condition with a long trailing
# comment is left unchanged (comment belongs to the continued line).
def test_e501_with_aggressive_and_escaped_newline(self):
line = """\
if True or \\
False: # test test test test test test test test test test test test test test
pass
"""
fixed = """\
if True or \\
False: # test test test test test test test test test test test test test test
pass
"""
with autopep8_context(line, options=['-aa']) as result:
self.assertEqual(fixed, result)
# E501 with -aa: multiline (triple-quoted) string arguments are never split;
# only the surrounding call structure is reflowed.
def test_e501_with_aggressive_and_multiline_string(self):
line = """\
print('---------------------------------------------------------------------',
('================================================', '====================='),
'''--------------------------------------------------------------------------------
''')
"""
fixed = """\
print(
'---------------------------------------------------------------------',
('================================================',
'====================='),
'''--------------------------------------------------------------------------------
''')
"""
with autopep8_context(line, options=['-aa']) as result:
self.assertEqual(fixed, result)
# E501 with -aa: '+' concatenation of triple-quoted strings gains E225
# spacing around '+' but the long string contents stay on one line.
def test_e501_with_aggressive_and_multiline_string_with_addition(self):
line = '''\
def f():
email_text += """<html>This is a really long docstring that goes over the column limit and is multi-line.<br><br>
<b>Czar: </b>"""+despot["Nicholas"]+"""<br>
<b>Minion: </b>"""+serf["Dmitri"]+"""<br>
<b>Residence: </b>"""+palace["Winter"]+"""<br>
</body>
</html>"""
'''
fixed = '''\
def f():
email_text += """<html>This is a really long docstring that goes over the column limit and is multi-line.<br><br>
<b>Czar: </b>""" + despot["Nicholas"] + """<br>
<b>Minion: </b>""" + serf["Dmitri"] + """<br>
<b>Residence: </b>""" + palace["Winter"] + """<br>
</body>
</html>"""
'''
with autopep8_context(line, options=['-aa']) as result:
self.assertEqual(fixed, result)
# E501 with -aa: the same concatenation wrapped in parentheses may be split
# at each '+' operator, one operand per line.
def test_e501_with_aggressive_and_multiline_string_in_parens(self):
line = '''\
def f():
email_text += ("""<html>This is a really long docstring that goes over the column limit and is multi-line.<br><br>
<b>Czar: </b>"""+despot["Nicholas"]+"""<br>
<b>Minion: </b>"""+serf["Dmitri"]+"""<br>
<b>Residence: </b>"""+palace["Winter"]+"""<br>
</body>
</html>""")
'''
fixed = '''\
def f():
email_text += (
"""<html>This is a really long docstring that goes over the column limit and is multi-line.<br><br>
<b>Czar: </b>""" +
despot["Nicholas"] +
"""<br>
<b>Minion: </b>""" +
serf["Dmitri"] +
"""<br>
<b>Residence: </b>""" +
palace["Winter"] +
"""<br>
</body>
</html>""")
'''
with autopep8_context(line, options=['-aa']) as result:
self.assertEqual(fixed, result)
# E501 with -aa: continuation lines keep alignment with the first argument
# of the call inside an indented block.
def test_e501_with_aggressive_and_indentation(self):
line = """\
if True:
# comment here
print(aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa,
bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb,cccccccccccccccccccccccccccccccccccccccccc)
"""
fixed = """\
if True:
# comment here
print(aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa,
bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb,
cccccccccccccccccccccccccccccccccccccccccc)
"""
with autopep8_context(line, options=['-aa']) as result:
self.assertEqual(fixed, result)
# E501 with -aa: a long dict literal is split after '{' with one key-value
# pair per line.
def test_e501_with_multiple_keys_and_aggressive(self):
line = """\
one_two_three_four_five_six = {'one two three four five': 12345, 'asdfsdflsdkfjl sdflkjsdkfkjsfjsdlkfj sdlkfjlsfjs': '343',
1: 1}
"""
fixed = """\
one_two_three_four_five_six = {
'one two three four five': 12345,
'asdfsdflsdkfjl sdflkjsdkfkjsfjsdlkfj sdlkfjlsfjs': '343',
1: 1}
"""
with autopep8_context(line, options=['-aa']) as result:
self.assertEqual(fixed, result)
# Regression guard: input using only '\r' line endings must not crash the
# logical-line finder.
def test_e501_with_aggressive_and_carriage_returns_only(self):
"""Make sure _find_logical() does not crash."""
line = 'if True:\r from aaaaaaaaaaaaaaaa import bbbbbbbbbbbbbbbbbbb\r \r ccccccccccc = None\r'
fixed = 'if True:\r from aaaaaaaaaaaaaaaa import bbbbbbbbbbbbbbbbbbb\r\r ccccccccccc = None\r'
with autopep8_context(line, options=['-aa']) as result:
self.assertEqual(fixed, result)
# --select=E501 alone must not rewrite a long multi-module import line.
def test_e501_should_ignore_imports(self):
line = """\
import logging, os, bleach, commonware, urllib2, json, time, requests, urlparse, re
"""
with autopep8_context(line, options=['--select=E501']) as result:
self.assertEqual(line, result)
# A short line needs no E501 fix; the input passes through unchanged.
def test_e501_should_not_do_useless_things(self):
line = """\
foo(' ')
"""
with autopep8_context(line) as result:
self.assertEqual(line, result)
# E501 --aggressive: a long raise with %-formatting is split before the '%'
# operand tuple, and E211 whitespace after '(' is fixed.
def test_e501_aggressive_with_percent(self):
line = """\
raise MultiProjectException("Ambiguous workspace: %s=%s, %s" % ( varname, varname_path, os.path.abspath(config_filename)))
"""
fixed = """\
raise MultiProjectException(
"Ambiguous workspace: %s=%s, %s" %
(varname, varname_path, os.path.abspath(config_filename)))
"""
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
# E501 --aggressive (level 1): a long def is wrapped onto two aligned lines.
def test_e501_aggressive_with_def(self):
line = """\
def foo(sldfkjlsdfsdf, kksdfsdfsf,sdfsdfsdf, sdfsdfkdk, szdfsdfsdf, sdfsdfsdfsdlkfjsdlf, sdfsdfddf,sdfsdfsfd, sdfsdfdsf):
pass
"""
fixed = """\
def foo(sldfkjlsdfsdf, kksdfsdfsf, sdfsdfsdf, sdfsdfkdk, szdfsdfsdf,
sdfsdfsdfsdlkfjsdlf, sdfsdfddf, sdfsdfsfd, sdfsdfdsf):
pass
"""
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
# E501 with -aa (level 2): the same kind of def is split one parameter per
# line instead.
def test_e501_more_aggressive_with_def(self):
line = """\
def foobar(sldfkjlsdfsdf, kksdfsdfsf,sdfsdfsdf, sdfsdfkdk, szdfsdfsdf, sdfsdfsdfsdlkfjsdlf, sdfsdfddf,sdfsdfsfd, sdfsdfdsf):
pass
"""
fixed = """\
def foobar(
sldfkjlsdfsdf,
kksdfsdfsf,
sdfsdfsdf,
sdfsdfkdk,
szdfsdfsdf,
sdfsdfsdfsdlkfjsdlf,
sdfsdfddf,
sdfsdfsfd,
sdfsdfdsf):
pass
"""
with autopep8_context(line, options=['-aa']) as result:
self.assertEqual(fixed, result)
# E501 with -aa: tuple argument is split onto its own line and E228 spacing
# around '%' is applied inside it.
def test_e501_aggressive_with_tuple(self):
line = """\
def f():
man_this_is_a_very_long_function_name(an_extremely_long_variable_name,
('a string that is long: %s'%'bork'))
"""
fixed = """\
def f():
man_this_is_a_very_long_function_name(
an_extremely_long_variable_name,
('a string that is long: %s' % 'bork'))
"""
with autopep8_context(line, options=['-aa']) as result:
self.assertEqual(fixed, result)
# E501 with -aa: the break lands after the '[' of the list argument, with
# E226 spacing applied around '/'.
def test_e501_aggressive_with_tuple_in_list(self):
line = """\
def f(self):
self._xxxxxxxx(aaaaaa, bbbbbbbbb, cccccccccccccccccc,
[('mmmmmmmmmm', self.yyyyyyyyyy.zzzzzzz/_DDDDD)], eee, 'ff')
"""
fixed = """\
def f(self):
self._xxxxxxxx(aaaaaa, bbbbbbbbb, cccccccccccccccccc, [
('mmmmmmmmmm', self.yyyyyyyyyy.zzzzzzz / _DDDDD)], eee, 'ff')
"""
with autopep8_context(line, options=['-aa']) as result:
self.assertEqual(fixed, result)
# E501 with -aa: a long decorator call is wrapped between its tuple
# arguments rather than one element per line.
def test_e501_aggressive_decorator(self):
line = """\
@foo(('xxxxxxxxxxxxxxxxxxxxxxxxxx', users.xxxxxxxxxxxxxxxxxxxxxxxxxx), ('yyyyyyyyyyyy', users.yyyyyyyyyyyy), ('zzzzzzzzzzzzzz', users.zzzzzzzzzzzzzz))
"""
fixed = """\
@foo(('xxxxxxxxxxxxxxxxxxxxxxxxxx', users.xxxxxxxxxxxxxxxxxxxxxxxxxx),
('yyyyyyyyyyyy', users.yyyyyyyyyyyy), ('zzzzzzzzzzzzzz', users.zzzzzzzzzzzzzz))
"""
with autopep8_context(line, options=['-aa']) as result:
self.assertEqual(fixed, result)
# E501 with -aa: a long class header is split after the opening parenthesis
# of its base-class list.
def test_e501_aggressive_long_class_name(self):
line = """\
class AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA(BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB):
pass
"""
fixed = """\
class AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA(
BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB):
pass
"""
with autopep8_context(line, options=['-aa']) as result:
self.assertEqual(fixed, result)
# E501 with -aa: a long comment is textwrap-wrapped while the long call on
# the next line is split one argument per line.
def test_e501_aggressive_long_comment_and_long_line(self):
line = """\
def foo():
# This is not a novel to be tossed aside lightly. It should be throw with great force.
self.xxxxxxxxx(_('yyyyyyyyyyyyy yyyyyyyyyyyy yyyyyyyy yyyyyyyy y'), 'zzzzzzzzzzzzzzzzzzz', bork='urgent')
"""
fixed = """\
def foo():
# This is not a novel to be tossed aside lightly. It should be throw with
# great force.
self.xxxxxxxxx(
_('yyyyyyyyyyyyy yyyyyyyyyyyy yyyyyyyy yyyyyyyy y'),
'zzzzzzzzzzzzzzzzzzz',
bork='urgent')
"""
with autopep8_context(line, options=['-aa']) as result:
self.assertEqual(fixed, result)
# E501 with -aa: a comment inside a list literal survives in place while the
# long nested list is split element-wise.
def test_e501_aggressive_intermingled_comments(self):
line = """\
A = [
# A comment
['aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 'bbbbbbbbbbbbbbbbbbbbbb', 'cccccccccccccccccccccc']
]
"""
fixed = """\
A = [
# A comment
['aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
'bbbbbbbbbbbbbbbbbbbbbb',
'cccccccccccccccccccccc']
]
"""
with autopep8_context(line, options=['-aa']) as result:
self.assertEqual(fixed, result)
# E501 with -aa: a long 'if not ...(...)' condition is split after the call's
# opening parenthesis, one argument per line.
def test_e501_if_line_over_limit(self):
line = """\
if not xxxxxxxxxxxx(aaaaaaaaaaaaaaaaaa, bbbbbbbbbbbbbbbb, cccccccccccccc, dddddddddddddddddddddd):
return 1
"""
fixed = """\
if not xxxxxxxxxxxx(
aaaaaaaaaaaaaaaaaa,
bbbbbbbbbbbbbbbb,
cccccccccccccc,
dddddddddddddddddddddd):
return 1
"""
with autopep8_context(line, options=['-aa']) as result:
self.assertEqual(fixed, result)
# Same splitting behavior for a long 'for ... in call(...):' header.
def test_e501_for_line_over_limit(self):
line = """\
for aaaaaaaaa in xxxxxxxxxxxx(aaaaaaaaaaaaaaaaaa, bbbbbbbbbbbbbbbb, cccccccccccccc, dddddddddddddddddddddd):
pass
"""
fixed = """\
for aaaaaaaaa in xxxxxxxxxxxx(
aaaaaaaaaaaaaaaaaa,
bbbbbbbbbbbbbbbb,
cccccccccccccc,
dddddddddddddddddddddd):
pass
"""
with autopep8_context(line, options=['-aa']) as result:
self.assertEqual(fixed, result)
# Same splitting behavior for a long 'while call(...):' header.
def test_e501_while_line_over_limit(self):
line = """\
while xxxxxxxxxxxx(aaaaaaaaaaaaaaaaaa, bbbbbbbbbbbbbbbb, cccccccccccccc, dddddddddddddddddddddd):
pass
"""
fixed = """\
while xxxxxxxxxxxx(
aaaaaaaaaaaaaaaaaa,
bbbbbbbbbbbbbbbb,
cccccccccccccc,
dddddddddddddddddddddd):
pass
"""
with autopep8_context(line, options=['-aa']) as result:
self.assertEqual(fixed, result)
# E501: the chosen break point must not land inside a subscript such as
# w['abc'] (the docstring shows the undesirable break being avoided).
def test_e501_avoid_breaking_at_opening_slice(self):
"""Prevents line break on slice notation, dict access in this example:
GYakymOSMc=GYakymOSMW(GYakymOSMJ,GYakymOSMA,GYakymOSMr,GYakymOSMw[
'abc'],GYakymOSMU,GYakymOSMq,GYakymOSMH,GYakymOSMl,svygreNveyvarf=GYakymOSME)
"""
line = """\
GYakymOSMc=GYakymOSMW(GYakymOSMJ,GYakymOSMA,GYakymOSMr,GYakymOSMw['abc'],GYakymOSMU,GYakymOSMq,GYakymOSMH,GYakymOSMl,svygreNveyvarf=GYakymOSME)
"""
fixed = """\
GYakymOSMc = GYakymOSMW(GYakymOSMJ, GYakymOSMA, GYakymOSMr,
GYakymOSMw['abc'], GYakymOSMU, GYakymOSMq, GYakymOSMH, GYakymOSMl, svygreNveyvarf=GYakymOSME)
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
# Same guarantee for chained subscripts like w['abc']['def'].
def test_e501_avoid_breaking_at_multi_level_slice(self):
"""Prevents line break on slice notation, dict access in this example:
GYakymOSMc=GYakymOSMW(GYakymOSMJ,GYakymOSMA,GYakymOSMr,GYakymOSMw['abc'][
'def'],GYakymOSMU,GYakymOSMq,GYakymOSMH,GYakymOSMl,svygreNveyvarf=GYakymOSME)
"""
line = """\
GYakymOSMc=GYakymOSMW(GYakymOSMJ,GYakymOSMA,GYakymOSMr,GYakymOSMw['abc']['def'],GYakymOSMU,GYakymOSMq,GYakymOSMH,GYakymOSMl,svygreNveyvarf=GYakymOSME)
"""
fixed = """\
GYakymOSMc = GYakymOSMW(GYakymOSMJ, GYakymOSMA, GYakymOSMr,
GYakymOSMw['abc']['def'], GYakymOSMU, GYakymOSMq, GYakymOSMH, GYakymOSMl, svygreNveyvarf=GYakymOSME)
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
# Walrus (:=) syntax only parses on Python 3.8+, hence the version skip;
# a long assignment-expression line must be left unchanged by -aa.
@unittest.skipIf(
(sys.version_info.major >= 3 and sys.version_info.minor < 8)
or sys.version_info.major < 3,
"syntax error in Python3.7 and lower version",
)
def test_e501_with_pep572_assignment_expressions(self):
line = """\
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa = 1
if bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb := aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa:
print(bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb)
"""
with autopep8_context(line, options=['-aa']) as result:
self.assertEqual(line, result)
# E502: a backslash before a newline inside brackets is redundant; drop it.
def test_e502(self):
line = "print('abc'\\\n 'def')\n"
fixed = "print('abc'\n 'def')\n"
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
# E701: a compound statement on one line is split into header + body.
def test_e701(self):
line = 'if True: print True\n'
fixed = 'if True:\n print True\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
# E701 still applies when the body follows a backslash continuation.
def test_e701_with_escaped_newline(self):
line = 'if True:\\\nprint True\n'
fixed = 'if True:\n print True\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
# E701 with trailing spaces after the backslash continuation.
def test_e701_with_escaped_newline_and_spaces(self):
line = 'if True: \\ \nprint True\n'
fixed = 'if True:\n print True\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
# E702: semicolon-joined statements become separate lines.
def test_e702(self):
line = 'print 1; print 2\n'
fixed = 'print 1\nprint 2\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
# Semicolons in the suite of a one-line def are left alone by default.
def test_e702_after_colon_should_be_untouched(self):
# https://docs.python.org/2/reference/compound_stmts.html
line = 'def foo(): print(1); print(2)\n'
with autopep8_context(line) as result:
self.assertEqual(line, result)
# E703: a trailing semicolon is simply removed.
def test_e702_with_semicolon_at_end(self):
line = 'print 1;\n'
fixed = 'print 1\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
# E703 plus trailing whitespace after the semicolon.
def test_e702_with_semicolon_and_space_at_end(self):
line = 'print 1; \n'
fixed = 'print 1\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
# E702 via --select: whitespace before ';' does not confuse the fixer.
def test_e702_with_whitespace(self):
line = 'print 1 ; print 2\n'
fixed = 'print 1\nprint 2\n'
with autopep8_context(line, options=['--select=E702']) as result:
self.assertEqual(fixed, result)
# E703 in a UTF-8 file: non-ASCII comment bytes must survive while the
# trailing semicolon is removed.
def test_e702_with_non_ascii_file(self):
line = """\
# -*- coding: utf-8 -*-
# French comment with accent é
# Un commentaire en français avec un accent é
import time
time.strftime('%d-%m-%Y');
"""
fixed = """\
# -*- coding: utf-8 -*-
# French comment with accent é
# Un commentaire en français avec un accent é
import time
time.strftime('%d-%m-%Y')
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
# E702 with a backslash continuation after the semicolon.
def test_e702_with_escaped_newline(self):
line = '1; \\\n2\n'
fixed = '1\n2\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
# Same, but the continued statement is indented; the fix de-indents it.
def test_e702_with_escaped_newline_with_indentation(self):
line = '1; \\\n 2\n'
fixed = '1\n2\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
# Combined E701/E702 plus spacing fixes applied in one pass (--select=E,W).
def test_e702_more_complicated(self):
line = """\
def foo():
if bar : bar+=1; bar=bar*bar ; return bar
"""
fixed = """\
def foo():
if bar:
bar += 1
bar = bar * bar
return bar
"""
with autopep8_context(line, options=['--select=E,W']) as result:
self.assertEqual(fixed, result)
# A ';' inside a string literal must not be treated as a separator.
def test_e702_with_semicolon_in_string(self):
line = 'print(";");\n'
fixed = 'print(";")\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
# Only the real separator is split; the ';' inside "y;y" is preserved.
def test_e702_with_semicolon_in_string_to_the_right(self):
line = 'x = "x"; y = "y;y"\n'
fixed = 'x = "x"\ny = "y;y"\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
# Statements split off a multi-line bracketed expression are placed at the
# logical line's indentation, not the continuation's.
def test_e702_indent_correctly(self):
line = """\
(
1,
2,
3); 4; 5; 5 # pyflakes
"""
fixed = """\
(
1,
2,
3)
4
5
5 # pyflakes
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
# E702 after a module-level triple-quoted string.
def test_e702_with_triple_quote(self):
line = '"""\n hello\n """; 1\n'
fixed = '"""\n hello\n """\n1\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
# E702 after a docstring inside a function keeps the body's indentation.
def test_e702_with_triple_quote_and_indent(self):
line = 'def f():\n """\n hello\n """; 1\n'
fixed = 'def f():\n """\n hello\n """\n 1\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
# E703: trailing ';' after a multi-line implicit string concatenation.
def test_e702_with_semicolon_after_string(self):
line = """\
raise IOError('abc '
'def.');
"""
fixed = """\
raise IOError('abc '
'def.')
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
# E703: trailing ';' after a multi-line list literal.
def test_e702_with_dict_semicolon(self):
line = """\
MY_CONST = [
{'A': 1},
{'B': 2}
];
"""
fixed = """\
MY_CONST = [
{'A': 1},
{'B': 2}
]
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
# Selecting only E702 must not touch a line whose semicolon follows an E701
# one-liner (fixing it would require the unselected E701 fix first).
def test_e702_with_e701_and_only_select_e702_option(self):
line = """\
for i in range(3):
if i == 1: print i; continue
print i
"""
with autopep8_context(line, options=["--select=E702"]) as result:
self.assertEqual(line, result)
# E703: the semicolon before an inline comment is removed.
def test_e703_with_inline_comment(self):
line = 'a = 5; # inline comment\n'
fixed = 'a = 5 # inline comment\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
# E703 on the README example: only the trailing ';' goes; the E704
# one-line def is deliberately left alone at the default level.
def test_e703_in_example_of_readme(self):
line = """\
def example2(): return ('' in {'f': 2}) in {'has_key() is deprecated': True};
"""
fixed = """\
def example2(): return ('' in {'f': 2}) in {'has_key() is deprecated': True}
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
# E704 is only fixed at aggressiveness level 3 (-aaa): the one-line def is
# expanded and E226 spacing applied.
def test_e704(self):
line = 'def f(x): return 2*x\n'
fixed = 'def f(x):\n return 2 * x\n'
with autopep8_context(line, options=['-aaa']) as result:
self.assertEqual(fixed, result)
# At -aa, E704 is not applied even when explicitly selected.
def test_e704_not_work_with_aa_option(self):
line = 'def f(x): return 2*x\n'
with autopep8_context(line, options=['-aa', '--select=E704']) as result:
self.assertEqual(line, result)
def test_e711(self):
line = 'foo == None\n'
fixed = 'foo is None\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
line = 'None == foo\n'
fixed = 'None is foo\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_e711_in_conditional(self):
line = 'if foo == None and None == foo:\npass\n'
fixed = 'if foo is None and None is foo:\npass\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_e711_in_conditional_with_multiple_instances(self):
line = 'if foo == None and bar == None:\npass\n'
fixed = 'if foo is None and bar is None:\npass\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_e711_with_not_equals_none(self):
line = 'foo != None\n'
fixed = 'foo is not None\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_e712(self):
line = 'foo == True\n'
fixed = 'foo\n'
with autopep8_context(line,
options=['-aa', '--select=E712']) as result:
self.assertEqual(fixed, result)
def test_e712_in_conditional_with_multiple_instances(self):
line = 'if foo == True and bar == True:\npass\n'
fixed = 'if foo and bar:\npass\n'
with autopep8_context(line,
options=['-aa', '--select=E712']) as result:
self.assertEqual(fixed, result)
def test_e712_with_false(self):
line = 'foo != False\n'
fixed = 'foo\n'
with autopep8_context(line,
options=['-aa', '--select=E712']) as result:
self.assertEqual(fixed, result)
def test_e712_with_special_case_equal_not_true(self):
line = 'if foo != True:\n pass\n'
fixed = 'if not foo:\n pass\n'
with autopep8_context(line,
options=['-aa', '--select=E712']) as result:
self.assertEqual(fixed, result)
def test_e712_with_special_case_equal_false(self):
line = 'if foo == False:\n pass\n'
fixed = 'if not foo:\n pass\n'
with autopep8_context(line,
options=['-aa', '--select=E712']) as result:
self.assertEqual(fixed, result)
def test_e712_with_dict_value(self):
line = 'if d["key"] != True:\n pass\n'
fixed = 'if not d["key"]:\n pass\n'
with autopep8_context(line,
options=['-aa', '--select=E712']) as result:
self.assertEqual(fixed, result)
def test_e712_only_if_aggressive_level_2(self):
"""E712 must NOT fire with a single ``-a``; aggressiveness level 2 is required."""
line = 'foo == True\n'
with autopep8_context(line, options=['-a']) as result:
self.assertEqual(line, result)
def test_e711_and_e712(self):
line = 'if (foo == None and bar == True) or (foo != False and bar != None):\npass\n'
fixed = 'if (foo is None and bar) or (foo and bar is not None):\npass\n'
with autopep8_context(line, options=['-aa']) as result:
self.assertEqual(fixed, result)
def test_e713(self):
line = 'if not x in y:\n pass\n'
fixed = 'if x not in y:\n pass\n'
with autopep8_context(line,
options=['-aa', '--select=E713']) as result:
self.assertEqual(fixed, result)
def test_e713_more(self):
line = 'if not "." in y:\n pass\n'
fixed = 'if "." not in y:\n pass\n'
with autopep8_context(line,
options=['-aa', '--select=E713']) as result:
self.assertEqual(fixed, result)
def test_e713_with_in(self):
line = 'if not "." in y and "," in y:\n pass\n'
fixed = 'if "." not in y and "," in y:\n pass\n'
with autopep8_context(line,
options=['-aa', '--select=E713']) as result:
self.assertEqual(fixed, result)
def test_e713_with_tuple(self):
"""E713: ``not role in (...)`` spanning a multi-line tuple becomes ``role not in (...)``.

The tuple's continuation lines must survive the rewrite unchanged.
"""
line = """
if not role in ("domaincontroller_master",
"domaincontroller_backup",
"domaincontroller_slave",
"memberserver",
):
pass
"""
fixed = """
if role not in ("domaincontroller_master",
"domaincontroller_backup",
"domaincontroller_slave",
"memberserver",
):
pass
"""
with autopep8_context(line,
options=['-aa', '--select=E713']) as result:
self.assertEqual(fixed, result)
def test_e713_chain(self):
line = 'if "@" not in x or not "/" in y:\n pass\n'
fixed = 'if "@" not in x or "/" not in y:\n pass\n'
with autopep8_context(line,
options=['-aa', '--select=E713']) as result:
self.assertEqual(fixed, result)
def test_e713_chain2(self):
line = 'if "@" not in x or "[" not in x or not "/" in y:\n pass\n'
fixed = 'if "@" not in x or "[" not in x or "/" not in y:\n pass\n'
with autopep8_context(line,
options=['-aa', '--select=E713']) as result:
self.assertEqual(fixed, result)
def test_e713_chain3(self):
line = 'if not "@" in x or "[" not in x or not "/" in y:\n pass\n'
fixed = 'if "@" not in x or "[" not in x or "/" not in y:\n pass\n'
with autopep8_context(line,
options=['-aa', '--select=E713']) as result:
self.assertEqual(fixed, result)
def test_e713_chain4(self):
line = 'if not "." in y and not "," in y:\n pass\n'
fixed = 'if "." not in y and "," not in y:\n pass\n'
with autopep8_context(line,
options=['-aa', '--select=E713']) as result:
self.assertEqual(fixed, result)
def test_e714(self):
"""E714: ``not x is y`` is rewritten to ``x is not y`` when E714 is selected at -aa."""
line = 'if not x is y:\n pass\n'
fixed = 'if x is not y:\n pass\n'
with autopep8_context(line,
options=['-aa', '--select=E714']) as result:
self.assertEqual(fixed, result)
def test_e714_with_is(self):
line = 'if not x is y or x is z:\n pass\n'
fixed = 'if x is not y or x is z:\n pass\n'
with autopep8_context(line,
options=['-aa', '--select=E714']) as result:
self.assertEqual(fixed, result)
def test_e714_chain(self):
line = 'if not x is y or not x is z:\n pass\n'
fixed = 'if x is not y or x is not z:\n pass\n'
with autopep8_context(line,
options=['-aa', '--select=E714']) as result:
self.assertEqual(fixed, result)
def test_e713_and_e714(self):
line = """
if not x is y:
pass
if not role in ("domaincontroller_master",
"domaincontroller_backup",
"domaincontroller_slave",
"memberserver",
):
pass
"""
fixed = """
if x is not y:
pass
if role not in ("domaincontroller_master",
"domaincontroller_backup",
"domaincontroller_slave",
"memberserver",
):
pass
"""
with autopep8_context(line,
options=['-aa', '--select=E713,E714']) as result:
self.assertEqual(fixed, result)
def test_e713_with_single_quote(self):
line = "if not 'DC IP' in info:\n"
fixed = "if 'DC IP' not in info:\n"
with autopep8_context(line,
options=['-aa', '--select=E713,E714']) as result:
self.assertEqual(fixed, result)
def test_e714_with_single_quote(self):
line = "if not 'DC IP' is info:\n"
fixed = "if 'DC IP' is not info:\n"
with autopep8_context(line,
options=['-aa', '--select=E713,E714']) as result:
self.assertEqual(fixed, result)
def test_e721(self):
"""E721: ``type(...) == type(...)`` comparison is rewritten to ``isinstance`` under --aggressive."""
line = "type('') == type('')\n"
fixed = "isinstance('', type(''))\n"
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_e721_with_str(self):
line = "str == type('')\n"
fixed = "isinstance('', str)\n"
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_e721_in_conditional(self):
line = "if str == type(''):\n pass\n"
fixed = "if isinstance('', str):\n pass\n"
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_e722(self):
"""E722: a bare ``except:`` becomes ``except BaseException:`` under --aggressive."""
line = "try:\n print(a)\nexcept:\n pass\n"
fixed = "try:\n print(a)\nexcept BaseException:\n pass\n"
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_e722_with_if_else_stmt(self):
line = "try:\n print(a)\nexcept:\n if a==b:\n print(a)\n else:\n print(b)\n"
fixed = "try:\n print(a)\nexcept BaseException:\n if a == b:\n print(a)\n else:\n print(b)\n"
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_e722_non_aggressive(self):
"""E722 is left untouched when --aggressive is not given (empty options list)."""
line = "try:\n print(a)\nexcept:\n pass\n"
with autopep8_context(line, options=[]) as result:
self.assertEqual(line, result)
def test_e731(self):
"""E731: a lambda assignment is rewritten as a ``def`` even with default options."""
line = 'a = lambda x: x * 2\n'
fixed = 'def a(x): return x * 2\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e731_no_arg(self):
line = 'a = lambda: x * 2\n'
fixed = 'def a(): return x * 2\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e731_with_tuple_arg(self):
line = 'a = lambda (x, y), z: x * 2\n'
fixed = 'def a((x, y), z): return x * 2\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e731_with_args(self):
line = 'a = lambda x, y: x * 2 + y\n'
fixed = 'def a(x, y): return x * 2 + y\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_e731_with_select_option(self):
line = 'a = lambda x: x * 2\n'
fixed = 'def a(x): return x * 2\n'
with autopep8_context(line, options=['--select=E731']) as result:
self.assertEqual(fixed, result)
def test_e731_with_default_arguments(self):
line = 'a = lambda k, d=None: bar.get("%s/%s" % (prefix, k), d)\n'
fixed = 'def a(k, d=None): return bar.get("%s/%s" % (prefix, k), d)\n'
with autopep8_context(line, options=['--select=E731']) as result:
self.assertEqual(fixed, result)
def test_e901_should_cause_indentation_screw_up(self):
"""Input with a syntax error (stray ``)))``) must pass through completely unchanged."""
line = """\
def tmp(g):
g(4)))
if not True:
pass
pass
"""
with autopep8_context(line) as result:
self.assertEqual(line, result)
def test_should_preserve_vertical_tab(self):
line = """\
#Memory Bu\vffer Register:
"""
fixed = """\
# Memory Bu\vffer Register:
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_w191_should_ignore_multiline_strings(self):
"""W191: tabs inside a multiline string literal are preserved.

Only the tab indenting real code (before ``123``) is converted; the ``<>``
operators outside the string are also fixed (W603) under --aggressive.
"""
line = """\
print(3 <> 4, '''
while True:
if True:
\t1
\t''', 4 <> 5)
if True:
\t123
"""
fixed = """\
print(3 != 4, '''
while True:
if True:
\t1
\t''', 4 != 5)
if True:
123
"""
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w191_should_ignore_tabs_in_strings(self):
line = """\
if True:
\tx = '''
\t\tblah
\tif True:
\t1
\t'''
if True:
\t123
else:
\t32
"""
fixed = """\
if True:
x = '''
\t\tblah
\tif True:
\t1
\t'''
if True:
123
else:
32
"""
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w291(self):
"""W291: trailing whitespace after a statement is stripped."""
line = "print 'a b '\t \n"
fixed = "print 'a b '\n"
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w291_with_comment(self):
line = "print 'a b ' # comment\t \n"
fixed = "print 'a b ' # comment\n"
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w292(self):
"""W292: a newline is appended when the file does not end with one."""
line = '1\n2'
fixed = '1\n2\n'
with autopep8_context(line, options=['--aggressive',
'--select=W292']) as result:
self.assertEqual(fixed, result)
def test_w292_ignore(self):
line = "1\n2"
with autopep8_context(line, options=['--aggressive',
'--ignore=W292']) as result:
self.assertEqual(line, result)
def test_w293(self):
line = '1\n \n2\n'
fixed = '1\n\n2\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w391(self):
"""W391: a trailing blank line (even whitespace-only input) is removed entirely."""
line = ' \n'
fixed = ''
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w391_more_complex(self):
line = '123\n456\n \n'
fixed = '123\n456\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w503(self):
"""W503: a binary operator at line start is moved to the end of the preceding line."""
line = '(width == 0\n + height == 0)\n'
fixed = '(width == 0 +\n height == 0)\n'
with autopep8_context(line, options=['--select=W503']) as result:
self.assertEqual(fixed, result)
def test_w503_with_ignore_w504(self):
line = '(width == 0\n + height == 0)\n'
fixed = '(width == 0 +\n height == 0)\n'
with autopep8_context(line, options=['--ignore=E,W504']) as result:
self.assertEqual(fixed, result)
def test_w504_with_ignore_w503(self):
line = '(width == 0 +\n height == 0)\n'
fixed = '(width == 0\n + height == 0)\n'
with autopep8_context(line, options=['--ignore=E,W503']) as result:
self.assertEqual(fixed, result)
def test_w503_w504_none_ignored(self):
line = '(width == 0 +\n height == 0\n+ depth == 0)\n'
fixed = '(width == 0 +\n height == 0\n+ depth == 0)\n'
with autopep8_context(line, options=['--ignore=E']) as result:
self.assertEqual(fixed, result)
def test_w503_w504_both_ignored(self):
line = '(width == 0 +\n height == 0\n+ depth == 0)\n'
fixed = '(width == 0 +\n height == 0\n+ depth == 0)\n'
with autopep8_context(
line, options=['--ignore=E,W503, W504'],
) as result:
self.assertEqual(fixed, result)
def test_w503_skip_default(self):
line = '(width == 0\n + height == 0)\n'
with autopep8_context(line) as result:
self.assertEqual(line, result)
def test_w503_and_or(self):
line = '(width == 0\n and height == 0\n or name == "")\n'
fixed = '(width == 0 and\n height == 0 or\n name == "")\n'
with autopep8_context(line, options=['--select=W503']) as result:
self.assertEqual(fixed, result)
def test_w503_with_comment(self):
line = '(width == 0 # this is comment\n + height == 0)\n'
fixed = '(width == 0 + # this is comment\n height == 0)\n'
with autopep8_context(line, options=['--select=W503']) as result:
self.assertEqual(fixed, result)
def test_w503_with_comment_into_point_out_line(self):
line = """\
def test():
return (
True not in []
and False # comment required
)
"""
fixed = """\
def test():
return (
True not in [] and
False # comment required
)
"""
with autopep8_context(line, options=['--select=W503']) as result:
self.assertEqual(fixed, result)
def test_w503_with_comment_double(self):
line = """\
(
1111 # C1
and 22222222 # C2
and 333333333333 # C3
)
"""
fixed = """\
(
1111 and # C1
22222222 and # C2
333333333333 # C3
)
"""
with autopep8_context(line, options=['--select=W503']) as result:
self.assertEqual(fixed, result)
def test_w503_with_comment_with_only_comment_block_charactor(self):
line = """\
if (True #
and True
and True):
print(1)
"""
fixed = """\
if (True and #
True and
True):
print(1)
"""
with autopep8_context(line, options=['--select=W503']) as result:
self.assertEqual(fixed, result)
def test_w503_over_5lines(self):
line = """\
X = (
1 # 1
+ 2 # 2
+ 3 # 3
+ 4 # 4
+ 5 # 5
+ 6 # 6
+ 7 # 7
)
"""
fixed = """\
X = (
1 + # 1
2 + # 2
3 + # 3
4 + # 4
5 + # 5
6 + # 6
7 # 7
)
"""
with autopep8_context(line, options=['--select=W503']) as result:
self.assertEqual(fixed, result)
def test_w503_with_line_comment(self):
line = '(width == 0\n # this is comment\n + height == 0)\n'
fixed = '(width == 0 +\n # this is comment\n height == 0)\n'
with autopep8_context(line, options=['--select=W503', '--ignore=E']) as result:
self.assertEqual(fixed, result)
def test_w503_with_empty_line(self):
line = """\
# this is comment
a = 2
b = (1 +
2 +
3) / 2.0
"""
fixed = """\
# this is comment
a = 2
b = (1 +
2 +
3) / 2.0
"""
with autopep8_context(line, options=['--ignore=E721']) as result:
self.assertEqual(fixed, result)
def test_w503_with_line_comments(self):
line = '(width == 0\n # this is comment\n # comment2\n + height == 0)\n'
fixed = '(width == 0 +\n # this is comment\n # comment2\n height == 0)\n'
with autopep8_context(line, options=['--select=W503', '--ignore=E']) as result:
self.assertEqual(fixed, result)
def test_ignore_only_w503_with_select_w(self):
line = """\
a = (
11 + 22 +
33 +
44
+ 55
)
"""
fixed = """\
a = (
11 + 22
+ 33
+ 44
+ 55
)
"""
with autopep8_context(line, options=['--select=W', '--ignore=W503']) as result:
self.assertEqual(fixed, result)
with autopep8_context(line, options=['--select=W5', '--ignore=W503']) as result:
self.assertEqual(fixed, result)
with autopep8_context(line, options=['--select=W50', '--ignore=W503']) as result:
self.assertEqual(fixed, result)
def test_ignore_only_w504_with_select_w(self):
line = """\
a = (
11 + 22 +
33 +
44
+ 55
)
"""
fixed = """\
a = (
11 + 22 +
33 +
44 +
55
)
"""
with autopep8_context(line, options=['--select=W', '--ignore=W504']) as result:
self.assertEqual(fixed, result)
with autopep8_context(line, options=['--select=W5', '--ignore=W504']) as result:
self.assertEqual(fixed, result)
with autopep8_context(line, options=['--select=W50', '--ignore=W504']) as result:
self.assertEqual(fixed, result)
def test_ignore_w503_and_w504_with_select_w(self):
line = """\
a = (
11 + 22 +
33 +
44
+ 55
)
"""
with autopep8_context(line, options=['--select=W', '--ignore=W503,W504']) as result:
self.assertEqual(line, result)
with autopep8_context(line, options=['--select=W5', '--ignore=W503,W504']) as result:
self.assertEqual(line, result)
with autopep8_context(line, options=['--select=W50', '--ignore=W503,W504']) as result:
self.assertEqual(line, result)
def test_w504(self):
"""W504: a trailing binary operator is moved to the start of the next line (inverse of W503)."""
line = '(width == 0 +\n height == 0)\n'
fixed = '(width == 0\n + height == 0)\n'
with autopep8_context(line, options=['--select=W504', '--ignore=E']) as result:
self.assertEqual(fixed, result)
def test_w504_comment_on_first_line(self):
line = 'x = (1 | # test\n2)\n'
fixed = 'x = (1 # test\n| 2)\n'
with autopep8_context(line, options=['--select=W504', '--ignore=E']) as result:
self.assertEqual(fixed, result)
def test_w504_comment_on_second_line(self):
line = 'x = (1 |\n2) # test\n'
fixed = 'x = (1\n| 2) # test\n'
with autopep8_context(line, options=['--select=W504', '--ignore=E']) as result:
self.assertEqual(fixed, result)
def test_w504_comment_on_each_lines(self):
line = 'x = (1 |# test\n2 |# test\n3) # test\n'
fixed = 'x = (1# test\n| 2# test\n| 3) # test\n'
with autopep8_context(line, options=['--select=W504', '--ignore=E']) as result:
self.assertEqual(fixed, result)
def test_w504_with_e265_ignore_option(self):
line = '(width == 0 +\n height == 0)\n'
with autopep8_context(line, options=['--ignore=E265']) as result:
self.assertEqual(line, result)
def test_w504_with_e265_ignore_option_regression(self):
line = """\
if True:
if True:
if (
link.is_wheel and
isinstance(link.comes_from, HTMLPage) and
link.comes_from.url.startswith(index_url)
):
_store_wheel_in_cache(file_path, index_url)
"""
with autopep8_context(line, options=['--ignore=E265']) as result:
self.assertEqual(line, result)
def test_w504_with_line_comment(self):
line = '(width == 0 +\n # this is comment\n height == 0)\n'
fixed = '(width == 0\n # this is comment\n + height == 0)\n'
with autopep8_context(line, options=['--select=W504', '--ignore=E']) as result:
self.assertEqual(fixed, result)
def test_w504_not_applied_by_default_when_modifying_with_ignore(self):
line = """\
q = 1
def x(y, z):
if (
y and
z
):
pass
"""
fixed = line.replace('\n\n\n\n', '\n\n')
with autopep8_context(line, options=['--ignore=E265']) as result:
self.assertEqual(fixed, result)
def test_w503_and_w504_conflict(self):
"""W503 and W504 are mutually conflicting; with -aa the E-code reflow wins.

Selecting ``W``, ``W5`` or ``W50`` (all prefixes covering both codes) must
produce the same stable result instead of oscillating between the two fixes.
"""
line = """\
if True:
if True:
assert_equal(self.nodes[0].getbalance(
), bal + Decimal('50.00000000') + Decimal('2.19000000')) # block reward + tx
"""
fixed = """\
if True:
if True:
assert_equal(
self.nodes[0].getbalance(),
bal +
Decimal('50.00000000') +
Decimal('2.19000000')) # block reward + tx
"""
with autopep8_context(line, options=['-aa', '--select=E,W']) as result:
self.assertEqual(fixed, result)
with autopep8_context(line, options=['-aa', '--select=E,W5']) as result:
self.assertEqual(fixed, result)
with autopep8_context(line, options=['-aa', '--select=E,W50']) as result:
self.assertEqual(fixed, result)
def test_w601(self):
"""W601: ``d.has_key(k)`` is rewritten to ``k in d`` under --aggressive."""
line = 'a = {0: 1}\na.has_key(0)\n'
fixed = 'a = {0: 1}\n0 in a\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w601_word(self):
line = 'my_dict = {0: 1}\nmy_dict.has_key(0)\n'
fixed = 'my_dict = {0: 1}\n0 in my_dict\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w601_conditional(self):
line = 'a = {0: 1}\nif a.has_key(0):\n print 1\n'
fixed = 'a = {0: 1}\nif 0 in a:\n print 1\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w601_self(self):
line = 'self.a.has_key(0)\n'
fixed = '0 in self.a\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w601_self_with_conditional(self):
line = 'if self.a.has_key(0):\n print 1\n'
fixed = 'if 0 in self.a:\n print 1\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w601_with_multiple(self):
line = 'a.has_key(0) and b.has_key(0)\n'
fixed = '0 in a and 0 in b\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w601_with_multiple_nested(self):
line = 'alpha.has_key(nested.has_key(12)) and beta.has_key(1)\n'
fixed = '(12 in nested) in alpha and 1 in beta\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w601_with_more_complexity(self):
line = 'y.has_key(0) + x.has_key(x.has_key(0) + x.has_key(x.has_key(0) + x.has_key(1)))\n'
fixed = '(0 in y) + ((0 in x) + ((0 in x) + (1 in x) in x) in x)\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w601_precedence(self):
line = 'if self.a.has_key(1 + 2):\n print 1\n'
fixed = 'if 1 + 2 in self.a:\n print 1\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w601_with_parens(self):
line = 'foo(12) in alpha\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(line, result)
def test_w601_with_multiline(self):
line = """\
a.has_key(
0
)
"""
fixed = '0 in a\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w601_with_non_ascii(self):
line = """\
# -*- coding: utf-8 -*-
## éはe
correct = dict().has_key('good syntax ?')
"""
fixed = """\
# -*- coding: utf-8 -*-
# éはe
correct = 'good syntax ?' in dict()
"""
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w602_arg_is_string(self):
"""W602: old comma-style ``raise E, msg`` is rewritten to the call form ``raise E(msg)``."""
line = "raise ValueError, \"w602 test\"\n"
fixed = "raise ValueError(\"w602 test\")\n"
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w602_arg_is_string_with_comment(self):
line = "raise ValueError, \"w602 test\" # comment\n"
fixed = "raise ValueError(\"w602 test\") # comment\n"
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w602_skip_ambiguous_case(self):
line = "raise 'a', 'b', 'c'\n"
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(line, result)
def test_w602_with_logic(self):
line = "raise TypeError, e or 'hello'\n"
fixed = "raise TypeError(e or 'hello')\n"
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w602_triple_quotes(self):
line = 'raise ValueError, """hello"""\n1\n'
fixed = 'raise ValueError("""hello""")\n1\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w602_multiline(self):
line = 'raise ValueError, """\nhello"""\n'
fixed = 'raise ValueError("""\nhello""")\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w602_with_complex_multiline(self):
line = 'raise ValueError, """\nhello %s %s""" % (\n 1, 2)\n'
fixed = 'raise ValueError("""\nhello %s %s""" % (\n 1, 2))\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w602_multiline_with_trailing_spaces(self):
line = 'raise ValueError, """\nhello""" \n'
fixed = 'raise ValueError("""\nhello""")\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w602_multiline_with_escaped_newline(self):
line = 'raise ValueError, \\\n"""\nhello"""\n'
fixed = 'raise ValueError("""\nhello""")\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w602_multiline_with_escaped_newline_and_comment(self):
line = 'raise ValueError, \\\n"""\nhello""" # comment\n'
fixed = 'raise ValueError("""\nhello""") # comment\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w602_multiline_with_multiple_escaped_newlines(self):
line = 'raise ValueError, \\\n\\\n\\\n"""\nhello"""\n'
fixed = 'raise ValueError("""\nhello""")\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w602_multiline_with_nested_quotes(self):
line = 'raise ValueError, """hello\'\'\'blah"a"b"c"""\n'
fixed = 'raise ValueError("""hello\'\'\'blah"a"b"c""")\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w602_with_multiline_with_single_quotes(self):
line = "raise ValueError, '''\nhello'''\n"
fixed = "raise ValueError('''\nhello''')\n"
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w602_multiline_string_stays_the_same(self):
line = 'raise """\nhello"""\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(line, result)
def test_w602_escaped_lf(self):
line = 'raise ValueError, \\\n"hello"\n'
fixed = 'raise ValueError("hello")\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w602_escaped_crlf(self):
line = 'raise ValueError, \\\r\n"hello"\r\n'
fixed = 'raise ValueError("hello")\r\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w602_indentation(self):
line = 'def foo():\n raise ValueError, "hello"\n'
fixed = 'def foo():\n raise ValueError("hello")\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w602_escaped_cr(self):
line = 'raise ValueError, \\\r"hello"\n\n'
fixed = 'raise ValueError("hello")\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w602_multiple_statements(self):
line = 'raise ValueError, "hello";print 1\n'
fixed = 'raise ValueError("hello")\nprint 1\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w602_raise_argument_with_indentation(self):
line = 'if True:\n raise ValueError, "error"\n'
fixed = 'if True:\n raise ValueError("error")\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w602_skip_raise_argument_triple(self):
"""W602 is skipped for the three-argument raise form (type, value, traceback)."""
line = 'raise ValueError, "info", traceback\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(line, result)
def test_w602_skip_raise_argument_triple_with_comment(self):
line = 'raise ValueError, "info", traceback # comment\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(line, result)
def test_w602_raise_argument_triple_fake(self):
line = 'raise ValueError, "info, info2"\n'
fixed = 'raise ValueError("info, info2")\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w602_with_list_comprehension(self):
line = 'raise Error, [x[0] for x in probs]\n'
fixed = 'raise Error([x[0] for x in probs])\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w602_with_bad_syntax(self):
line = "raise Error, 'abc\n"
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(line, result)
def test_w602_invalid_2to3_fixed_case(self):
line = """\
raise (ValueError
if True else TypeError)
"""
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(line, result)
@unittest.skip('TODO')
def test_w602_invalid_2to3_fixed_case_with_valid_syntax(self):
line = """\
raise (ValueError
if True else TypeError)
raise ValueError, "error"
"""
fixed = """\
raise (ValueError
if True else TypeError)
raise ValueError("error")
"""
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w603(self):
"""W603: the old ``<>`` operator becomes ``!=``; a final newline is also added (W292)."""
line = 'if 2 <> 2:\n print False'
fixed = 'if 2 != 2:\n print False\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w604(self):
line = '`1`\n'
fixed = 'repr(1)\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w604_with_multiple_instances(self):
line = '``1`` + ``b``\n'
fixed = 'repr(repr(1)) + repr(repr(b))\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w604_with_multiple_lines(self):
line = '`(1\n )`\n'
fixed = 'repr((1\n ))\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w605_simple(self):
line = "escape = '\\.jpg'\n"
fixed = "escape = '\\\\.jpg'\n"
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w605_identical_token(self):
"""W605: identical invalid-escape tokens appearing multiple times are each fixed.

The fix doubles the backslash (``\\.`` -> ``\\\\.``); the assertion messages
mention an ``r`` prefix, presumably from an older fix strategy — the expected
strings here are what matters.
"""
# ***NOTE***: The --pep8-passes option is required to prevent an infinite loop in
# the old, failing code. DO NOT REMOVE.
line = "escape = foo('\\.bar', '\\.kilroy')\n"
fixed = "escape = foo('\\\\.bar', '\\\\.kilroy')\n"
with autopep8_context(line, options=['--aggressive', '--pep8-passes', '5']) as result:
self.assertEqual(fixed, result, "Two tokens get r added")
# One token already escaped: only the unescaped one must change.
line = "escape = foo('\\.bar', '\\\\.kilroy')\n"
fixed = "escape = foo('\\\\.bar', '\\\\.kilroy')\n"
with autopep8_context(line, options=['--aggressive', '--pep8-passes', '5']) as result:
self.assertEqual(fixed, result, "r not added if already there")
# Test Case to catch bad behavior reported in Issue #449
line = "escape = foo('\\.bar', '\\.bar')\n"
fixed = "escape = foo('\\\\.bar', '\\\\.bar')\n"
with autopep8_context(line, options=['--aggressive', '--pep8-passes', '5']) as result:
self.assertEqual(fixed, result)
def test_w605_with_invalid_syntax(self):
line = "escape = rr'\\.jpg'\n"
fixed = "escape = rr'\\\\.jpg'\n"
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_w605_with_multilines(self):
line = """\
regex = '\\d+(\\.\\d+){3}$'
foo = validators.RegexValidator(
regex='\\d+(\\.\\d+){3}$')\n""" # noqa
fixed = """\
regex = '\\\\d+(\\\\.\\\\d+){3}$'
foo = validators.RegexValidator(
regex='\\\\d+(\\\\.\\\\d+){3}$')\n"""
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_trailing_whitespace_in_multiline_string(self):
line = 'x = """ \nhello""" \n'
fixed = 'x = """ \nhello"""\n'
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_trailing_whitespace_in_multiline_string_aggressive(self):
line = 'x = """ \nhello""" \n'
fixed = 'x = """\nhello"""\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(fixed, result)
def test_execfile_in_lambda_should_not_be_modified(self):
"""Modifying this to the exec() form is invalid in Python 2."""
line = 'lambda: execfile("foo.py")\n'
with autopep8_context(line, options=['--aggressive']) as result:
self.assertEqual(line, result)
# FIXME: These tests should use multiline strings for readability.
def test_range(self):
"""--line-range 2 2 fixes only line 2; lines 1 and 3 keep their bad spacing."""
line = 'print( 1 )\nprint( 2 )\n print( 3 )\n'
fixed = 'print( 1 )\nprint(2)\n print( 3 )\n'
with autopep8_context(line, options=['--line-range', '2', '2']) as result:
self.assertEqual(fixed, result)
def test_range_line_number_changes_from_one_line(self):
line = 'a=12\na=1; b=2;c=3\nd=4;\n\ndef f(a = 1):\n pass\n'
fixed = 'a=12\na = 1\nb = 2\nc = 3\nd=4;\n\ndef f(a = 1):\n pass\n'
with autopep8_context(line, options=['--line-range', '2', '2']) as result:
self.assertEqual(fixed, result)
def test_range_indent_changes_small_range(self):
line = '\nif True:\n (1, \n 2,\n3)\nelif False:\n a = 1\nelse:\n a = 2\n\nc = 1\nif True:\n c = 2\n a = (1,\n2)\n'
fixed2_5 = '\nif True:\n (1,\n 2,\n 3)\nelif False:\n a = 1\nelse:\n a = 2\n\nc = 1\nif True:\n c = 2\n a = (1,\n2)\n'
with autopep8_context(line, options=['--line-range', '2', '5']) as result:
self.assertEqual(fixed2_5, result)
def test_range_indent_deep_if_blocks_first_block(self):
line = '\nif a:\n if a = 1:\n b = 1\n else:\n b = 2\nelif a == 0:\n b = 3\nelse:\n b = 4\n'
with autopep8_context(line, options=['--line-range', '2', '5']) as result:
self.assertEqual(line, result)
def test_range_indent_deep_if_blocks_second_block(self):
line = '\nif a:\n if a = 1:\n b = 1\n else:\n b = 2\nelif a == 0:\n b = 3\nelse:\n b = 4\n'
with autopep8_context(line, options=['--line-range', '6', '9']) as result:
self.assertEqual(line, result)
def test_range_indent_continued_statements_partial(self):
line = '\nif a == 1:\n\ttry:\n\t foo\n\texcept AttributeError:\n\t pass\n\telse:\n\t "nooo"\n\tb = 1\n'
with autopep8_context(line, options=['--line-range', '2', '6']) as result:
self.assertEqual(line, result)
def test_range_indent_continued_statements_last_block(self):
line = '\nif a == 1:\n\ttry:\n\t foo\n\texcept AttributeError:\n\t pass\n\telse:\n\t "nooo"\n\tb = 1\n'
with autopep8_context(line, options=['--line-range', '6', '9']) as result:
self.assertEqual(line, result)
def test_range_with_broken_syntax(self):
line = """\
if True:
if True:
pass
else:
pass
"""
with autopep8_context(line, options=['--line-range', '1', '1']) as result:
self.assertEqual(line, result)
def test_long_import_line(self):
"""A module-level import (including its backslash continuation) is moved above the preceding statement ``s``."""
line = """\
s
from t import a, \
bbbbbbbbbbbbbbbbbbbbbbbbbbbbb, ccccccccccccccccccccccccccccccc, ddddddddddddddddddddddddddddddddddd
"""
fixed = """\
from t import a, \
bbbbbbbbbbbbbbbbbbbbbbbbbbbbb, ccccccccccccccccccccccccccccccc, ddddddddddddddddddddddddddddddddddd
s
"""
with autopep8_context(line) as result:
self.assertEqual(fixed, result)
def test_exchange_multiple_imports_with_def(self):
line = """\
def f(n):
return n
from a import fa
from b import fb
from c import fc
"""
with autopep8_context(line) as result:
self.assertEqual(result[:4], 'from')
@unittest.skipIf(
(sys.version_info.major >= 3 and sys.version_info.minor < 8)
or sys.version_info.major < 3,
"syntax error in Python3.7 and lower version",
)
def test_with_walrus_operator(self):
"""Walrus-operator (``:=``) source parses and is left unchanged (needs pycodestyle 2.6.0+)."""
line = """\
sql_stmt = ""
with open(filename) as f:
while line := f.readline():
sql_stmt += line
"""
with autopep8_context(line) as result:
self.assertEqual(line, result)
# --- "# autopep8: off/on" and "# fmt: off/on" directive handling ---
# Code between an "off" marker and the next "on" marker must be left
# exactly as written; code outside the markers is fixed normally.

# Everything between the markers stays byte-identical.
def test_autopep8_disable(self):
test_code = """\
# autopep8: off
def f():
    aaaaaaaaaaa.bbbbbbb([
        ('xxxxxxxxxx', 'yyyyyy',
         'Heaven hath no wrath like love to hatred turned. Nor hell a fury like a woman scorned.'),
        ('xxxxxxx', 'yyyyyyyyyyy', "To the last I grapple with thee. From hell's heart I stab at thee. For hate's sake I spit my last breath at thee!")])
# autopep8: on
"""
expected_output = """\
# autopep8: off
def f():
    aaaaaaaaaaa.bbbbbbb([
        ('xxxxxxxxxx', 'yyyyyy',
         'Heaven hath no wrath like love to hatred turned. Nor hell a fury like a woman scorned.'),
        ('xxxxxxx', 'yyyyyyyyyyy', "To the last I grapple with thee. From hell's heart I stab at thee. For hate's sake I spit my last breath at thee!")])
# autopep8: on
"""
with autopep8_context(test_code) as result:
self.assertEqual(expected_output, result)
# Two disabled regions; the 'fix' lines around them are reformatted,
# the 'skip' lines inside them are not.
def test_autopep8_disable_multi(self):
test_code = """\
fix=1
# autopep8: off
skip=1
# autopep8: on
fix=2
# autopep8: off
skip=2
# autopep8: on
fix=3
"""
expected_output = """\
fix = 1
# autopep8: off
skip=1
# autopep8: on
fix = 2
# autopep8: off
skip=2
# autopep8: on
fix = 3
"""
with autopep8_context(test_code) as result:
self.assertEqual(expected_output, result)
# Same contract as test_autopep8_disable but with the black-style marker.
def test_fmt_disable(self):
test_code = """\
# fmt: off
def f():
    aaaaaaaaaaa.bbbbbbb([
        ('xxxxxxxxxx', 'yyyyyy',
         'Heaven hath no wrath like love to hatred turned. Nor hell a fury like a woman scorned.'),
        ('xxxxxxx', 'yyyyyyyyyyy', "To the last I grapple with thee. From hell's heart I stab at thee. For hate's sake I spit my last breath at thee!")])
# fmt: on
"""
expected_output = """\
# fmt: off
def f():
    aaaaaaaaaaa.bbbbbbb([
        ('xxxxxxxxxx', 'yyyyyy',
         'Heaven hath no wrath like love to hatred turned. Nor hell a fury like a woman scorned.'),
        ('xxxxxxx', 'yyyyyyyyyyy', "To the last I grapple with thee. From hell's heart I stab at thee. For hate's sake I spit my last breath at thee!")])
# fmt: on
"""
with autopep8_context(test_code) as result:
self.assertEqual(expected_output, result)
# An "off" with no matching "on" disables fixing to end of file.
def test_fmt_disable_without_reenable(self):
test_code = """\
# fmt: off
print(123)
"""
expected_output = """\
# fmt: off
print(123)
"""
with autopep8_context(test_code) as result:
self.assertEqual(expected_output, result)
# A second "on" after a region has already been re-enabled is a no-op.
def test_fmt_disable_with_double_reenable(self):
test_code = """\
# fmt: off
print( 123 )
# fmt: on
print( 123 )
# fmt: on
print( 123 )
"""
expected_output = """\
# fmt: off
print( 123 )
# fmt: on
print(123)
# fmt: on
print(123)
"""
with autopep8_context(test_code) as result:
self.assertEqual(expected_output, result)
# Nested/"double" off markers: the region stays disabled until "on".
def test_fmt_double_disable_and_reenable(self):
test_code = """\
# fmt: off
print( 123 )
# fmt: off
print( 123 )
# fmt: on
print( 123 )
"""
expected_output = """\
# fmt: off
print( 123 )
# fmt: off
print( 123 )
# fmt: on
print(123)
"""
with autopep8_context(test_code) as result:
self.assertEqual(expected_output, result)
# Alternating enabled/disabled regions (fmt variant of the multi test).
def test_fmt_multi_disable_and_reenable(self):
test_code = """\
fix=1
# fmt: off
skip=1
# fmt: on
fix=2
# fmt: off
skip=2
# fmt: on
fix=3
"""
expected_output = """\
fix = 1
# fmt: off
skip=1
# fmt: on
fix = 2
# fmt: off
skip=2
# fmt: on
fix = 3
"""
with autopep8_context(test_code) as result:
self.assertEqual(expected_output, result)
# Repeated "off" markers without intervening "on": everything from the
# first "off" to the single "on" stays untouched (note fix=2 is skipped).
def test_fmt_multi_disable_complex(self):
test_code = """\
fix=1
# fmt: off
skip=1
# fmt: off
fix=2
# fmt: off
skip=2
# fmt: on
fix=3
"""
expected_output = """\
fix = 1
# fmt: off
skip=1
# fmt: off
fix=2
# fmt: off
skip=2
# fmt: on
fix = 3
"""
with autopep8_context(test_code) as result:
self.assertEqual(expected_output, result)
# Mixed sequence of offs and ons; only lines in an enabled region
# (fix=1, fix=22, fix=222, fix=3) get reformatted.
def test_fmt_multi_disable_complex_multi(self):
test_code = """\
fix=1
# fmt: off
skip=1
# fmt: off
fix=2
# fmt: on
fix=22
# fmt: on
fix=222
# fmt: off
skip=2
# fmt: on
fix=3
"""
expected_output = """\
fix = 1
# fmt: off
skip=1
# fmt: off
fix=2
# fmt: on
fix = 22
# fmt: on
fix = 222
# fmt: off
skip=2
# fmt: on
fix = 3
"""
with autopep8_context(test_code) as result:
self.assertEqual(expected_output, result)
# Large end-to-end fixture: the same messy code appears twice, once inside
# a "# fmt: off" region (must be preserved verbatim, except that module
# imports are still hoisted above it) and once after "# fmt: on" (must be
# fully fixed: E225/E231 spacing, E701/E702 compound statements, etc.).
def test_general_disable(self):
test_code = """\
# fmt: off
import math, sys;
def example1():
    # This is a long comment. This should be wrapped to fit within 72 characters.
    some_tuple=( 1,2, 3,'a' );
    some_variable={'long':'Long code lines should be wrapped within 79 characters.',
        'other':[math.pi, 100,200,300,9876543210,'This is a long string that goes on'],
        'more':{'inner':'This whole logical line should be wrapped.',some_tuple:[1,
               20,300,40000,500000000,60000000000000000]}}
    return (some_tuple, some_variable)
def example2(): return {'has_key() is deprecated':True}.has_key(
    {'f':2}.has_key(''));
class Example3( object ):
    def __init__ ( self, bar ):
     # Comments should have a space after the hash.
     if bar : bar+=1; bar=bar* bar ; return bar
     else:
            some_string = '''
                       Indentation in multiline strings should not be touched.
Only actual code should be reindented.
'''
            return (sys.path, some_string)
# fmt: on
import math, sys;
def example1():
    # This is a long comment. This should be wrapped to fit within 72 characters.
    some_tuple=( 1,2, 3,'a' );
    some_variable={'long':'Long code lines should be wrapped within 79 characters.',
        'other':[math.pi, 100,200,300,9876543210,'This is a long string that goes on'],
        'more':{'inner':'This whole logical line should be wrapped.',some_tuple:[1,
               20,300,40000,500000000,60000000000000000]}}
    return (some_tuple, some_variable)
def example2(): return {'has_key() is deprecated':True}.has_key(
    {'f':2}.has_key(''));
class Example3( object ):
    def __init__ ( self, bar ):
     # Comments should have a space after the hash.
     if bar : bar+=1; bar=bar* bar ; return bar
     else:
            some_string = '''
                       Indentation in multiline strings should not be touched.
Only actual code should be reindented.
'''
            return (sys.path, some_string)
"""
expected_output = """\
# fmt: off
import sys
import math
import math, sys;
def example1():
    # This is a long comment. This should be wrapped to fit within 72 characters.
    some_tuple=( 1,2, 3,'a' );
    some_variable={'long':'Long code lines should be wrapped within 79 characters.',
        'other':[math.pi, 100,200,300,9876543210,'This is a long string that goes on'],
        'more':{'inner':'This whole logical line should be wrapped.',some_tuple:[1,
               20,300,40000,500000000,60000000000000000]}}
    return (some_tuple, some_variable)
def example2(): return {'has_key() is deprecated':True}.has_key(
    {'f':2}.has_key(''));
class Example3( object ):
    def __init__ ( self, bar ):
     # Comments should have a space after the hash.
     if bar : bar+=1; bar=bar* bar ; return bar
     else:
            some_string = '''
                       Indentation in multiline strings should not be touched.
Only actual code should be reindented.
'''
            return (sys.path, some_string)
# fmt: on
def example1():
    # This is a long comment. This should be wrapped to fit within 72 characters.
    some_tuple = (1, 2, 3, 'a')
    some_variable = {'long': 'Long code lines should be wrapped within 79 characters.',
                     'other': [math.pi, 100, 200, 300, 9876543210, 'This is a long string that goes on'],
                     'more': {'inner': 'This whole logical line should be wrapped.', some_tuple: [1,
                                                                                                 20, 300, 40000, 500000000, 60000000000000000]}}
    return (some_tuple, some_variable)
def example2(): return {'has_key() is deprecated': True}.has_key(
    {'f': 2}.has_key(''))
class Example3(object):
    def __init__(self, bar):
        # Comments should have a space after the hash.
        if bar:
            bar += 1
            bar = bar * bar
            return bar
        else:
            some_string = '''
                       Indentation in multiline strings should not be touched.
Only actual code should be reindented.
'''
            return (sys.path, some_string)
"""
with autopep8_context(test_code) as result:
self.assertEqual(expected_output, result)
class UtilityFunctionTests(unittest.TestCase):
# Direct unit tests of module-level helpers (not of the CLI).

# A file that starts directly with imports: the import block begins at
# line offset 0.
def test_get_module_imports(self):
line = """\
import os
import sys
if True:
    print(1)
"""
target_line_index = 8
result = get_module_imports_on_top_of_file(line.splitlines(),
target_line_index)
self.assertEqual(result, 0)
# Shebang, comments and a module docstring precede the import block.
# NOTE(review): the expected offset 10 does not match the visible fixture
# (imports appear at index 6 here); blank lines may have been stripped
# from this fixture — verify against the upstream test file.
def test_get_module_imports_case_of_autopep8(self):
line = """\
#!/usr/bin/python
# comment
# comment
'''this module ...
this module ...
'''
import os
import sys
if True:
    print(1)
"""
target_line_index = 11
result = get_module_imports_on_top_of_file(line.splitlines(),
target_line_index)
self.assertEqual(result, 10)
class CommandLineTests(unittest.TestCase):
# End-to-end tests that drive autopep8 as a subprocess via its CLI.
maxDiff = None  # show full diffs when an assertion fails
# Backslash continuation at module level must survive E122/E302 fixing.
def test_e122_and_e302_with_backslash(self):
line = """\
import sys
\\
def f():
    pass
"""
fixed = """\
import sys
\\
def f():
    pass
"""
with autopep8_subprocess(line, [], timeout=3) as (result, retcode):
self.assertEqual(fixed, result)
self.assertEqual(retcode, autopep8.EXIT_CODE_OK)
# --diff emits a unified diff; the first three header lines are skipped
# before comparing.
def test_diff(self):
line = "'abc'  \n"
fixed = "-'abc'  \n+'abc'\n"
with autopep8_subprocess(line, ['--diff']) as (result, retcode):
self.assertEqual(fixed, '\n'.join(result.split('\n')[3:]))
self.assertEqual(retcode, autopep8.EXIT_CODE_OK)
# --exit-code makes a non-empty diff return EXIT_CODE_EXISTS_DIFF.
def test_diff_with_exit_code_option(self):
line = "'abc'  \n"
fixed = "-'abc'  \n+'abc'\n"
with autopep8_subprocess(line, ['--diff', '--exit-code']) as (result, retcode):
self.assertEqual(fixed, '\n'.join(result.split('\n')[3:]))
self.assertEqual(retcode, autopep8.EXIT_CODE_EXISTS_DIFF)
# Clean input + --exit-code: empty diff, OK exit status.
def test_non_diff_with_exit_code_option(self):
line = "'abc'\n"
with autopep8_subprocess(line, ['--diff', '--exit-code']) as (result, retcode):
self.assertEqual('', '\n'.join(result.split('\n')[3:]))
self.assertEqual(retcode, autopep8.EXIT_CODE_OK)
# Same as above with -j0 (auto job count) in the mix.
def test_non_diff_with_exit_code_and_jobs_options(self):
line = "'abc'\n"
with autopep8_subprocess(line, ['-j0', '--diff', '--exit-code']) as (result, retcode):
self.assertEqual('', '\n'.join(result.split('\n')[3:]))
self.assertEqual(retcode, autopep8.EXIT_CODE_OK)
# An empty file produces an empty diff and exits OK.
def test_diff_with_empty_file(self):
with autopep8_subprocess('', ['--diff']) as (result, retcode):
self.assertEqual('\n'.join(result.split('\n')[3:]), '')
self.assertEqual(retcode, autopep8.EXIT_CODE_OK)
# A missing input file is reported on stderr by name.
def test_diff_with_nonexistent_file(self):
p = Popen(list(AUTOPEP8_CMD_TUPLE) + ['--diff', 'non_existent_file'],
stdout=PIPE, stderr=PIPE)
error = p.communicate()[1].decode('utf-8')
self.assertIn('non_existent_file', error)
# --diff cannot be combined with reading from standard input ('-').
def test_diff_with_standard_in(self):
p = Popen(list(AUTOPEP8_CMD_TUPLE) + ['--diff', '-'],
stdout=PIPE, stderr=PIPE)
error = p.communicate()[1].decode('utf-8')
self.assertIn('cannot', error)
# --indent-size=0 is rejected at argument-parsing time.
def test_indent_size_is_zero(self):
line = "'abc'\n"
with autopep8_subprocess(line, ['--indent-size=0']) as (result, retcode):
self.assertEqual(retcode, autopep8.EXIT_CODE_ARGPARSE_ERROR)
# Writing --in-place to a read-only file must exit with EXIT_CODE_ERROR.
def test_exit_code_with_io_error(self):
line = "import sys\ndef a():\n    print(1)\n"
with readonly_temporary_file_context(line) as filename:
p = Popen(list(AUTOPEP8_CMD_TUPLE) + ['--in-place', filename],
stdout=PIPE, stderr=PIPE)
p.communicate()
self.assertEqual(p.returncode, autopep8.EXIT_CODE_ERROR)
# --pep8-passes 0 still performs the basic whitespace fix.
def test_pep8_passes(self):
line = "'abc'  \n"
fixed = "'abc'\n"
with autopep8_subprocess(line, ['--pep8-passes', '0']) as (result, retcode):
self.assertEqual(fixed, result)
self.assertEqual(retcode, autopep8.EXIT_CODE_OK)
# Ignoring all E and W codes leaves the input untouched.
def test_pep8_ignore(self):
line = "'abc'  \n"
with autopep8_subprocess(line, ['--ignore=E,W']) as (result, retcode):
self.assertEqual(line, result)
self.assertEqual(retcode, autopep8.EXIT_CODE_OK)
# A bare trailing comma in --ignore must not break option parsing.
def test_pep8_ignore_should_handle_trailing_comma_gracefully(self):
line = "'abc'  \n"
fixed = "'abc'\n"
with autopep8_subprocess(line, ['--ignore=,']) as (result, retcode):
self.assertEqual(fixed, result)
self.assertEqual(retcode, autopep8.EXIT_CODE_OK)
# -h prints usage text.
def test_help(self):
p = Popen(list(AUTOPEP8_CMD_TUPLE) + ['-h'],
stdout=PIPE)
self.assertIn('usage:', p.communicate()[0].decode('utf-8').lower())
# -vvv on unparsable input reports the e901 (syntax error) fixer failure.
def test_verbose(self):
line = 'bad_syntax)'
with temporary_file_context(line) as filename:
p = Popen(list(AUTOPEP8_CMD_TUPLE) + [filename, '-vvv'],
stdout=PIPE, stderr=PIPE)
verbose_error = p.communicate()[1].decode('utf-8')
self.assertIn("'fix_e901' is not defined", verbose_error)
# -vvvv with --diff emits the verbose separator lines on stderr.
def test_verbose_diff(self):
line = '+'.join(100 * ['323424234234'])
with temporary_file_context(line) as filename:
p = Popen(list(AUTOPEP8_CMD_TUPLE) +
[filename, '-vvvv', '--diff'],
stdout=PIPE, stderr=PIPE)
verbose_error = p.communicate()[1].decode('utf-8')
self.assertIn('------------', verbose_error)
# --select=E702 on Python 2 style code explains the compound-statement
# limitation in the verbose output.
def test_verbose_with_select_e702(self):
line = """\
for i in range(3):
    if i == 1: print i; continue
    print i
"""
with temporary_file_context(line) as filename:
p = Popen(list(AUTOPEP8_CMD_TUPLE) +
[filename, '-vvv', '--select=E702'],
stdout=PIPE, stderr=PIPE)
verbose_error = p.communicate()[1].decode('utf-8')
self.assertIn(" with other compound statements", verbose_error)
# --in-place rewrites the file on disk and exits OK.
def test_in_place(self):
line = "'abc'  \n"
fixed = "'abc'\n"
with temporary_file_context(line) as filename:
p = Popen(list(AUTOPEP8_CMD_TUPLE) + [filename, '--in-place'])
p.wait()
with open(filename) as f:
self.assertEqual(fixed, f.read())
self.assertEqual(p.returncode, autopep8.EXIT_CODE_OK)
# When nothing needs fixing, --in-place must not write at all; a
# read-only file makes any accidental write visible as an error.
def test_in_place_no_modifications_no_writes(self):
with temporary_file_context('import os\n') as filename:
# ensure that noops do not do writes by making writing an error
os.chmod(filename, 0o444)
p = Popen(
list(AUTOPEP8_CMD_TUPLE) + [filename, '--in-place'],
stderr=PIPE,
)
_, err = p.communicate()
self.assertEqual(err, b'')
self.assertEqual(p.returncode, autopep8.EXIT_CODE_OK)
# Same no-write guarantee for a zero-byte file.
def test_in_place_no_modifications_no_writes_with_empty_file(self):
with temporary_file_context('') as filename:
# ensure that noops do not do writes by making writing an error
os.chmod(filename, 0o444)
p = Popen(
list(AUTOPEP8_CMD_TUPLE) + [filename, '--in-place'],
stderr=PIPE,
)
_, err = p.communicate()
self.assertEqual(err, b'')
self.assertEqual(p.returncode, autopep8.EXIT_CODE_OK)
# W292: missing newline at end of file is added in place.
def test_in_place_with_w292(self):
line = "import os"
fixed = "import os\n"
with temporary_file_context(line) as filename:
p = Popen(list(AUTOPEP8_CMD_TUPLE) + [filename, '--in-place'])
p.wait()
with open(filename) as f:
self.assertEqual(fixed, f.read())
# --in-place with --exit-code reports EXIT_CODE_EXISTS_DIFF when the
# file was modified.
def test_in_place_with_exit_code_option(self):
line = "'abc'  \n"
fixed = "'abc'\n"
with temporary_file_context(line) as filename:
p = Popen(list(AUTOPEP8_CMD_TUPLE) + [filename,
'--in-place',
'--exit-code'])
p.wait()
with open(filename) as f:
self.assertEqual(fixed, f.read())
self.assertEqual(p.returncode, autopep8.EXIT_CODE_EXISTS_DIFF)
# W391: trailing blank lines removed; exit code signals the change.
def test_in_place_with_exit_code_option_with_w391(self):
line = "\n\n\n"
fixed = ""
with temporary_file_context(line) as filename:
p = Popen(list(AUTOPEP8_CMD_TUPLE) + [filename,
'--in-place',
'--exit-code'])
p.wait()
with open(filename) as f:
self.assertEqual(fixed, f.read())
self.assertEqual(p.returncode, autopep8.EXIT_CODE_EXISTS_DIFF)
# --jobs=3 over two files fixes both in place.
def test_parallel_jobs(self):
line = "'abc'  \n"
fixed = "'abc'\n"
with temporary_file_context(line) as filename_a:
with temporary_file_context(line) as filename_b:
p = Popen(list(AUTOPEP8_CMD_TUPLE) +
[filename_a, filename_b, '--jobs=3', '--in-place'])
p.wait()
with open(filename_a) as f:
self.assertEqual(fixed, f.read())
with open(filename_b) as f:
self.assertEqual(fixed, f.read())
def test_parallel_jobs_with_diff_option(self):
    """--jobs with --diff: combined stdout must contain one diff per file.

    Runs autopep8 over two files needing a W291 fix (trailing whitespace)
    and checks that each file's unified diff appears in the output.
    """
    line = "'abc'  \n"
    with temporary_file_context(line) as filename_a:
        with temporary_file_context(line) as filename_b:
            # The two temp files are distinct; de-duplicate defensively.
            files = list(set([filename_a, filename_b]))
            p = Popen(list(AUTOPEP8_CMD_TUPLE) + files +
                      ['--jobs=3', '--diff'], stdout=PIPE)
            p.wait()
            output = p.stdout.read().decode()
            p.stdout.close()
            actual_diffs = []
            for filename in files:
                # BUG FIX: the template previously contained the literal
                # text "(unknown)" where the file name belongs, so
                # .format(filename=...) had no placeholder to substitute
                # and assertIn could never match real --diff output, whose
                # headers are "--- original/<path>" / "+++ fixed/<path>".
                # The trailing space after -'abc' is significant (it is the
                # whitespace being removed), so spell it out explicitly.
                actual_diffs.append(
                    "--- original/{filename}\n"
                    "+++ fixed/{filename}\n"
                    "@@ -1 +1 @@\n"
                    "-'abc'  \n"
                    "+'abc'\n".format(filename=filename))
            self.assertEqual(p.returncode, autopep8.EXIT_CODE_OK)
            for actual_diff in actual_diffs:
                self.assertIn(actual_diff, output)
# Parallel --in-place over read-only files must surface EXIT_CODE_ERROR.
def test_parallel_jobs_with_inplace_option_and_io_error(self):
temp_directory = mkdtemp(dir='.')
try:
file_a = os.path.join(temp_directory, 'a.py')
with open(file_a, 'w') as output:
output.write("'abc'  \n")
os.chmod(file_a, stat.S_IRUSR)  # readonly
os.mkdir(os.path.join(temp_directory, 'd'))
file_b = os.path.join(temp_directory, 'd', 'b.py')
with open(file_b, 'w') as output:
output.write('123  \n')
os.chmod(file_b, stat.S_IRUSR)
p = Popen(list(AUTOPEP8_CMD_TUPLE) +
[temp_directory, '--recursive', '--in-place'],
stdout=PIPE, stderr=PIPE)
p.communicate()[0].decode('utf-8')
self.assertEqual(p.returncode, autopep8.EXIT_CODE_ERROR)
finally:
shutil.rmtree(temp_directory)
# --jobs=0 auto-detects the CPU count and still fixes both files.
def test_parallel_jobs_with_automatic_cpu_count(self):
line = "'abc'  \n"
fixed = "'abc'\n"
with temporary_file_context(line) as filename_a:
with temporary_file_context(line) as filename_b:
p = Popen(list(AUTOPEP8_CMD_TUPLE) +
[filename_a, filename_b, '--jobs=0', '--in-place'])
p.wait()
with open(filename_a) as f:
self.assertEqual(fixed, f.read())
with open(filename_b) as f:
self.assertEqual(fixed, f.read())
# An empty file is left empty by --in-place and exits 0.
def test_in_place_with_empty_file(self):
line = ''
with temporary_file_context(line) as filename:
p = Popen(list(AUTOPEP8_CMD_TUPLE) + [filename, '--in-place'])
p.wait()
self.assertEqual(0, p.returncode)
with open(filename) as f:
self.assertEqual(f.read(), line)
# --in-place and --diff are mutually exclusive options.
def test_in_place_and_diff(self):
line = "'abc'  \n"
with temporary_file_context(line) as filename:
p = Popen(
list(AUTOPEP8_CMD_TUPLE) + [filename,
'--in-place', '--diff'],
stderr=PIPE)
result = p.communicate()[1].decode('utf-8')
self.assertIn('--in-place and --diff are mutually exclusive', result)
# --recursive walks into subdirectories and diffs each file found.
def test_recursive(self):
temp_directory = mkdtemp(dir='.')
try:
with open(os.path.join(temp_directory, 'a.py'), 'w') as output:
output.write("'abc'  \n")
os.mkdir(os.path.join(temp_directory, 'd'))
with open(os.path.join(temp_directory, 'd', 'b.py'),
'w') as output:
output.write('123  \n')
p = Popen(list(AUTOPEP8_CMD_TUPLE) +
[temp_directory, '--recursive', '--diff'],
stdout=PIPE)
result = p.communicate()[0].decode('utf-8')
self.assertEqual(
"-'abc'  \n+'abc'",
'\n'.join(result.split('\n')[3:5]))
self.assertEqual(
'-123  \n+123',
'\n'.join(result.split('\n')[8:10]))
finally:
shutil.rmtree(temp_directory)
# Non-ASCII file names must not crash the recursive walk.
def test_recursive_should_not_crash_on_unicode_filename(self):
temp_directory = mkdtemp(dir='.')
try:
for filename in ['x.py', 'é.py', 'é.txt']:
with open(os.path.join(temp_directory, filename), 'w'):
pass
p = Popen(list(AUTOPEP8_CMD_TUPLE) +
[temp_directory,
'--recursive',
'--diff'],
stdout=PIPE)
self.assertFalse(p.communicate()[0])
self.assertEqual(0, p.returncode)
finally:
shutil.rmtree(temp_directory)
# Dot-prefixed (hidden) directories are skipped by --recursive.
def test_recursive_should_ignore_hidden(self):
temp_directory = mkdtemp(dir='.')
temp_subdirectory = mkdtemp(prefix='.', dir=temp_directory)
try:
with open(os.path.join(temp_subdirectory, 'a.py'), 'w') as output:
output.write("'abc'  \n")
p = Popen(list(AUTOPEP8_CMD_TUPLE) +
[temp_directory, '--recursive', '--diff'],
stdout=PIPE)
result = p.communicate()[0].decode('utf-8')
self.assertEqual(0, p.returncode)
self.assertEqual('', result)
finally:
shutil.rmtree(temp_directory)
# --exclude with a glob filters files discovered by --recursive.
def test_exclude(self):
temp_directory = mkdtemp(dir='.')
try:
with open(os.path.join(temp_directory, 'a.py'), 'w') as output:
output.write("'abc'  \n")
os.mkdir(os.path.join(temp_directory, 'd'))
with open(os.path.join(temp_directory, 'd', 'b.py'),
'w') as output:
output.write('123  \n')
p = Popen(list(AUTOPEP8_CMD_TUPLE) +
[temp_directory, '--recursive', '--exclude=a*',
'--diff'],
stdout=PIPE)
result = p.communicate()[0].decode('utf-8')
self.assertNotIn('abc', result)
self.assertIn('123', result)
finally:
shutil.rmtree(temp_directory)
# --exclude also applies to files passed directly on the command line.
def test_exclude_with_directly_file_args(self):
temp_directory = mkdtemp(dir='.')
try:
filepath_a = os.path.join(temp_directory, 'a.py')
with open(filepath_a, 'w') as output:
output.write("'abc'  \n")
os.mkdir(os.path.join(temp_directory, 'd'))
filepath_b = os.path.join(temp_directory, 'd', 'b.py')
with open(os.path.join(filepath_b), 'w') as output:
output.write('123  \n')
p = Popen(list(AUTOPEP8_CMD_TUPLE) +
['--exclude=*/a.py', '--diff', filepath_a, filepath_b],
stdout=PIPE)
result = p.communicate()[0].decode('utf-8')
self.assertNotIn('abc', result)
self.assertIn('123', result)
finally:
shutil.rmtree(temp_directory)
# Each invalid option combination must exit non-zero with a message.
def test_invalid_option_combinations(self):
line = "'abc'  \n"
with temporary_file_context(line) as filename:
for options in [['--recursive', filename],  # without --diff
['--jobs=2', filename],  # without --diff
['--max-line-length=0', filename],
[],  # no argument
['-', '--in-place'],
['-', '--recursive'],
['-', filename],
['--line-range', '0', '2', filename],
['--line-range', '2', '1', filename],
['--line-range', '-1', '-1', filename],
]:
p = Popen(list(AUTOPEP8_CMD_TUPLE) + options,
stderr=PIPE)
result = p.communicate()[1].decode('utf-8')
self.assertNotEqual(0, p.returncode, msg=str(options))
self.assertTrue(len(result))
# --list-fixes enumerates supported codes (spot-check E121).
def test_list_fixes(self):
with autopep8_subprocess('', options=['--list-fixes']) as (result, retcode):
self.assertIn('E121', result)
self.assertEqual(retcode, autopep8.EXIT_CODE_OK)
# FixPEP8 constructed from a filename reads that file's source.
def test_fixpep8_class_constructor(self):
line = 'print 1\nprint 2\n'
with temporary_file_context(line) as filename:
pep8obj = autopep8.FixPEP8(filename, None)
self.assertEqual(''.join(pep8obj.source), line)
# Multiple file args without --in-place/--diff trigger an argparse error.
def test_inplace_with_multi_files(self):
exception = None
with disable_stderr():
try:
autopep8.parse_args(['test.py', 'dummy.py'])
except SystemExit as e:
exception = e
self.assertTrue(exception)
self.assertEqual(exception.code, autopep8.EXIT_CODE_ARGPARSE_ERROR)
# CRLF input is written to stdout using the platform's native newline.
def test_standard_out_should_use_native_line_ending(self):
line = '1\r\n2\r\n3\r\n'
with temporary_file_context(line) as filename:
process = Popen(list(AUTOPEP8_CMD_TUPLE) +
[filename],
stdout=PIPE)
self.assertEqual(
os.linesep.join(['1', '2', '3', '']),
process.communicate()[0].decode('utf-8'))
# Bare-CR input is normalized the same way.
def test_standard_out_should_use_native_line_ending_with_cr_input(self):
line = '1\r2\r3\r'
with temporary_file_context(line) as filename:
process = Popen(list(AUTOPEP8_CMD_TUPLE) +
[filename],
stdout=PIPE)
self.assertEqual(
os.linesep.join(['1', '2', '3', '']),
process.communicate()[0].decode('utf-8'))
# '-' reads source from stdin and writes the fixed code to stdout.
def test_standard_in(self):
line = 'print( 1 )\n'
fixed = 'print(1)' + os.linesep
process = Popen(list(AUTOPEP8_CMD_TUPLE) +
['-'],
stdout=PIPE,
stdin=PIPE)
self.assertEqual(
fixed,
process.communicate(line.encode('utf-8'))[0].decode('utf-8'))
# --exit-code also applies when reading from stdin.
def test_exit_code_should_be_set_when_standard_in(self):
line = 'print( 1 )\n'
process = Popen(list(AUTOPEP8_CMD_TUPLE) +
['--exit-code', '-'],
stdout=PIPE,
stdin=PIPE)
process.communicate(line.encode('utf-8'))[0].decode('utf-8')
self.assertEqual(
process.returncode,
autopep8.EXIT_CODE_EXISTS_DIFF)
class ConfigurationTests(unittest.TestCase):
# Tests for option resolution between CLI flags, local config files
# (FAKE_CONFIGURATION fixtures) and --global-config.

# Local config in FAKE_CONFIGURATION sets indent-size=2.
def test_local_config(self):
args = autopep8.parse_args(
[os.path.join(FAKE_CONFIGURATION, 'foo.py'),
'--global-config={}'.format(os.devnull)],
apply_config=True)
self.assertEqual(args.indent_size, 2)
# An explicit CLI flag overrides the local config value.
def test_config_override(self):
args = autopep8.parse_args(
[os.path.join(FAKE_CONFIGURATION, 'foo.py'),
'--indent-size=7'],
apply_config=True)
self.assertEqual(args.indent_size, 7)
# --global-config=False disables the global config but local still applies.
def test_config_false_with_local(self):
args = autopep8.parse_args(
[os.path.join(FAKE_CONFIGURATION, 'foo.py'),
'--global-config=False'],
apply_config=True)
self.assertEqual(args.global_config, 'False')
self.assertEqual(args.indent_size, 2)
# Same as above, value passed as a separate argument.
def test_config_false_with_local_space(self):
args = autopep8.parse_args(
[os.path.join(FAKE_CONFIGURATION, 'foo.py'),
'--global-config', 'False'],
apply_config=True)
self.assertEqual(args.global_config, 'False')
self.assertEqual(args.indent_size, 2)
# pycodestyle-style local config supplies max-line-length.
def test_local_pycodestyle_config_line_length(self):
args = autopep8.parse_args(
[os.path.join(FAKE_PYCODESTYLE_CONFIGURATION, 'foo.py'),
'--global-config={}'.format(os.devnull)],
apply_config=True)
self.assertEqual(args.max_line_length, 40)
# Abbreviated flag (--g) is auto-completed by argparse.
def test_config_false_with_local_autocomplete(self):
args = autopep8.parse_args(
[os.path.join(FAKE_CONFIGURATION, 'foo.py'),
'--g', 'False'],
apply_config=True)
self.assertEqual(args.global_config, 'False')
self.assertEqual(args.indent_size, 2)
# No local config found: the default indent size (4) stays in effect.
def test_config_false_without_local(self):
args = autopep8.parse_args(['/nowhere/foo.py',
'--global-config={}'.format(os.devnull)],
apply_config=True)
self.assertEqual(args.indent_size, 4)
# Local config wins over a conflicting global config.
def test_global_config_with_locals(self):
with temporary_file_context('[pep8]\nindent-size=3\n') as filename:
args = autopep8.parse_args(
[os.path.join(FAKE_CONFIGURATION, 'foo.py'),
'--global-config={}'.format(filename)],
apply_config=True)
self.assertEqual(args.indent_size, 2)
# --ignore-local-config makes the global config win instead.
def test_global_config_ignore_locals(self):
with temporary_file_context('[pep8]\nindent-size=3\n') as filename:
args = autopep8.parse_args(
[os.path.join(FAKE_CONFIGURATION, 'foo.py'),
'--global-config={}'.format(filename),
'--ignore-local-config'],
apply_config=True)
self.assertEqual(args.indent_size, 3)
# With no local config present, the global config applies directly.
def test_global_config_without_locals(self):
with temporary_file_context('[pep8]\nindent-size=3\n') as filename:
args = autopep8.parse_args(
['/nowhere/foo.py', '--global-config={}'.format(filename)],
apply_config=True)
self.assertEqual(args.indent_size, 3)
# Integer-valued keys in config files are parsed as ints.
def test_config_local_int_value(self):
with temporary_file_context('[pep8]\naggressive=1\n') as filename:
args = autopep8.parse_args(
[os.path.join(FAKE_CONFIGURATION, 'foo.py'),
'--global-config={}'.format(filename)],
apply_config=True)
self.assertEqual(args.aggressive, 1)
# Unknown keys (here: count) are tolerated; known keys still apply.
# NOTE(review): 'inclue' in the test name looks like a typo for
# 'include'; kept as-is so external test selection by name keeps working.
def test_config_local_inclue_invalid_key(self):
configstr = """\
[pep8]
count=True
aggressive=1
"""
with temporary_file_context(configstr) as filename:
args = autopep8.parse_args(
[os.path.join(FAKE_CONFIGURATION, 'foo.py'),
'--global-config={}'.format(filename)],
apply_config=True)
self.assertEqual(args.aggressive, 1)
# pyproject.toml [tool.autopep8] values are honored too.
def test_pyproject_toml_config_local_int_value(self):
with temporary_file_context('[tool.autopep8]\naggressive=2\n') as filename:
args = autopep8.parse_args(
[os.path.join(FAKE_CONFIGURATION, 'foo.py'),
'--global-config={}'.format(filename)],
apply_config=True)
self.assertEqual(args.aggressive, 2)
class ConfigurationFileTests(unittest.TestCase):
# Tests that exercise discovery of pyproject.toml / .flake8 files placed
# in a temporary project directory next to the target source file.
# NOTE(review): the TOML fixtures below spell the key "aggressvie" — this
# looks like a typo for "aggressive"; it may be deliberate (exercising an
# unrecognized key), so it is preserved verbatim. Verify against upstream.

# pyproject.toml's ignore="E,W" suppresses fixes even with .flake8 present.
def test_pyproject_toml_with_flake8_config(self):
"""override to flake8 config"""
line = "a =  1\n"
dot_flake8 = """[pep8]\naggressive=0\n"""
pyproject_toml = """[tool.autopep8]\naggressvie=2\nignore="E,W"\n"""
with temporary_project_directory() as dirname:
with open(os.path.join(dirname, "pyproject.toml"), "w") as fp:
fp.write(pyproject_toml)
with open(os.path.join(dirname, ".flake8"), "w") as fp:
fp.write(dot_flake8)
target_filename = os.path.join(dirname, "foo.py")
with open(target_filename, "w") as fp:
fp.write(line)
p = Popen(list(AUTOPEP8_CMD_TUPLE) +
[target_filename], stdout=PIPE)
self.assertEqual(p.communicate()[0].decode("utf-8"), line)
self.assertEqual(p.returncode, autopep8.EXIT_CODE_OK)
# -vvv reports which pyproject.toml keys were picked up.
def test_pyproject_toml_with_verbose_option(self):
"""override to flake8 config"""
line = "a =  1\n"
verbose_line = "enable pyproject.toml config: key=ignore, value=E,W\n"
pyproject_toml = """[tool.autopep8]\naggressvie=2\nignore="E,W"\n"""
with temporary_project_directory() as dirname:
with open(os.path.join(dirname, "pyproject.toml"), "w") as fp:
fp.write(pyproject_toml)
target_filename = os.path.join(dirname, "foo.py")
with open(target_filename, "w") as fp:
fp.write(line)
p = Popen(list(AUTOPEP8_CMD_TUPLE) +
[target_filename, "-vvv"], stdout=PIPE)
output = p.communicate()[0].decode("utf-8")
self.assertTrue(line in output)
self.assertTrue(verbose_line in output)
self.assertEqual(p.returncode, autopep8.EXIT_CODE_OK)
# TOML list values (ignore=["E","W"]) are accepted as well.
def test_pyproject_toml_with_iterable_value(self):
line = "a =  1\n"
pyproject_toml = """[tool.autopep8]\naggressvie=2\nignore=["E","W"]\n"""
with temporary_project_directory() as dirname:
with open(os.path.join(dirname, "pyproject.toml"), "w") as fp:
fp.write(pyproject_toml)
target_filename = os.path.join(dirname, "foo.py")
with open(target_filename, "w") as fp:
fp.write(line)
p = Popen(list(AUTOPEP8_CMD_TUPLE) +
[target_filename, ], stdout=PIPE)
output = p.communicate()[0].decode("utf-8")
self.assertTrue(line in output)
self.assertEqual(p.returncode, autopep8.EXIT_CODE_OK)
class ExperimentalSystemTests(unittest.TestCase):
maxDiff = None
def test_e501_experimental_basic(self):
line = """\
print(111, 111, 111, 111, 222, 222, 222, 222, 222, 222, 222, 222, 222, 333, 333, 333, 333)
"""
fixed = """\
print(111, 111, 111, 111, 222, 222, 222, 222,
222, 222, 222, 222, 222, 333, 333, 333, 333)
"""
with autopep8_context(line, options=['--experimental']) as result:
self.assertEqual(fixed, result)
def test_e501_experimental_with_commas_and_colons(self):
line = """\
foobar = {'aaaaaaaaaaaa': 'bbbbbbbbbbbbbbbb', 'dddddd': 'eeeeeeeeeeeeeeee', 'ffffffffffff': 'gggggggg'}
"""
fixed = """\
foobar = {'aaaaaaaaaaaa': 'bbbbbbbbbbbbbbbb',
'dddddd': 'eeeeeeeeeeeeeeee', 'ffffffffffff': 'gggggggg'}
"""
with autopep8_context(line, options=['--experimental']) as result:
self.assertEqual(fixed, result)
def test_e501_experimental_with_inline_comments(self):
line = """\
' ' # Long inline comments should be moved above.
if True:
' ' # Long inline comments should be moved above.
"""
fixed = """\
# Long inline comments should be moved above.
' '
if True:
# Long inline comments should be moved above.
' '
"""
with autopep8_context(line, options=['--experimental']) as result:
self.assertEqual(fixed, result)
def test_e501_experimental_with_inline_comments_should_skip_multiline(
self):
line = """\
'''This should be left alone. -----------------------------------------------------
''' # foo
'''This should be left alone. -----------------------------------------------------
''' \\
# foo
'''This should be left alone. -----------------------------------------------------
''' \\
\\
# foo
"""
fixed = """\
'''This should be left alone. -----------------------------------------------------
''' # foo
'''This should be left alone. -----------------------------------------------------
''' # foo
'''This should be left alone. -----------------------------------------------------
''' # foo
"""
with autopep8_context(line, options=['--experimental']) as result:
self.assertEqual(fixed, result)
def test_e501_experimental_with_inline_comments_should_skip_keywords(self):
line = """\
' ' # noqa Long inline comments should be moved above.
if True:
' ' # pylint: disable-msgs=E0001
' ' # pragma: no cover
' ' # pragma: no cover
"""
with autopep8_context(line, options=['--experimental']) as result:
self.assertEqual(line, result)
def test_e501_experimental_with_inline_comments_should_skip_edge_cases(
self):
line = """\
if True:
x = \\
' ' # Long inline comments should be moved above.
"""
fixed = """\
if True:
# Long inline comments should be moved above.
x = ' '
"""
with autopep8_context(line, options=['--experimental']) as result:
self.assertEqual(fixed, result)
def test_e501_experimental_basic_should_prefer_balanced_brackets(self):
line = """\
if True:
reconstructed = iradon(radon(image), filter="ramp", interpolation="nearest")
"""
fixed = """\
if True:
reconstructed = iradon(
radon(image),
filter="ramp", interpolation="nearest")
"""
with autopep8_context(line, options=['--experimental']) as result:
self.assertEqual(fixed, result)
def test_e501_experimental_with_very_long_line(self):
line = """\
x = [3244234243234, 234234234324, 234234324, 23424234, 234234234, 234234, 234243, 234243, 234234234324, 234234324, 23424234, 234234234, 234234, 234243, 234243]
"""
fixed = """\
x = [3244234243234, 234234234324, 234234324, 23424234, 234234234, 234234, 234243,
234243, 234234234324, 234234324, 23424234, 234234234, 234234, 234243, 234243]
"""
with autopep8_context(line, options=['--experimental']) as result:
self.assertEqual(fixed, result)
def test_e501_experimental_shorten_at_commas_skip(self):
line = """\
parser.add_argument('source_corpus', help='corpus name/path relative to an nltk_data directory')
parser.add_argument('target_corpus', help='corpus name/path relative to an nltk_data directory')
"""
fixed = """\
parser.add_argument(
'source_corpus',
help='corpus name/path relative to an nltk_data directory')
parser.add_argument(
'target_corpus',
help='corpus name/path relative to an nltk_data directory')
"""
with autopep8_context(line, options=['--experimental']) as result:
self.assertEqual(fixed, result)
def test_e501_experimental_with_shorter_length(self):
line = """\
foooooooooooooooooo('abcdefghijklmnopqrstuvwxyz')
"""
fixed = """\
foooooooooooooooooo(
'abcdefghijklmnopqrstuvwxyz')
"""
with autopep8_context(line,
options=['--max-line-length=40',
'--experimental']) as result:
self.assertEqual(fixed, result)
    def test_e501_experimental_with_indent(self):
        """E501/--experimental: wrapping respects the enclosing def's indent."""
        line = """\
def d():
    print(111, 111, 111, 111, 222, 222, 222, 222, 222, 222, 222, 222, 222, 333, 333, 333, 333)
"""
        fixed = """\
def d():
    print(111, 111, 111, 111, 222, 222, 222, 222,
          222, 222, 222, 222, 222, 333, 333, 333, 333)
"""
        with autopep8_context(line, options=['--experimental']) as result:
            self.assertEqual(fixed, result)
    def test_e501_experimental_alone_with_indentation(self):
        """E501 alone (--select=E501) with --experimental respects indentation."""
        line = """\
if True:
    print(111, 111, 111, 111, 222, 222, 222, 222, 222, 222, 222, 222, 222, 333, 333, 333, 333)
"""
        fixed = """\
if True:
    print(111, 111, 111, 111, 222, 222, 222, 222,
          222, 222, 222, 222, 222, 333, 333, 333, 333)
"""
        with autopep8_context(line, options=['--select=E501',
                                             '--experimental']) as result:
            self.assertEqual(fixed, result)
    @unittest.skip('Not sure why space is not removed anymore')
    def test_e501_experimental_alone_with_tuple(self):
        """E501 alone: a wrapped list literal is compacted onto fewer lines."""
        line = """\
fooooooooooooooooooooooooooooooo000000000000000000000000 = [1,
                                                            ('TransferTime', 'FLOAT')
                                                            ]
"""
        fixed = """\
fooooooooooooooooooooooooooooooo000000000000000000000000 = [
    1, ('TransferTime', 'FLOAT')]
"""
        with autopep8_context(line, options=['--select=E501',
                                             '--experimental']) as result:
            self.assertEqual(fixed, result)
    def test_e501_experimental_should_not_try_to_break_at_every_paren_in_arithmetic(
            self):
        """E501/--experimental: arithmetic breaks at operators, not every paren."""
        line = """\
term3 = w6 * c5 * (8.0 * psi4 * (11.0 - 24.0 * t2) - 28 * psi3 * (1 - 6.0 * t2) + psi2 * (1 - 32 * t2) - psi * (2.0 * t2) + t4) / 720.0
this_should_be_shortened = ('                                                 ', '                                                              ')
"""
        fixed = """\
term3 = w6 * c5 * (8.0 * psi4 * (11.0 - 24.0 * t2) - 28 * psi3 * (1 - 6.0 * t2) +
                   psi2 * (1 - 32 * t2) - psi * (2.0 * t2) + t4) / 720.0
this_should_be_shortened = (
    '                                                 ',
    '                                                              ')
"""
        with autopep8_context(line, options=['--experimental']) as result:
            self.assertEqual(fixed, result)
    def test_e501_experimental_arithmetic_operator_with_indent(self):
        """E501/--experimental: long arithmetic uses a backslash continuation."""
        line = """\
def d():
    111 + 111 + 111 + 111 + 111 + 222 + 222 + 222 + 222 + 222 + 222 + 222 + 222 + 222 + 333 + 333 + 333 + 333
"""
        fixed = """\
def d():
    111 + 111 + 111 + 111 + 111 + 222 + 222 + 222 + 222 + \\
        222 + 222 + 222 + 222 + 222 + 333 + 333 + 333 + 333
"""
        with autopep8_context(line, options=['--experimental']) as result:
            self.assertEqual(fixed, result)
    def test_e501_experimental_more_complicated(self):
        """E501/--experimental: chained `or` of calls splits inside a call."""
        line = """\
blahblah = os.environ.get('blahblah') or os.environ.get('blahblahblah') or os.environ.get('blahblahblahblah')
"""
        fixed = """\
blahblah = os.environ.get('blahblah') or os.environ.get(
    'blahblahblah') or os.environ.get('blahblahblahblah')
"""
        with autopep8_context(line, options=['--experimental']) as result:
            self.assertEqual(fixed, result)
    def test_e501_experimental_skip_even_more_complicated(self):
        """E501/--experimental: deeply nested call reflows after the open paren."""
        line = """\
if True:
    if True:
        if True:
            blah = blah.blah_blah_blah_bla_bl(blahb.blah, blah.blah,
                                              blah=blah.label, blah_blah=blah_blah,
                                              blah_blah2=blah_blah)
"""
        fixed = """\
if True:
    if True:
        if True:
            blah = blah.blah_blah_blah_bla_bl(
                blahb.blah, blah.blah, blah=blah.label, blah_blah=blah_blah,
                blah_blah2=blah_blah)
"""
        with autopep8_context(line, options=['--experimental']) as result:
            self.assertEqual(fixed, result)
    def test_e501_experimental_with_logical_fix(self):
        """E501/--experimental: a logical-line fix repacks all arguments."""
        line = """\
xxxxxxxxxxxxxxxxxxxxxxxxxxxx(aaaaaaaaaaaaaaaaaaaaaaa,
                             bbbbbbbbbbbbbbbbbbbbbbbbbbbb, cccccccccccccccccccccccccccc, dddddddddddddddddddddddd)
"""
        fixed = """\
xxxxxxxxxxxxxxxxxxxxxxxxxxxx(
    aaaaaaaaaaaaaaaaaaaaaaa, bbbbbbbbbbbbbbbbbbbbbbbbbbbb,
    cccccccccccccccccccccccccccc, dddddddddddddddddddddddd)
"""
        with autopep8_context(line, options=['--experimental']) as result:
            self.assertEqual(fixed, result)
    def test_e501_experimental_with_logical_fix_and_physical_fix(self):
        """E501/--experimental/--aggressive: comment trim plus logical reflow."""
        line = """\
# ------ ------------------------------------------------------------------------
xxxxxxxxxxxxxxxxxxxxxxxxxxxx(aaaaaaaaaaaaaaaaaaaaaaa,
                             bbbbbbbbbbbbbbbbbbbbbbbbbbbb, cccccccccccccccccccccccccccc, dddddddddddddddddddddddd)
"""
        fixed = """\
# ------ -----------------------------------------------------------------
xxxxxxxxxxxxxxxxxxxxxxxxxxxx(
    aaaaaaaaaaaaaaaaaaaaaaa,
    bbbbbbbbbbbbbbbbbbbbbbbbbbbb,
    cccccccccccccccccccccccccccc,
    dddddddddddddddddddddddd)
"""
        with autopep8_context(line, options=['--experimental',
                                             '--aggressive']) as result:
            self.assertEqual(fixed, result)
    def test_e501_with_logical_fix_and_adjacent_strings(self):
        """E501/--experimental: adjacent string literals go one per line."""
        line = """\
print('a-----------------------' 'b-----------------------' 'c-----------------------'
      'd-----------------------''e'"f"r"g")
"""
        fixed = """\
print(
    'a-----------------------'
    'b-----------------------'
    'c-----------------------'
    'd-----------------------'
    'e'
    "f"
    r"g")
"""
        with autopep8_context(line, options=['--experimental']) as result:
            self.assertEqual(fixed, result)
    def test_e501_experimental_with_multiple_lines(self):
        """E501/--experimental: a two-line call is re-wrapped after the paren."""
        line = """\
foo_bar_zap_bing_bang_boom(111, 111, 111, 111, 222, 222, 222, 222, 222, 222, 222, 222, 222, 333, 333,
                           111, 111, 111, 111, 222, 222, 222, 222, 222, 222, 222, 222, 222, 333, 333)
"""
        fixed = """\
foo_bar_zap_bing_bang_boom(
    111, 111, 111, 111, 222, 222, 222, 222, 222, 222, 222, 222, 222, 333, 333,
    111, 111, 111, 111, 222, 222, 222, 222, 222, 222, 222, 222, 222, 333, 333)
"""
        with autopep8_context(line, options=['--experimental']) as result:
            self.assertEqual(fixed, result)
    def test_e501_experimental_do_not_break_on_keyword(self):
        # We don't want to put a newline after equals for keywords as this
        # violates PEP 8.
        """E501/--experimental: never break right after `keyword=`."""
        line = """\
if True:
    long_variable_name = tempfile.mkstemp(prefix='abcdefghijklmnopqrstuvwxyz0123456789')
"""
        fixed = """\
if True:
    long_variable_name = tempfile.mkstemp(
        prefix='abcdefghijklmnopqrstuvwxyz0123456789')
"""
        with autopep8_context(line, options=['--experimental']) as result:
            self.assertEqual(fixed, result)
    def test_e501_experimental_do_not_begin_line_with_comma(self):
        """E501/--experimental: wrapped lines must not start with a comma."""
        line = """\
def dummy():
    if True:
        if True:
            if True:
                object = ModifyAction( [MODIFY70.text, OBJECTBINDING71.text, COLON72.text], MODIFY70.getLine(), MODIFY70.getCharPositionInLine() )
"""
        fixed = """\
def dummy():
    if True:
        if True:
            if True:
                object = ModifyAction(
                    [MODIFY70.text, OBJECTBINDING71.text, COLON72.text],
                    MODIFY70.getLine(),
                    MODIFY70.getCharPositionInLine())
"""
        with autopep8_context(line, options=['--experimental']) as result:
            self.assertEqual(fixed, result)
    def test_e501_experimental_should_not_break_on_dot(self):
        """E501/--experimental: split inside call parens, not at attribute dots."""
        line = """\
if True:
    if True:
        raise xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx('xxxxxxxxxxxxxxxxx "{d}" xxxxxxxxxxxxxx'.format(d='xxxxxxxxxxxxxxx'))
"""
        fixed = """\
if True:
    if True:
        raise xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx(
            'xxxxxxxxxxxxxxxxx "{d}" xxxxxxxxxxxxxx'.format(
                d='xxxxxxxxxxxxxxx'))
"""
        with autopep8_context(line, options=['--experimental']) as result:
            self.assertEqual(fixed, result)
    def test_e501_experimental_with_comment(self):
        """E501/--experimental/--aggressive: wrap comments; leave URLs and
        commented-out code untouched."""
        line = """123
if True:
    if True:
        if True:
            if True:
                if True:
                    if True:
                        # This is a long comment that should be wrapped. I will wrap it using textwrap to be within 72 characters.
                        pass

# http://foo.bar/abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-

# The following is ugly commented-out code and should not be touched.
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx = 1
"""
        fixed = """123
if True:
    if True:
        if True:
            if True:
                if True:
                    if True:
                        # This is a long comment that should be wrapped. I will
                        # wrap it using textwrap to be within 72 characters.
                        pass

# http://foo.bar/abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-abc-

# The following is ugly commented-out code and should not be touched.
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx = 1
"""
        with autopep8_context(line, options=['--experimental',
                                             '--aggressive']) as result:
            self.assertEqual(fixed, result)
    def test_e501_experimental_with_comment_should_not_modify_docstring(self):
        """E501/--experimental: comment wrapping must not touch docstrings."""
        line = '''\
def foo():
    """
                        # This is a long comment that should be wrapped. I will wrap it using textwrap to be within 72 characters.
    """
'''
        with autopep8_context(line, options=['--experimental',
                                             '--aggressive']) as result:
            self.assertEqual(line, result)
    def test_e501_experimental_should_only_modify_last_comment(self):
        """E501/--experimental/--aggressive: only the final comment of a block
        of long comments gets wrapped."""
        line = """123
if True:
    if True:
        if True:
            if True:
                if True:
                    if True:
                        # This is a long comment that should be wrapped. I will wrap it using textwrap to be within 72 characters.
                        # 1. This is a long comment that should be wrapped. I will wrap it using textwrap to be within 72 characters.
                        # 2. This is a long comment that should be wrapped. I will wrap it using textwrap to be within 72 characters.
                        # 3. This is a long comment that should be wrapped. I will wrap it using textwrap to be within 72 characters.
"""
        fixed = """123
if True:
    if True:
        if True:
            if True:
                if True:
                    if True:
                        # This is a long comment that should be wrapped. I will wrap it using textwrap to be within 72 characters.
                        # 1. This is a long comment that should be wrapped. I will wrap it using textwrap to be within 72 characters.
                        # 2. This is a long comment that should be wrapped. I will wrap it using textwrap to be within 72 characters.
                        # 3. This is a long comment that should be wrapped. I
                        # will wrap it using textwrap to be within 72
                        # characters.
"""
        with autopep8_context(line, options=['--experimental',
                                             '--aggressive']) as result:
            self.assertEqual(fixed, result)
    def test_e501_experimental_should_not_interfere_with_non_comment(self):
        """E501/--experimental: a '#' inside a string is not a comment."""
        line = '''
"""
# not actually a comment %d. 12345678901234567890, 12345678901234567890, 12345678901234567890.
""" % (0,)
'''
        with autopep8_context(line, options=['--experimental',
                                             '--aggressive']) as result:
            self.assertEqual(line, result)
    def test_e501_experimental_should_cut_comment_pattern(self):
        """E501/--experimental/--aggressive: trailing dash runs are truncated."""
        line = """123
# -- Useless lines ----------------------------------------------------------------------
321
"""
        fixed = """123
# -- Useless lines -------------------------------------------------------
321
"""
        with autopep8_context(line, options=['--experimental',
                                             '--aggressive']) as result:
            self.assertEqual(fixed, result)
    def test_e501_experimental_with_function_should_not_break_on_colon(self):
        """E501/--experimental: never split a compound statement at its colon."""
        line = r"""
class Useless(object):

    def _table_field_is_plain_widget(self, widget):
        if widget.__class__ == Widget or\
                (widget.__class__ == WidgetMeta and Widget in widget.__bases__):
            return True

        return False
"""
        fixed = r"""
class Useless(object):

    def _table_field_is_plain_widget(self, widget):
        if widget.__class__ == Widget or (
                widget.__class__ == WidgetMeta and Widget in widget.__bases__):
            return True

        return False
"""
        with autopep8_context(line, options=['--experimental']) as result:
            self.assertEqual(fixed, result)
    def test_e501_with_experimental(self):
        """E501/--experimental leaves this already-formatted dict unchanged."""
        line = """\
models = {
    'auth.group': {
        'Meta': {'object_name': 'Group'},
        'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
    },
    'auth.permission': {
        'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
        'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
    },
}
"""
        with autopep8_context(line, options=['--experimental']) as result:
            self.assertEqual(line, result)
    def test_e501_experimental_and_multiple_logical_lines(self):
        """E501/--experimental: both logical lines are reflowed independently."""
        line = """\
xxxxxxxxxxxxxxxxxxxxxxxxxxxx(aaaaaaaaaaaaaaaaaaaaaaa,
                             bbbbbbbbbbbbbbbbbbbbbbbbbbbb, cccccccccccccccccccccccccccc, dddddddddddddddddddddddd)
xxxxxxxxxxxxxxxxxxxxxxxxxxxx(aaaaaaaaaaaaaaaaaaaaaaa,
                             bbbbbbbbbbbbbbbbbbbbbbbbbbbb, cccccccccccccccccccccccccccc, dddddddddddddddddddddddd)
"""
        fixed = """\
xxxxxxxxxxxxxxxxxxxxxxxxxxxx(
    aaaaaaaaaaaaaaaaaaaaaaa, bbbbbbbbbbbbbbbbbbbbbbbbbbbb,
    cccccccccccccccccccccccccccc, dddddddddddddddddddddddd)
xxxxxxxxxxxxxxxxxxxxxxxxxxxx(
    aaaaaaaaaaaaaaaaaaaaaaa, bbbbbbbbbbbbbbbbbbbbbbbbbbbb,
    cccccccccccccccccccccccccccc, dddddddddddddddddddddddd)
"""
        with autopep8_context(line, options=['--experimental']) as result:
            self.assertEqual(fixed, result)
    def test_e501_experimental_and_multiple_logical_lines_with_math(self):
        """E501/--experimental: a math-heavy list argument is joined onto one line."""
        line = """\
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx([-1 + 5 / -10,
                                                                             100,
                                                                             -3 - 4])
"""
        fixed = """\
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx(
    [-1 + 5 / -10, 100, -3 - 4])
"""
        with autopep8_context(line, options=['--experimental']) as result:
            self.assertEqual(fixed, result)
    def test_e501_experimental_and_import(self):
        """E501/--experimental: a long parenthesized import splits after '('."""
        line = """\
from . import (xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,
               yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy)
"""
        fixed = """\
from . import (
    xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,
    yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy)
"""
        with autopep8_context(line, options=['--experimental']) as result:
            self.assertEqual(fixed, result)
    def test_e501_shorten_comment_with_experimental(self):
        """E501/--experimental/--aggressive: an over-long dash comment is cut."""
        line = """\
# ------ -------------------------------------------------------------------------
"""
        fixed = """\
# ------ -----------------------------------------------------------------
"""
        with autopep8_context(line, options=['--experimental',
                                             '--aggressive']) as result:
            self.assertEqual(fixed, result)
    def test_e501_with_experimental_and_escaped_newline(self):
        """E501/--experimental: only trailing whitespace before '\\' is cleaned."""
        line = """\
if True or \\
    False:  # test test test test test test test test test test test test test test
    pass
"""
        fixed = """\
if True or \\
        False:  # test test test test test test test test test test test test test test
    pass
"""
        with autopep8_context(line, options=['--experimental']) as result:
            self.assertEqual(fixed, result)
    def test_e501_with_experimental_and_multiline_string(self):
        """E501/--experimental: triple-quoted arguments stay intact."""
        line = """\
print('---------------------------------------------------------------------',
      ('================================================', '====================='),
      '''--------------------------------------------------------------------------------
''')
"""
        fixed = """\
print(
    '---------------------------------------------------------------------',
    ('================================================',
     '====================='),
    '''--------------------------------------------------------------------------------
''')
"""
        with autopep8_context(line, options=['--experimental']) as result:
            self.assertEqual(fixed, result)
    def test_e501_with_experimental_and_multiline_string_with_addition(self):
        """E501/--experimental: concatenated triple-quoted HTML is untouched."""
        line = '''\
def f():
    email_text += """<html>This is a really long docstring that goes over the column limit and is multi-line.<br><br>
<b>Czar: </b>"""+despot["Nicholas"]+"""<br>
<b>Minion: </b>"""+serf["Dmitri"]+"""<br>
<b>Residence: </b>"""+palace["Winter"]+"""<br>
</body>
</html>"""
'''
        fixed = '''\
def f():
    email_text += """<html>This is a really long docstring that goes over the column limit and is multi-line.<br><br>
<b>Czar: </b>"""+despot["Nicholas"]+"""<br>
<b>Minion: </b>"""+serf["Dmitri"]+"""<br>
<b>Residence: </b>"""+palace["Winter"]+"""<br>
</body>
</html>"""
'''
        with autopep8_context(line, options=['--experimental']) as result:
            self.assertEqual(fixed, result)
    def test_e501_with_experimental_and_multiline_string_in_parens(self):
        """E501/--experimental: parenthesized multiline string moves after '('."""
        line = '''\
def f():
    email_text += ("""<html>This is a really long docstring that goes over the column limit and is multi-line.<br><br>
<b>Czar: </b>"""+despot["Nicholas"]+"""<br>
<b>Minion: </b>"""+serf["Dmitri"]+"""<br>
<b>Residence: </b>"""+palace["Winter"]+"""<br>
</body>
</html>""")
'''
        fixed = '''\
def f():
    email_text += (
        """<html>This is a really long docstring that goes over the column limit and is multi-line.<br><br>
<b>Czar: </b>"""+despot["Nicholas"]+"""<br>
<b>Minion: </b>"""+serf["Dmitri"]+"""<br>
<b>Residence: </b>"""+palace["Winter"]+"""<br>
</body>
</html>""")
'''
        with autopep8_context(line, options=['--experimental']) as result:
            self.assertEqual(fixed, result)
    def test_e501_with_experimental_and_indentation(self):
        """E501/--experimental: wrapped args align under the first argument."""
        line = """\
if True:
    # comment here
    print(aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa,
          bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb,cccccccccccccccccccccccccccccccccccccccccc)
"""
        fixed = """\
if True:
    # comment here
    print(aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa,
          bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb,
          cccccccccccccccccccccccccccccccccccccccccc)
"""
        with autopep8_context(line, options=['--experimental']) as result:
            self.assertEqual(fixed, result)
    def test_e501_with_multiple_keys_and_experimental(self):
        """E501/--experimental: long dict literal splits at key boundaries."""
        line = """\
one_two_three_four_five_six = {'one two three four five': 12345, 'asdfsdflsdkfjl sdflkjsdkfkjsfjsdlkfj sdlkfjlsfjs': '343',
                               1: 1}
"""
        fixed = """\
one_two_three_four_five_six = {
    'one two three four five': 12345,
    'asdfsdflsdkfjl sdflkjsdkfkjsfjsdlkfj sdlkfjlsfjs': '343', 1: 1}
"""
        with autopep8_context(line, options=['--experimental']) as result:
            self.assertEqual(fixed, result)
    def test_e501_with_experimental_and_carriage_returns_only(self):
        """Make sure _find_logical() does not crash."""
        # Input uses bare '\r' line endings; only the trailing blank is cleaned.
        line = 'if True:\r    from aaaaaaaaaaaaaaaa import bbbbbbbbbbbbbbbbbbb\r    \r    ccccccccccc = None\r'
        fixed = 'if True:\r    from aaaaaaaaaaaaaaaa import bbbbbbbbbbbbbbbbbbb\r\r    ccccccccccc = None\r'
        with autopep8_context(line, options=['--experimental']) as result:
            self.assertEqual(fixed, result)
    def test_e501_experimental_should_ignore_imports(self):
        """E501 alone: long import lines are deliberately left alone."""
        line = """\
import logging, os, bleach, commonware, urllib2, json, time, requests, urlparse, re
"""
        with autopep8_context(line, options=['--select=E501',
                                             '--experimental']) as result:
            self.assertEqual(line, result)
    def test_e501_experimental_should_not_do_useless_things(self):
        """E501/--experimental: a long string argument alone is not reflowed."""
        line = """\
foo('                                                                            ')
"""
        with autopep8_context(line, options=['--experimental']) as result:
            self.assertEqual(line, result)
    def test_e501_experimental_with_percent(self):
        """E501/--experimental: split around the '%' formatting operator."""
        line = """\
raise MultiProjectException("Ambiguous workspace: %s=%s, %s" % ( varname, varname_path, os.path.abspath(config_filename)))
"""
        fixed = """\
raise MultiProjectException(
    "Ambiguous workspace: %s=%s, %s" %
    (varname, varname_path, os.path.abspath(config_filename)))
"""
        with autopep8_context(line, options=['--experimental']) as result:
            self.assertEqual(fixed, result)
    def test_e501_experimental_with_def(self):
        """E501/--experimental: long def signature wraps with aligned params."""
        line = """\
def foobar(sldfkjlsdfsdf, kksdfsdfsf,sdfsdfsdf, sdfsdfkdk, szdfsdfsdf, sdfsdfsdfsdlkfjsdlf, sdfsdfddf,sdfsdfsfd, sdfsdfdsf):
    pass
"""
        fixed = """\
def foobar(sldfkjlsdfsdf, kksdfsdfsf, sdfsdfsdf, sdfsdfkdk, szdfsdfsdf,
           sdfsdfsdfsdlkfjsdlf, sdfsdfddf, sdfsdfsfd, sdfsdfdsf):
    pass
"""
        with autopep8_context(line, options=['--experimental']) as result:
            self.assertEqual(fixed, result)
    def test_e501_experimental_with_tuple(self):
        """E501/--experimental: tuple argument kept whole on its own line."""
        line = """\
def f():
    man_this_is_a_very_long_function_name(an_extremely_long_variable_name,
                                          ('a string that is long: %s'%'bork'))
"""
        fixed = """\
def f():
    man_this_is_a_very_long_function_name(
        an_extremely_long_variable_name,
        ('a string that is long: %s' % 'bork'))
"""
        with autopep8_context(line, options=['--experimental']) as result:
            self.assertEqual(fixed, result)
    def test_e501_experimental_with_tuple_in_list(self):
        """E501/--experimental: list-of-tuples argument stays a single element."""
        line = """\
def f(self):
    self._xxxxxxxx(aaaaaa, bbbbbbbbb, cccccccccccccccccc,
                   [('mmmmmmmmmm', self.yyyyyyyyyy.zzzzzzzz/_DDDDDD)], eee, 'ff')
"""
        fixed = """\
def f(self):
    self._xxxxxxxx(
        aaaaaa, bbbbbbbbb, cccccccccccccccccc,
        [('mmmmmmmmmm', self.yyyyyyyyyy.zzzzzzzz / _DDDDDD)],
        eee, 'ff')
"""
        with autopep8_context(line, options=['--experimental']) as result:
            self.assertEqual(fixed, result)
def test_e501_experimental_with_complex_reformat(self):
line = """\
bork(111, 111, 111, 111, 222, 222, 222, { 'foo': 222, 'qux': 222 }, ((['hello', 'world'], ['yo', 'stella', "how's", 'it'], ['going']), {str(i): i for i in range(10)}, {'bork':((x, x**x) for x in range(10))}), 222, 222, 222, 222, 333, 333, 333, 333)
"""
fixed = """\
bork(
111, 111, 111, 111, 222, 222, 222, {'foo': 222, 'qux': 222},
((['hello', 'world'],
['yo', 'stella', "how's", 'it'],
['going']),
{str(i): i for i in range(10)},
{'bork': ((x, x ** x) for x in range(10))}),
222, 222, 222, 222, 333, 333, 333, 333)
"""
with autopep8_context(line, options=['--experimental']) as result:
self.assertEqual(fixed, result)
with autopep8_context(line, options=['--experimental']) as result:
self.assertEqual(fixed, result)
    def test_e501_experimental_with_dot_calls(self):
        """E501/--experimental: nested dotted calls split at inner parens."""
        line = """\
if True:
    logging.info('aaaaaa bbbbb dddddd ccccccc eeeeeee fffffff gg: %s',
                 xxxxxxxxxxxxxxxxx.yyyyyyyyyyyyyyyyyyyyy(zzzzzzzzzzzzzzzzz.jjjjjjjjjjjjjjjjj()))
"""
        fixed = """\
if True:
    logging.info(
        'aaaaaa bbbbb dddddd ccccccc eeeeeee fffffff gg: %s',
        xxxxxxxxxxxxxxxxx.yyyyyyyyyyyyyyyyyyyyy(
            zzzzzzzzzzzzzzzzz.jjjjjjjjjjjjjjjjj()))
"""
        with autopep8_context(line, options=['--experimental']) as result:
            self.assertEqual(fixed, result)
    def test_e501_experimental_avoid_breaking_at_empty_parentheses_if_possible(
            self):
        """E501/--experimental: prefer the last call's paren over empty ones."""
        line = """\
someverylongindenttionwhatnot().foo().bar().baz("and here is a long string 123456789012345678901234567890")
"""
        fixed = """\
someverylongindenttionwhatnot().foo().bar().baz(
    "and here is a long string 123456789012345678901234567890")
"""
        with autopep8_context(line, options=['--experimental']) as result:
            self.assertEqual(fixed, result)
    def test_e501_experimental_with_unicode(self):
        """E501/--experimental: non-ASCII text in a string does not break wrapping."""
        line = """\
someverylongindenttionwhatnot().foo().bar().baz("and here is a l안녕하세요 123456789012345678901234567890")
"""
        fixed = """\
someverylongindenttionwhatnot().foo().bar().baz(
    "and here is a l안녕하세요 123456789012345678901234567890")
"""
        with autopep8_context(line, options=['--experimental']) as result:
            self.assertEqual(fixed, result)
    def test_e501_experimental_with_tuple_assignment(self):
        """E501/--experimental: split a chained-call RHS of a tuple unpack."""
        line = """\
if True:
    (xxxxxxx,) = xxxx.xxxxxxx.xxxxx(xxxxxxxxxxxx.xx).xxxxxx(xxxxxxxxxxxx.xxxx == xxxx.xxxx).xxxxx()
"""
        fixed = """\
if True:
    (xxxxxxx,) = xxxx.xxxxxxx.xxxxx(xxxxxxxxxxxx.xx).xxxxxx(
        xxxxxxxxxxxx.xxxx == xxxx.xxxx).xxxxx()
"""
        with autopep8_context(line, options=['--experimental']) as result:
            self.assertEqual(fixed, result)
    @unittest.skip('To do')
    def test_e501_experimental_tuple_on_line(self):
        """E501/--experimental: generator-expression argument kept whole (TODO)."""
        line = """\
def f():
    self.aaaaaaaaa(bbbbbb, ccccccccc, dddddddddddddddd,
                   ((x, y/eeeeeee) for x, y in self.outputs.total.iteritems()),
                   fff, 'GG')
"""
        fixed = """\
def f():
    self.aaaaaaaaa(
        bbbbbb, ccccccccc, dddddddddddddddd,
        ((x, y / eeeeeee) for x, y in self.outputs.total.iteritems()),
        fff, 'GG')
"""
        with autopep8_context(line, options=['--experimental']) as result:
            self.assertEqual(fixed, result)
    def test_e501_experimental_tuple_on_line_two_space_indent(self):
        """E501/--experimental with --indent-size=2: input is left unchanged."""
        line = """\
def f():
  self.aaaaaaaaa(bbbbbb, ccccccccc, dddddddddddddddd,
                 ((x, y/eeeeeee) for x, y in self.outputs.total.iteritems()),
                 fff, 'GG')
"""
        fixed = """\
def f():
  self.aaaaaaaaa(bbbbbb, ccccccccc, dddddddddddddddd,
                 ((x, y/eeeeeee) for x, y in self.outputs.total.iteritems()),
                 fff, 'GG')
"""
        with autopep8_context(line, options=['--experimental',
                                             '--indent-size=2']) as result:
            self.assertEqual(fixed, result)
    def test_e501_experimental_oversized_default_initializer(self):
        """E501/--experimental: huge keyword defaults get one line each."""
        line = """\
aaaaaaaaaaaaaaaaaaaaa(lllll,mmmmmmmm,nnn,fffffffffff,ggggggggggg,hhh,ddddddddddddd=eeeeeeeee,bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb=ccccccccccccccccccccccccccccccccccccccccccccccccc,bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb=cccccccccccccccccccccccccccccccccccccccccccccccc)
"""
        fixed = """\
aaaaaaaaaaaaaaaaaaaaa(
    lllll, mmmmmmmm, nnn, fffffffffff, ggggggggggg, hhh,
    ddddddddddddd=eeeeeeeee,
    bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb=ccccccccccccccccccccccccccccccccccccccccccccccccc,
    bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb=cccccccccccccccccccccccccccccccccccccccccccccccc)
"""
        with autopep8_context(line, options=['--experimental']) as result:
            self.assertEqual(fixed, result)
    def test_e501_experimental_decorator(self):
        """E501/--experimental: decorator arguments wrap one tuple per line."""
        line = """\
@foo(('xxxxxxxxxxxxxxxxxxxxxxxxxx', users.xxxxxxxxxxxxxxxxxxxxxxxxxx), ('yyyyyyyyyyyy', users.yyyyyyyyyyyy), ('zzzzzzzzzzzzzz', users.zzzzzzzzzzzzzz))
"""
        fixed = """\
@foo(('xxxxxxxxxxxxxxxxxxxxxxxxxx', users.xxxxxxxxxxxxxxxxxxxxxxxxxx),
     ('yyyyyyyyyyyy', users.yyyyyyyyyyyy),
     ('zzzzzzzzzzzzzz', users.zzzzzzzzzzzzzz))
"""
        with autopep8_context(line, options=['--experimental']) as result:
            self.assertEqual(fixed, result)
    def test_e501_experimental_long_class_name(self):
        """E501/--experimental: long class header splits after the paren."""
        line = """\
class AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA(BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB):
    pass
"""
        fixed = """\
class AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA(
        BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB):
    pass
"""
        with autopep8_context(line, options=['--experimental']) as result:
            self.assertEqual(fixed, result)
    def test_e501_experimental_no_line_change(self):
        """E501/--experimental: unsplittable long string stays as-is."""
        line = """\
def f():
    return '<a href="javascript:;" class="copy-to-clipboard-button" data-clipboard-text="%s" title="copy url to clipboard">Copy Link</a>' % url
"""
        with autopep8_context(line, options=['--experimental']) as result:
            self.assertEqual(line, result)
    def test_e501_experimental_splitting_small_arrays(self):
        """E501/--experimental: implicit string concat splits after '('."""
        line = """\
def foo():
    unspecified[service] = ('# The %s brown fox jumped over the lazy, good for nothing '
                            'dog until it grew tired and set its sights upon the cat!' % adj)
"""
        fixed = """\
def foo():
    unspecified[service] = (
        '# The %s brown fox jumped over the lazy, good for nothing '
        'dog until it grew tired and set its sights upon the cat!' % adj)
"""
        with autopep8_context(line, options=['--experimental']) as result:
            self.assertEqual(fixed, result)
    def test_e501_experimental_no_splitting_in_func_call(self):
        """E501/--experimental: inner len(...) calls are not broken apart."""
        line = """\
def foo():
    if True:
        if True:
            function.calls('%r (%s): aaaaaaaa bbbbbbbbbb ccccccc ddddddd eeeeee (%d, %d)',
                           xxxxxx.yy, xxxxxx.yyyy, len(mmmmmmmmmmmmm['fnord']),
                           len(mmmmmmmmmmmmm['asdfakjhdsfkj']))
"""
        fixed = """\
def foo():
    if True:
        if True:
            function.calls(
                '%r (%s): aaaaaaaa bbbbbbbbbb ccccccc ddddddd eeeeee (%d, %d)',
                xxxxxx.yy, xxxxxx.yyyy, len(mmmmmmmmmmmmm['fnord']),
                len(mmmmmmmmmmmmm['asdfakjhdsfkj']))
"""
        with autopep8_context(line, options=['--experimental']) as result:
            self.assertEqual(fixed, result)
    def test_e501_experimental_no_splitting_at_dot(self):
        """E501/--experimental: dotted constants stay whole, one per line."""
        line = """\
xxxxxxxxxxxxxxxxxxxxxxxxxxxx = [yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy.MMMMMM_NNNNNNN_OOOOO,
                                yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy.PPPPPP_QQQQQQQ_RRRRR,
                                yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy.SSSSSS_TTTTTTT_UUUUU]
"""
        fixed = """\
xxxxxxxxxxxxxxxxxxxxxxxxxxxx = [
    yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy.MMMMMM_NNNNNNN_OOOOO,
    yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy.PPPPPP_QQQQQQQ_RRRRR,
    yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy.SSSSSS_TTTTTTT_UUUUU]
"""
        with autopep8_context(line, options=['--experimental']) as result:
            self.assertEqual(fixed, result)
    def test_e501_experimental_no_splitting_before_arg_list(self):
        """E501/--experimental: list comp splits at for/if clause boundaries."""
        line = """\
xxxxxxxxxxxx = [yyyyyy['yyyyyy'].get('zzzzzzzzzzz') for yyyyyy in x.get('aaaaaaaaaaa') if yyyyyy['yyyyyy'].get('zzzzzzzzzzz')]
"""
        fixed = """\
xxxxxxxxxxxx = [yyyyyy['yyyyyy'].get('zzzzzzzzzzz')
                for yyyyyy in x.get('aaaaaaaaaaa')
                if yyyyyy['yyyyyy'].get('zzzzzzzzzzz')]
"""
        with autopep8_context(line, options=['--experimental']) as result:
            self.assertEqual(fixed, result)
    def test_e501_experimental_dont_split_if_looks_bad(self):
        """E501/--experimental: keep args whole rather than an ugly split."""
        line = """\
def f():
    if True:
        BAD(('xxxxxxxxxxxxx', 42), 'I died for beauty, but was scarce / Adjusted in the tomb %s', yyyyyyyyyyyyy)
"""
        fixed = """\
def f():
    if True:
        BAD(('xxxxxxxxxxxxx', 42),
            'I died for beauty, but was scarce / Adjusted in the tomb %s',
            yyyyyyyyyyyyy)
"""
        with autopep8_context(line, options=['--experimental']) as result:
            self.assertEqual(fixed, result)
    def test_e501_experimental_list_comp(self):
        """E501/--experimental: two list-comprehension wrapping scenarios."""
        line = """\
xxxxxxxxxxxs = [xxxxxxxxxxx for xxxxxxxxxxx in xxxxxxxxxxxs if not yyyyyyyyyyyy[xxxxxxxxxxx] or not yyyyyyyyyyyy[xxxxxxxxxxx].zzzzzzzzzz]
"""
        fixed = """\
xxxxxxxxxxxs = [
    xxxxxxxxxxx for xxxxxxxxxxx in xxxxxxxxxxxs
    if not yyyyyyyyyyyy[xxxxxxxxxxx] or
    not yyyyyyyyyyyy[xxxxxxxxxxx].zzzzzzzzzz]
"""
        with autopep8_context(line, options=['--experimental']) as result:
            self.assertEqual(fixed, result)

        # Second case: a very long dotted iterable forces a clause-per-line
        # layout.
        line = """\
def f():
    xxxxxxxxxx = [f for f in yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy.zzzzzzzzzzzzzzzzzzzzzzzz.aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa]
"""
        fixed = """\
def f():
    xxxxxxxxxx = [
        f
        for f in
        yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy.zzzzzzzzzzzzzzzzzzzzzzzz.aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa]
"""
        with autopep8_context(line, options=['--experimental']) as result:
            self.assertEqual(fixed, result)
    def test_e501_experimental_dict(self):
        """E501/--experimental: long dict value call is compacted."""
        line = """\
def f():
    zzzzzzzzzzzzz = {
        'aaaaaa/bbbbbb/ccccc/dddddddd/eeeeeeeee/fffffffffff/ggggggggg/hhhhhhhh.py':
            yyyyyyyyyyy.xxxxxxxxxxx(
                'aa/bbbbbbb/cc/ddddddd/eeeeeeeeeee/fffffffffff/ggggggggg/hhhhhhh/ggggg.py',
                '00000000',
                yyyyyyyyyyy.xxxxxxxxx.zzzz),
    }
"""
        fixed = """\
def f():
    zzzzzzzzzzzzz = {
        'aaaaaa/bbbbbb/ccccc/dddddddd/eeeeeeeee/fffffffffff/ggggggggg/hhhhhhhh.py':
        yyyyyyyyyyy.xxxxxxxxxxx(
            'aa/bbbbbbb/cc/ddddddd/eeeeeeeeeee/fffffffffff/ggggggggg/hhhhhhh/ggggg.py',
            '00000000', yyyyyyyyyyy.xxxxxxxxx.zzzz), }
"""
        with autopep8_context(line, options=['--experimental']) as result:
            self.assertEqual(fixed, result)
    def test_e501_experimental_indentation(self):
        """E501/--experimental with --indent-size=2: def signature wraps."""
        line = """\
class Klass(object):

  '''Class docstring.'''

  def Quote(self, parameter_1, parameter_2, parameter_3, parameter_4, parameter_5):
    pass
"""
        fixed = """\
class Klass(object):

  '''Class docstring.'''

  def Quote(
      self, parameter_1, parameter_2, parameter_3, parameter_4,
      parameter_5):
    pass
"""
        with autopep8_context(line, options=['--experimental',
                                             '--indent-size=2']) as result:
            self.assertEqual(fixed, result)
    def test_e501_experimental_long_function_call_elements(self):
        """E501/--experimental: each element of a tuple RHS reflows its call."""
        line = """\
def g():
    pppppppppppppppppppppppppp1, pppppppppppppppppppppppp2 = (
        zzzzzzzzzzzz.yyyyyyyyyyyyyy(aaaaaaaaa=10, bbbbbbbbbbbbbbbb='2:3',
                                    cccccccc='{1:2}', dd=1, eeeee=0),
        zzzzzzzzzzzz.yyyyyyyyyyyyyy(dd=7, aaaaaaaaa=16, bbbbbbbbbbbbbbbb='2:3',
                                    cccccccc='{1:2}',
                                    eeeee=xxxxxxxxxxxxxxxxx.wwwwwwwwwwwww.vvvvvvvvvvvvvvvvvvvvvvvvv))
"""
        fixed = """\
def g():
    pppppppppppppppppppppppppp1, pppppppppppppppppppppppp2 = (
        zzzzzzzzzzzz.yyyyyyyyyyyyyy(
            aaaaaaaaa=10, bbbbbbbbbbbbbbbb='2:3', cccccccc='{1:2}', dd=1,
            eeeee=0),
        zzzzzzzzzzzz.yyyyyyyyyyyyyy(
            dd=7, aaaaaaaaa=16, bbbbbbbbbbbbbbbb='2:3', cccccccc='{1:2}',
            eeeee=xxxxxxxxxxxxxxxxx.wwwwwwwwwwwww.vvvvvvvvvvvvvvvvvvvvvvvvv))
"""
        with autopep8_context(line, options=['--experimental']) as result:
            self.assertEqual(fixed, result)
    def test_e501_experimental_long_nested_tuples_in_arrays(self):
        """E501/--experimental: tuples in a list break before long strings."""
        line = """\
def f():
    aaaaaaaaaaa.bbbbbbb([
        ('xxxxxxxxxx', 'yyyyyy', 'Heaven hath no wrath like love to hatred turned. Nor hell a fury like a woman scorned.'),
        ('xxxxxxx', 'yyyyyyyyyyy', "To the last I grapple with thee. From hell's heart I stab at thee. For hate's sake I spit my last breath at thee!")])
"""
        fixed = """\
def f():
    aaaaaaaaaaa.bbbbbbb(
        [('xxxxxxxxxx', 'yyyyyy',
          'Heaven hath no wrath like love to hatred turned. Nor hell a fury like a woman scorned.'),
         ('xxxxxxx', 'yyyyyyyyyyy',
          "To the last I grapple with thee. From hell's heart I stab at thee. For hate's sake I spit my last breath at thee!")])
"""
        with autopep8_context(line, options=['--experimental']) as result:
            self.assertEqual(fixed, result)
    def test_e501_experimental_func_call_open_paren_not_separated(self):
        # Don't separate the opening paren of a function call from the
        # function's name.
        """E501/--experimental: '(' stays attached to the callee name."""
        line = """\
def f():
    owned_list = [o for o in owned_list if self.display['zzzzzzzzzzzzzz'] in aaaaaaaaaaaaaaaaa.bbbbbbbbbbbbbbbbbbbb(o.qq, ccccccccccccccccccccccccccc.ddddddddd.eeeeeee)]
"""
        fixed = """\
def f():
    owned_list = [
        o for o in owned_list
        if self.display['zzzzzzzzzzzzzz'] in aaaaaaaaaaaaaaaaa.bbbbbbbbbbbbbbbbbbbb(
            o.qq, ccccccccccccccccccccccccccc.ddddddddd.eeeeeee)]
"""
        with autopep8_context(line, options=['--experimental']) as result:
            self.assertEqual(fixed, result)
def test_e501_experimental_long_dotted_object(self):
# Don't separate a long dotted object too soon. Otherwise, it may end
# up with most of its elements on separate lines.
# Note the expected break happens after a trailing dot, late in the chain.
line = """\
def f(self):
return self.xxxxxxxxxxxxxxx(aaaaaaa.bbbbb.ccccccc.ddd.eeeeee.fffffffff.ggggg.hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh)
"""
fixed = """\
def f(self):
return self.xxxxxxxxxxxxxxx(
aaaaaaa.bbbbb.ccccccc.ddd.eeeeee.fffffffff.ggggg.
hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh)
"""
with autopep8_context(line, options=['--experimental']) as result:
self.assertEqual(fixed, result)
def test_e501_experimental_parsing_dict_with_comments(self):
# A dict literal with trailing '#.' comments: the comments are kept
# (reformatted as '# .') and moved above the entries they annotate.
line = """\
self.display['xxxxxxxxxxxx'] = [{'title': _('Library'), #. This is the first comment.
'flag': aaaaaaaaaa.bbbbbbbbb.cccccccccc
}, {'title': _('Original'), #. This is the second comment.
'flag': aaaaaaaaaa.bbbbbbbbb.dddddddddd
}, {'title': _('Unknown'), #. This is the third comment.
'flag': aaaaaaaaaa.bbbbbbbbb.eeeeeeeeee}]
"""
fixed = """\
self.display['xxxxxxxxxxxx'] = [{'title': _('Library'), # . This is the first comment.
'flag': aaaaaaaaaa.bbbbbbbbb.cccccccccc
# . This is the second comment.
}, {'title': _('Original'),
'flag': aaaaaaaaaa.bbbbbbbbb.dddddddddd
# . This is the third comment.
}, {'title': _('Unknown'),
'flag': aaaaaaaaaa.bbbbbbbbb.eeeeeeeeee}]
"""
with autopep8_context(line, options=['--experimental']) as result:
self.assertEqual(fixed, result)
def test_e501_experimental_if_line_over_limit(self):
# An over-long `if` condition breaks after the call's opening paren.
line = """\
if not xxxxxxxxxxxx(aaaaaaaaaaaaaaaaaa, bbbbbbbbbbbbbbbb, cccccccccccccc, dddddddddddddddddddddd):
return 1
"""
fixed = """\
if not xxxxxxxxxxxx(
aaaaaaaaaaaaaaaaaa, bbbbbbbbbbbbbbbb, cccccccccccccc,
dddddddddddddddddddddd):
return 1
"""
with autopep8_context(line, options=['--experimental']) as result:
self.assertEqual(fixed, result)
def test_e501_experimental_for_line_over_limit(self):
# Same breaking strategy as the `if` case, applied to a `for` header.
line = """\
for aaaaaaaaa in xxxxxxxxxxxx(aaaaaaaaaaaaaaaaaa, bbbbbbbbbbbbbbbb, cccccccccccccc, dddddddddddddddddddddd):
pass
"""
fixed = """\
for aaaaaaaaa in xxxxxxxxxxxx(
aaaaaaaaaaaaaaaaaa, bbbbbbbbbbbbbbbb, cccccccccccccc,
dddddddddddddddddddddd):
pass
"""
with autopep8_context(line, options=['--experimental']) as result:
self.assertEqual(fixed, result)
def test_e501_experimental_while_line_over_limit(self):
# Same breaking strategy as the `if` case, applied to a `while` header.
line = """\
while xxxxxxxxxxxx(aaaaaaaaaaaaaaaaaa, bbbbbbbbbbbbbbbb, cccccccccccccc, dddddddddddddddddddddd):
pass
"""
fixed = """\
while xxxxxxxxxxxx(
aaaaaaaaaaaaaaaaaa, bbbbbbbbbbbbbbbb, cccccccccccccc,
dddddddddddddddddddddd):
pass
"""
with autopep8_context(line, options=['--experimental']) as result:
self.assertEqual(fixed, result)
def test_e501_experimental_with_in(self):
# Deeply indented `in` tests: both tuples are split after their opening
# paren so the condition still fits within the line limit.
line = """\
if True:
if True:
if True:
if True:
if True:
if True:
if True:
if True:
if k_left in ('any', k_curr) and k_right in ('any', k_curr):
pass
"""
fixed = """\
if True:
if True:
if True:
if True:
if True:
if True:
if True:
if True:
if k_left in (
'any', k_curr) and k_right in (
'any', k_curr):
pass
"""
with autopep8_context(line, options=['--experimental']) as result:
self.assertEqual(fixed, result)
# Python 2 only: the `print` statement form is a syntax error under Python 3.
@unittest.skipIf(sys.version_info.major >= 3, 'syntax error in Python3')
def test_e501_print_isnot_function(self):
# The `%` right-hand tuple is broken up while the print statement itself
# is left intact (print is a statement, not a call, here).
line = """\
def d():
print "%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d" % (111, 111, 111, 111, 222, 222, 222, 222, 222, 222, 222, 222, 222, 333, 333, 333, 333)
"""
fixed = """\
def d():
print "%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d" % (
111, 111, 111, 111, 222, 222, 222, 222, 222, 222, 222, 222, 222, 333,
333, 333, 333)
"""
with autopep8_context(line, options=['--experimental']) as result:
self.assertEqual(fixed, result)
@contextlib.contextmanager
def autopep8_context(line, options=None):
    """Run autopep8 over *line* with the given CLI *options*.

    Yields the fixed source text produced by ``autopep8.fix_file``.
    """
    cli_args = list(options) if options else []
    with temporary_file_context(line) as filename:
        parsed = autopep8.parse_args([filename] + cli_args)
        yield autopep8.fix_file(filename=filename, options=parsed)
@contextlib.contextmanager
def autopep8_subprocess(line, options, timeout=None):
"""Run the autopep8 CLI on *line* in a subprocess.

Yields a ``(stdout_text, returncode)`` tuple. When *timeout* is given it
is passed to ``communicate`` where supported; on Python 2 (no timeout
parameter) the process is polled and killed once time runs out.
"""
with temporary_file_context(line) as filename:
p = Popen(list(AUTOPEP8_CMD_TUPLE) + [filename] + options, stdout=PIPE)
if timeout is None:
_stdout, _ = p.communicate()
else:
try:
_stdout, _ = p.communicate(timeout=timeout)
except TypeError:
# for Python2
# Popen.communicate() lacks the timeout argument: busy-wait in
# 0.5s steps, then kill the process and fail loudly.
while p.poll() is None and timeout > 0:
time.sleep(0.5)
timeout -= 0.5
if p.poll() is None:
p.kill()
raise Exception("subprocess is timed out")
_stdout, _ = p.communicate()
yield (_stdout.decode('utf-8'), p.returncode)
@contextlib.contextmanager
def temporary_file_context(text, suffix='', prefix=''):
    """Create a temporary file containing *text* and yield its path.

    The file is always deleted, even when the with-block raises. (The
    previous version only removed the file on a clean exit, leaking one
    temporary file per failing test.)
    """
    fd, path = mkstemp(suffix=suffix, prefix=prefix)
    os.close(fd)
    try:
        with autopep8.open_with_encoding(path,
                                         encoding='utf-8',
                                         mode='w') as temp_file:
            temp_file.write(text)
        yield path
    finally:
        os.remove(path)
@contextlib.contextmanager
def readonly_temporary_file_context(text, suffix='', prefix=''):
    """Create a read-only temporary file containing *text*; yield its path.

    Cleanup is guaranteed via try/finally (the original leaked the file
    whenever the with-block raised). Write permission is restored before
    removal so the unlink cannot fail on platforms (e.g. Windows) that
    refuse to delete read-only files.
    """
    fd, path = mkstemp(suffix=suffix, prefix=prefix)
    os.close(fd)
    try:
        with autopep8.open_with_encoding(path,
                                         encoding='utf-8',
                                         mode='w') as temp_file:
            temp_file.write(text)
        os.chmod(path, stat.S_IRUSR)
        yield path
    finally:
        os.chmod(path, stat.S_IRUSR | stat.S_IWUSR)
        os.remove(path)
@contextlib.contextmanager
def temporary_project_directory(prefix="autopep8test"):
    """Yield a fresh temporary directory and always remove it afterwards.

    The removal is wrapped in try/finally so the directory no longer leaks
    when the with-block raises.
    """
    temporary = mkdtemp(prefix=prefix)
    try:
        yield temporary
    finally:
        shutil.rmtree(temporary)
@contextlib.contextmanager
def disable_stderr():
    """Silence ``sys.stderr`` for the duration of the with-block."""
    with capture_stderr(StringIO()):
        yield
@contextlib.contextmanager
def capture_stderr(sio):
    """Redirect ``sys.stderr`` into *sio*; always restore the original."""
    saved_stderr = sys.stderr
    sys.stderr = sio
    try:
        yield
    finally:
        sys.stderr = saved_stderr
# Allow running this test module directly (python <file>) as well as via
# a test runner.
if __name__ == '__main__':
unittest.main()
| 33.792704
| 417
| 0.56823
| 29,003
| 256,588
| 4.870703
| 0.056787
| 0.067851
| 0.070017
| 0.075872
| 0.864326
| 0.838856
| 0.812275
| 0.790443
| 0.76411
| 0.740806
| 0
| 0.042957
| 0.283996
| 256,588
| 7,592
| 418
| 33.797155
| 0.725966
| 0.012121
| 0
| 0.697372
| 0
| 0.033385
| 0.444255
| 0.111292
| 0.000618
| 0
| 0
| 0.000263
| 0.10881
| 1
| 0.093045
| false
| 0.026584
| 0.019165
| 0
| 0.119475
| 0.029366
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b92c758844f5d078d36f95f18133e96bdc1ac954
| 103
|
py
|
Python
|
adversarial_search/__init__.py
|
marcelodeleon/heuristic-training
|
67a6bb35e95a1d84ef248f7abb5d689110494a7e
|
[
"MIT"
] | null | null | null |
adversarial_search/__init__.py
|
marcelodeleon/heuristic-training
|
67a6bb35e95a1d84ef248f7abb5d689110494a7e
|
[
"MIT"
] | null | null | null |
adversarial_search/__init__.py
|
marcelodeleon/heuristic-training
|
67a6bb35e95a1d84ef248f7abb5d689110494a7e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from . import core
from . import utils
from . import games
from . import agents
| 20.6
| 23
| 0.679612
| 15
| 103
| 4.666667
| 0.6
| 0.571429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012048
| 0.194175
| 103
| 5
| 24
| 20.6
| 0.831325
| 0.203884
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b932fe8664fe9f397483822f7b69214fb7404be6
| 10,982
|
py
|
Python
|
Packs/Anomali_Enterprise/Integrations/Anomali_Enterprise/Anomali_Enterprise_test.py
|
matan-xmcyber/content
|
7f02301c140b35956af3cd20cb8dfc64f34afb3e
|
[
"MIT"
] | 1
|
2021-08-07T00:21:58.000Z
|
2021-08-07T00:21:58.000Z
|
Packs/Anomali_Enterprise/Integrations/Anomali_Enterprise/Anomali_Enterprise_test.py
|
matan-xmcyber/content
|
7f02301c140b35956af3cd20cb8dfc64f34afb3e
|
[
"MIT"
] | 3
|
2019-12-13T13:27:20.000Z
|
2020-01-01T14:27:45.000Z
|
Packs/Anomali_Enterprise/Integrations/Anomali_Enterprise/Anomali_Enterprise_test.py
|
matan-xmcyber/content
|
7f02301c140b35956af3cd20cb8dfc64f34afb3e
|
[
"MIT"
] | 1
|
2021-01-05T12:20:30.000Z
|
2021-01-05T12:20:30.000Z
|
import pytest
from Anomali_Enterprise import *
def test_domain_command_benign(mocker):
"""
Given:
- a domain
When:
- mocking the server response for a benign domain, running domain_command
Then:
- validating that the domain score is unknown
- validating the returned context data
"""
client = Client(server_url='test', username='test', password='1234', verify=True, proxy=False)
# Empty malware_family + probability 0 -> DBot score 0 (unknown).
return_data = {'data': {'test.com': {'malware_family': '', 'probability': 0}}, 'result': 'success'}
mocker.patch.object(client, 'domain_request', return_value=return_data)
command_results = domain_command(client, args={'domain': 'test.com'})
output = command_results[0].to_context().get('EntryContext', {})
dbot_key = 'DBotScore(val.Indicator && val.Indicator == obj.Indicator &&' \
' val.Vendor == obj.Vendor && val.Type == obj.Type)'
expected_result = {
'Domain': [
{
'Name': 'test.com'
}
],
'DBotScore': [
{
'Indicator': 'test.com',
'Type': 'domain',
'Vendor': 'Anomali Enterprise',
'Score': 0
}
]
}
assert output.get('Domain(val.Name && val.Name == obj.Name)', []) == expected_result.get('Domain')
assert output.get(dbot_key, []) == expected_result.get('DBotScore')
def test_domain_command_suspicious(mocker):
"""
Given:
- a domain
When:
- mocking the server response for a suspicious domain, running domain_command
Then:
- validating that the domain score is suspicious
- validating the returned context data, including the suspicious context
"""
client = Client(server_url='test', username='test', password='1234', verify=True, proxy=False)
# Non-empty malware_family + probability 0.4 -> DBot score 2 (suspicious)
# and a 'DGA' tag on the domain.
return_data = {'data': {'suspicious.com': {'malware_family': 'my_suspicious', 'probability': 0.4}},
'result': 'success'}
mocker.patch.object(client, 'domain_request', return_value=return_data)
command_results = domain_command(client, args={'domain': 'suspicious.com'})
output = command_results[0].to_context().get('EntryContext', {})
dbot_key = 'DBotScore(val.Indicator && val.Indicator == obj.Indicator &&' \
' val.Vendor == obj.Vendor && val.Type == obj.Type)'
expected_result = {
'Domain': [
{'Name': 'suspicious.com', 'Tags': 'DGA'}
],
'DBotScore': [
{
'Indicator': 'suspicious.com',
'Type': 'domain',
'Vendor': 'Anomali Enterprise',
'Score': 2
}
]
}
assert output.get('Domain(val.Name && val.Name == obj.Name)', []) == expected_result.get('Domain')
assert output.get(dbot_key, []) == expected_result.get('DBotScore')
def test_domain_command_malicious(mocker):
"""
Given:
- a domain
When:
- mocking the server response for a malicious domain, running domain_command
Then:
- validating that the domain score is malicious
- validating the returned context data, including the malicious context
"""
client = Client(server_url='test', username='test', password='1234', verify=True, proxy=False)
# probability 0.9 -> DBot score 3 (malicious) plus a Domain.Malicious entry.
return_data = {'data': {'malicious.com': {'malware_family': 'my_malware', 'probability': 0.9}}, 'result': 'success'}
mocker.patch.object(client, 'domain_request', return_value=return_data)
command_results = domain_command(client, args={'domain': 'malicious.com'})
output = command_results[0].to_context().get('EntryContext', {})
dbot_key = 'DBotScore(val.Indicator && val.Indicator == obj.Indicator &&' \
' val.Vendor == obj.Vendor && val.Type == obj.Type)'
expected_result = {
'Domain': [
{'Malicious': {'Description': 'my_malware', 'Vendor': 'Anomali Enterprise'},
'Name': 'malicious.com', 'Tags': 'DGA'}
],
'DBotScore': [
{
'Indicator': 'malicious.com',
'Type': 'domain',
'Vendor': 'Anomali Enterprise',
'Score': 3
}
]
}
assert output.get('Domain(val.Name && val.Name == obj.Name)', []) == expected_result.get('Domain')
assert output.get(dbot_key, []) == expected_result.get('DBotScore')
def test_start_search_job_command(mocker):
"""
Given:
- a from_, to_ and indicators to search
When:
- mocking the server response for the start of a job, running start_search_job
Then:
- validating the arguments are parsed correctly
- validating the returned context data
"""
client = Client(server_url='test', username='test', password='1234', verify=True, proxy=False)
return_data = {'jobid': '1234'}
mocker.patch.object(client, 'start_search_job_request', return_value=return_data)
command_results = start_search_job(client, args={'from': '1 month', 'indicators': '8.8.8.8'})
output = command_results.to_context().get('EntryContext', {})
# A freshly started job is reported as 'in progress' with the new job id.
expected_result = {
'status': 'in progress',
'job_id': '1234'
}
assert output.get('AnomaliEnterprise.ForensicSearch(val.job_id == obj.job_id)', []) == expected_result
def test_get_search_job_result_command_with_matches(mocker):
"""
Given:
- a job_id
When:
- mocking the server response for getting the results of a job with matches, running get_search_job_result
Then:
- validating the returned context data
"""
client = Client(server_url='test', username='test', password='1234', verify=True, proxy=False)
return_data = {
'status': 'completed', 'category': 'forensic_api_result', 'totalFiles': 1,
'streamResults': [{
'count': '1', 'indicator': '', 'itype': '', 'severity': '',
'event_time': '2020-10-14T09:10:00.000+0000', 'age': '', 'event.dest': '8.8.8.8',
'confidence': '', 'event.src': '8.8.8.8'}],
'scannedEvents': 269918, 'result_file_name': 'org0_1234_job1234_result.tar.gz',
'complete': True, 'processedFiles': 1, 'totalMatches': 1
}
mocker.patch.object(client, 'get_search_job_result_request', return_value=return_data)
command_results = get_search_job_result(client, args={'job_id': '111'})
output = command_results.to_context().get('EntryContext', {})
# Expected context is the raw response plus the requested job_id.
expected_result = {
'status': 'completed', 'category': 'forensic_api_result', 'totalFiles': 1,
'streamResults': [{
'count': '1', 'indicator': '', 'itype': '', 'severity': '',
'event_time': '2020-10-14T09:10:00.000+0000', 'age': '', 'event.dest': '8.8.8.8',
'confidence': '', 'event.src': '8.8.8.8'}],
'scannedEvents': 269918, 'result_file_name': 'org0_1234_job1234_result.tar.gz', 'complete': True,
'processedFiles': 1, 'totalMatches': 1, 'job_id': '111'
}
assert output.get('AnomaliEnterprise.ForensicSearch(val.job_id == obj.job_id)', []) == expected_result
def test_get_search_job_result_command_with_matches_and_limit(mocker):
"""
Given:
- a job_id
When:
- mocking the server response for getting the results of a job with matches, running get_search_job_result
- limit the stream results
Then:
- validating that the context was limited
"""
client = Client(server_url='test', username='test', password='1234', verify=True, proxy=False)
# Three stream results in the mocked response; limit=2 below should trim them.
return_data = {
'status': 'completed', 'category': 'forensic_api_result', 'totalFiles': 1,
'streamResults': [
{
'count': '1', 'indicator': '', 'itype': '', 'severity': '',
'event_time': '2020-10-14T09:10:00.000+0000', 'age': '', 'event.dest': '8.8.8.8',
'confidence': '', 'event.src': '8.8.8.8'
},
{
'count': '1', 'indicator': '', 'itype': '', 'severity': '',
'event_time': '2020-11-14T09:10:00.000+0000', 'age': '', 'event.dest': '8.8.8.8',
'confidence': '', 'event.src': '8.8.8.8'
},
{
'count': '1', 'indicator': '', 'itype': '', 'severity': '',
'event_time': '2020-12-14T09:10:00.000+0000', 'age': '', 'event.dest': '8.8.8.8',
'confidence': '', 'event.src': '8.8.8.8'
}
],
'scannedEvents': 269918, 'result_file_name': 'org0_1234_job1234_result.tar.gz',
'complete': True, 'processedFiles': 1, 'totalMatches': 3
}
mocker.patch.object(client, 'get_search_job_result_request', return_value=return_data)
command_results = get_search_job_result(client, args={'job_id': '111', 'limit': '2'})
output = command_results.to_context().get('EntryContext', {})
assert len(output.get('AnomaliEnterprise.ForensicSearch(val.job_id == obj.job_id)', {}).get('streamResults')) == 2
def test_get_search_job_result_command_without_matches(mocker):
"""
Given:
- a job_id
When:
- mocking the server response for getting the results of a job without matches, running get_search_job_result
Then:
- validating the returned context data
- validating the returned human readable
"""
client = Client(server_url='test', username='test', password='1234', verify=True, proxy=False)
return_data = {
'totalFiles': 0, 'streamResults': [], 'scannedEvents': 269918,
'complete': True, 'processedFiles': 0, 'totalMatches': 0
}
mocker.patch.object(client, 'get_search_job_result_request', return_value=return_data)
command_results = get_search_job_result(client, args={'job_id': '222'})
output = command_results.to_context().get('EntryContext', {})
# 'status' is added by the command; 'complete': True maps to 'completed'.
expected_result = {
'status': 'completed', 'totalFiles': 0, 'streamResults': [],
'scannedEvents': 269918, 'complete': True, 'processedFiles': 0, 'totalMatches': 0, 'job_id': '222'
}
assert output.get('AnomaliEnterprise.ForensicSearch(val.job_id == obj.job_id)', []) == expected_result
hr_ = command_results.to_context().get('HumanReadable', '')
assert hr_ == 'No matches found for the given job ID: 222.'
def test_get_search_job_result_command_expired_job_id(mocker):
"""
Given:
- a job_id
When:
- mocking the server response for an expired job id, running get_search_job_result
Then:
- validating the raised error
"""
client = Client(server_url='test', username='test', password='1234', verify=True, proxy=False)
# An 'error' key in the response should be surfaced as an exception.
return_data = {
'error': 'Error: Cannot find the jobId: job222'
}
mocker.patch.object(client, 'get_search_job_result_request', return_value=return_data)
with pytest.raises(Exception, match="Error: Cannot find the jobId: job222. Job ID might have expired."):
get_search_job_result(client, args={'job_id': 'job222'})
| 39.081851
| 120
| 0.606265
| 1,246
| 10,982
| 5.157303
| 0.128411
| 0.010271
| 0.010271
| 0.044818
| 0.84267
| 0.82991
| 0.81606
| 0.759882
| 0.74821
| 0.74821
| 0
| 0.036701
| 0.238299
| 10,982
| 280
| 121
| 39.221429
| 0.7315
| 0.159443
| 0
| 0.512346
| 0
| 0
| 0.354115
| 0.06885
| 0
| 0
| 0
| 0
| 0.067901
| 1
| 0.049383
| false
| 0.049383
| 0.012346
| 0
| 0.061728
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b938e38576fe7998cefc0e2d9b43b03482037de6
| 25,426
|
py
|
Python
|
tests/test_relations_hyperlink.py
|
loic/django-rest-framework
|
c5a2d501e56201c437118aa65004b33d20216b55
|
[
"Unlicense"
] | null | null | null |
tests/test_relations_hyperlink.py
|
loic/django-rest-framework
|
c5a2d501e56201c437118aa65004b33d20216b55
|
[
"Unlicense"
] | null | null | null |
tests/test_relations_hyperlink.py
|
loic/django-rest-framework
|
c5a2d501e56201c437118aa65004b33d20216b55
|
[
"Unlicense"
] | null | null | null |
from __future__ import unicode_literals
from django.conf.urls import patterns, url
from django.test import TestCase
from rest_framework import serializers
from rest_framework.test import APIRequestFactory
from tests.models import (
ManyToManyTarget, ManyToManySource, ForeignKeyTarget, ForeignKeySource,
NullableForeignKeySource, OneToOneTarget, NullableOneToOneSource
)
# Shared request factory/request so hyperlinked serializers can build
# absolute URLs via the serializer context.
factory = APIRequestFactory()
request = factory.get('/') # Just to ensure we have a request in the serializer context
# Minimal view stub: URL patterns below only need to reverse by name.
dummy_view = lambda request, pk: None
# URLconf used by the TestCases below (via `urls = ...`); the '<model>-detail'
# names match HyperlinkedModelSerializer's default view-name convention.
urlpatterns = patterns(
'',
url(r'^dummyurl/(?P<pk>[0-9]+)/$', dummy_view, name='dummy-url'),
url(r'^manytomanysource/(?P<pk>[0-9]+)/$', dummy_view, name='manytomanysource-detail'),
url(r'^manytomanytarget/(?P<pk>[0-9]+)/$', dummy_view, name='manytomanytarget-detail'),
url(r'^foreignkeysource/(?P<pk>[0-9]+)/$', dummy_view, name='foreignkeysource-detail'),
url(r'^foreignkeytarget/(?P<pk>[0-9]+)/$', dummy_view, name='foreignkeytarget-detail'),
url(r'^nullableforeignkeysource/(?P<pk>[0-9]+)/$', dummy_view, name='nullableforeignkeysource-detail'),
url(r'^onetoonetarget/(?P<pk>[0-9]+)/$', dummy_view, name='onetoonetarget-detail'),
url(r'^nullableonetoonesource/(?P<pk>[0-9]+)/$', dummy_view, name='nullableonetoonesource-detail'),
)
# ManyToMany
class ManyToManyTargetSerializer(serializers.HyperlinkedModelSerializer):
"""Hyperlinked serializer for the reverse side of the m2m relation."""
class Meta:
model = ManyToManyTarget
fields = ('url', 'name', 'sources')
class ManyToManySourceSerializer(serializers.HyperlinkedModelSerializer):
"""Hyperlinked serializer for the forward side of the m2m relation."""
class Meta:
model = ManyToManySource
fields = ('url', 'name', 'targets')
# ForeignKey
class ForeignKeyTargetSerializer(serializers.HyperlinkedModelSerializer):
"""Hyperlinked serializer for the reverse (one-to-many) FK side."""
class Meta:
model = ForeignKeyTarget
fields = ('url', 'name', 'sources')
class ForeignKeySourceSerializer(serializers.HyperlinkedModelSerializer):
"""Hyperlinked serializer for the forward (many-to-one) FK side."""
class Meta:
model = ForeignKeySource
fields = ('url', 'name', 'target')
# Nullable ForeignKey
class NullableForeignKeySourceSerializer(serializers.HyperlinkedModelSerializer):
"""Like ForeignKeySourceSerializer, but the target FK may be null."""
class Meta:
model = NullableForeignKeySource
fields = ('url', 'name', 'target')
# Nullable OneToOne
class NullableOneToOneTargetSerializer(serializers.HyperlinkedModelSerializer):
"""Target side of a nullable one-to-one; exposes the reverse accessor."""
class Meta:
model = OneToOneTarget
fields = ('url', 'name', 'nullable_source')
# TODO: Add test that .data cannot be accessed prior to .is_valid
class HyperlinkedManyToManyTests(TestCase):
"""Retrieve/update/create tests for hyperlinked many-to-many relations.

setUp builds source-N linked to targets 1..N, so target-1 is referenced
by all three sources and source-3 references all three targets.
"""
urls = 'tests.test_relations_hyperlink'
def setUp(self):
for idx in range(1, 4):
target = ManyToManyTarget(name='target-%d' % idx)
target.save()
source = ManyToManySource(name='source-%d' % idx)
source.save()
for target in ManyToManyTarget.objects.all():
source.targets.add(target)
def test_many_to_many_retrieve(self):
queryset = ManyToManySource.objects.all()
serializer = ManyToManySourceSerializer(queryset, many=True, context={'request': request})
expected = [
{'url': 'http://testserver/manytomanysource/1/', 'name': 'source-1', 'targets': ['http://testserver/manytomanytarget/1/']},
{'url': 'http://testserver/manytomanysource/2/', 'name': 'source-2', 'targets': ['http://testserver/manytomanytarget/1/', 'http://testserver/manytomanytarget/2/']},
{'url': 'http://testserver/manytomanysource/3/', 'name': 'source-3', 'targets': ['http://testserver/manytomanytarget/1/', 'http://testserver/manytomanytarget/2/', 'http://testserver/manytomanytarget/3/']}
]
self.assertEqual(serializer.data, expected)
def test_reverse_many_to_many_retrieve(self):
queryset = ManyToManyTarget.objects.all()
serializer = ManyToManyTargetSerializer(queryset, many=True, context={'request': request})
expected = [
{'url': 'http://testserver/manytomanytarget/1/', 'name': 'target-1', 'sources': ['http://testserver/manytomanysource/1/', 'http://testserver/manytomanysource/2/', 'http://testserver/manytomanysource/3/']},
{'url': 'http://testserver/manytomanytarget/2/', 'name': 'target-2', 'sources': ['http://testserver/manytomanysource/2/', 'http://testserver/manytomanysource/3/']},
{'url': 'http://testserver/manytomanytarget/3/', 'name': 'target-3', 'sources': ['http://testserver/manytomanysource/3/']}
]
self.assertEqual(serializer.data, expected)
def test_many_to_many_update(self):
data = {'url': 'http://testserver/manytomanysource/1/', 'name': 'source-1', 'targets': ['http://testserver/manytomanytarget/1/', 'http://testserver/manytomanytarget/2/', 'http://testserver/manytomanytarget/3/']}
instance = ManyToManySource.objects.get(pk=1)
serializer = ManyToManySourceSerializer(instance, data=data, context={'request': request})
self.assertTrue(serializer.is_valid())
serializer.save()
self.assertEqual(serializer.data, data)
# Ensure source 1 is updated, and everything else is as expected
queryset = ManyToManySource.objects.all()
serializer = ManyToManySourceSerializer(queryset, many=True, context={'request': request})
expected = [
{'url': 'http://testserver/manytomanysource/1/', 'name': 'source-1', 'targets': ['http://testserver/manytomanytarget/1/', 'http://testserver/manytomanytarget/2/', 'http://testserver/manytomanytarget/3/']},
{'url': 'http://testserver/manytomanysource/2/', 'name': 'source-2', 'targets': ['http://testserver/manytomanytarget/1/', 'http://testserver/manytomanytarget/2/']},
{'url': 'http://testserver/manytomanysource/3/', 'name': 'source-3', 'targets': ['http://testserver/manytomanytarget/1/', 'http://testserver/manytomanytarget/2/', 'http://testserver/manytomanytarget/3/']}
]
self.assertEqual(serializer.data, expected)
def test_reverse_many_to_many_update(self):
data = {'url': 'http://testserver/manytomanytarget/1/', 'name': 'target-1', 'sources': ['http://testserver/manytomanysource/1/']}
instance = ManyToManyTarget.objects.get(pk=1)
serializer = ManyToManyTargetSerializer(instance, data=data, context={'request': request})
self.assertTrue(serializer.is_valid())
serializer.save()
self.assertEqual(serializer.data, data)
# Ensure target 1 is updated, and everything else is as expected
queryset = ManyToManyTarget.objects.all()
serializer = ManyToManyTargetSerializer(queryset, many=True, context={'request': request})
expected = [
{'url': 'http://testserver/manytomanytarget/1/', 'name': 'target-1', 'sources': ['http://testserver/manytomanysource/1/']},
{'url': 'http://testserver/manytomanytarget/2/', 'name': 'target-2', 'sources': ['http://testserver/manytomanysource/2/', 'http://testserver/manytomanysource/3/']},
{'url': 'http://testserver/manytomanytarget/3/', 'name': 'target-3', 'sources': ['http://testserver/manytomanysource/3/']}
]
self.assertEqual(serializer.data, expected)
def test_many_to_many_create(self):
data = {'url': 'http://testserver/manytomanysource/4/', 'name': 'source-4', 'targets': ['http://testserver/manytomanytarget/1/', 'http://testserver/manytomanytarget/3/']}
serializer = ManyToManySourceSerializer(data=data, context={'request': request})
self.assertTrue(serializer.is_valid())
obj = serializer.save()
self.assertEqual(serializer.data, data)
self.assertEqual(obj.name, 'source-4')
# Ensure source 4 is added, and everything else is as expected
queryset = ManyToManySource.objects.all()
serializer = ManyToManySourceSerializer(queryset, many=True, context={'request': request})
expected = [
{'url': 'http://testserver/manytomanysource/1/', 'name': 'source-1', 'targets': ['http://testserver/manytomanytarget/1/']},
{'url': 'http://testserver/manytomanysource/2/', 'name': 'source-2', 'targets': ['http://testserver/manytomanytarget/1/', 'http://testserver/manytomanytarget/2/']},
{'url': 'http://testserver/manytomanysource/3/', 'name': 'source-3', 'targets': ['http://testserver/manytomanytarget/1/', 'http://testserver/manytomanytarget/2/', 'http://testserver/manytomanytarget/3/']},
{'url': 'http://testserver/manytomanysource/4/', 'name': 'source-4', 'targets': ['http://testserver/manytomanytarget/1/', 'http://testserver/manytomanytarget/3/']}
]
self.assertEqual(serializer.data, expected)
def test_reverse_many_to_many_create(self):
data = {'url': 'http://testserver/manytomanytarget/4/', 'name': 'target-4', 'sources': ['http://testserver/manytomanysource/1/', 'http://testserver/manytomanysource/3/']}
serializer = ManyToManyTargetSerializer(data=data, context={'request': request})
self.assertTrue(serializer.is_valid())
obj = serializer.save()
self.assertEqual(serializer.data, data)
self.assertEqual(obj.name, 'target-4')
# Ensure target 4 is added, and everything else is as expected
queryset = ManyToManyTarget.objects.all()
serializer = ManyToManyTargetSerializer(queryset, many=True, context={'request': request})
expected = [
{'url': 'http://testserver/manytomanytarget/1/', 'name': 'target-1', 'sources': ['http://testserver/manytomanysource/1/', 'http://testserver/manytomanysource/2/', 'http://testserver/manytomanysource/3/']},
{'url': 'http://testserver/manytomanytarget/2/', 'name': 'target-2', 'sources': ['http://testserver/manytomanysource/2/', 'http://testserver/manytomanysource/3/']},
{'url': 'http://testserver/manytomanytarget/3/', 'name': 'target-3', 'sources': ['http://testserver/manytomanysource/3/']},
{'url': 'http://testserver/manytomanytarget/4/', 'name': 'target-4', 'sources': ['http://testserver/manytomanysource/1/', 'http://testserver/manytomanysource/3/']}
]
self.assertEqual(serializer.data, expected)
class HyperlinkedForeignKeyTests(TestCase):
urls = 'tests.test_relations_hyperlink'
def setUp(self):
target = ForeignKeyTarget(name='target-1')
target.save()
new_target = ForeignKeyTarget(name='target-2')
new_target.save()
for idx in range(1, 4):
source = ForeignKeySource(name='source-%d' % idx, target=target)
source.save()
def test_foreign_key_retrieve(self):
queryset = ForeignKeySource.objects.all()
serializer = ForeignKeySourceSerializer(queryset, many=True, context={'request': request})
expected = [
{'url': 'http://testserver/foreignkeysource/1/', 'name': 'source-1', 'target': 'http://testserver/foreignkeytarget/1/'},
{'url': 'http://testserver/foreignkeysource/2/', 'name': 'source-2', 'target': 'http://testserver/foreignkeytarget/1/'},
{'url': 'http://testserver/foreignkeysource/3/', 'name': 'source-3', 'target': 'http://testserver/foreignkeytarget/1/'}
]
self.assertEqual(serializer.data, expected)
def test_reverse_foreign_key_retrieve(self):
queryset = ForeignKeyTarget.objects.all()
serializer = ForeignKeyTargetSerializer(queryset, many=True, context={'request': request})
expected = [
{'url': 'http://testserver/foreignkeytarget/1/', 'name': 'target-1', 'sources': ['http://testserver/foreignkeysource/1/', 'http://testserver/foreignkeysource/2/', 'http://testserver/foreignkeysource/3/']},
{'url': 'http://testserver/foreignkeytarget/2/', 'name': 'target-2', 'sources': []},
]
self.assertEqual(serializer.data, expected)
def test_foreign_key_update(self):
data = {'url': 'http://testserver/foreignkeysource/1/', 'name': 'source-1', 'target': 'http://testserver/foreignkeytarget/2/'}
instance = ForeignKeySource.objects.get(pk=1)
serializer = ForeignKeySourceSerializer(instance, data=data, context={'request': request})
self.assertTrue(serializer.is_valid())
serializer.save()
self.assertEqual(serializer.data, data)
# Ensure source 1 is updated, and everything else is as expected
queryset = ForeignKeySource.objects.all()
serializer = ForeignKeySourceSerializer(queryset, many=True, context={'request': request})
expected = [
{'url': 'http://testserver/foreignkeysource/1/', 'name': 'source-1', 'target': 'http://testserver/foreignkeytarget/2/'},
{'url': 'http://testserver/foreignkeysource/2/', 'name': 'source-2', 'target': 'http://testserver/foreignkeytarget/1/'},
{'url': 'http://testserver/foreignkeysource/3/', 'name': 'source-3', 'target': 'http://testserver/foreignkeytarget/1/'}
]
self.assertEqual(serializer.data, expected)
def test_foreign_key_update_incorrect_type(self):
data = {'url': 'http://testserver/foreignkeysource/1/', 'name': 'source-1', 'target': 2}
instance = ForeignKeySource.objects.get(pk=1)
serializer = ForeignKeySourceSerializer(instance, data=data, context={'request': request})
self.assertFalse(serializer.is_valid())
self.assertEqual(serializer.errors, {'target': ['Incorrect type. Expected URL string, received int.']})
def test_reverse_foreign_key_update(self):
data = {'url': 'http://testserver/foreignkeytarget/2/', 'name': 'target-2', 'sources': ['http://testserver/foreignkeysource/1/', 'http://testserver/foreignkeysource/3/']}
instance = ForeignKeyTarget.objects.get(pk=2)
serializer = ForeignKeyTargetSerializer(instance, data=data, context={'request': request})
self.assertTrue(serializer.is_valid())
# We shouldn't have saved anything to the db yet since save
# hasn't been called.
queryset = ForeignKeyTarget.objects.all()
new_serializer = ForeignKeyTargetSerializer(queryset, many=True, context={'request': request})
expected = [
{'url': 'http://testserver/foreignkeytarget/1/', 'name': 'target-1', 'sources': ['http://testserver/foreignkeysource/1/', 'http://testserver/foreignkeysource/2/', 'http://testserver/foreignkeysource/3/']},
{'url': 'http://testserver/foreignkeytarget/2/', 'name': 'target-2', 'sources': []},
]
self.assertEqual(new_serializer.data, expected)
serializer.save()
self.assertEqual(serializer.data, data)
# Ensure target 2 is update, and everything else is as expected
queryset = ForeignKeyTarget.objects.all()
serializer = ForeignKeyTargetSerializer(queryset, many=True, context={'request': request})
expected = [
{'url': 'http://testserver/foreignkeytarget/1/', 'name': 'target-1', 'sources': ['http://testserver/foreignkeysource/2/']},
{'url': 'http://testserver/foreignkeytarget/2/', 'name': 'target-2', 'sources': ['http://testserver/foreignkeysource/1/', 'http://testserver/foreignkeysource/3/']},
]
self.assertEqual(serializer.data, expected)
def test_foreign_key_create(self):
    # Creating a new source via a URL-identified target.
    data = {'url': 'http://testserver/foreignkeysource/4/', 'name': 'source-4', 'target': 'http://testserver/foreignkeytarget/2/'}
    serializer = ForeignKeySourceSerializer(data=data, context={'request': request})
    self.assertTrue(serializer.is_valid())
    obj = serializer.save()
    self.assertEqual(serializer.data, data)
    self.assertEqual(obj.name, 'source-4')
    # Ensure source 4 is created, and everything else is as expected
    queryset = ForeignKeySource.objects.all()
    serializer = ForeignKeySourceSerializer(queryset, many=True, context={'request': request})
    expected = [
        {'url': 'http://testserver/foreignkeysource/1/', 'name': 'source-1', 'target': 'http://testserver/foreignkeytarget/1/'},
        {'url': 'http://testserver/foreignkeysource/2/', 'name': 'source-2', 'target': 'http://testserver/foreignkeytarget/1/'},
        {'url': 'http://testserver/foreignkeysource/3/', 'name': 'source-3', 'target': 'http://testserver/foreignkeytarget/1/'},
        {'url': 'http://testserver/foreignkeysource/4/', 'name': 'source-4', 'target': 'http://testserver/foreignkeytarget/2/'},
    ]
    self.assertEqual(serializer.data, expected)
def test_reverse_foreign_key_create(self):
    # Creating a new target that claims sources 1 and 3 on creation.
    data = {'url': 'http://testserver/foreignkeytarget/3/', 'name': 'target-3', 'sources': ['http://testserver/foreignkeysource/1/', 'http://testserver/foreignkeysource/3/']}
    serializer = ForeignKeyTargetSerializer(data=data, context={'request': request})
    self.assertTrue(serializer.is_valid())
    obj = serializer.save()
    self.assertEqual(serializer.data, data)
    self.assertEqual(obj.name, 'target-3')
    # Ensure target 3 is created, and everything else is as expected
    queryset = ForeignKeyTarget.objects.all()
    serializer = ForeignKeyTargetSerializer(queryset, many=True, context={'request': request})
    expected = [
        {'url': 'http://testserver/foreignkeytarget/1/', 'name': 'target-1', 'sources': ['http://testserver/foreignkeysource/2/']},
        {'url': 'http://testserver/foreignkeytarget/2/', 'name': 'target-2', 'sources': []},
        {'url': 'http://testserver/foreignkeytarget/3/', 'name': 'target-3', 'sources': ['http://testserver/foreignkeysource/1/', 'http://testserver/foreignkeysource/3/']},
    ]
    self.assertEqual(serializer.data, expected)
def test_foreign_key_update_with_invalid_null(self):
    # The (non-nullable) target relation must reject None on update.
    data = {'url': 'http://testserver/foreignkeysource/1/', 'name': 'source-1', 'target': None}
    instance = ForeignKeySource.objects.get(pk=1)
    serializer = ForeignKeySourceSerializer(instance, data=data, context={'request': request})
    self.assertFalse(serializer.is_valid())
    self.assertEqual(serializer.errors, {'target': ['This field may not be null.']})
class HyperlinkedNullableForeignKeyTests(TestCase):
    """Nullable FK behaviour through hyperlinked serializers: both ``None``
    and the empty string must be accepted and rendered back as ``None``."""
    urls = 'tests.test_relations_hyperlink'

    def setUp(self):
        # One target and three sources; source-3 deliberately has no target.
        target = ForeignKeyTarget(name='target-1')
        target.save()
        for idx in range(1, 4):
            if idx == 3:
                target = None
            source = NullableForeignKeySource(name='source-%d' % idx, target=target)
            source.save()

    def test_foreign_key_retrieve_with_null(self):
        # A null FK must serialize as None, not as an empty URL.
        queryset = NullableForeignKeySource.objects.all()
        serializer = NullableForeignKeySourceSerializer(queryset, many=True, context={'request': request})
        expected = [
            {'url': 'http://testserver/nullableforeignkeysource/1/', 'name': 'source-1', 'target': 'http://testserver/foreignkeytarget/1/'},
            {'url': 'http://testserver/nullableforeignkeysource/2/', 'name': 'source-2', 'target': 'http://testserver/foreignkeytarget/1/'},
            {'url': 'http://testserver/nullableforeignkeysource/3/', 'name': 'source-3', 'target': None},
        ]
        self.assertEqual(serializer.data, expected)

    def test_foreign_key_create_with_valid_null(self):
        data = {'url': 'http://testserver/nullableforeignkeysource/4/', 'name': 'source-4', 'target': None}
        serializer = NullableForeignKeySourceSerializer(data=data, context={'request': request})
        self.assertTrue(serializer.is_valid())
        obj = serializer.save()
        self.assertEqual(serializer.data, data)
        self.assertEqual(obj.name, 'source-4')
        # Ensure source 4 is created, and everything else is as expected
        queryset = NullableForeignKeySource.objects.all()
        serializer = NullableForeignKeySourceSerializer(queryset, many=True, context={'request': request})
        expected = [
            {'url': 'http://testserver/nullableforeignkeysource/1/', 'name': 'source-1', 'target': 'http://testserver/foreignkeytarget/1/'},
            {'url': 'http://testserver/nullableforeignkeysource/2/', 'name': 'source-2', 'target': 'http://testserver/foreignkeytarget/1/'},
            {'url': 'http://testserver/nullableforeignkeysource/3/', 'name': 'source-3', 'target': None},
            {'url': 'http://testserver/nullableforeignkeysource/4/', 'name': 'source-4', 'target': None}
        ]
        self.assertEqual(serializer.data, expected)

    def test_foreign_key_create_with_valid_emptystring(self):
        """
        The emptystring should be interpreted as null in the context
        of relationships.
        """
        data = {'url': 'http://testserver/nullableforeignkeysource/4/', 'name': 'source-4', 'target': ''}
        # Serialized output normalizes '' back to None.
        expected_data = {'url': 'http://testserver/nullableforeignkeysource/4/', 'name': 'source-4', 'target': None}
        serializer = NullableForeignKeySourceSerializer(data=data, context={'request': request})
        self.assertTrue(serializer.is_valid())
        obj = serializer.save()
        self.assertEqual(serializer.data, expected_data)
        self.assertEqual(obj.name, 'source-4')
        # Ensure source 4 is created, and everything else is as expected
        queryset = NullableForeignKeySource.objects.all()
        serializer = NullableForeignKeySourceSerializer(queryset, many=True, context={'request': request})
        expected = [
            {'url': 'http://testserver/nullableforeignkeysource/1/', 'name': 'source-1', 'target': 'http://testserver/foreignkeytarget/1/'},
            {'url': 'http://testserver/nullableforeignkeysource/2/', 'name': 'source-2', 'target': 'http://testserver/foreignkeytarget/1/'},
            {'url': 'http://testserver/nullableforeignkeysource/3/', 'name': 'source-3', 'target': None},
            {'url': 'http://testserver/nullableforeignkeysource/4/', 'name': 'source-4', 'target': None}
        ]
        self.assertEqual(serializer.data, expected)

    def test_foreign_key_update_with_valid_null(self):
        data = {'url': 'http://testserver/nullableforeignkeysource/1/', 'name': 'source-1', 'target': None}
        instance = NullableForeignKeySource.objects.get(pk=1)
        serializer = NullableForeignKeySourceSerializer(instance, data=data, context={'request': request})
        self.assertTrue(serializer.is_valid())
        serializer.save()
        self.assertEqual(serializer.data, data)
        # Ensure source 1 is updated, and everything else is as expected
        queryset = NullableForeignKeySource.objects.all()
        serializer = NullableForeignKeySourceSerializer(queryset, many=True, context={'request': request})
        expected = [
            {'url': 'http://testserver/nullableforeignkeysource/1/', 'name': 'source-1', 'target': None},
            {'url': 'http://testserver/nullableforeignkeysource/2/', 'name': 'source-2', 'target': 'http://testserver/foreignkeytarget/1/'},
            {'url': 'http://testserver/nullableforeignkeysource/3/', 'name': 'source-3', 'target': None},
        ]
        self.assertEqual(serializer.data, expected)

    def test_foreign_key_update_with_valid_emptystring(self):
        """
        The emptystring should be interpreted as null in the context
        of relationships.
        """
        data = {'url': 'http://testserver/nullableforeignkeysource/1/', 'name': 'source-1', 'target': ''}
        # Serialized output normalizes '' back to None.
        expected_data = {'url': 'http://testserver/nullableforeignkeysource/1/', 'name': 'source-1', 'target': None}
        instance = NullableForeignKeySource.objects.get(pk=1)
        serializer = NullableForeignKeySourceSerializer(instance, data=data, context={'request': request})
        self.assertTrue(serializer.is_valid())
        serializer.save()
        self.assertEqual(serializer.data, expected_data)
        # Ensure source 1 is updated, and everything else is as expected
        queryset = NullableForeignKeySource.objects.all()
        serializer = NullableForeignKeySourceSerializer(queryset, many=True, context={'request': request})
        expected = [
            {'url': 'http://testserver/nullableforeignkeysource/1/', 'name': 'source-1', 'target': None},
            {'url': 'http://testserver/nullableforeignkeysource/2/', 'name': 'source-2', 'target': 'http://testserver/foreignkeytarget/1/'},
            {'url': 'http://testserver/nullableforeignkeysource/3/', 'name': 'source-3', 'target': None},
        ]
        self.assertEqual(serializer.data, expected)
class HyperlinkedNullableOneToOneTests(TestCase):
    """Reverse one-to-one relations that may be null must serialize as None."""
    urls = 'tests.test_relations_hyperlink'

    def setUp(self):
        # Two targets (pks 1 and 2); only the first gets a linked source.
        saved_targets = []
        for target_name in ('target-1', 'target-2'):
            record = OneToOneTarget(name=target_name)
            record.save()
            saved_targets.append(record)
        linked = NullableOneToOneSource(name='source-1', target=saved_targets[0])
        linked.save()

    def test_reverse_foreign_key_retrieve_with_null(self):
        all_targets = OneToOneTarget.objects.all()
        serializer = NullableOneToOneTargetSerializer(all_targets, many=True, context={'request': request})
        expected = [
            {'url': 'http://testserver/onetoonetarget/1/', 'name': 'target-1', 'nullable_source': 'http://testserver/nullableonetoonesource/1/'},
            {'url': 'http://testserver/onetoonetarget/2/', 'name': 'target-2', 'nullable_source': None},
        ]
        self.assertEqual(serializer.data, expected)
| 58.585253
| 219
| 0.661331
| 2,498
| 25,426
| 6.672938
| 0.061649
| 0.133541
| 0.075469
| 0.052193
| 0.85722
| 0.828124
| 0.816966
| 0.801788
| 0.795669
| 0.782171
| 0
| 0.013755
| 0.17368
| 25,426
| 433
| 220
| 58.720554
| 0.779629
| 0.045937
| 0
| 0.594752
| 0
| 0
| 0.363873
| 0.023536
| 0
| 0
| 0
| 0.002309
| 0.154519
| 1
| 0.069971
| false
| 0
| 0.017493
| 0
| 0.145773
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b9b2ee756f748c92bb03985c35eddbe6e2c1ba2d
| 104
|
py
|
Python
|
pasa/__init__.py
|
sonoisa/pasa
|
90dbcd72890bfe390d2a58f2a4cdb79d42a9f9f8
|
[
"MIT"
] | 5
|
2018-07-23T05:45:24.000Z
|
2021-04-04T14:59:15.000Z
|
pasa/__init__.py
|
sonoisa/pasa
|
90dbcd72890bfe390d2a58f2a4cdb79d42a9f9f8
|
[
"MIT"
] | 2
|
2019-01-28T04:33:12.000Z
|
2019-11-20T14:30:27.000Z
|
pasa/__init__.py
|
sonoisa/pasa
|
90dbcd72890bfe390d2a58f2a4cdb79d42a9f9f8
|
[
"MIT"
] | 1
|
2020-02-07T08:09:12.000Z
|
2020-02-07T08:09:12.000Z
|
# -*- coding: utf-8 -*-
from . import init
from . import parse
from . import result
from . import dict
| 14.857143
| 23
| 0.663462
| 15
| 104
| 4.6
| 0.6
| 0.57971
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012195
| 0.211538
| 104
| 6
| 24
| 17.333333
| 0.829268
| 0.201923
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
6a3e3118a996ef5c50f54ed54d0642c7d849b9cb
| 11,545
|
py
|
Python
|
hgraph/hgnn.py
|
siboehm/hgraph2graph
|
91d0dc763be1f9bc97e0c2473b0a00a058b4a9f9
|
[
"MIT"
] | null | null | null |
hgraph/hgnn.py
|
siboehm/hgraph2graph
|
91d0dc763be1f9bc97e0c2473b0a00a058b4a9f9
|
[
"MIT"
] | null | null | null |
hgraph/hgnn.py
|
siboehm/hgraph2graph
|
91d0dc763be1f9bc97e0c2473b0a00a058b4a9f9
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import rdkit.Chem as Chem
import torch.nn.functional as F
from hgraph.mol_graph import MolGraph
from hgraph.encoder import HierMPNEncoder
from hgraph.decoder import HierMPNDecoder
from hgraph.nnutils import *
def make_cuda(tensors):
    """Move a ``(tree_tensors, graph_tensors)`` pair onto the GPU.

    Every element of each list except the trailing one is converted to a
    long CUDA tensor; the final element (the scope list) passes through
    untouched.
    """
    def _to_device(seq):
        moved = []
        for item in seq[:-1]:
            if type(item) is not torch.Tensor:
                item = torch.tensor(item)
            moved.append(item.cuda().long())
        moved.append(seq[-1])
        return moved

    tree_tensors, graph_tensors = tensors
    return _to_device(tree_tensors), _to_device(graph_tensors)
class HierVAE(nn.Module):
    """Hierarchical graph VAE with a single Gaussian latent at the root."""

    def __init__(self, args):
        super(HierVAE, self).__init__()
        self.encoder = HierMPNEncoder(
            args.vocab, args.atom_vocab, args.rnn_type, args.embed_size,
            args.hidden_size, args.depthT, args.depthG, args.dropout,
        )
        self.decoder = HierMPNDecoder(
            args.vocab, args.atom_vocab, args.rnn_type, args.embed_size,
            args.hidden_size, args.latent_size, args.diterT, args.diterG,
            args.dropout,
        )
        # Encoder and decoder share one embedding table.
        self.encoder.tie_embedding(self.decoder.hmpn)
        self.latent_size = args.latent_size
        # Mean / variance heads for the root latent.
        self.R_mean = nn.Linear(args.hidden_size, args.latent_size)
        self.R_var = nn.Linear(args.hidden_size, args.latent_size)

    def rsample(self, z_vecs, W_mean, W_var, perturb=True):
        """Reparameterized Gaussian sample plus its KL divergence to N(0, I)."""
        n = z_vecs.size(0)
        mu = W_mean(z_vecs)
        log_var = -torch.abs(W_var(z_vecs))  # variance constrained to (0, 1]
        kl = -0.5 * torch.sum(1.0 + log_var - mu * mu - torch.exp(log_var)) / n
        # Noise is drawn unconditionally so the RNG stream is consumed the
        # same way whether or not we perturb (matches original behavior).
        eps = torch.randn_like(mu).cuda()
        if perturb:
            out = mu + torch.exp(log_var / 2) * eps
        else:
            out = mu
        return out, kl

    def sample(self, batch_size, greedy):
        """Decode molecules from latents drawn from the prior."""
        root_vecs = torch.randn(batch_size, self.latent_size).cuda()
        triple = (root_vecs, root_vecs, root_vecs)
        return self.decoder.decode(triple, greedy=greedy, max_decode_step=150)

    def reconstruct(self, batch):
        """Greedily decode each molecule in *batch* from its posterior mean."""
        graphs, tensors, _ = batch
        tree_tensors, graph_tensors = tensors = make_cuda(tensors)
        root_vecs, tree_vecs, _, graph_vecs = self.encoder(tree_tensors, graph_tensors)
        root_vecs, root_kl = self.rsample(root_vecs, self.R_mean, self.R_var, perturb=False)
        triple = (root_vecs, root_vecs, root_vecs)
        return self.decoder.decode(triple, greedy=True, max_decode_step=150)

    def forward(self, graphs, tensors, orders, beta, perturb_z=True):
        """Return ``(loss + beta * kl, kl, wacc, iacc, tacc, sacc)``."""
        tree_tensors, graph_tensors = tensors = make_cuda(tensors)
        root_vecs, tree_vecs, _, graph_vecs = self.encoder(tree_tensors, graph_tensors)
        root_vecs, root_kl = self.rsample(root_vecs, self.R_mean, self.R_var, perturb_z)
        kl_div = root_kl
        triple = (root_vecs, root_vecs, root_vecs)
        loss, wacc, iacc, tacc, sacc = self.decoder(triple, graphs, tensors, orders)
        return loss + beta * kl_div, kl_div.item(), wacc, iacc, tacc, sacc
class HierVGNN(nn.Module):
    """Hierarchical graph-to-graph translation model: encodes a source
    molecule, samples a latent vector for the tree- and graph-level
    "difference" to the target, and decodes with attention."""

    def __init__(self, args):
        super(HierVGNN, self).__init__()
        self.latent_size = args.latent_size
        self.encoder = HierMPNEncoder(
            args.vocab,
            args.atom_vocab,
            args.rnn_type,
            args.embed_size,
            args.hidden_size,
            args.depthT,
            args.depthG,
            args.dropout,
        )
        self.decoder = HierMPNDecoder(
            args.vocab,
            args.atom_vocab,
            args.rnn_type,
            args.embed_size,
            args.hidden_size,
            args.hidden_size,
            args.diterT,
            args.diterG,
            args.dropout,
            attention=True,
        )
        # Encoder and decoder share one embedding table.
        self.encoder.tie_embedding(self.decoder.hmpn)
        # Separate mean/variance heads for the tree and graph latents.
        self.T_mean = nn.Linear(args.hidden_size, args.latent_size)
        self.T_var = nn.Linear(args.hidden_size, args.latent_size)
        self.G_mean = nn.Linear(args.hidden_size, args.latent_size)
        self.G_var = nn.Linear(args.hidden_size, args.latent_size)
        # Project the concatenated [hidden ; latent] back to hidden size.
        self.W_tree = nn.Sequential(
            nn.Linear(args.hidden_size + args.latent_size, args.hidden_size), nn.ReLU()
        )
        self.W_graph = nn.Sequential(
            nn.Linear(args.hidden_size + args.latent_size, args.hidden_size), nn.ReLU()
        )

    def encode(self, tensors):
        """Encode and re-pack per-node vectors into padded per-molecule
        batches using the (start, length) scopes stored as the last
        element of each tensor list."""
        tree_tensors, graph_tensors = tensors
        root_vecs, tree_vecs, _, graph_vecs = self.encoder(tree_tensors, graph_tensors)
        tree_vecs = stack_pad_tensor([tree_vecs[st : st + le] for st, le in tree_tensors[-1]])
        graph_vecs = stack_pad_tensor([graph_vecs[st : st + le] for st, le in graph_tensors[-1]])
        return root_vecs, tree_vecs, graph_vecs

    def translate(self, tensors, num_decode, enum_root, greedy=True):
        """Decode `num_decode` variants of the encoded input by combining
        its encoding with fresh prior noise per decode slot."""
        tensors = make_cuda(tensors)
        root_vecs, tree_vecs, graph_vecs = self.encode(tensors)
        all_smiles = []  # NOTE(review): never used below — looks vestigial
        if enum_root:
            # Tile the encodings so there is one copy per decode slot.
            repeat = num_decode // len(root_vecs)
            modulo = num_decode % len(root_vecs)
            root_vecs = torch.cat([root_vecs] * repeat + [root_vecs[:modulo]], dim=0)
            tree_vecs = torch.cat([tree_vecs] * repeat + [tree_vecs[:modulo]], dim=0)
            graph_vecs = torch.cat([graph_vecs] * repeat + [graph_vecs[:modulo]], dim=0)
        batch_size = len(root_vecs)
        # One prior draw per molecule, broadcast over its node dimension.
        z_tree = (
            torch.randn(batch_size, 1, self.latent_size).expand(-1, tree_vecs.size(1), -1).cuda()
        )
        z_graph = (
            torch.randn(batch_size, 1, self.latent_size).expand(-1, graph_vecs.size(1), -1).cuda()
        )
        z_tree_vecs = self.W_tree(torch.cat([tree_vecs, z_tree], dim=-1))
        z_graph_vecs = self.W_graph(torch.cat([graph_vecs, z_graph], dim=-1))
        return self.decoder.decode((root_vecs, z_tree_vecs, z_graph_vecs), greedy=greedy)

    def rsample(self, z_vecs, W_mean, W_var):
        """Reparameterized Gaussian sample plus its KL divergence to N(0, I)."""
        batch_size = z_vecs.size(0)
        z_mean = W_mean(z_vecs)
        z_log_var = -torch.abs(W_var(z_vecs))  # variance constrained to (0, 1]
        kl_loss = (
            -0.5 * torch.sum(1.0 + z_log_var - z_mean * z_mean - torch.exp(z_log_var)) / batch_size
        )
        epsilon = torch.randn_like(z_mean).cuda()
        z_vecs = z_mean + torch.exp(z_log_var / 2) * epsilon
        return z_vecs, kl_loss

    def forward(self, x_graphs, x_tensors, y_graphs, y_tensors, y_orders, beta):
        """Return ``(loss + beta * kl, kl, wacc, iacc, tacc, sacc)`` for a
        batch of (x, y) translation pairs."""
        x_tensors = make_cuda(x_tensors)
        y_tensors = make_cuda(y_tensors)
        x_root_vecs, x_tree_vecs, x_graph_vecs = self.encode(x_tensors)
        _, y_tree_vecs, y_graph_vecs = self.encode(y_tensors)
        # Latent encodes the pooled difference between target and source.
        diff_tree_vecs = y_tree_vecs.sum(dim=1) - x_tree_vecs.sum(dim=1)
        diff_graph_vecs = y_graph_vecs.sum(dim=1) - x_graph_vecs.sum(dim=1)
        diff_tree_vecs, tree_kl = self.rsample(diff_tree_vecs, self.T_mean, self.T_var)
        diff_graph_vecs, graph_kl = self.rsample(diff_graph_vecs, self.G_mean, self.G_var)
        kl_div = tree_kl + graph_kl
        # Broadcast the sampled difference over each molecule's nodes.
        diff_tree_vecs = diff_tree_vecs.unsqueeze(1).expand(-1, x_tree_vecs.size(1), -1)
        diff_graph_vecs = diff_graph_vecs.unsqueeze(1).expand(-1, x_graph_vecs.size(1), -1)
        x_tree_vecs = self.W_tree(torch.cat([x_tree_vecs, diff_tree_vecs], dim=-1))
        x_graph_vecs = self.W_graph(torch.cat([x_graph_vecs, diff_graph_vecs], dim=-1))
        loss, wacc, iacc, tacc, sacc = self.decoder(
            (x_root_vecs, x_tree_vecs, x_graph_vecs), y_graphs, y_tensors, y_orders
        )
        return loss + beta * kl_div, kl_div.item(), wacc, iacc, tacc, sacc
class HierCondVGNN(HierVGNN):
    """HierVGNN variant conditioned on an extra property vector *cond* of
    size ``args.cond_size``, injected on both the posterior and decoder
    sides."""

    def __init__(self, args):
        super(HierCondVGNN, self).__init__(args)
        # Widened projections: [hidden ; latent ; cond] -> hidden.
        self.W_tree = nn.Sequential(
            nn.Linear(args.hidden_size + args.latent_size + args.cond_size, args.hidden_size),
            nn.ReLU(),
        )
        self.W_graph = nn.Sequential(
            nn.Linear(args.hidden_size + args.latent_size + args.cond_size, args.hidden_size),
            nn.ReLU(),
        )
        # Posterior-side projections: [hidden ; cond] -> hidden.
        self.U_tree = nn.Sequential(
            nn.Linear(args.hidden_size + args.cond_size, args.hidden_size), nn.ReLU()
        )
        self.U_graph = nn.Sequential(
            nn.Linear(args.hidden_size + args.cond_size, args.hidden_size), nn.ReLU()
        )

    def translate(self, tensors, cond, num_decode, enum_root):
        """Decode `num_decode` conditioned variants of the encoded input."""
        assert enum_root
        tensors = make_cuda(tensors)
        root_vecs, tree_vecs, graph_vecs = self.encode(tensors)
        # Broadcast the single condition vector over every decode slot/node.
        cond = cond.view(1, 1, -1)
        tree_cond = cond.expand(num_decode, tree_vecs.size(1), -1)
        graph_cond = cond.expand(num_decode, graph_vecs.size(1), -1)
        if enum_root:
            # Tile the encodings so there is one copy per decode slot.
            repeat = num_decode // len(root_vecs)
            modulo = num_decode % len(root_vecs)
            root_vecs = torch.cat([root_vecs] * repeat + [root_vecs[:modulo]], dim=0)
            tree_vecs = torch.cat([tree_vecs] * repeat + [tree_vecs[:modulo]], dim=0)
            graph_vecs = torch.cat([graph_vecs] * repeat + [graph_vecs[:modulo]], dim=0)
        z_tree = (
            torch.randn(num_decode, 1, self.latent_size).expand(-1, tree_vecs.size(1), -1).cuda()
        )
        z_graph = (
            torch.randn(num_decode, 1, self.latent_size).expand(-1, graph_vecs.size(1), -1).cuda()
        )
        z_tree_vecs = self.W_tree(torch.cat([tree_vecs, z_tree, tree_cond], dim=-1))
        z_graph_vecs = self.W_graph(torch.cat([graph_vecs, z_graph, graph_cond], dim=-1))
        return self.decoder.decode((root_vecs, z_tree_vecs, z_graph_vecs))

    def forward(self, x_graphs, x_tensors, y_graphs, y_tensors, y_orders, cond, beta):
        """Return ``(loss + beta * kl, kl, wacc, iacc, tacc, sacc)`` for a
        batch of (x, y) pairs under condition *cond*."""
        x_tensors = make_cuda(x_tensors)
        y_tensors = make_cuda(y_tensors)
        cond = torch.tensor(cond).float().cuda()
        x_root_vecs, x_tree_vecs, x_graph_vecs = self.encode(x_tensors)
        _, y_tree_vecs, y_graph_vecs = self.encode(y_tensors)
        diff_tree_vecs = y_tree_vecs.sum(dim=1) - x_tree_vecs.sum(dim=1)
        diff_graph_vecs = y_graph_vecs.sum(dim=1) - x_graph_vecs.sum(dim=1)
        diff_tree_vecs = self.U_tree(
            torch.cat([diff_tree_vecs, cond], dim=-1)
        )  # combine condition for posterior
        diff_graph_vecs = self.U_graph(
            torch.cat([diff_graph_vecs, cond], dim=-1)
        )  # combine condition for posterior
        diff_tree_vecs, tree_kl = self.rsample(diff_tree_vecs, self.T_mean, self.T_var)
        diff_graph_vecs, graph_kl = self.rsample(diff_graph_vecs, self.G_mean, self.G_var)
        kl_div = tree_kl + graph_kl
        diff_tree_vecs = torch.cat(
            [diff_tree_vecs, cond], dim=-1
        )  # append condition to the sampled latent fed to the decoder side
        diff_graph_vecs = torch.cat(
            [diff_graph_vecs, cond], dim=-1
        )  # append condition to the sampled latent fed to the decoder side
        diff_tree_vecs = diff_tree_vecs.unsqueeze(1).expand(-1, x_tree_vecs.size(1), -1)
        diff_graph_vecs = diff_graph_vecs.unsqueeze(1).expand(-1, x_graph_vecs.size(1), -1)
        x_tree_vecs = self.W_tree(torch.cat([x_tree_vecs, diff_tree_vecs], dim=-1))
        x_graph_vecs = self.W_graph(torch.cat([x_graph_vecs, diff_graph_vecs], dim=-1))
        loss, wacc, iacc, tacc, sacc = self.decoder(
            (x_root_vecs, x_tree_vecs, x_graph_vecs), y_graphs, y_tensors, y_orders
        )
        return loss + beta * kl_div, kl_div.item(), wacc, iacc, tacc, sacc
| 41.82971
| 99
| 0.629104
| 1,675
| 11,545
| 4.01791
| 0.079403
| 0.065379
| 0.047845
| 0.045468
| 0.847103
| 0.826449
| 0.802823
| 0.769391
| 0.751263
| 0.7263
| 0
| 0.010355
| 0.255522
| 11,545
| 275
| 100
| 41.981818
| 0.772659
| 0.011
| 0
| 0.561181
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004219
| 1
| 0.059072
| false
| 0
| 0.033755
| 0
| 0.151899
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
dbe5488b0c9dd7e0247e4e990ce64e39938abad2
| 8,031
|
py
|
Python
|
polyaxon_cli/cli/bookmark.py
|
vfdev-5/polyaxon-cli
|
9232c3b614d3025b9e31c79fbe632cd35fcfcc64
|
[
"MIT"
] | null | null | null |
polyaxon_cli/cli/bookmark.py
|
vfdev-5/polyaxon-cli
|
9232c3b614d3025b9e31c79fbe632cd35fcfcc64
|
[
"MIT"
] | null | null | null |
polyaxon_cli/cli/bookmark.py
|
vfdev-5/polyaxon-cli
|
9232c3b614d3025b9e31c79fbe632cd35fcfcc64
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import sys
import click
from polyaxon_cli.cli.getters.user import get_username_or_local
from polyaxon_cli.client import PolyaxonClient
from polyaxon_cli.client.exceptions import PolyaxonHTTPError, PolyaxonShouldExitError
from polyaxon_cli.logger import clean_outputs
from polyaxon_cli.utils.formatting import (
Printer,
dict_tabulate,
get_meta_response,
list_dicts_to_tabulate
)
from polyaxon_client.exceptions import PolyaxonClientException
@click.group()
@click.option('--username', '-u', type=str)
@click.pass_context
@clean_outputs
def bookmark(ctx, username):  # pylint:disable=redefined-outer-name
    """Commands for bookmarks."""
    # Stash the (optional) username on the click context so each subcommand
    # can resolve it via ctx.obj.get('username').
    ctx.obj = ctx.obj or {}
    ctx.obj['username'] = username
@bookmark.command()
@click.option('--page', type=int, help='To paginate through the list of projects.')
@click.pass_context
@clean_outputs
def projects(ctx, page):
    """List bookmarked projects for user.

    Uses [Caching](/polyaxon_cli/introduction#Caching)

    Examples:

    \b
    ```bash
    $ polyaxon bookmark projects
    ```

    \b
    ```bash
    $ polyaxon bookmark -u adam projects
    ```
    """
    user = get_username_or_local(ctx.obj.get('username'))
    page = page or 1  # click passes None when --page is omitted
    try:
        response = PolyaxonClient().bookmark.projects(username=user, page=page)
    except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
        Printer.print_error(
            'Could not get bookmarked projects for user `{}`.'.format(user))
        Printer.print_error('Error message `{}`.'.format(e))
        sys.exit(1)
    # Show pagination metadata before the results themselves.
    meta = get_meta_response(response)
    if meta:
        Printer.print_header('Bookmarked projects for user `{}`.'.format(user))
        Printer.print_header('Navigation:')
        dict_tabulate(meta)
    else:
        Printer.print_header('No bookmarked projects found for user `{}`.'.format(user))
    # Colorize each entry's status and render the list as a table.
    objects = [Printer.add_status_color(o.to_light_dict(humanize_values=True))
               for o in response['results']]
    objects = list_dicts_to_tabulate(objects)
    if objects:
        Printer.print_header("Projects:")
        dict_tabulate(objects, is_list_dict=True)
@bookmark.command()
@click.option('--page', type=int, help='To paginate through the list of groups.')
@click.pass_context
@clean_outputs
def groups(ctx, page):
    """List bookmarked experiment groups for user.

    Uses [Caching](/polyaxon_cli/introduction#Caching)

    Examples:

    \b
    ```bash
    $ polyaxon bookmark groups
    ```

    \b
    ```bash
    $ polyaxon bookmark -u adam groups
    ```
    """
    user = get_username_or_local(ctx.obj.get('username'))
    page = page or 1  # click passes None when --page is omitted
    try:
        response = PolyaxonClient().bookmark.groups(username=user, page=page)
    except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
        Printer.print_error(
            'Could not get bookmarked experiment groups for user `{}`.'.format(user))
        Printer.print_error('Error message `{}`.'.format(e))
        sys.exit(1)
    # Show pagination metadata before the results themselves.
    meta = get_meta_response(response)
    if meta:
        Printer.print_header('Bookmarked experiment groups for user `{}`.'.format(user))
        Printer.print_header('Navigation:')
        dict_tabulate(meta)
    else:
        Printer.print_header('No bookmarked experiment groups found for user `{}`.'.format(user))
    # Colorize each entry's status and render the list as a table.
    objects = [Printer.add_status_color(o.to_light_dict(humanize_values=True))
               for o in response['results']]
    objects = list_dicts_to_tabulate(objects)
    if objects:
        Printer.print_header("Experiment groups:")
        dict_tabulate(objects, is_list_dict=True)
@bookmark.command()
@click.option('--page', type=int, help='To paginate through the list of experiments.')
@click.pass_context
@clean_outputs
def experiments(ctx, page):
    """List bookmarked experiments for user.

    Uses [Caching](/polyaxon_cli/introduction#Caching)

    Examples:

    \b
    ```bash
    $ polyaxon bookmark experiments
    ```

    \b
    ```bash
    $ polyaxon bookmark -u adam experiments
    ```
    """
    user = get_username_or_local(ctx.obj.get('username'))
    page = page or 1  # click passes None when --page is omitted
    try:
        response = PolyaxonClient().bookmark.experiments(username=user, page=page)
    except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
        Printer.print_error(
            'Could not get bookmarked experiments for user `{}`.'.format(user))
        Printer.print_error('Error message `{}`.'.format(e))
        sys.exit(1)
    # Show pagination metadata before the results themselves.
    meta = get_meta_response(response)
    if meta:
        Printer.print_header('Bookmarked experiments for user `{}`.'.format(user))
        Printer.print_header('Navigation:')
        dict_tabulate(meta)
    else:
        Printer.print_header('No bookmarked experiments found for user `{}`.'.format(user))
    # Colorize each entry's status and render the list as a table.
    objects = [Printer.add_status_color(o.to_light_dict(humanize_values=True))
               for o in response['results']]
    objects = list_dicts_to_tabulate(objects)
    if objects:
        Printer.print_header("Experiments:")
        dict_tabulate(objects, is_list_dict=True)
@bookmark.command()
@click.option('--page', type=int, help='To paginate through the list of jobs.')
@click.pass_context
@clean_outputs
def jobs(ctx, page):
    """List bookmarked jobs for user.

    Uses [Caching](/polyaxon_cli/introduction#Caching)

    Examples:

    \b
    ```bash
    $ polyaxon bookmark jobs
    ```

    \b
    ```bash
    $ polyaxon bookmark -u adam jobs
    ```
    """
    user = get_username_or_local(ctx.obj.get('username'))
    page = page or 1  # click passes None when --page is omitted
    try:
        response = PolyaxonClient().bookmark.jobs(username=user, page=page)
    except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
        Printer.print_error(
            'Could not get bookmarked jobs for user `{}`.'.format(user))
        Printer.print_error('Error message `{}`.'.format(e))
        sys.exit(1)
    # Show pagination metadata before the results themselves.
    meta = get_meta_response(response)
    if meta:
        Printer.print_header('Bookmarked jobs for user `{}`.'.format(user))
        Printer.print_header('Navigation:')
        dict_tabulate(meta)
    else:
        Printer.print_header('No bookmarked jobs found for user `{}`.'.format(user))
    # Colorize each entry's status and render the list as a table.
    objects = [Printer.add_status_color(o.to_light_dict(humanize_values=True))
               for o in response['results']]
    objects = list_dicts_to_tabulate(objects)
    if objects:
        Printer.print_header("Jobs:")
        dict_tabulate(objects, is_list_dict=True)
@bookmark.command()
@click.option('--page', type=int, help='To paginate through the list of builds.')
@click.pass_context
@clean_outputs
def builds(ctx, page):
    """List bookmarked builds for user.

    Uses [Caching](/polyaxon_cli/introduction#Caching)

    Examples:

    \b
    ```bash
    $ polyaxon bookmark builds
    ```

    \b
    ```bash
    $ polyaxon bookmark -u adam builds
    ```
    """
    user = get_username_or_local(ctx.obj.get('username'))
    page = page or 1  # click passes None when --page is omitted
    try:
        response = PolyaxonClient().bookmark.builds(username=user, page=page)
    except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
        Printer.print_error(
            'Could not get bookmarked builds for user `{}`.'.format(user))
        Printer.print_error('Error message `{}`.'.format(e))
        sys.exit(1)
    # Show pagination metadata before the results themselves.
    meta = get_meta_response(response)
    if meta:
        Printer.print_header('Bookmarked builds for user `{}`.'.format(user))
        Printer.print_header('Navigation:')
        dict_tabulate(meta)
    else:
        Printer.print_header('No bookmarked builds found for user `{}`.'.format(user))
    # Colorize each entry's status and render the list as a table.
    objects = [Printer.add_status_color(o.to_light_dict(humanize_values=True))
               for o in response['results']]
    objects = list_dicts_to_tabulate(objects)
    if objects:
        Printer.print_header("Builds:")
        dict_tabulate(objects, is_list_dict=True)
| 29.855019
| 97
| 0.671772
| 953
| 8,031
| 5.499475
| 0.114376
| 0.068689
| 0.068689
| 0.048655
| 0.815112
| 0.810151
| 0.749857
| 0.74356
| 0.732494
| 0.726388
| 0
| 0.001727
| 0.206824
| 8,031
| 268
| 98
| 29.966418
| 0.821036
| 0.130494
| 0
| 0.653846
| 0
| 0
| 0.174062
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038462
| false
| 0.038462
| 0.057692
| 0
| 0.096154
| 0.198718
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
dbec1484508cf913e48b6ca0d59283ded58ac0f0
| 12,219
|
py
|
Python
|
homeassistant/components/zwave/discovery_schemas.py
|
petewill/home-assistant
|
5859dba4344f05fb8774aa1207e47ac28f627a67
|
[
"Apache-2.0"
] | 3
|
2020-01-21T18:09:09.000Z
|
2022-01-17T08:06:03.000Z
|
homeassistant/components/zwave/discovery_schemas.py
|
petewill/home-assistant
|
5859dba4344f05fb8774aa1207e47ac28f627a67
|
[
"Apache-2.0"
] | 39
|
2016-12-16T12:40:34.000Z
|
2017-02-13T17:53:42.000Z
|
homeassistant/components/zwave/discovery_schemas.py
|
petewill/home-assistant
|
5859dba4344f05fb8774aa1207e47ac28f627a67
|
[
"Apache-2.0"
] | 3
|
2020-03-03T18:14:10.000Z
|
2020-10-04T06:52:45.000Z
|
"""Z-Wave discovery schemas."""
from . import const
# Values merged into every discovery schema below (via dict(DEFAULT_VALUES_SCHEMA, **...)):
# an optional "power" value that may come from either the multilevel-sensor
# power index or the meter power index of a node.
DEFAULT_VALUES_SCHEMA = {
    "power": {
        const.DISC_SCHEMAS: [
            {
                const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_SENSOR_MULTILEVEL],
                const.DISC_INDEX: [const.INDEX_SENSOR_MULTILEVEL_POWER],
            },
            {
                const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_METER],
                const.DISC_INDEX: [const.INDEX_METER_POWER],
            },
        ],
        # Power reading is not required for a node to match a schema.
        const.DISC_OPTIONAL: True,
    }
}
# Each entry maps a Z-Wave node — matched by generic/specific device class and
# by the values listed under DISC_VALUES — to one component (DISC_COMPONENT).
# Values marked DISC_OPTIONAL do not need to be present for the node to match.
DISCOVERY_SCHEMAS = [
    # binary_sensor: primary value is a boolean user-genre value from the
    # binary-sensor command class; an "off_delay" config parameter (index 9)
    # is picked up when present.
    {
        const.DISC_COMPONENT: "binary_sensor",
        const.DISC_GENERIC_DEVICE_CLASS: [
            const.GENERIC_TYPE_ENTRY_CONTROL,
            const.GENERIC_TYPE_SENSOR_ALARM,
            const.GENERIC_TYPE_SENSOR_BINARY,
            const.GENERIC_TYPE_SWITCH_BINARY,
            const.GENERIC_TYPE_METER,
            const.GENERIC_TYPE_SENSOR_MULTILEVEL,
            const.GENERIC_TYPE_SWITCH_MULTILEVEL,
            const.GENERIC_TYPE_SENSOR_NOTIFICATION,
            const.GENERIC_TYPE_THERMOSTAT,
        ],
        const.DISC_VALUES: dict(
            DEFAULT_VALUES_SCHEMA,
            **{
                const.DISC_PRIMARY: {
                    const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_SENSOR_BINARY],
                    const.DISC_TYPE: const.TYPE_BOOL,
                    const.DISC_GENRE: const.GENRE_USER,
                },
                "off_delay": {
                    const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_CONFIGURATION],
                    const.DISC_INDEX: [9],
                    const.DISC_OPTIONAL: True,
                },
            },
        ),
    },
    # climate: primary value is the thermostat setpoint; temperature, mode,
    # fan mode, operating state, fan action and a ZXT-120 specific config
    # parameter (index 33) are all optional.
    {
        const.DISC_COMPONENT: "climate",
        const.DISC_GENERIC_DEVICE_CLASS: [
            const.GENERIC_TYPE_THERMOSTAT,
            const.GENERIC_TYPE_SENSOR_MULTILEVEL,
        ],
        const.DISC_VALUES: dict(
            DEFAULT_VALUES_SCHEMA,
            **{
                const.DISC_PRIMARY: {
                    const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_THERMOSTAT_SETPOINT]
                },
                "temperature": {
                    const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_SENSOR_MULTILEVEL],
                    const.DISC_INDEX: [const.INDEX_SENSOR_MULTILEVEL_TEMPERATURE],
                    const.DISC_OPTIONAL: True,
                },
                "mode": {
                    const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_THERMOSTAT_MODE],
                    const.DISC_OPTIONAL: True,
                },
                "fan_mode": {
                    const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_THERMOSTAT_FAN_MODE],
                    const.DISC_OPTIONAL: True,
                },
                "operating_state": {
                    const.DISC_COMMAND_CLASS: [
                        const.COMMAND_CLASS_THERMOSTAT_OPERATING_STATE
                    ],
                    const.DISC_OPTIONAL: True,
                },
                "fan_action": {
                    const.DISC_COMMAND_CLASS: [
                        const.COMMAND_CLASS_THERMOSTAT_FAN_ACTION
                    ],
                    const.DISC_OPTIONAL: True,
                },
                "zxt_120_swing_mode": {
                    const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_CONFIGURATION],
                    const.DISC_INDEX: [33],
                    const.DISC_OPTIONAL: True,
                },
            },
        ),
    },
    # cover (rollershutter): driven by the multilevel switch command class,
    # with optional bright/dim indices used as open/close.
    {
        const.DISC_COMPONENT: "cover",  # Rollershutter
        const.DISC_GENERIC_DEVICE_CLASS: [
            const.GENERIC_TYPE_SWITCH_MULTILEVEL,
            const.GENERIC_TYPE_ENTRY_CONTROL,
        ],
        const.DISC_SPECIFIC_DEVICE_CLASS: [
            const.SPECIFIC_TYPE_CLASS_A_MOTOR_CONTROL,
            const.SPECIFIC_TYPE_CLASS_B_MOTOR_CONTROL,
            const.SPECIFIC_TYPE_CLASS_C_MOTOR_CONTROL,
            const.SPECIFIC_TYPE_MOTOR_MULTIPOSITION,
            const.SPECIFIC_TYPE_SECURE_BARRIER_ADDON,
            const.SPECIFIC_TYPE_SECURE_DOOR,
        ],
        const.DISC_VALUES: dict(
            DEFAULT_VALUES_SCHEMA,
            **{
                const.DISC_PRIMARY: {
                    const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_SWITCH_MULTILEVEL],
                    const.DISC_GENRE: const.GENRE_USER,
                },
                "open": {
                    const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_SWITCH_MULTILEVEL],
                    const.DISC_INDEX: [const.INDEX_SWITCH_MULTILEVEL_BRIGHT],
                    const.DISC_OPTIONAL: True,
                },
                "close": {
                    const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_SWITCH_MULTILEVEL],
                    const.DISC_INDEX: [const.INDEX_SWITCH_MULTILEVEL_DIM],
                    const.DISC_OPTIONAL: True,
                },
            },
        ),
    },
    # cover (garage door, binary switch variant): same device classes as the
    # rollershutter schema, but the primary value is a binary switch.
    {
        const.DISC_COMPONENT: "cover",  # Garage Door Switch
        const.DISC_GENERIC_DEVICE_CLASS: [
            const.GENERIC_TYPE_SWITCH_MULTILEVEL,
            const.GENERIC_TYPE_ENTRY_CONTROL,
        ],
        const.DISC_SPECIFIC_DEVICE_CLASS: [
            const.SPECIFIC_TYPE_CLASS_A_MOTOR_CONTROL,
            const.SPECIFIC_TYPE_CLASS_B_MOTOR_CONTROL,
            const.SPECIFIC_TYPE_CLASS_C_MOTOR_CONTROL,
            const.SPECIFIC_TYPE_MOTOR_MULTIPOSITION,
            const.SPECIFIC_TYPE_SECURE_BARRIER_ADDON,
            const.SPECIFIC_TYPE_SECURE_DOOR,
        ],
        const.DISC_VALUES: dict(
            DEFAULT_VALUES_SCHEMA,
            **{
                const.DISC_PRIMARY: {
                    const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_SWITCH_BINARY],
                    const.DISC_GENRE: const.GENRE_USER,
                }
            },
        ),
    },
    # cover (garage door, barrier-operator variant): primary value is the
    # barrier operator label index.
    {
        const.DISC_COMPONENT: "cover",  # Garage Door Barrier
        const.DISC_GENERIC_DEVICE_CLASS: [
            const.GENERIC_TYPE_SWITCH_MULTILEVEL,
            const.GENERIC_TYPE_ENTRY_CONTROL,
        ],
        const.DISC_SPECIFIC_DEVICE_CLASS: [
            const.SPECIFIC_TYPE_CLASS_A_MOTOR_CONTROL,
            const.SPECIFIC_TYPE_CLASS_B_MOTOR_CONTROL,
            const.SPECIFIC_TYPE_CLASS_C_MOTOR_CONTROL,
            const.SPECIFIC_TYPE_MOTOR_MULTIPOSITION,
            const.SPECIFIC_TYPE_SECURE_BARRIER_ADDON,
            const.SPECIFIC_TYPE_SECURE_DOOR,
        ],
        const.DISC_VALUES: dict(
            DEFAULT_VALUES_SCHEMA,
            **{
                const.DISC_PRIMARY: {
                    const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_BARRIER_OPERATOR],
                    const.DISC_INDEX: [const.INDEX_BARRIER_OPERATOR_LABEL],
                }
            },
        ),
    },
    # fan: multilevel switch level (byte value) on a fan-switch device.
    {
        const.DISC_COMPONENT: "fan",
        const.DISC_GENERIC_DEVICE_CLASS: [const.GENERIC_TYPE_SWITCH_MULTILEVEL],
        const.DISC_SPECIFIC_DEVICE_CLASS: [const.SPECIFIC_TYPE_FAN_SWITCH],
        const.DISC_VALUES: dict(
            DEFAULT_VALUES_SCHEMA,
            **{
                const.DISC_PRIMARY: {
                    const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_SWITCH_MULTILEVEL],
                    const.DISC_INDEX: [const.INDEX_SWITCH_MULTILEVEL_LEVEL],
                    const.DISC_TYPE: const.TYPE_BYTE,
                }
            },
        ),
    },
    # light: multilevel switch level plus optional dimming duration and
    # color/color-channel values from the switch-color command class.
    {
        const.DISC_COMPONENT: "light",
        const.DISC_GENERIC_DEVICE_CLASS: [
            const.GENERIC_TYPE_SWITCH_MULTILEVEL,
            const.GENERIC_TYPE_SWITCH_REMOTE,
        ],
        const.DISC_SPECIFIC_DEVICE_CLASS: [
            const.SPECIFIC_TYPE_POWER_SWITCH_MULTILEVEL,
            const.SPECIFIC_TYPE_SCENE_SWITCH_MULTILEVEL,
            const.SPECIFIC_TYPE_NOT_USED,
        ],
        const.DISC_VALUES: dict(
            DEFAULT_VALUES_SCHEMA,
            **{
                const.DISC_PRIMARY: {
                    const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_SWITCH_MULTILEVEL],
                    const.DISC_INDEX: [const.INDEX_SWITCH_MULTILEVEL_LEVEL],
                    const.DISC_TYPE: const.TYPE_BYTE,
                },
                "dimming_duration": {
                    const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_SWITCH_MULTILEVEL],
                    const.DISC_INDEX: [const.INDEX_SWITCH_MULTILEVEL_DURATION],
                    const.DISC_OPTIONAL: True,
                },
                "color": {
                    const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_SWITCH_COLOR],
                    const.DISC_INDEX: [const.INDEX_SWITCH_COLOR_COLOR],
                    const.DISC_OPTIONAL: True,
                },
                "color_channels": {
                    const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_SWITCH_COLOR],
                    const.DISC_INDEX: [const.INDEX_SWITCH_COLOR_CHANNELS],
                    const.DISC_OPTIONAL: True,
                },
            },
        ),
    },
    # lock: door-lock command class plus optional alarm values and a
    # Vision ZD2102 (v2btze) specific config parameter (index 12).
    {
        const.DISC_COMPONENT: "lock",
        const.DISC_GENERIC_DEVICE_CLASS: [const.GENERIC_TYPE_ENTRY_CONTROL],
        const.DISC_SPECIFIC_DEVICE_CLASS: [
            const.SPECIFIC_TYPE_DOOR_LOCK,
            const.SPECIFIC_TYPE_ADVANCED_DOOR_LOCK,
            const.SPECIFIC_TYPE_SECURE_KEYPAD_DOOR_LOCK,
            const.SPECIFIC_TYPE_SECURE_LOCKBOX,
        ],
        const.DISC_VALUES: dict(
            DEFAULT_VALUES_SCHEMA,
            **{
                const.DISC_PRIMARY: {
                    const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_DOOR_LOCK],
                    const.DISC_INDEX: [const.INDEX_DOOR_LOCK_LOCK],
                },
                "access_control": {
                    const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_ALARM],
                    const.DISC_INDEX: [const.INDEX_ALARM_ACCESS_CONTROL],
                    const.DISC_OPTIONAL: True,
                },
                "alarm_type": {
                    const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_ALARM],
                    const.DISC_INDEX: [const.INDEX_ALARM_TYPE],
                    const.DISC_OPTIONAL: True,
                },
                "alarm_level": {
                    const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_ALARM],
                    const.DISC_INDEX: [const.INDEX_ALARM_LEVEL],
                    const.DISC_OPTIONAL: True,
                },
                "v2btze_advanced": {
                    const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_CONFIGURATION],
                    const.DISC_INDEX: [12],
                    const.DISC_OPTIONAL: True,
                },
            },
        ),
    },
    # sensor: catch-all for user-genre values from several sensor-like
    # command classes; no device-class restriction.
    {
        const.DISC_COMPONENT: "sensor",
        const.DISC_VALUES: dict(
            DEFAULT_VALUES_SCHEMA,
            **{
                const.DISC_PRIMARY: {
                    const.DISC_COMMAND_CLASS: [
                        const.COMMAND_CLASS_SENSOR_MULTILEVEL,
                        const.COMMAND_CLASS_METER,
                        const.COMMAND_CLASS_ALARM,
                        const.COMMAND_CLASS_SENSOR_ALARM,
                        const.COMMAND_CLASS_INDICATOR,
                    ],
                    const.DISC_GENRE: const.GENRE_USER,
                }
            },
        ),
    },
    # switch: boolean user-genre value from the binary-switch command class
    # on a wide range of generic device classes.
    {
        const.DISC_COMPONENT: "switch",
        const.DISC_GENERIC_DEVICE_CLASS: [
            const.GENERIC_TYPE_METER,
            const.GENERIC_TYPE_SENSOR_ALARM,
            const.GENERIC_TYPE_SENSOR_BINARY,
            const.GENERIC_TYPE_SWITCH_BINARY,
            const.GENERIC_TYPE_ENTRY_CONTROL,
            const.GENERIC_TYPE_SENSOR_MULTILEVEL,
            const.GENERIC_TYPE_SWITCH_MULTILEVEL,
            const.GENERIC_TYPE_SENSOR_NOTIFICATION,
            const.GENERIC_TYPE_GENERIC_CONTROLLER,
            const.GENERIC_TYPE_SWITCH_REMOTE,
            const.GENERIC_TYPE_REPEATER_SLAVE,
            const.GENERIC_TYPE_THERMOSTAT,
            const.GENERIC_TYPE_WALL_CONTROLLER,
        ],
        const.DISC_VALUES: dict(
            DEFAULT_VALUES_SCHEMA,
            **{
                const.DISC_PRIMARY: {
                    const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_SWITCH_BINARY],
                    const.DISC_TYPE: const.TYPE_BOOL,
                    const.DISC_GENRE: const.GENRE_USER,
                }
            },
        ),
    },
]
| 38.790476
| 88
| 0.548245
| 1,105
| 12,219
| 5.561086
| 0.085068
| 0.172823
| 0.088527
| 0.095688
| 0.879089
| 0.813507
| 0.78633
| 0.750041
| 0.703173
| 0.653051
| 0
| 0.00118
| 0.376054
| 12,219
| 314
| 89
| 38.914013
| 0.804827
| 0.006465
| 0
| 0.569132
| 0
| 0
| 0.019205
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.003215
| 0
| 0.003215
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
dbfdd93c750cfcec4ab1203d229b04930d5e07b0
| 111
|
py
|
Python
|
tools/Vitis-AI-Quantizer/vai_q_pytorch/pytorch_binding/pytorch_nndct/parse/__init__.py
|
hito0512/Vitis-AI
|
996459fb96cb077ed2f7e789d515893b1cccbc95
|
[
"Apache-2.0"
] | 848
|
2019-12-03T00:16:17.000Z
|
2022-03-31T22:53:17.000Z
|
tools/Vitis-AI-Quantizer/vai_q_pytorch/pytorch_binding/pytorch_nndct/parse/__init__.py
|
wangyifan778/Vitis-AI
|
f61061eef7550d98bf02a171604c9a9f283a7c47
|
[
"Apache-2.0"
] | 656
|
2019-12-03T00:48:46.000Z
|
2022-03-31T18:41:54.000Z
|
tools/Vitis-AI-Quantizer/vai_q_pytorch/pytorch_binding/pytorch_nndct/parse/__init__.py
|
wangyifan778/Vitis-AI
|
f61061eef7550d98bf02a171604c9a9f283a7c47
|
[
"Apache-2.0"
] | 506
|
2019-12-03T00:46:26.000Z
|
2022-03-30T10:34:56.000Z
|
from .trace_helper import *
from .parser import *
from .node_transformer import *
from .script_helper import *
| 22.2
| 31
| 0.783784
| 15
| 111
| 5.6
| 0.533333
| 0.357143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.144144
| 111
| 4
| 32
| 27.75
| 0.884211
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e00da7aa608b57094acd1a2b272e291b47817bfb
| 29
|
py
|
Python
|
agds_API/app/__init__.py
|
Maelstro/twitter-semantic-analysis
|
16425590c0644b25cc070c2e9fcf490946b9f9db
|
[
"W3C"
] | null | null | null |
agds_API/app/__init__.py
|
Maelstro/twitter-semantic-analysis
|
16425590c0644b25cc070c2e9fcf490946b9f9db
|
[
"W3C"
] | null | null | null |
agds_API/app/__init__.py
|
Maelstro/twitter-semantic-analysis
|
16425590c0644b25cc070c2e9fcf490946b9f9db
|
[
"W3C"
] | null | null | null |
from text_processing import *
| 29
| 29
| 0.862069
| 4
| 29
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.103448
| 29
| 1
| 29
| 29
| 0.923077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e010f79371a109bfdbf1eed1e95a7b753b0eeca0
| 8,468
|
py
|
Python
|
tests/test_ddl_compiler.py
|
jferg368/sqlalchemy-redshift
|
5bd6e0dcefbd0a2f3cee0001efc5565ac2395cd2
|
[
"MIT"
] | null | null | null |
tests/test_ddl_compiler.py
|
jferg368/sqlalchemy-redshift
|
5bd6e0dcefbd0a2f3cee0001efc5565ac2395cd2
|
[
"MIT"
] | null | null | null |
tests/test_ddl_compiler.py
|
jferg368/sqlalchemy-redshift
|
5bd6e0dcefbd0a2f3cee0001efc5565ac2395cd2
|
[
"MIT"
] | null | null | null |
import difflib
import pytest
from sqlalchemy import Table, Column, Integer, String, MetaData
from sqlalchemy.exc import CompileError
from sqlalchemy.schema import CreateTable
from sqlalchemy_redshift.dialect import RedshiftDDLCompiler, RedshiftDialect
class TestDDLCompiler(object):
    """Unit tests for RedshiftDDLCompiler's CREATE TABLE rendering.

    Each test builds a SQLAlchemy Table with Redshift-specific options
    (identity, diststyle, distkey, sortkey, encode) and compares the DDL
    emitted by the compiler against the expected SQL string.
    """

    @pytest.fixture
    def compiler(self):
        """Provide a fresh RedshiftDDLCompiler bound to a RedshiftDialect."""
        compiler = RedshiftDDLCompiler(RedshiftDialect(), None)
        return compiler

    def _compare_strings(self, expected, actual):
        """Build a readable per-character diff for an assertion message.

        Bug fix: the original used ``c.encode('hex')`` — a Python 2-only
        codec that raises ``LookupError`` on Python 3 — and passed lists of
        tuples to ``difflib.ndiff``, which requires sequences of strings.
        The ``if c is not None`` guards were also dead code, since the
        characters of a string can never be None.
        """
        assert expected is not None, "Expected was None"
        assert actual is not None, "Actual was None"
        # Render each character next to its hex code point so invisible
        # whitespace differences show up in the diff.
        a = [u"%s %s" % (c, hex(ord(c))) for c in expected]
        b = [u"%s %s" % (c, hex(ord(c))) for c in actual]
        return u"-expected, +actual\n" + u"\n".join(difflib.ndiff(a, b))

    def test_create_table_simple(self, compiler):
        table = Table('t1',
                      MetaData(),
                      Column('id', Integer, primary_key=True),
                      Column('name', String))
        create_table = CreateTable(table)
        actual = compiler.process(create_table)
        expected = (
            u"\nCREATE TABLE t1 ("
            u"\n\tid INTEGER NOT NULL, "
            u"\n\tname VARCHAR, "
            u"\n\tPRIMARY KEY (id)\n)\n\n"
        )
        assert expected == actual, self._compare_strings(expected, actual)

    def test_create_table_with_identity(self, compiler):
        table = Table(
            't1',
            MetaData(),
            Column('id', Integer, primary_key=True, redshift_identity=[1, 2]),
            Column('name', String),
        )
        create_table = CreateTable(table)
        actual = compiler.process(create_table)
        expected = (
            u"\nCREATE TABLE t1 ("
            u"\n\tid INTEGER IDENTITY(1,2) NOT NULL, "
            u"\n\tname VARCHAR, "
            u"\n\tPRIMARY KEY (id)\n)\n\n"
        )
        assert expected == actual, self._compare_strings(expected, actual)

    def test_create_table_with_diststyle(self, compiler):
        table = Table('t1',
                      MetaData(),
                      Column('id', Integer, primary_key=True),
                      Column('name', String),
                      redshift_diststyle="EVEN")
        create_table = CreateTable(table)
        actual = compiler.process(create_table)
        expected = (
            u"\nCREATE TABLE t1 ("
            u"\n\tid INTEGER NOT NULL, "
            u"\n\tname VARCHAR, "
            u"\n\tPRIMARY KEY (id)\n) "
            u"DISTSTYLE EVEN\n\n"
        )
        assert expected == actual, self._compare_strings(expected, actual)

    def test_invalid_diststyle(self, compiler):
        table = Table(
            't1',
            MetaData(),
            Column('id', Integer, primary_key=True),
            Column('name', String),
            redshift_diststyle="NOTEVEN"
        )
        create_table = CreateTable(table)
        # An unknown DISTSTYLE keyword must be rejected at compile time.
        with pytest.raises(CompileError):
            compiler.process(create_table)

    def test_create_table_with_distkey(self, compiler):
        table = Table('t1',
                      MetaData(),
                      Column('id', Integer, primary_key=True),
                      Column('name', String),
                      redshift_distkey="id")
        create_table = CreateTable(table)
        actual = compiler.process(create_table)
        expected = (
            u"\nCREATE TABLE t1 ("
            u"\n\tid INTEGER NOT NULL, "
            u"\n\tname VARCHAR, "
            u"\n\tPRIMARY KEY (id)\n) "
            u"DISTKEY (id)\n\n"
        )
        assert expected == actual, self._compare_strings(expected, actual)

    def test_create_table_with_sortkey(self, compiler):
        table = Table('t1',
                      MetaData(),
                      Column('id', Integer, primary_key=True),
                      Column('name', String),
                      redshift_sortkey="id")
        create_table = CreateTable(table)
        actual = compiler.process(create_table)
        expected = (
            u"\nCREATE TABLE t1 ("
            u"\n\tid INTEGER NOT NULL, "
            u"\n\tname VARCHAR, "
            u"\n\tPRIMARY KEY (id)\n) "
            u"SORTKEY (id)\n\n"
        )
        assert expected == actual, self._compare_strings(expected, actual)

    def test_create_table_with_unicode_sortkey(self, compiler):
        table = Table('t1',
                      MetaData(),
                      Column('id', Integer, primary_key=True),
                      Column('name', String),
                      redshift_sortkey=u"id")
        create_table = CreateTable(table)
        actual = compiler.process(create_table)
        expected = (
            u"\nCREATE TABLE t1 ("
            u"\n\tid INTEGER NOT NULL, "
            u"\n\tname VARCHAR, "
            u"\n\tPRIMARY KEY (id)\n) "
            u"SORTKEY (id)\n\n"
        )
        assert expected == actual, self._compare_strings(expected, actual)

    def test_create_table_with_multiple_sortkeys(self, compiler):
        table = Table('t1',
                      MetaData(),
                      Column('id', Integer, primary_key=True),
                      Column('name', String),
                      redshift_sortkey=["id", "name"])
        create_table = CreateTable(table)
        actual = compiler.process(create_table)
        expected = (
            u"\nCREATE TABLE t1 ("
            u"\n\tid INTEGER NOT NULL, "
            u"\n\tname VARCHAR, "
            u"\n\tPRIMARY KEY (id)\n) "
            u"SORTKEY (id, name)\n\n"
        )
        assert expected == actual, self._compare_strings(expected, actual)

    def test_create_table_all_together(self, compiler):
        table = Table('t1',
                      MetaData(),
                      Column('id', Integer, primary_key=True),
                      Column('name', String),
                      redshift_diststyle="KEY",
                      redshift_distkey="id",
                      redshift_sortkey=["id", "name"])
        create_table = CreateTable(table)
        actual = compiler.process(create_table)
        expected = (
            u"\nCREATE TABLE t1 ("
            u"\n\tid INTEGER NOT NULL, "
            u"\n\tname VARCHAR, "
            u"\n\tPRIMARY KEY (id)\n) "
            u"DISTSTYLE KEY DISTKEY (id) SORTKEY (id, name)\n\n"
        )
        assert expected == actual, self._compare_strings(expected, actual)

    def test_create_column_with_sortkey(self, compiler):
        table = Table('t1',
                      MetaData(),
                      Column('id', Integer, primary_key=True,
                             redshift_sortkey=True),
                      Column('name', String)
                      )
        create_table = CreateTable(table)
        actual = compiler.process(create_table)
        expected = (
            u"\nCREATE TABLE t1 ("
            u"\n\tid INTEGER SORTKEY NOT NULL, "
            u"\n\tname VARCHAR, "
            u"\n\tPRIMARY KEY (id)\n)\n\n"
        )
        assert expected == actual, self._compare_strings(expected, actual)

    def test_create_column_with_distkey(self, compiler):
        table = Table('t1',
                      MetaData(),
                      Column('id', Integer, primary_key=True,
                             redshift_distkey=True),
                      Column('name', String)
                      )
        create_table = CreateTable(table)
        actual = compiler.process(create_table)
        expected = (
            u"\nCREATE TABLE t1 ("
            u"\n\tid INTEGER DISTKEY NOT NULL, "
            u"\n\tname VARCHAR, "
            u"\n\tPRIMARY KEY (id)\n)\n\n"
        )
        assert expected == actual, self._compare_strings(expected, actual)

    def test_create_column_with_encoding(self, compiler):
        table = Table('t1',
                      MetaData(),
                      Column('id', Integer, primary_key=True,
                             redshift_encode="LZO"),
                      Column('name', String)
                      )
        create_table = CreateTable(table)
        actual = compiler.process(create_table)
        expected = (
            u"\nCREATE TABLE t1 ("
            u"\n\tid INTEGER ENCODE LZO NOT NULL, "
            u"\n\tname VARCHAR, "
            u"\n\tPRIMARY KEY (id)\n)\n\n"
        )
        assert expected == actual, self._compare_strings(expected, actual)
| 34.704918
| 79
| 0.527515
| 886
| 8,468
| 4.905192
| 0.092551
| 0.015647
| 0.04694
| 0.060746
| 0.818914
| 0.814772
| 0.814772
| 0.814772
| 0.814772
| 0.814772
| 0
| 0.004999
| 0.362187
| 8,468
| 243
| 80
| 34.847737
| 0.799667
| 0
| 0
| 0.671569
| 0
| 0
| 0.157298
| 0
| 0
| 0
| 0
| 0
| 0.063725
| 1
| 0.068627
| false
| 0
| 0.029412
| 0
| 0.112745
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e016265bd6b3ef20b07955f576a3ed6b89e6e085
| 179
|
py
|
Python
|
microfreshener/core/importer/iimporter.py
|
di-unipi-socc/micro-tosca
|
5d5c9361b34eeabaed8955ddc62282607672bd81
|
[
"MIT"
] | null | null | null |
microfreshener/core/importer/iimporter.py
|
di-unipi-socc/micro-tosca
|
5d5c9361b34eeabaed8955ddc62282607672bd81
|
[
"MIT"
] | 3
|
2019-10-02T13:55:39.000Z
|
2021-06-01T22:55:20.000Z
|
microfreshener/core/importer/iimporter.py
|
di-unipi-socc/microFreshener-core
|
5d5c9361b34eeabaed8955ddc62282607672bd81
|
[
"MIT"
] | null | null | null |
from abc import ABC, abstractmethod
from ..model import MicroToscaModel
class Importer(ABC):
    """Abstract base for importers that build a MicroToscaModel from a file."""

    @abstractmethod
    def Import(self, path: str) -> MicroToscaModel:
        """Read the artifact at ``path`` and return the resulting model."""
        ...
| 22.375
| 48
| 0.72067
| 20
| 179
| 6.45
| 0.65
| 0.263566
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.201117
| 179
| 8
| 49
| 22.375
| 0.902098
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0.166667
| 0.666667
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
e01702c547a5af98158f2ccdfc89232e44603f5c
| 140
|
py
|
Python
|
campuscats/campus/admin.py
|
CaptainMorch/CampusCats
|
82c35fcb3c498fb969726c3d4c30efa7aaf985cc
|
[
"MIT"
] | 1
|
2021-09-29T07:26:19.000Z
|
2021-09-29T07:26:19.000Z
|
campuscats/campus/admin.py
|
CaptainMorch/CampusCats
|
82c35fcb3c498fb969726c3d4c30efa7aaf985cc
|
[
"MIT"
] | null | null | null |
campuscats/campus/admin.py
|
CaptainMorch/CampusCats
|
82c35fcb3c498fb969726c3d4c30efa7aaf985cc
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Campus, Location
# Make the campus models manageable from the Django admin site.
admin.site.register((Campus, Location))
| 28
| 40
| 0.792857
| 19
| 140
| 5.842105
| 0.631579
| 0.252252
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114286
| 140
| 5
| 40
| 28
| 0.895161
| 0.185714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e03c0e31b5b6e774529fe162c64db5b6d9411dc2
| 80
|
py
|
Python
|
test/test_vis.py
|
quizlet/abracadabra
|
eda599bd02f14b96efdc521f53132d93c9100ede
|
[
"MIT"
] | 24
|
2020-06-12T16:12:32.000Z
|
2021-09-01T12:25:38.000Z
|
test/test_vis.py
|
quizlet/abracadabra
|
eda599bd02f14b96efdc521f53132d93c9100ede
|
[
"MIT"
] | 20
|
2020-06-12T06:26:08.000Z
|
2022-03-12T00:57:51.000Z
|
test/test_vis.py
|
quizlet/abracadabra
|
eda599bd02f14b96efdc521f53132d93c9100ede
|
[
"MIT"
] | 4
|
2020-06-14T12:14:11.000Z
|
2021-05-28T15:36:44.000Z
|
from abra import vis
def test_colors():
    """Smoke-check that the vis palette exposes a 'blue' color attribute."""
    palette = vis.COLORS
    assert hasattr(palette, 'blue')
| 16
| 38
| 0.7125
| 12
| 80
| 4.666667
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.175
| 80
| 5
| 38
| 16
| 0.848485
| 0
| 0
| 0
| 0
| 0
| 0.049383
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e048cb88abb2c24f0f497aa995457a16647491cb
| 389
|
py
|
Python
|
exercicios/ex115c.py
|
EduardoPessanha/Git-Python
|
87aa10af09510469032732ed2c55d0d65eb4c1d6
|
[
"MIT"
] | null | null | null |
exercicios/ex115c.py
|
EduardoPessanha/Git-Python
|
87aa10af09510469032732ed2c55d0d65eb4c1d6
|
[
"MIT"
] | null | null | null |
exercicios/ex115c.py
|
EduardoPessanha/Git-Python
|
87aa10af09510469032732ed2c55d0d65eb4c1d6
|
[
"MIT"
] | null | null | null |
from utilitarios import titulo
# *********************** Challenge 115c ********************* #
# Finishing the project                                        #
# Final step of the Python file-access project.                #
# ************************************************************ #
titulo('Finalizando o projeto')
# ************************************************************ #
| 43.222222
| 64
| 0.33162
| 23
| 389
| 5.608696
| 0.73913
| 0.186047
| 0.294574
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009677
| 0.203085
| 389
| 8
| 65
| 48.625
| 0.406452
| 0.791774
| 0
| 0
| 0
| 0
| 0.304348
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
0eb9037c8a405ad1bb82c4f5b85e5a70f2001cc7
| 22
|
py
|
Python
|
project/apps/task_queue/implementation/__init__.py
|
expert-m/crazy-bear
|
46b7a907f940116831378e1eff6d01badbdcc975
|
[
"MIT"
] | null | null | null |
project/apps/task_queue/implementation/__init__.py
|
expert-m/crazy-bear
|
46b7a907f940116831378e1eff6d01badbdcc975
|
[
"MIT"
] | 5
|
2021-08-14T03:12:56.000Z
|
2022-02-16T12:37:15.000Z
|
project/apps/task_queue/implementation/__init__.py
|
expert-m/crazy-bear
|
46b7a907f940116831378e1eff6d01badbdcc975
|
[
"MIT"
] | null | null | null |
from .thread import *
| 11
| 21
| 0.727273
| 3
| 22
| 5.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 22
| 1
| 22
| 22
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0ef328fc401a01ebf89010f58f3848fa8b5218b2
| 5,874
|
py
|
Python
|
valid8/validation_lib/types.py
|
smarie/python-validate
|
c8a10ccede1c0782355439b0966f532bf00dfcab
|
[
"BSD-3-Clause"
] | 26
|
2018-01-10T03:44:19.000Z
|
2021-11-28T07:56:31.000Z
|
valid8/validation_lib/types.py
|
smarie/python-validate
|
c8a10ccede1c0782355439b0966f532bf00dfcab
|
[
"BSD-3-Clause"
] | 55
|
2017-11-06T14:45:47.000Z
|
2021-05-12T08:28:11.000Z
|
valid8/validation_lib/types.py
|
smarie/python-valid8
|
c8a10ccede1c0782355439b0966f532bf00dfcab
|
[
"BSD-3-Clause"
] | null | null | null |
from valid8.base import ValidationFailure
class HasWrongType(ValidationFailure, TypeError):
    """Custom ValidationFailure raised by ``instance_of`` on a type mismatch."""

    # Default message template; {ref_type} is filled in by ValidationFailure.
    help_msg = "Value should be an instance of {ref_type}"

    def __init__(self, wrong_value, ref_type, **kwargs):
        # Forward everything so the base class can render the message.
        super(HasWrongType, self).__init__(wrong_value=wrong_value, ref_type=ref_type, **kwargs)
def instance_of(*args):
    """
    Type validation function usable in two modes:

    * with two arguments (x, ref_type): returns `True` if isinstance(x, ref_type)
      and raises a HasWrongType error otherwise. If ref_type is a set of types,
      matching any one of them is enough.
    * with a single argument (ref_type): function generator mode. Returns a
      validation function checking that `instance_of(x, ref_type)`.

    :param args:
    :return:
    """
    nb_args = len(args)
    if nb_args == 2:
        # Standard mode
        value, ref_type = args
        if isinstance(ref_type, set):
            # Accept the value if it matches any of the candidate types.
            if any(isinstance(value, candidate) for candidate in ref_type):
                return True
            raise HasWrongType(wrong_value=value, ref_type=ref_type,
                               help_msg='Value should be an instance of any of {ref_type}')
        # ref_type is a single type
        if isinstance(value, ref_type):
            return True
        raise HasWrongType(wrong_value=value, ref_type=ref_type)
    elif nb_args == 1:
        # Function generator mode
        ref_type = args[0]
        if isinstance(ref_type, set):
            def instance_of_ref(x):
                # Accept x if it matches any of the candidate types.
                if any(isinstance(x, candidate) for candidate in ref_type):
                    return True
                raise HasWrongType(wrong_value=x, ref_type=ref_type,
                                   help_msg='Value should be an instance of any of {ref_type}')
        else:
            def instance_of_ref(x):
                # noinspection PyTypeHints
                if isinstance(x, ref_type):
                    return True
                raise HasWrongType(wrong_value=x, ref_type=ref_type)
        instance_of_ref.__name__ = 'instance_of_%s' % ref_type
        return instance_of_ref
    else:
        raise TypeError('instance_of expected 2 (normal) or 1 (function generator) arguments, got ' + str(len(args)))
class IsWrongType(ValidationFailure, TypeError):
    """Custom ValidationFailure raised by ``subclass_of`` on a type mismatch."""

    # Default message template; {ref_type} is filled in by ValidationFailure.
    help_msg = 'Value should be a type that is a subclass of {ref_type}'

    def __init__(self, wrong_value, ref_type, **kwargs):
        # Forward everything so the base class can render the message.
        super(IsWrongType, self).__init__(wrong_value=wrong_value, ref_type=ref_type, **kwargs)
def subclass_of(*args):
    """
    Type validation function usable in two modes:

    * with two arguments (c, ref_type): returns `True` if issubclass(c, ref_type)
      and raises a IsWrongType error otherwise. If ref_type is a set of types,
      matching any one of them is enough.
    * with a single argument (ref_type): function generator mode. Returns a
      validation function checking that `subclass_of(c, ref_type)`.

    :param args:
    :return:
    """
    nb_args = len(args)
    if nb_args == 2:
        # Standard mode
        typ, ref_type = args
        if isinstance(ref_type, set):
            # Accept the type if it subclasses any of the candidates.
            if any(issubclass(typ, candidate) for candidate in ref_type):
                return True
            raise IsWrongType(wrong_value=typ, ref_type=ref_type,
                              help_msg='Value should be a subclass of any of {ref_type}')
        # ref_type is a single type
        if issubclass(typ, ref_type):
            return True
        raise IsWrongType(wrong_value=typ, ref_type=ref_type)
    elif nb_args == 1:
        # Function generator mode
        ref_type = args[0]
        if isinstance(ref_type, set):
            def subclass_of_ref(x):
                # Accept x if it subclasses any of the candidates.
                if any(issubclass(x, candidate) for candidate in ref_type):
                    return True
                raise IsWrongType(wrong_value=x, ref_type=ref_type,
                                  help_msg='Value should be a subclass of any of {ref_type}')
        else:
            def subclass_of_ref(x):
                # noinspection PyTypeHints
                if issubclass(x, ref_type):
                    return True
                raise IsWrongType(wrong_value=x, ref_type=ref_type)
        subclass_of_ref.__name__ = 'subclass_of_%s' % ref_type
        return subclass_of_ref
    else:
        raise TypeError('subclass_of expected 2 (normal) or 1 (function generator) arguments, got ' + str(len(args)))
| 36.7125
| 118
| 0.545965
| 699
| 5,874
| 4.380544
| 0.1402
| 0.144024
| 0.032658
| 0.045722
| 0.901372
| 0.839974
| 0.753756
| 0.753103
| 0.743305
| 0.734161
| 0
| 0.00306
| 0.387981
| 5,874
| 159
| 119
| 36.943396
| 0.848679
| 0.23919
| 0
| 0.679612
| 0
| 0
| 0.105456
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.07767
| false
| 0
| 0.009709
| 0
| 0.223301
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0ef6cea666315dd67960efaac5ad382c01a34b3f
| 51
|
py
|
Python
|
src/pytorch_beam_search/__init__.py
|
jarobyte91/pytorch_text_generation
|
ff3b2d6f0f650c5818bba53788229742bdfeafdd
|
[
"MIT"
] | 4
|
2021-11-09T01:38:14.000Z
|
2021-12-17T15:03:20.000Z
|
src/pytorch_beam_search/__init__.py
|
jarobyte91/pytorch_text_generation
|
ff3b2d6f0f650c5818bba53788229742bdfeafdd
|
[
"MIT"
] | null | null | null |
src/pytorch_beam_search/__init__.py
|
jarobyte91/pytorch_text_generation
|
ff3b2d6f0f650c5818bba53788229742bdfeafdd
|
[
"MIT"
] | null | null | null |
from . import autoregressive
from . import seq2seq
| 17
| 28
| 0.803922
| 6
| 51
| 6.833333
| 0.666667
| 0.487805
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.023256
| 0.156863
| 51
| 2
| 29
| 25.5
| 0.930233
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
163df5a096915502a8e5dc85448b0ed5e475c7bb
| 91
|
py
|
Python
|
alttprbot/alttprgen/randomizer/smb3r.py
|
floresmatthew/sahasrahbot
|
a3fcc2aba9cd204331ce612ecf269d8a48a1ebc4
|
[
"MIT"
] | 15
|
2019-10-15T21:35:59.000Z
|
2022-03-31T19:49:39.000Z
|
alttprbot/alttprgen/randomizer/smb3r.py
|
floresmatthew/sahasrahbot
|
a3fcc2aba9cd204331ce612ecf269d8a48a1ebc4
|
[
"MIT"
] | 12
|
2019-10-06T01:33:13.000Z
|
2022-03-10T14:35:16.000Z
|
alttprbot/alttprgen/randomizer/smb3r.py
|
floresmatthew/sahasrahbot
|
a3fcc2aba9cd204331ce612ecf269d8a48a1ebc4
|
[
"MIT"
] | 28
|
2019-11-25T23:49:56.000Z
|
2022-03-10T04:03:31.000Z
|
import random
def roll_smb3r(flags):
    """Generate a pseudo-random SMB3R seed and echo the flags back unchanged."""
    seed = random.randrange(0, 999999999999)
    return seed, flags
| 15.166667
| 51
| 0.758242
| 12
| 91
| 5.666667
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 0.153846
| 91
| 5
| 52
| 18.2
| 0.701299
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
1642f450fe8402d4014bbc9fdca0cd22a88fc01f
| 32,064
|
py
|
Python
|
defectsep/check_rules.py
|
gustavorps/rtscheck
|
c3aaa8bc44d43b08f048573c3cb73092a72c88e2
|
[
"MIT"
] | 2
|
2020-07-28T15:35:39.000Z
|
2021-12-19T19:58:27.000Z
|
defectsep/check_rules.py
|
gustavorps/rtscheck
|
c3aaa8bc44d43b08f048573c3cb73092a72c88e2
|
[
"MIT"
] | null | null | null |
defectsep/check_rules.py
|
gustavorps/rtscheck
|
c3aaa8bc44d43b08f048573c3cb73092a72c88e2
|
[
"MIT"
] | 2
|
2021-01-02T03:55:33.000Z
|
2021-03-15T21:33:46.000Z
|
#!/usr/bin/python3
import os
import sys
import time
import subprocess as sub
import re
import shutil
import argparse
import datetime
import collections
import distutils.core
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) # Dir of this script
# LaTeX artifacts (tables and per-macro numbers) produced by the
# fixed-then-buggy and fixed-twice experiment runs.
RUN_FIXED_THEN_BUGGY_TABLE = SCRIPT_DIR + '/tables/defectsep-fixed-buggy-table.tex'
RUN_FIXED_THEN_BUGGY_NUMBERS = SCRIPT_DIR + '/tables/defectsep-fixed-buggy-numbers.tex'
RUN_FIXED_TWICE_TABLE = SCRIPT_DIR + '/tables/defectsep-fixed-version-twice-table.tex'
RUN_FIXED_TWICE_NUMBERS = SCRIPT_DIR + '/tables/defectsep-fixed-version-twice-numbers.tex'
# Regression test selection tools under comparison.
TOOLS = ['clover', 'ekstazi', 'starts']
def parseArgs(argv):
    """Parse the command-line arguments of the rule-checking script.

    :param argv: list of argument strings (typically ``sys.argv[1:]``).
    :return: argparse.Namespace with boolean attributes ``all`` and
             ``R1`` .. ``R7``.

    Prints the usage message and exits with status 1 when called with no
    arguments at all.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--all', help='Check all the rules', action='store_true', required=False)
    parser.add_argument('--R1', help='Check all rule R1', action='store_true', required=False)
    parser.add_argument('--R2', help='Check all rule R2', action='store_true', required=False)
    # Bug fix: the help text for --R3 previously said "R2" (copy-paste error).
    parser.add_argument('--R3', help='Check all rule R3', action='store_true', required=False)
    parser.add_argument('--R4', help='Check all rule R4', action='store_true', required=False)
    parser.add_argument('--R5', help='Check all rule R5', action='store_true', required=False)
    parser.add_argument('--R6', help='Check all rule R6', action='store_true', required=False)
    parser.add_argument('--R7', help='Check all rule R7', action='store_true', required=False)
    if len(argv) == 0:
        parser.print_help()
        # Use sys.exit rather than the interactive-only builtin exit().
        sys.exit(1)
    opts = parser.parse_args(argv)
    return opts
def extractMacroValue(macro, numbers_tex_file):
    """Return the value of a LaTeX ``\\DefMacro`` definition.

    Scans ``numbers_tex_file`` for the first line starting with
    ``\\DefMacro{<macro>`` and returns its second brace group (the
    macro's value) as a string.

    :param macro: macro name; prefix-matched against the line.
    :param numbers_tex_file: path to the generated numbers ``.tex`` file.
    :return: the macro value as a string, or None when no line matches
             (the original returned None implicitly; made explicit here).
    """
    prefix = '\\DefMacro{' + macro
    # Context manager guarantees the handle is closed even on error
    # (the original left the file open if readlines() raised).
    with open(numbers_tex_file, 'r') as fr:
        for line in fr:
            if line.startswith(prefix):
                # Line shape: \DefMacro{name}{value}  ->  take group 2.
                return line.split('{')[2].split('}')[0]
    return None
def checkRuleR1(violation_list, tools=TOOLS):
    """Check rule R1 on both the fixed and the buggy version.

    Bug fix: the original forwarded the module-level ``TOOLS`` constant
    instead of the caller-supplied ``tools`` argument, so passing a
    custom tool list was silently ignored (the R2/R4/R7 wrappers forward
    ``tools`` correctly).
    """
    checkRuleR1OnFixedVersion(violation_list, tools=tools)
    checkRuleR1OnBuggyVersion(violation_list, tools=tools)
def checkRuleR1OnBuggyVersion(violation_list, tools=TOOLS, \
                              numbers_tex_file=RUN_FIXED_THEN_BUGGY_NUMBERS, \
                              table_tex_file=RUN_FIXED_THEN_BUGGY_TABLE):
    """Report an R1 violation for every tool that detects fewer test
    failures than retest-all on the buggy version of an example."""
    with open(table_tex_file, 'r') as fr:
        table_lines = fr.readlines()
    for line in table_lines:
        # Data rows look like: "<id> & \UseMacro{...} ... \\"
        if ' & ' not in line or '\\UseMacro{' not in line or not line.strip().endswith('\\'):
            continue
        example_id = line.split(' & ')[0]
        key = example_id.replace('-', '')
        baseline = int(extractMacroValue(key + 'notoolNumOfFailedTestsExcludeFlaky', numbers_tex_file))
        for tool in tools:
            detected = int(extractMacroValue(key + tool + 'NumOfFailedTestsExcludeFlaky', numbers_tex_file))
            if detected < baseline:
                reportViolation('R1', tool, example_id, violation_list)
def checkRuleR1OnFixedVersion(violation_list, tools=TOOLS, \
                              numbers_tex_file=RUN_FIXED_THEN_BUGGY_NUMBERS, \
                              table_tex_file=RUN_FIXED_THEN_BUGGY_TABLE):
    """Report an R1 violation for every tool that detects fewer test
    failures than retest-all on the fixed version of an example."""
    with open(table_tex_file, 'r') as fr:
        table_lines = fr.readlines()
    for line in table_lines:
        # Data rows look like: "<id> & \UseMacro{...} ... \\"
        if ' & ' not in line or '\\UseMacro{' not in line or not line.strip().endswith('\\'):
            continue
        example_id = line.split(' & ')[0]
        key = example_id.replace('-', '')
        baseline = int(extractMacroValue(key + 'notoolNumOfFailedTestsFixedExcludeFlaky', numbers_tex_file))
        for tool in tools:
            detected = int(extractMacroValue(key + tool + 'NumOfFailedTestsFixedExcludeFlaky', numbers_tex_file))
            if detected < baseline:
                reportViolation('R1', tool, example_id, violation_list)
def checkRuleR2(violation_list, tools=TOOLS):
    """Check rule R2 on both the fixed and the buggy version."""
    for check in (checkRuleR2OnFixedVersion, checkRuleR2OnBuggyVersion):
        check(violation_list, tools)
def checkRuleR2OnFixedVersion(violation_list, tools, numbers_tex_file=RUN_FIXED_TWICE_NUMBERS, \
                              table_tex_file=RUN_FIXED_TWICE_TABLE):
    """Report an R2 violation when exactly one tool selects zero tests on
    the fixed version while every other tool selects the full suite."""
    with open(table_tex_file, 'r') as fr:
        table_lines = fr.readlines()
    for line in table_lines:
        if ' & ' not in line or '\\UseMacro{' not in line or not line.strip().endswith('\\'):
            continue
        example_id = line.split(' & ')[0]
        key = example_id.replace('-', '')
        total = int(extractMacroValue(key + 'notoolonceNumOfRunTests', numbers_tex_file))
        runs = {tool: int(extractMacroValue(key + tool + 'onceNumOfRunTests', numbers_tex_file))
                for tool in tools}
        counts = list(runs.values())
        # Suspicious split: one outlier at 0, everyone else at the total.
        if counts.count(0) == 1 and counts.count(total) == len(tools) - 1:
            for tool, n in runs.items():
                if n == 0:
                    reportViolation('R2', tool, example_id, violation_list)
def checkRuleR2OnBuggyVersion(violation_list, tools, \
                              numbers_tex_file=RUN_FIXED_THEN_BUGGY_NUMBERS, \
                              table_tex_file=RUN_FIXED_THEN_BUGGY_TABLE):
    """Report an R2 violation when exactly one tool selects zero tests on
    the buggy version while every other tool selects the full suite."""
    with open(table_tex_file, 'r') as fr:
        table_lines = fr.readlines()
    for line in table_lines:
        if ' & ' not in line or '\\UseMacro{' not in line or not line.strip().endswith('\\'):
            continue
        example_id = line.split(' & ')[0]
        key = example_id.replace('-', '')
        total = int(extractMacroValue(key + 'notoolNumOfRunTestsExcludeFlaky', numbers_tex_file))
        runs = {tool: int(extractMacroValue(key + tool + 'NumOfRunTestsExcludeFlaky', numbers_tex_file))
                for tool in tools}
        counts = list(runs.values())
        # Suspicious split: one outlier at 0, everyone else at the total.
        if counts.count(0) == 1 and counts.count(total) == len(tools) - 1:
            for tool, n in runs.items():
                if n == 0:
                    reportViolation('R2', tool, example_id, violation_list)
def checkRuleR3(violation_list, tools=TOOLS, numbers_tex_file=RUN_FIXED_THEN_BUGGY_NUMBERS, \
                table_tex_file=RUN_FIXED_THEN_BUGGY_TABLE):
    """Report an R3 violation when a tool selected the full test suite on
    both the fixed and the buggy version (i.e. it never narrowed the run)."""
    with open(table_tex_file, 'r') as fr:
        table_lines = fr.readlines()
    for line in table_lines:
        if ' & ' not in line or '\\UseMacro{' not in line or not line.strip().endswith('\\'):
            continue
        example_id = line.split(' & ')[0]
        key = example_id.replace('-', '')
        total = int(extractMacroValue(key + 'notoolNumOfRunTestsFixedExcludeFlaky', numbers_tex_file))
        for tool in tools:
            fixed_runs = int(extractMacroValue(key + tool + 'NumOfRunTestsFixedExcludeFlaky', numbers_tex_file))
            buggy_runs = int(extractMacroValue(key + tool + 'NumOfRunTestsExcludeFlaky', numbers_tex_file))
            if fixed_runs == total and buggy_runs == total:
                reportViolation('R3', tool, example_id, violation_list)
def checkRuleR4(violation_list, tools=TOOLS):
    """Check rule R4 on both the fixed and the buggy version."""
    for check in (checkRuleR4OnFixedVersion, checkRuleR4OnBuggyVersion):
        check(violation_list, tools)
def checkRuleR4OnFixedVersion(violation_list, tools, numbers_tex_file=RUN_FIXED_TWICE_NUMBERS, \
                              table_tex_file=RUN_FIXED_TWICE_TABLE):
    """Report an R4 violation when exactly one tool selects the full
    suite on the fixed version while every other tool selects nothing."""
    with open(table_tex_file, 'r') as fr:
        table_lines = fr.readlines()
    for line in table_lines:
        if ' & ' not in line or '\\UseMacro{' not in line or not line.strip().endswith('\\'):
            continue
        example_id = line.split(' & ')[0]
        key = example_id.replace('-', '')
        total = int(extractMacroValue(key + 'notoolonceNumOfRunTests', numbers_tex_file))
        runs = {tool: int(extractMacroValue(key + tool + 'onceNumOfRunTests', numbers_tex_file))
                for tool in tools}
        counts = list(runs.values())
        # Mirror image of R2: one outlier at the total, everyone else at 0.
        if counts.count(0) == len(tools) - 1 and counts.count(total) == 1:
            for tool, n in runs.items():
                if n != 0:
                    reportViolation('R4', tool, example_id, violation_list)
def checkRuleR4OnBuggyVersion(violation_list, tools, \
                              numbers_tex_file=RUN_FIXED_THEN_BUGGY_NUMBERS, \
                              table_tex_file=RUN_FIXED_THEN_BUGGY_TABLE):
    """Report an R4 violation when exactly one tool selects the full
    suite on the buggy version while every other tool selects nothing."""
    with open(table_tex_file, 'r') as fr:
        table_lines = fr.readlines()
    for line in table_lines:
        if ' & ' not in line or '\\UseMacro{' not in line or not line.strip().endswith('\\'):
            continue
        example_id = line.split(' & ')[0]
        key = example_id.replace('-', '')
        total = int(extractMacroValue(key + 'notoolNumOfRunTestsExcludeFlaky', numbers_tex_file))
        runs = {tool: int(extractMacroValue(key + tool + 'NumOfRunTestsExcludeFlaky', numbers_tex_file))
                for tool in tools}
        counts = list(runs.values())
        # Mirror image of R2: one outlier at the total, everyone else at 0.
        if counts.count(0) == len(tools) - 1 and counts.count(total) == 1:
            for tool, n in runs.items():
                if n != 0:
                    reportViolation('R4', tool, example_id, violation_list)
def checkRuleR5(violation_list, tools=TOOLS, numbers_tex_file=RUN_FIXED_TWICE_NUMBERS, \
                table_tex_file=RUN_FIXED_TWICE_TABLE):
    """Report an R5 violation when a tool re-runs any test on the second
    run of an identical revision (nothing changed, so zero tests should
    be selected).

    Clean-up: removed the unused ``num_of_run_tests_map`` local that the
    original built but never read.
    """
    with open(table_tex_file, 'r') as fr:
        lines = fr.readlines()
    for line in lines:
        if ' & ' in line and '\\UseMacro{' in line and line.strip().endswith('\\'):
            example_id = line.split(' & ')[0]
            key = example_id.replace('-', '')
            for tool in tools:
                runs = int(extractMacroValue(key + tool + 'twiceNumOfRunTests', numbers_tex_file))
                if runs != 0:
                    reportViolation('R5', tool, example_id, violation_list)
def checkRuleR6(violation_list, tools=TOOLS, numbers_tex_file=RUN_FIXED_THEN_BUGGY_NUMBERS, \
                table_tex_file=RUN_FIXED_THEN_BUGGY_TABLE):
    """Report an R6 violation when a tool ran a different number of tests
    than retest-all on the fixed version."""
    with open(table_tex_file, 'r') as fr:
        table_lines = fr.readlines()
    for line in table_lines:
        if ' & ' not in line or '\\UseMacro{' not in line or not line.strip().endswith('\\'):
            continue
        example_id = line.split(' & ')[0]
        key = example_id.replace('-', '')
        baseline = int(extractMacroValue(key + 'notoolNumOfRunTestsFixedExcludeFlaky', numbers_tex_file))
        for tool in tools:
            tool_runs = int(extractMacroValue(key + tool + 'NumOfRunTestsFixedExcludeFlaky', numbers_tex_file))
            if tool_runs != baseline:
                reportViolation('R6', tool, example_id, violation_list)
def checkRuleR7(violation_list, tools=TOOLS):
    """Check rule R7 (no extra failures vs. retest-all) on both versions."""
    for check in (checkRuleR7OnFixedVersion, checkRuleR7OnBuggyVersion):
        check(violation_list, tools)
def checkRuleR7OnBuggyVersion(violation_list, tools=TOOLS, \
                              numbers_tex_file=RUN_FIXED_THEN_BUGGY_NUMBERS, \
                              table_tex_file=RUN_FIXED_THEN_BUGGY_TABLE):
    """Report an R7 violation for every tool that detects MORE test
    failures than retest-all on the buggy version of an example."""
    with open(table_tex_file, 'r') as fr:
        table_lines = fr.readlines()
    for line in table_lines:
        if ' & ' not in line or '\\UseMacro{' not in line or not line.strip().endswith('\\'):
            continue
        example_id = line.split(' & ')[0]
        key = example_id.replace('-', '')
        baseline = int(extractMacroValue(key + 'notoolNumOfFailedTestsExcludeFlaky', numbers_tex_file))
        for tool in tools:
            detected = int(extractMacroValue(key + tool + 'NumOfFailedTestsExcludeFlaky', numbers_tex_file))
            if detected > baseline:
                reportViolation('R7', tool, example_id, violation_list)
def checkRuleR7OnFixedVersion(violation_list, tools=TOOLS, \
                              numbers_tex_file=RUN_FIXED_THEN_BUGGY_NUMBERS, \
                              table_tex_file=RUN_FIXED_THEN_BUGGY_TABLE):
    """Report an R7 violation for every tool that detects MORE test
    failures than retest-all on the fixed version of an example."""
    with open(table_tex_file, 'r') as fr:
        table_lines = fr.readlines()
    for line in table_lines:
        if ' & ' not in line or '\\UseMacro{' not in line or not line.strip().endswith('\\'):
            continue
        example_id = line.split(' & ')[0]
        key = example_id.replace('-', '')
        baseline = int(extractMacroValue(key + 'notoolNumOfFailedTestsFixedExcludeFlaky', numbers_tex_file))
        for tool in tools:
            detected = int(extractMacroValue(key + tool + 'NumOfFailedTestsFixedExcludeFlaky', numbers_tex_file))
            if detected > baseline:
                reportViolation('R7', tool, example_id, violation_list)
def reportViolation(rule, tool, example_id, violation_list):
    """Record a single violation as a (rule, tool, example_id) tuple."""
    violation = (rule, tool, example_id)
    violation_list.append(violation)
def groupViolations(violation_list, buggy_numbers_tex_file=RUN_FIXED_THEN_BUGGY_NUMBERS, \
                    fixed_numbers_tex_file=RUN_FIXED_TWICE_NUMBERS):
    """Group the recorded violations and print one summary per group.

    Works in three phases:
    1. Index violations by rule -> tool -> project (lists of example ids).
    2. Run the rule-specific grouping matchers; each matcher replaces a
       bare example id in the index with an (example_id, group_name)
       tuple in place.
    3. Invert the index into group_name -> examples and print it.
    """
    violations_grouping_dict = collections.OrderedDict({})
    for i in range(len(violation_list)):
        # each group must have same rule, same tool, and same project
        violation = violation_list[i]
        rule = violation[0]
        if rule not in violations_grouping_dict:
            violations_grouping_dict[rule] = collections.OrderedDict({})
        tool = violation[1]
        if tool not in violations_grouping_dict[rule]:
            violations_grouping_dict[rule][tool] = collections.OrderedDict({})
        example_id = violation[2]
        # Project name is the example id's leading dash-separated token.
        project = example_id.split('-')[0]
        if project not in violations_grouping_dict[rule][tool]:
            violations_grouping_dict[rule][tool][project] = []
        violations_grouping_dict[rule][tool][project].append(example_id)
    #print (violations_grouping_dict)
    # Each rule has its own applicable grouping heuristics (GR1-GR6);
    # the matchers mutate violations_grouping_dict in place.
    for rule in violations_grouping_dict:
        for tool in violations_grouping_dict[rule]:
            for project in violations_grouping_dict[rule][tool]:
                if rule == 'R1':
                    matchGroupingRule2(rule, tool, project, violations_grouping_dict)
                    matchGroupingRule4(rule, tool, project, violations_grouping_dict)
                elif rule == 'R2':
                    matchGroupingRule6(rule, tool, project, violations_grouping_dict)
                    matchGroupingRule5(rule, tool, project, violations_grouping_dict)
                elif rule == 'R3':
                    matchGroupingRule1(rule, tool, project, violations_grouping_dict)
                elif rule == 'R4':
                    matchGroupingRule1(rule, tool, project, violations_grouping_dict)
                elif rule == 'R5':
                    matchGroupingRule1_V0EqualsV1(rule, tool, project, violations_grouping_dict)
                    matchGroupingRule5_V0EqualsV1(rule, tool, project, violations_grouping_dict)
                elif rule == 'R6':
                    matchGroupingRule6(rule, tool, project, violations_grouping_dict)
                    matchGroupingRule5(rule, tool, project, violations_grouping_dict)
                elif rule == 'R7':
                    matchGroupingRule3(rule, tool, project, violations_grouping_dict)
                    matchGroupingRule4(rule, tool, project, violations_grouping_dict)
    # for rule in violations_grouping_dict:
    #     for tool in violations_grouping_dict[rule]:
    #         for project in violations_grouping_dict[rule][tool]:
    #             for example in violations_grouping_dict[rule][tool][project]:
    #                 print (rule, tool, project, example)
    groups_dict = collections.OrderedDict({})
    for rule in violations_grouping_dict:
        for tool in violations_grouping_dict[rule]:
            for project in violations_grouping_dict[rule][tool]:
                for example in violations_grouping_dict[rule][tool][project]:
                    # NOTE(review): assumes every example was converted to an
                    # (example_id, group_name) tuple by a matcher above; an
                    # unmatched bare string would be indexed char-by-char
                    # here -- confirm the matchers cover all cases.
                    example_id = example[0]
                    group_name = example[1]
                    if group_name not in groups_dict:
                        groups_dict[group_name] = []
                    groups_dict[group_name].append((example_id, rule, tool, project))
    for group_name in groups_dict:
        print ('\nGROUP: ' + group_name)
        for example in groups_dict[group_name]:
            print ('On ' + example[0] + ', ' + example[2] + ' violates ' + example[1])
# GR1: run same as retestall in V1
def matchGroupingRule1(rule, tool, project, violations_grouping_dict, \
                       numbers_tex_file=RUN_FIXED_THEN_BUGGY_NUMBERS):
    """Tag examples where the tool ran exactly as many tests as
    retest-all on the buggy version (group GR1)."""
    examples = violations_grouping_dict[rule][tool][project]
    group_name = rule + '-' + tool + '-' + project + '-GR1'
    for idx, example in enumerate(examples):
        if isinstance(example, tuple):
            continue  # already grouped by an earlier matcher
        key = example.replace('-', '')
        baseline = int(extractMacroValue(key + 'notoolNumOfRunTestsExcludeFlaky', numbers_tex_file))
        tool_runs = int(extractMacroValue(key + tool + 'NumOfRunTestsExcludeFlaky', numbers_tex_file))
        if baseline == tool_runs:
            examples[idx] = (example, group_name)
# GR1: run same number of tests as retestall in V1 when V0 = V1
def matchGroupingRule1_V0EqualsV1(rule, tool, project, violations_grouping_dict, \
                                  numbers_tex_file=RUN_FIXED_TWICE_NUMBERS):
    """Tag examples where the tool ran exactly as many tests as
    retest-all on the second run of an identical revision (group GR1)."""
    examples = violations_grouping_dict[rule][tool][project]
    group_name = rule + '-' + tool + '-' + project + '-GR1'
    for idx, example in enumerate(examples):
        if isinstance(example, tuple):
            continue  # already grouped by an earlier matcher
        key = example.replace('-', '')
        baseline = int(extractMacroValue(key + 'notooltwiceNumOfRunTests', numbers_tex_file))
        tool_runs = int(extractMacroValue(key + tool + 'twiceNumOfRunTests', numbers_tex_file))
        if baseline == tool_runs:
            examples[idx] = (example, group_name)
# GR2: fail X less than retestall in V1
def matchGroupingRule2(rule, tool, project, violations_grouping_dict, \
                       numbers_tex_file=RUN_FIXED_THEN_BUGGY_NUMBERS):
    """Tag R1 examples by how many fewer tests the tool failed than
    retest-all (group GR2); no-op for any other rule."""
    if rule != 'R1':
        return False
    examples = violations_grouping_dict[rule][tool][project]
    failed_less_map = collections.OrderedDict({})
    for idx, example in enumerate(examples):
        if isinstance(example, tuple):
            continue  # already grouped by an earlier matcher
        key = example.replace('-', '')
        baseline = int(extractMacroValue(key + 'notoolNumOfFailedTestsExcludeFlaky', numbers_tex_file))
        tool_failed = int(extractMacroValue(key + tool + 'NumOfFailedTestsExcludeFlaky', numbers_tex_file))
        delta = tool_failed - baseline
        failed_less_map.setdefault(delta, []).append(idx)
    for delta, indices in failed_less_map.items():
        group_name = rule + '-' + tool + '-' + project + '-GR2-fail-' + str(delta) + '-less'
        for idx in indices:
            examples[idx] = (examples[idx], group_name)
# GR3: fail X more than retestall in V1
def matchGroupingRule3(rule, tool, project, violations_grouping_dict, \
                       numbers_tex_file=RUN_FIXED_THEN_BUGGY_NUMBERS):
    """Tag R7 examples by how many more tests the tool failed than
    retest-all (group GR3); no-op for any other rule."""
    if rule != 'R7':
        return False
    examples = violations_grouping_dict[rule][tool][project]
    failed_more_map = collections.OrderedDict({})
    for idx, example in enumerate(examples):
        if isinstance(example, tuple):
            continue  # already grouped by an earlier matcher
        key = example.replace('-', '')
        baseline = int(extractMacroValue(key + 'notoolNumOfFailedTestsExcludeFlaky', numbers_tex_file))
        tool_failed = int(extractMacroValue(key + tool + 'NumOfFailedTestsExcludeFlaky', numbers_tex_file))
        delta = tool_failed - baseline
        failed_more_map.setdefault(delta, []).append(idx)
    for delta, indices in failed_more_map.items():
        group_name = rule + '-' + tool + '-' + project + '-GR3-fail-' + str(delta) + '-more'
        for idx in indices:
            examples[idx] = (examples[idx], group_name)
# GR4: fail same number of tests in V1
def matchGroupingRule4(rule, tool, project, violations_grouping_dict, \
                       numbers_tex_file=RUN_FIXED_THEN_BUGGY_NUMBERS):
    """Tag examples by the absolute number of tests the tool failed on
    the buggy version (group GR4)."""
    examples = violations_grouping_dict[rule][tool][project]
    failed_map = collections.OrderedDict({})
    for idx, example in enumerate(examples):
        if isinstance(example, tuple):
            continue  # already grouped by an earlier matcher
        key = example.replace('-', '')
        tool_failed = int(extractMacroValue(key + tool + 'NumOfFailedTestsExcludeFlaky', numbers_tex_file))
        failed_map.setdefault(tool_failed, []).append(idx)
    for tool_failed, indices in failed_map.items():
        group_name = rule + '-' + tool + '-' + project + '-GR4-fail-' + str(tool_failed)
        for idx in indices:
            examples[idx] = (examples[idx], group_name)
# GR5: run same number of tests in V1
def matchGroupingRule5(rule, tool, project, violations_grouping_dict, \
                       numbers_tex_file=RUN_FIXED_THEN_BUGGY_NUMBERS):
    """Tag examples by the fraction of the retest-all suite the tool ran
    on the buggy version, bucketed into 10% bands (group GR5).

    Bug fix: the original if/elif chain ended with ``< 1.0``, so a ratio
    of exactly 1.0 (the tool ran the whole suite) left ``bucket``
    unbound -- a NameError on the first example, or a stale bucket
    carried over from the previous iteration afterwards. The top band is
    now inclusive (90-100%), consistent with
    matchGroupingRule5_V0EqualsV1, and an out-of-range ratio fails fast.
    """
    examples = violations_grouping_dict[rule][tool][project]
    run_map = collections.OrderedDict({})
    for i in range(len(examples)):
        example = examples[i]
        if type(example) is tuple:
            continue  # already grouped by an earlier matcher
        example_id = example
        retestall_macro = example_id.replace('-', '') + 'notoolNumOfRunTestsExcludeFlaky'
        retestall_num_of_run_tests = int(extractMacroValue(retestall_macro, numbers_tex_file))
        tool_macro = example_id.replace('-', '') + tool + 'NumOfRunTestsExcludeFlaky'
        tool_num_of_run_tests = int(extractMacroValue(tool_macro, numbers_tex_file))
        tool_run_tests_percentage = tool_num_of_run_tests / retestall_num_of_run_tests
        if 0 <= tool_run_tests_percentage < 0.1:
            bucket = "0-10%"
        elif 0.1 <= tool_run_tests_percentage < 0.2:
            bucket = "10-20%"
        elif 0.2 <= tool_run_tests_percentage < 0.3:
            bucket = "20-30%"
        elif 0.3 <= tool_run_tests_percentage < 0.4:
            bucket = "30-40%"
        elif 0.4 <= tool_run_tests_percentage < 0.5:
            bucket = "40-50%"
        elif 0.5 <= tool_run_tests_percentage < 0.6:
            bucket = "50-60%"
        elif 0.6 <= tool_run_tests_percentage < 0.7:
            bucket = "60-70%"
        elif 0.7 <= tool_run_tests_percentage < 0.8:
            bucket = "70-80%"
        elif 0.8 <= tool_run_tests_percentage < 0.9:
            bucket = "80-90%"
        elif 0.9 <= tool_run_tests_percentage <= 1.0:
            bucket = "90-100%"
        else:
            # Previously this fell through with `bucket` unbound.
            raise ValueError('run-tests ratio out of range: %s' % tool_run_tests_percentage)
        if bucket not in run_map:
            run_map[bucket] = []
        run_map[bucket].append(i)
    for bucket in run_map:
        for i in run_map[bucket]:
            examples[i] = (examples[i], rule + '-' + tool + '-' + project + '-GR5-run-' + \
                           str(bucket))
# GR5: run same number of tests in V1 when V0 = V1, only apply to R5
def matchGroupingRule5_V0EqualsV1(rule, tool, project, violations_grouping_dict, \
                                  numbers_tex_file=RUN_FIXED_TWICE_NUMBERS):
    """Tag examples by the fraction of the retest-all suite the tool ran
    on the second run of an identical revision, in 20% bands (group GR5).

    Clean-up: deleted the large commented-out 10%- and 50%-span variants
    (the live code uses the 20% spans). Also added an explicit error for
    an out-of-range ratio, which previously left ``bucket`` unbound.
    """
    examples = violations_grouping_dict[rule][tool][project]
    run_map = collections.OrderedDict({})
    for i in range(len(examples)):
        example = examples[i]
        if type(example) is tuple:
            continue  # already grouped by an earlier matcher
        example_id = example
        retestall_macro = example_id.replace('-', '') + 'notoolonceNumOfRunTests'
        retestall_num_of_run_tests = int(extractMacroValue(retestall_macro, numbers_tex_file))
        tool_macro = example_id.replace('-', '') + tool + 'twiceNumOfRunTests'
        tool_num_of_run_tests = int(extractMacroValue(tool_macro, numbers_tex_file))
        tool_run_tests_percentage = tool_num_of_run_tests / retestall_num_of_run_tests
        ### 20% span
        if 0 <= tool_run_tests_percentage < 0.2:
            bucket = "0-20%"
        elif 0.2 <= tool_run_tests_percentage < 0.4:
            bucket = "20-40%"
        elif 0.4 <= tool_run_tests_percentage < 0.6:
            bucket = "40-60%"
        elif 0.6 <= tool_run_tests_percentage < 0.8:
            bucket = "60-80%"
        elif 0.8 <= tool_run_tests_percentage <= 1.0:
            bucket = "80-100%"
        else:
            # Previously this fell through with `bucket` unbound.
            raise ValueError('run-tests ratio out of range: %s' % tool_run_tests_percentage)
        if bucket not in run_map:
            run_map[bucket] = []
        run_map[bucket].append(i)
    for bucket in run_map:
        for i in run_map[bucket]:
            examples[i] = (examples[i], rule + '-' + tool + '-' + project + '-GR5-run-' + \
                           str(bucket))
# GR6: run same number of tests in V0, only apply to R2, R4, and R6
def matchGroupingRule6(rule, tool, project, violations_grouping_dict, \
                       numbers_tex_file=RUN_FIXED_TWICE_NUMBERS):
    """Tag examples by the absolute number of tests the tool ran on the
    first run of the fixed version (group GR6)."""
    examples = violations_grouping_dict[rule][tool][project]
    run_map = collections.OrderedDict({})
    for idx, example in enumerate(examples):
        if isinstance(example, tuple):
            continue  # already grouped by an earlier matcher
        key = example.replace('-', '')
        tool_runs = int(extractMacroValue(key + tool + 'onceNumOfRunTests', numbers_tex_file))
        run_map.setdefault(tool_runs, []).append(idx)
    for tool_runs, indices in run_map.items():
        group_name = rule + '-' + tool + '-' + project + '-GR6-run-' + str(tool_runs)
        for idx in indices:
            examples[idx] = (examples[idx], group_name)
if __name__ == '__main__':
    opts = parseArgs(sys.argv[1:])
    violation_list = []
    rule_checkers = collections.OrderedDict([
        ('R1', checkRuleR1), ('R2', checkRuleR2), ('R3', checkRuleR3),
        ('R4', checkRuleR4), ('R5', checkRuleR5), ('R6', checkRuleR6),
        ('R7', checkRuleR7),
    ])
    if opts.all:
        # --all runs every rule and prints the grouped summary.
        for checker in rule_checkers.values():
            checker(violation_list)
        # for v in violation_list:
        #     print (v)
        groupViolations(violation_list)
        exit(0)
    # Individual flags only collect violations into violation_list
    # without grouping/printing, matching the original behavior.
    for name, checker in rule_checkers.items():
        if getattr(opts, name):
            checker(violation_list)
| 53.44
| 97
| 0.638754
| 3,898
| 32,064
| 4.906619
| 0.053874
| 0.031894
| 0.027606
| 0.04486
| 0.850779
| 0.816532
| 0.79222
| 0.760535
| 0.719178
| 0.704747
| 0
| 0.016789
| 0.256955
| 32,064
| 599
| 98
| 53.529215
| 0.785981
| 0.064371
| 0
| 0.610039
| 0
| 0
| 0.065782
| 0.035748
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052124
| false
| 0
| 0.019305
| 0
| 0.079151
| 0.005792
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
1646fcaf425cf9303b58bc9f34d153f983898af5
| 165
|
py
|
Python
|
django/code/TheSphinx/consumers/__init__.py
|
aitalshashank2/The-Meeting-Sphinx
|
7825c8f94748298f0c81bb0f40eeac56177526f7
|
[
"Apache-2.0"
] | 4
|
2021-05-16T18:02:30.000Z
|
2021-05-21T16:32:17.000Z
|
django/code/TheSphinx/consumers/__init__.py
|
aitalshashank2/The-Meeting-Sphinx
|
7825c8f94748298f0c81bb0f40eeac56177526f7
|
[
"Apache-2.0"
] | 5
|
2021-05-07T16:31:27.000Z
|
2021-06-04T12:24:28.000Z
|
django/code/TheSphinx/consumers/__init__.py
|
aitalshashank2/The-Meeting-Sphinx
|
7825c8f94748298f0c81bb0f40eeac56177526f7
|
[
"Apache-2.0"
] | 2
|
2021-05-24T06:56:53.000Z
|
2021-08-05T10:10:58.000Z
|
from TheSphinx.consumers.meeting import MeetingConsumer
from TheSphinx.consumers.chat import ChatConsumer
from TheSphinx.consumers.videoCall import VideoCallConsumer
| 55
| 59
| 0.89697
| 18
| 165
| 8.222222
| 0.555556
| 0.263514
| 0.445946
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.066667
| 165
| 3
| 59
| 55
| 0.961039
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
166af83276d70f368643f6ee3e6488bd684c1eb9
| 250
|
py
|
Python
|
src/Lib/python/roadwork/roadwork/server/__init__.py
|
Random-Word/Roadwork-RL
|
146d96a9adbee3284d46b17bac28a43bae8a72c1
|
[
"MIT"
] | null | null | null |
src/Lib/python/roadwork/roadwork/server/__init__.py
|
Random-Word/Roadwork-RL
|
146d96a9adbee3284d46b17bac28a43bae8a72c1
|
[
"MIT"
] | null | null | null |
src/Lib/python/roadwork/roadwork/server/__init__.py
|
Random-Word/Roadwork-RL
|
146d96a9adbee3284d46b17bac28a43bae8a72c1
|
[
"MIT"
] | null | null | null |
# Re-export the server-side public API of the roadwork package.
from roadwork.server.base_server import Server
from roadwork.server.actor_service import RoadworkActorService
from roadwork.server.actor_interface import RoadworkActorInterface
# Names exported by `from roadwork.server import *`.
__all__ = [ "Server", "RoadworkActorService", "RoadworkActorInterface" ]
| 50
| 72
| 0.856
| 25
| 250
| 8.28
| 0.44
| 0.173913
| 0.26087
| 0.222222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.076
| 250
| 5
| 72
| 50
| 0.896104
| 0
| 0
| 0
| 0
| 0
| 0.191235
| 0.087649
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.75
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
16da482d6ae9826ff55d1dbda34f4cd21eb64288
| 642
|
py
|
Python
|
Packages/Patterns_Package/symbols/filled_symbols/Reverse_Left_Faced_Right_angle_Triangle.py
|
saribalarakeshreddy/Python-3.9.0
|
25b4c74feb2a27b91e69aa82becde23e356e82c4
|
[
"MIT"
] | null | null | null |
Packages/Patterns_Package/symbols/filled_symbols/Reverse_Left_Faced_Right_angle_Triangle.py
|
saribalarakeshreddy/Python-3.9.0
|
25b4c74feb2a27b91e69aa82becde23e356e82c4
|
[
"MIT"
] | null | null | null |
Packages/Patterns_Package/symbols/filled_symbols/Reverse_Left_Faced_Right_angle_Triangle.py
|
saribalarakeshreddy/Python-3.9.0
|
25b4c74feb2a27b91e69aa82becde23e356e82c4
|
[
"MIT"
] | null | null | null |
def for_Reverse_Left_faced_Right_Angled_Triangle():
    """ pattern for : Reverse_Left_faced_Right_Angled_Triangle"""
    size = 5
    for row in range(size):
        # Cells on or right of the diagonal get a star, the rest a blank;
        # every cell is followed by a single space, then a newline per row.
        cells = ['*' if row <= col else ' ' for col in range(size)]
        for cell in cells:
            print(cell, end=' ')
        print()
def while_Reverse_Left_faced_Right_Angled_Triangle():
    """ pattern for : Reverse_Left_faced_Right_Angled_Triangle"""
    row = 0
    while row < 5:
        col = 0
        while col < 5:
            # Star on/right of the diagonal, blank otherwise; each cell
            # is followed by one space.
            symbol = '*' if row <= col else ' '
            print(symbol, end=' ')
            col += 1
        row += 1
        print()
| 29.181818
| 66
| 0.46729
| 74
| 642
| 3.756757
| 0.283784
| 0.158273
| 0.230216
| 0.302158
| 0.766187
| 0.766187
| 0.766187
| 0.755396
| 0.755396
| 0.57554
| 0
| 0.020942
| 0.404984
| 642
| 22
| 67
| 29.181818
| 0.706806
| 0.169782
| 0
| 0.5
| 0
| 0
| 0.016
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0
| 0
| 0.1
| 0.3
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
16e161c49470115f0bd3115c390fca3a8ad4fe82
| 96
|
py
|
Python
|
cmspy/MS/__init__.py
|
AlanLoh/cmspy
|
baafb1e0fea8d3b192f20db165be1dbb23be664a
|
[
"MIT"
] | null | null | null |
cmspy/MS/__init__.py
|
AlanLoh/cmspy
|
baafb1e0fea8d3b192f20db165be1dbb23be664a
|
[
"MIT"
] | null | null | null |
cmspy/MS/__init__.py
|
AlanLoh/cmspy
|
baafb1e0fea8d3b192f20db165be1dbb23be664a
|
[
"MIT"
] | null | null | null |
#! /usr/bin/python3
# -*- coding: utf-8 -*-
from .plot_func import *
from .util_func import *
| 13.714286
| 24
| 0.635417
| 14
| 96
| 4.214286
| 0.785714
| 0.338983
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.025316
| 0.177083
| 96
| 6
| 25
| 16
| 0.721519
| 0.416667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
bc59b8474161495951d86586cc215624b0af3fe3
| 240
|
py
|
Python
|
consul/__init__.py
|
authbox-lib/python-consul
|
347347df750eaa2a39d410fc5f32faf58619180c
|
[
"MIT"
] | null | null | null |
consul/__init__.py
|
authbox-lib/python-consul
|
347347df750eaa2a39d410fc5f32faf58619180c
|
[
"MIT"
] | null | null | null |
consul/__init__.py
|
authbox-lib/python-consul
|
347347df750eaa2a39d410fc5f32faf58619180c
|
[
"MIT"
] | 1
|
2018-04-02T22:06:34.000Z
|
2018-04-02T22:06:34.000Z
|
# Single-source package version string.
__version__ = '0.3.15'
# Re-export the client and its exception hierarchy at package level so
# callers can write e.g. `from consul import Consul, ConsulException`.
from consul.std import Consul
from consul.base import ConsulException
from consul.base import ACLPermissionDenied
from consul.base import ACLDisabled
from consul.base import NotFound
from consul.base import Timeout
| 24
| 43
| 0.833333
| 34
| 240
| 5.764706
| 0.411765
| 0.306122
| 0.357143
| 0.510204
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.018957
| 0.120833
| 240
| 9
| 44
| 26.666667
| 0.909953
| 0
| 0
| 0
| 0
| 0
| 0.025
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.857143
| 0
| 0.857143
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
bc8564e9b3ab1d6a0723b63240d862e2402161e9
| 220
|
py
|
Python
|
Commands/Command_Verify_Tensorflow_Keras_Theano.py
|
amrfarouqa/Egyptian-Hieroglyph-Recognition
|
0b9220b8d3fd0756270cfa68899ddcc5a8024b3a
|
[
"Unlicense"
] | null | null | null |
Commands/Command_Verify_Tensorflow_Keras_Theano.py
|
amrfarouqa/Egyptian-Hieroglyph-Recognition
|
0b9220b8d3fd0756270cfa68899ddcc5a8024b3a
|
[
"Unlicense"
] | null | null | null |
Commands/Command_Verify_Tensorflow_Keras_Theano.py
|
amrfarouqa/Egyptian-Hieroglyph-Recognition
|
0b9220b8d3fd0756270cfa68899ddcc5a8024b3a
|
[
"Unlicense"
] | null | null | null |
# Smoke-test script: import each deep-learning backend and print its version,
# so a missing or broken installation fails fast with an ImportError.
# theano
import theano
print('theano: %s' % theano.__version__)
# tensorflow
import tensorflow
print('tensorflow: %s' % tensorflow.__version__)
# keras (bundled with tensorflow)
from tensorflow import keras
print('keras: %s' % keras.__version__)
| 24.444444
| 48
| 0.759091
| 26
| 220
| 5.961538
| 0.307692
| 0.206452
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.118182
| 220
| 9
| 49
| 24.444444
| 0.798969
| 0.104545
| 0
| 0
| 0
| 0
| 0.170103
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 6
|
bcbcb9eaad857eeb9664871e3f9414cc0364ddc8
| 30
|
py
|
Python
|
tradssat/weather/wth/__init__.py
|
FabioSeixas/traDSSAT
|
e7ac31a3c2d0f4f303c64c0ea685ce2903502b4c
|
[
"MIT"
] | 6
|
2020-10-05T11:50:37.000Z
|
2022-02-24T08:36:22.000Z
|
tradssat/weather/wth/__init__.py
|
FabioSeixas/traDSSAT
|
e7ac31a3c2d0f4f303c64c0ea685ce2903502b4c
|
[
"MIT"
] | 23
|
2018-11-08T19:16:36.000Z
|
2021-07-20T23:34:18.000Z
|
tradssat/weather/wth/__init__.py
|
FabioSeixas/traDSSAT
|
e7ac31a3c2d0f4f303c64c0ea685ce2903502b4c
|
[
"MIT"
] | 9
|
2018-11-06T21:04:07.000Z
|
2021-06-19T05:43:24.000Z
|
# Expose the WTH (weather) file reader at package level.
from .wth_file import WTHFile
| 15
| 29
| 0.833333
| 5
| 30
| 4.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.133333
| 30
| 1
| 30
| 30
| 0.923077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4c0a323200596f3d988ecdf03d74699fd003f5b9
| 23
|
py
|
Python
|
drop/demo/__init__.py
|
jemten/drop
|
6e9b586304875c30862dacee320959d16cc98cfe
|
[
"MIT"
] | 58
|
2019-10-18T22:53:21.000Z
|
2022-03-30T08:37:05.000Z
|
drop/demo/__init__.py
|
jemten/drop
|
6e9b586304875c30862dacee320959d16cc98cfe
|
[
"MIT"
] | 185
|
2020-01-10T13:39:12.000Z
|
2022-03-31T15:25:01.000Z
|
drop/demo/__init__.py
|
jemten/drop
|
6e9b586304875c30862dacee320959d16cc98cfe
|
[
"MIT"
] | 32
|
2019-10-15T15:13:20.000Z
|
2022-03-22T05:25:25.000Z
|
# Re-export the path-fixing helpers for the demo package.
from .fixPaths import *
| 23
| 23
| 0.782609
| 3
| 23
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.130435
| 23
| 1
| 23
| 23
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
9128e9f5103fbf01b673ff33229cc1aca74bc939
| 160
|
py
|
Python
|
index.py
|
coder-yuan/vue-template-api
|
135f13d7c32b4a2830366fc0b79a1e2a1eda6923
|
[
"MIT"
] | null | null | null |
index.py
|
coder-yuan/vue-template-api
|
135f13d7c32b4a2830366fc0b79a1e2a1eda6923
|
[
"MIT"
] | null | null | null |
index.py
|
coder-yuan/vue-template-api
|
135f13d7c32b4a2830366fc0b79a1e2a1eda6923
|
[
"MIT"
] | null | null | null |
from app import flask_app
# Entry point: start the Flask development server. Host and port come from
# the application's config; if a key is absent, .get() yields None and Flask
# falls back to its own defaults (127.0.0.1:5000).
if __name__ == '__main__':
    flask_app.run(host=flask_app.config.get('HOST'),
                  port=flask_app.config.get('PORT'))
| 26.666667
| 52
| 0.65
| 23
| 160
| 4
| 0.521739
| 0.347826
| 0.304348
| 0.369565
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.20625
| 160
| 5
| 53
| 32
| 0.724409
| 0
| 0
| 0
| 0
| 0
| 0.1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.25
| 0
| 0.25
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
9137b5c28733918046e5eacb19c4728ba5c7e3ab
| 140
|
py
|
Python
|
run-server.py
|
teodorescuserban/hxl-proxy
|
5bd535edf0ca84fc76adeb000f366a4cf16bce5e
|
[
"Unlicense"
] | null | null | null |
run-server.py
|
teodorescuserban/hxl-proxy
|
5bd535edf0ca84fc76adeb000f366a4cf16bce5e
|
[
"Unlicense"
] | null | null | null |
run-server.py
|
teodorescuserban/hxl-proxy
|
5bd535edf0ca84fc76adeb000f366a4cf16bce5e
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/env python
"""Run a local dev copy of the HXL Proxy"""
# NOTE(review): `sys` appears unused in this script -- confirm before removing.
import sys
from hxl_proxy import app
# Bind to all interfaces with the auto-reloading debug server (dev only).
app.run(debug=True, host='0.0.0.0')
| 20
| 43
| 0.7
| 29
| 140
| 3.344828
| 0.724138
| 0.061856
| 0.28866
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.033333
| 0.142857
| 140
| 6
| 44
| 23.333333
| 0.775
| 0.414286
| 0
| 0
| 0
| 0
| 0.092105
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
914c2c5928fe6cc64f0b23915b391e57e2e59696
| 4,099
|
py
|
Python
|
benchmark/bench_vigra.py
|
k-dominik/fastfilters
|
715281e8ee20e15080e416b60e13e1d33984908f
|
[
"MIT"
] | 7
|
2016-06-13T13:48:28.000Z
|
2021-05-30T06:49:43.000Z
|
benchmark/bench_vigra.py
|
hci-unihd/fastfilters
|
715281e8ee20e15080e416b60e13e1d33984908f
|
[
"MIT"
] | 16
|
2016-06-14T22:00:05.000Z
|
2019-10-05T12:12:24.000Z
|
benchmark/bench_vigra.py
|
hci-unihd/fastfilters
|
715281e8ee20e15080e416b60e13e1d33984908f
|
[
"MIT"
] | 10
|
2016-06-13T19:55:54.000Z
|
2021-01-28T06:43:28.000Z
|
import numpy as np
import fastfilters as ff
import vigra
import time
class Timer(object):
    """Context manager that measures the wall-clock time of its ``with`` block.

    On exit, ``self.delta`` holds the elapsed time in seconds.
    """

    def __enter__(self):
        # time.clock() was deprecated since 3.3 and removed in Python 3.8;
        # perf_counter() is the recommended high-resolution replacement.
        self.a = time.perf_counter()
        return self

    def __exit__(self, *args):
        self.b = time.perf_counter()
        self.delta = self.b - self.a
# --- 2D benchmarks on a 5000x5000 float32 image -----------------------------
# Each section times vigra vs fastfilters (ff) for one filter family and
# prints the per-call speedup factor (vigra time / ff time).
a = np.zeros((5000,5000)).astype(np.float32)
for order in [0,1,2]:
    for sigma in [1,2,3,4,5,6,7,8,9,10]:
        with Timer() as tvigra:
            resvigra = vigra.filters.gaussianDerivative(a, sigma, [order, order])
        with Timer() as tff:
            resff = ff.gaussianDerivative(a, sigma, [order, order])
        fact = tvigra.delta / tff.delta
        print("Timing gaussian 2D with order = %d and sigma = %d: vigra = %f, ff = %f --> speedup: %f" % (order, sigma, tvigra.delta, tff.delta, fact))
for sigma in [1,2,3,4,5,6,7,8,9,10]:
    with Timer() as tvigra:
        resvigra = vigra.filters.gaussianGradientMagnitude(a, sigma)
    with Timer() as tff:
        resff = ff.gaussianGradientMagnitude(a, sigma)
    fact = tvigra.delta / tff.delta
    print("Timing gradient magnitude 2D with sigma = %f: vigra = %f, ff = %f --> speedup: %f" % (sigma, tvigra.delta, tff.delta, fact))
for sigma in [1,2,3,4,5,6,7,8,9,10]:
    with Timer() as tvigra:
        resvigra = vigra.filters.laplacianOfGaussian(a, sigma)
    with Timer() as tff:
        resff = ff.laplacianOfGaussian(a, sigma)
    fact = tvigra.delta / tff.delta
    print("Timing laplacian 2D with sigma = %f: vigra = %f, ff = %f --> speedup: %f" % (sigma, tvigra.delta, tff.delta, fact))
for sigma in [1,2,3,4,5,6,7,8,9,10]:
    with Timer() as tvigra:
        resvigra = vigra.filters.hessianOfGaussianEigenvalues(a, sigma)
    with Timer() as tff:
        resff = ff.hessianOfGaussianEigenvalues(a, sigma)
    fact = tvigra.delta / tff.delta
    print("Timing HOG 2D with sigma = %f: vigra = %f, ff = %f --> speedup: %f" % (sigma, tvigra.delta, tff.delta, fact))
for sigma in [1,2,3,4,5,6,7,8,9,10]:
    # Structure tensor uses a second (outer) scale of twice the inner scale.
    sigma2 = 2*sigma
    with Timer() as tvigra:
        resvigra = vigra.filters.structureTensorEigenvalues(a, sigma, sigma2)
    with Timer() as tff:
        resff = ff.structureTensorEigenvalues(a, sigma, sigma2)
    fact = tvigra.delta / tff.delta
    print("Timing ST 2D with sigma = %f: vigra = %f, ff = %f --> speedup: %f" % (sigma, tvigra.delta, tff.delta, fact))
# --- 3D benchmarks on a 100x100x100 float32 volume (smaller sigma range) ----
a = np.zeros((100,100,100)).astype(np.float32)
for order in [0,1,2]:
    for sigma in [1,2,3,4,5]:
        with Timer() as tvigra:
            resvigra = vigra.filters.gaussianDerivative(a, sigma, [order, order, order])
        with Timer() as tff:
            resff = ff.gaussianDerivative(a, sigma, [order, order, order])
        fact = tvigra.delta / tff.delta
        print("Timing gaussian 3D with order = %d and sigma = %d: vigra = %f, ff = %f --> speedup: %f" % (order, sigma, tvigra.delta, tff.delta, fact))
for sigma in [1,2,3,4,5]:
    with Timer() as tvigra:
        resvigra = vigra.filters.gaussianGradientMagnitude(a, sigma)
    with Timer() as tff:
        resff = ff.gaussianGradientMagnitude(a, sigma)
    fact = tvigra.delta / tff.delta
    print("Timing gradient magnitude 3D with sigma = %f: vigra = %f, ff = %f --> speedup: %f" % (sigma, tvigra.delta, tff.delta, fact))
for sigma in [1,2,3,4,5]:
    with Timer() as tvigra:
        resvigra = vigra.filters.laplacianOfGaussian(a, sigma)
    with Timer() as tff:
        resff = ff.laplacianOfGaussian(a, sigma)
    fact = tvigra.delta / tff.delta
    print("Timing laplacian 3D with sigma = %f: vigra = %f, ff = %f --> speedup: %f" % (sigma, tvigra.delta, tff.delta, fact))
for sigma in [1,2,3,4,5]:
    with Timer() as tvigra:
        resvigra = vigra.filters.hessianOfGaussianEigenvalues(a, sigma)
    with Timer() as tff:
        resff = ff.hessianOfGaussianEigenvalues(a, sigma)
    fact = tvigra.delta / tff.delta
    print("Timing HOG 3D with sigma = %f: vigra = %f, ff = %f --> speedup: %f" % (sigma, tvigra.delta, tff.delta, fact))
for sigma in [1,2,3,4,5]:
    sigma2 = 2*sigma
    with Timer() as tvigra:
        resvigra = vigra.filters.structureTensorEigenvalues(a, sigma, sigma2)
    with Timer() as tff:
        resff = ff.structureTensorEigenvalues(a, sigma, sigma2)
    fact = tvigra.delta / tff.delta
    print("Timing ST 3D with sigma = %f: vigra = %f, ff = %f --> speedup: %f" % (sigma, tvigra.delta, tff.delta, fact))
| 29.489209
| 146
| 0.661625
| 632
| 4,099
| 4.278481
| 0.107595
| 0.066568
| 0.081361
| 0.140533
| 0.928254
| 0.928254
| 0.928254
| 0.928254
| 0.928254
| 0.928254
| 0
| 0.037225
| 0.180776
| 4,099
| 139
| 147
| 29.489209
| 0.768017
| 0
| 0
| 0.689655
| 0
| 0.114943
| 0.182927
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.022989
| false
| 0
| 0.045977
| 0
| 0.091954
| 0.114943
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e684ea01851888d9d790557a56c7131cfc4e2a3a
| 99
|
py
|
Python
|
pycfmodel/__init__.py
|
donatoaz/pycfmodel
|
1586e290b67d2347493dd4a77d2b0c8ee6c0936b
|
[
"Apache-2.0"
] | 23
|
2018-06-28T10:45:01.000Z
|
2021-05-07T11:12:39.000Z
|
pycfmodel/__init__.py
|
donatoaz/pycfmodel
|
1586e290b67d2347493dd4a77d2b0c8ee6c0936b
|
[
"Apache-2.0"
] | 27
|
2019-03-09T08:33:22.000Z
|
2022-03-03T14:59:11.000Z
|
pycfmodel/__init__.py
|
donatoaz/pycfmodel
|
1586e290b67d2347493dd4a77d2b0c8ee6c0936b
|
[
"Apache-2.0"
] | 7
|
2019-03-09T02:18:18.000Z
|
2021-07-22T20:33:09.000Z
|
from pycfmodel.model.cf_model import CFModel
def parse(template):
    """Build a CFModel from a CloudFormation template mapping.

    The template dict's top-level sections are expanded as keyword
    arguments to the CFModel constructor.
    """
    return CFModel(**template)
| 16.5
| 44
| 0.767677
| 13
| 99
| 5.769231
| 0.769231
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.141414
| 99
| 5
| 45
| 19.8
| 0.882353
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
e6ec0d1d9663366e31b42d23b63d4548e5e0df59
| 31
|
py
|
Python
|
tests/syntax/future_typo.py
|
matan-h/friendly
|
3ab0fc6541c837271e8865e247750007acdd18fb
|
[
"MIT"
] | 287
|
2019-04-08T13:18:29.000Z
|
2021-03-14T19:10:21.000Z
|
tests/syntax/future_typo.py
|
matan-h/friendly
|
3ab0fc6541c837271e8865e247750007acdd18fb
|
[
"MIT"
] | 191
|
2019-04-08T14:39:18.000Z
|
2021-03-14T22:14:56.000Z
|
tests/syntax/future_typo.py
|
matan-h/friendly
|
3ab0fc6541c837271e8865e247750007acdd18fb
|
[
"MIT"
] | 9
|
2019-04-08T12:54:08.000Z
|
2020-11-20T02:26:27.000Z
|
# NOTE(review): 'divisio' is a deliberate misspelling of 'division' -- this
# file (tests/syntax/future_typo.py) exists to trigger a SyntaxError that the
# surrounding test suite inspects. Do not "fix" the typo.
from __future__ import divisio
| 15.5
| 30
| 0.870968
| 4
| 31
| 5.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.129032
| 31
| 1
| 31
| 31
| 0.851852
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
fc235d2973db62136fc3a17ebc5c70adb40edc1b
| 1,418
|
py
|
Python
|
sppas/documentation/scripting_solutions/skeleton.py
|
mirfan899/MTTS
|
3167b65f576abcc27a8767d24c274a04712bd948
|
[
"MIT"
] | null | null | null |
sppas/documentation/scripting_solutions/skeleton.py
|
mirfan899/MTTS
|
3167b65f576abcc27a8767d24c274a04712bd948
|
[
"MIT"
] | null | null | null |
sppas/documentation/scripting_solutions/skeleton.py
|
mirfan899/MTTS
|
3167b65f576abcc27a8767d24c274a04712bd948
|
[
"MIT"
] | null | null | null |
#!/usr/bin python
"""
:author: Fix Me
:date: Now
:contact: me@me.org
:license: GPL, v3
:copyright: Copyright (C) 2018 Fixme
:summary: This is the skeleton of a python script.
Use of this software is governed by the GNU Public License, version 3.
This is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this script. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import sys
import codecs
# ----------------------------------------------------------------------------
# Global variables
# ----------------------------------------------------------------------------
# ----------------------------------------------------------------------------
# Functions
# ----------------------------------------------------------------------------
# ----------------------------------------------------------------------------
# This is the python entry point:
if __name__ == '__main__':
    # Template placeholder: replace `pass` with the script's logic.
    pass
| 30.170213
| 78
| 0.528914
| 163
| 1,418
| 4.552147
| 0.588957
| 0.032345
| 0.052561
| 0.076819
| 0.110512
| 0.075472
| 0
| 0
| 0
| 0
| 0
| 0.005848
| 0.155853
| 1,418
| 46
| 79
| 30.826087
| 0.614035
| 0.925952
| 0
| 0
| 0
| 0
| 0.091954
| 0
| 0
| 0
| 0
| 0.021739
| 0
| 1
| 0
| true
| 0.2
| 0.6
| 0
| 0.6
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
fc2eae105d420dc6c2140174155a5ab683fbf3b0
| 3,079
|
py
|
Python
|
numba-dppy/numba_dppy/tests/dppl/test_arg_types.py
|
reazulhoque/numba
|
3d663cf0e8d61b481589c3b520dd1ecd2299ab88
|
[
"Apache-2.0",
"BSD-2-Clause"
] | null | null | null |
numba-dppy/numba_dppy/tests/dppl/test_arg_types.py
|
reazulhoque/numba
|
3d663cf0e8d61b481589c3b520dd1ecd2299ab88
|
[
"Apache-2.0",
"BSD-2-Clause"
] | null | null | null |
numba-dppy/numba_dppy/tests/dppl/test_arg_types.py
|
reazulhoque/numba
|
3d663cf0e8d61b481589c3b520dd1ecd2299ab88
|
[
"Apache-2.0",
"BSD-2-Clause"
] | null | null | null |
from __future__ import print_function, division, absolute_import
import numpy as np
import numba_dppy, numba_dppy as dppl
from numba_dppy.testing import unittest
from numba_dppy.testing import DPPLTestCase
import dpctl
@dppl.kernel
def mul_kernel(A, B, test):
    # Device kernel: each work-item writes one element of B as
    # A[i] scaled by the scalar `test`.
    i = dppl.get_global_id(0)
    B[i] = A[i] * test
def call_mul_device_kernel(global_size, A, B, test):
    # Launch mul_kernel over `global_size` work-items using the
    # runtime-chosen default local (work-group) size.
    mul_kernel[global_size, dppl.DEFAULT_LOCAL_SIZE](A, B, test)
# Module-level test fixtures: two random float32 vectors of length 10,
# shared by both test classes below.
global_size = 10
N = global_size
A = np.array(np.random.random(N), dtype=np.float32)
B = np.array(np.random.random(N), dtype=np.float32)
@unittest.skipUnless(dpctl.has_cpu_queues(), 'test only on CPU system')
class TestDPPLArrayArgCPU(DPPLTestCase):
    """Check scalar kernel arguments (int, float, bool) on a CPU queue."""
    def test_integer_arg(self):
        x = np.int32(2)
        with dpctl.device_context("opencl:cpu") as cpu_queue:
            call_mul_device_kernel(global_size, A, B, x)
            self.assertTrue(np.all((A * x) == B))
    def test_float_arg(self):
        # Both 32- and 64-bit floats must pass through as kernel scalars.
        x = np.float32(2.0)
        with dpctl.device_context("opencl:cpu") as cpu_queue:
            call_mul_device_kernel(global_size, A, B, x)
            self.assertTrue(np.all(A * x == B))
            x = np.float64(3.0)
            call_mul_device_kernel(global_size, A, B, x)
            self.assertTrue(np.all(A * x == B))
    def test_bool_arg(self):
        # Local kernel writes a sentinel value depending on the bool arg.
        @dppl.kernel
        def check_bool_kernel(A, test):
            if test:
                A[0] = 111
            else:
                A[0] = 222
        A = np.array([0], dtype='float64')
        with dpctl.device_context("opencl:cpu") as cpu_queue:
            check_bool_kernel[global_size, dppl.DEFAULT_LOCAL_SIZE](A, True)
            self.assertTrue(A[0] == 111)
            check_bool_kernel[global_size, dppl.DEFAULT_LOCAL_SIZE](A, False)
            self.assertTrue(A[0] == 222)
@unittest.skipUnless(dpctl.has_gpu_queues(), 'test only on GPU system')
class TestDPPLArrayArgGPU(DPPLTestCase):
    """Same scalar-argument checks as the CPU class, on a GPU queue."""
    def test_integer_arg(self):
        x = np.int32(2)
        with dpctl.device_context("opencl:gpu") as gpu_queue:
            call_mul_device_kernel(global_size, A, B, x)
            self.assertTrue(np.all((A * x) == B))
    def test_float_arg(self):
        # Both 32- and 64-bit floats must pass through as kernel scalars.
        x = np.float32(2.0)
        with dpctl.device_context("opencl:gpu") as gpu_queue:
            call_mul_device_kernel(global_size, A, B, x)
            self.assertTrue(np.all(A * x == B))
            x = np.float64(3.0)
            call_mul_device_kernel(global_size, A, B, x)
            self.assertTrue(np.all(A * x == B))
    def test_bool_arg(self):
        # Local kernel writes a sentinel value depending on the bool arg.
        @dppl.kernel
        def check_bool_kernel(A, test):
            if test:
                A[0] = 111
            else:
                A[0] = 222
        A = np.array([0], dtype='float64')
        with dpctl.device_context("opencl:gpu") as gpu_queue:
            check_bool_kernel[global_size, dppl.DEFAULT_LOCAL_SIZE](A, True)
            self.assertTrue(A[0] == 111)
            check_bool_kernel[global_size, dppl.DEFAULT_LOCAL_SIZE](A, False)
            self.assertTrue(A[0] == 222)
# Allow the test module to be run directly as a script.
if __name__ == '__main__':
    unittest.main()
| 31.742268
| 77
| 0.616759
| 450
| 3,079
| 3.982222
| 0.166667
| 0.078125
| 0.107143
| 0.074219
| 0.777902
| 0.748884
| 0.748884
| 0.748884
| 0.710938
| 0.66183
| 0
| 0.029476
| 0.261773
| 3,079
| 96
| 78
| 32.072917
| 0.758909
| 0
| 0
| 0.716216
| 0
| 0
| 0.041572
| 0
| 0
| 0
| 0
| 0
| 0.135135
| 1
| 0.135135
| false
| 0
| 0.081081
| 0
| 0.243243
| 0.013514
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
fc578ccd7f1950ce831f7e496bd197698482b240
| 149
|
py
|
Python
|
lib/datasets/__init__.py
|
giussepi/semantic-segmentation-codebase
|
163b0edfa30a8e1147b532a737d0784ea09f4fc2
|
[
"MIT"
] | 37
|
2021-01-12T06:37:23.000Z
|
2022-03-23T08:14:09.000Z
|
lib/datasets/__init__.py
|
giussepi/semantic-segmentation-codebase
|
163b0edfa30a8e1147b532a737d0784ea09f4fc2
|
[
"MIT"
] | 8
|
2021-01-17T07:53:24.000Z
|
2021-11-16T08:55:48.000Z
|
lib/datasets/__init__.py
|
giussepi/semantic-segmentation-codebase
|
163b0edfa30a8e1147b532a737d0784ea09f4fc2
|
[
"MIT"
] | 6
|
2021-03-14T11:09:30.000Z
|
2021-08-24T11:40:53.000Z
|
from .VOCDataset import *
# The remaining dataset loaders ship with the codebase but are disabled by
# default; uncomment the one matching the target dataset.
#from .COCODataset import *
#from .CityscapesDataset import *
#from .ADE20KDataset import *
#from .ContextDataset import *
| 24.833333
| 33
| 0.771812
| 15
| 149
| 7.666667
| 0.466667
| 0.347826
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015504
| 0.134228
| 149
| 5
| 34
| 29.8
| 0.875969
| 0.771812
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
fca48c3d09922796bc5df56eea8ca633eac15102
| 40
|
py
|
Python
|
backend/addons/sale_invoice_line_note/wizard/__init__.py
|
maherjaballi/odoo-react-cicd
|
e99f0e3216094818d94e99df19da9626afe7f9d8
|
[
"MIT"
] | null | null | null |
backend/addons/sale_invoice_line_note/wizard/__init__.py
|
maherjaballi/odoo-react-cicd
|
e99f0e3216094818d94e99df19da9626afe7f9d8
|
[
"MIT"
] | null | null | null |
backend/addons/sale_invoice_line_note/wizard/__init__.py
|
maherjaballi/odoo-react-cicd
|
e99f0e3216094818d94e99df19da9626afe7f9d8
|
[
"MIT"
] | null | null | null |
# Register the wizard submodule so Odoo loads its models.
from . import sale_make_invoice_advance
| 20
| 39
| 0.875
| 6
| 40
| 5.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 40
| 1
| 40
| 40
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5d792ccebf329d3d5ffca3372bae084018989e3e
| 26,990
|
py
|
Python
|
code/scGCO_code/scGCO/Visualization.py
|
Coke-Zhang/scGCO-1
|
cca9955e28334560e7a134a8738de5b67312b85f
|
[
"MIT"
] | null | null | null |
code/scGCO_code/scGCO/Visualization.py
|
Coke-Zhang/scGCO-1
|
cca9955e28334560e7a134a8738de5b67312b85f
|
[
"MIT"
] | null | null | null |
code/scGCO_code/scGCO/Visualization.py
|
Coke-Zhang/scGCO-1
|
cca9955e28334560e7a134a8738de5b67312b85f
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import matplotlib.path as mplPath
from scipy.spatial import Voronoi, voronoi_plot_2d, Delaunay, KDTree, ConvexHull
from matplotlib.patches import Polygon
from matplotlib.collections import LineCollection, PatchCollection
from PIL import Image
from matplotlib.backends.backend_pdf import PdfPages
from scipy.stats import gaussian_kde
import seaborn as sns
from scipy.spatial.distance import cdist
from .Graph_cut import *
def plot_tsne(tsne_locs,tsne_labels,fileName=None):
    """Scatter-plot a 2-D t-SNE projection coloured by cluster label.

    :param tsne_locs: coordinates, indexed as (point, component) -- assumes
        shape (m, 2); TODO confirm against callers.
    :param tsne_labels: integer cluster label per point; negative labels are
        drawn in black.
    :param fileName: optional path; when given, the figure is also saved.
    """
    palette = sns.color_palette('deep', tsne_labels.max() + 1)
    # Negative labels (e.g. noise/unassigned points) fall back to black.
    colors = [palette[x] if x >= 0 else (0.0, 0.0, 0.0) for x in tsne_labels.astype(int)]
    plt.scatter(tsne_locs[:,0],tsne_locs[:,1], c=colors, s=28)
    plt.xlabel('TSNE component 1')
    plt.ylabel('TSNE component 2')
    # Annotate each cluster id at the max coordinate of its members.
    for i in tsne_labels:
        position = np.max(tsne_locs[tsne_labels== i], axis=0)
        plt.gcf().gca().text(position[0], position[1]-1,str(i), fontsize=12)
    if fileName !=None:
        plt.savefig(fileName)
    plt.show()
def visualize_tsne_density(tsne_proj, threshold=0.001, bins=100, fileName=None,title=None,ax=None,fig=None):
    '''
    Perform Gaussian KDE on a t-SNE projection and draw a density map with a
    contour at `threshold`, to visualize gene clusters.
    :param file: tsne_proj: shape (m, 2)
    threshold=0.001, bins=100, fileName=None
    Returns the per-point density values z.
    NOTE(review): `ax` and `fig` default to None but are used unconditionally
    below -- callers must always supply them.
    '''
    # fig,ax=plt.subplots()
    tsne_proj=tsne_proj.copy()
    kde = gaussian_kde(tsne_proj.T, bw_method = 'scott')
    z = kde(tsne_proj.T)
    # Masked coordinates of high-density points; x/y are not referenced
    # again below. NOTE(review): looks like dead code -- confirm.
    x = np.ma.masked_where(z > threshold, tsne_proj[:,0])
    y = np.ma.masked_where(z > threshold, tsne_proj[:,1])
    # plot unmasked points
    ax.scatter(list(tsne_proj[:,0]), list(tsne_proj[:,1]), c='black', marker='o', s=5)
    # get bounds from axes
    xmin, xmax = ax.get_xlim()
    ymin, ymax = ax.get_ylim()
    # prepare grid for density map
    xedges = np.linspace(xmin, xmax, bins)
    yedges = np.linspace(ymin, ymax, bins)
    xx, yy = np.meshgrid(xedges, yedges)
    gridpoints = np.array([xx.ravel(), yy.ravel()])
    # compute density map
    zz = np.reshape(kde(gridpoints), xx.shape)
    # plot density map
    im = ax.imshow(zz, cmap='Spectral_r', interpolation='nearest',
                   origin='lower', extent=[xmin, xmax, ymin, ymax],
                   aspect='auto')
    # plot threshold contour
    cs = ax.contour(xx, yy, zz, levels=[threshold], colors='black', line_width=10)
    # show
    fig.colorbar(im,ax=ax)
    if title !=None:
        ax.set_title(title,fontsize=12)
    if fileName != None:
        plt.savefig(fileName)
    # plt.show()
    return z
def visualize_spatial_genes(df, locs, data_norm, point_size= 0.5):
    '''
    plot Voronoi tessellation of cells, highlight boundaries of graph cut
    :param file: df: dataframe of graph cut results; locs: spatial coordinates (n, 2);
    data_norm: normalized count: shape (n, m);
    point_size = 0.5;
    '''
    i = 0
    # Walk the result rows two at a time, drawing each pair of genes
    # side by side in one figure.
    while i < df.shape[0]:
        plt.figure(figsize=(6,2.5), dpi=300)
        p1 = plt.subplot(121)
        p2 = plt.subplot(122)
        geneID = df.index[i]
        exp = data_norm.loc[:,geneID].values
        # Columns 4+ of a result row hold the per-cell class labels --
        # presumably the first four are statistics; TODO confirm schema.
        best_Labels = df.loc[geneID,][4:].values.astype(int)
        subplot_voronoi_boundary(geneID, locs, exp, best_Labels,
                                 df.loc[geneID,].fdr, ax=p1,
                                 fdr=True, point_size = point_size, class_line_width=2)
        i = i + 1
        if i < df.shape[0]:
            geneID = df.index[i]
            exp = data_norm.loc[:,geneID].values
            best_Labels = df.loc[geneID,][4:].values.astype(int)
            subplot_voronoi_boundary(geneID, locs, exp, best_Labels,
                                     df.loc[geneID,].fdr, ax=p2, fdr=True,
                                     point_size = point_size)
        else:
            # Odd number of genes: blank out the unused right-hand panel.
            p2.axis('off')
        plt.show()
        i= i + 1
def plot_voronoi_boundary(geneID, coord, count, classLabel, p, fdr=False,
                          fileName=None, point_size=5,
                          line_colors="k", class_line_width=2.5,
                          line_width=0.5, line_alpha=1.0,**kw):
    '''
    plot spatial expression as voronoi tessellation
    highlight boundary between classes
    :param file: geneID; spatial coordinates shape (n, 2); normalized count: shape (n);
    predicted cell class calls shape (n); prediction p-value.
    fdr=False; line_colors = 'k'; class_line_width = 3;
    line_width = 0.5; line_alpha = 1.0
    '''
    points = coord
    count = count
    newLabels =classLabel
    # first estimate mean distance between points--
    p_dist = cdist(points, points)
    # Replace zero self-distances so the column-wise min is a real neighbour.
    p_dist[p_dist == 0] = np.max(p_dist, axis = 0)[0]
    norm_dist = np.mean(np.min(p_dist, axis = 0))
    # find points at edge, add three layers of new points so boundary cells
    # get finite Voronoi regions
    x_min = np.min(points, axis = 0)[0] - 3*norm_dist
    y_min = np.min(points, axis = 0)[1] - 3*norm_dist
    x_max = np.max(points, axis = 0)[0] + 3*norm_dist
    y_max = np.max(points, axis = 0)[1] + 3*norm_dist
    n_x = int((x_max - x_min)/norm_dist) + 1
    n_y = int((y_max - y_min)/norm_dist) + 1
    # create a mesh
    x = np.linspace(x_min, x_max, n_x)
    y = np.linspace(y_min, y_max, n_y)
    xv, yv = np.meshgrid(x, y)
    # now select mesh points outside of the convex hull, and merge
    hull = Delaunay(points)
    grid_points = np.hstack((xv.reshape(-1,1), yv.reshape(-1,1)))
    pad_points = grid_points[np.where(hull.find_simplex(grid_points)< 0)[0]]
    pad_dist = cdist(pad_points, points)
    pad_points = pad_points[np.where(np.min(pad_dist, axis = 1) > norm_dist)[0]]
    all_points = np.vstack((points, pad_points))
    ori_len = points.shape[0]
    vor = Voronoi(all_points)
    if kw.get("show_points",True):
        plt.plot(points[0:ori_len,0], points[0:ori_len,1], ".", markersize=point_size)
    patches = []
    # but we only use the original points for plotting
    for i in np.arange(ori_len):
        good_ver = vor.vertices[vor.regions[vor.point_region[i]]]
        polygon = Polygon(good_ver, True)
        patches.append(polygon)
    pc = PatchCollection(patches, cmap= cm.PiYG, alpha=1)
    pc.set_array(np.array(count))
    plt.gca().add_collection(pc)
    # for loop for plotting is slow, consider to vectorize to speedup
    # doesn't matter for now unless you have many points or genes
    finite_segments=[]
    boundary_segments=[]
    # Ridges between differently-labelled cells form the class boundary.
    for kk, ii in vor.ridge_dict.items():
        if kk[0] < ori_len and kk[1] < ori_len:
            if newLabels[kk[0]] != newLabels[kk[1]]:
                boundary_segments.append(vor.vertices[ii])
            else:
                finite_segments.append(vor.vertices[ii])
    plt.gca().add_collection(LineCollection(boundary_segments,
                                            colors="k",
                                            lw=class_line_width,
                                            alpha=1,
                                            linestyles="solid"))
    plt.gca().add_collection(LineCollection(finite_segments,
                                            colors=line_colors,
                                            lw=line_width,
                                            alpha=line_alpha,
                                            linestyle="solid"))
    plt.xlim(x_min + 1*norm_dist, x_max - 1*norm_dist)
    plt.ylim(y_min + 1*norm_dist, y_max - 1*norm_dist)
    # also remember to add color bar
    plt.colorbar(pc)
    if fdr:
        titleText = geneID + '\n' + 'fdr: ' + str("{:.2e}".format(p))
    else:
        titleText = geneID + '\n' + 'p_value: ' + str("{:.2e}".format(p))
    titleText=kw.get("set_title",titleText)
    fontsize=kw.get("fontsize",12)
    plt.title(titleText, fontname="Arial", fontsize=fontsize)
    plt.xlabel('X coordinate')
    plt.ylabel('Y coordinate')
    if fileName!=None:
        plt.savefig(fileName)
    plt.show()
def pdf_voronoi_boundary(geneID, coord, count, classLabel, p ,fileName, fdr=False, point_size=5,
                         line_colors="k", class_line_width=2.5,
                         line_width=0.5, line_alpha=1.0,**kw):
    '''
    save spatial expression as voronoi tessellation to pdf
    highlight boundary between classes.
    :param file: geneID; spatial coordinates shape (n, 2); normalized count: shape (n);
    predicted cell class calls shape (n); prediction p-value; pdf fileName;
    fdr=False; line_colors = 'k'; class_line_width = 3;
    line_width = 0.5; line_alpha = 1.0
    NOTE(review): body mirrors plot_voronoi_boundary except axes are hidden
    and the figure is saved instead of shown -- keep the two in sync.
    '''
    points = coord
    count = count
    newLabels =classLabel
    # first estimate mean distance between points--
    p_dist = cdist(points, points)
    # Replace zero self-distances so the column-wise min is a real neighbour.
    p_dist[p_dist == 0] = np.max(p_dist, axis = 0)[0]
    norm_dist = np.mean(np.min(p_dist, axis = 0))
    # find points at edge, add three layers of new points so boundary cells
    # get finite Voronoi regions
    x_min = np.min(points, axis = 0)[0] - 3*norm_dist
    y_min = np.min(points, axis = 0)[1] - 3*norm_dist
    x_max = np.max(points, axis = 0)[0] + 3*norm_dist
    y_max = np.max(points, axis = 0)[1] + 3*norm_dist
    n_x = int((x_max - x_min)/norm_dist) + 1
    n_y = int((y_max - y_min)/norm_dist) + 1
    # create a mesh
    x = np.linspace(x_min, x_max, n_x)
    y = np.linspace(y_min, y_max, n_y)
    xv, yv = np.meshgrid(x, y)
    # now select mesh points outside of the convex hull, and merge
    hull = Delaunay(points)
    grid_points = np.hstack((xv.reshape(-1,1), yv.reshape(-1,1)))
    pad_points = grid_points[np.where(hull.find_simplex(grid_points)< 0)[0]]
    pad_dist = cdist(pad_points, points)
    pad_points = pad_points[np.where(np.min(pad_dist, axis = 1) > norm_dist)[0]]
    all_points = np.vstack((points, pad_points))
    ori_len = points.shape[0]
    vor = Voronoi(all_points)
    if kw.get("show_points",True):
        plt.plot(points[0:ori_len,0], points[0:ori_len,1], ".", markersize=point_size)
    patches = []
    # but we only use the original points for plotting
    for i in np.arange(ori_len):
        good_ver = vor.vertices[vor.regions[vor.point_region[i]]]
        polygon = Polygon(good_ver, True)
        patches.append(polygon)
    pc = PatchCollection(patches, cmap=cm.PiYG, alpha=1)
    pc.set_array(np.array(count))
    plt.gca().add_collection(pc)
    # for loop for plotting is slow, consider to vectorize to speedup
    # doesn't matter for now unless you have many points or genes
    finite_segments=[]
    boundary_segments=[]
    # Ridges between differently-labelled cells form the class boundary.
    for kk, ii in vor.ridge_dict.items():
        if kk[0] < ori_len and kk[1] < ori_len:
            if newLabels[kk[0]] != newLabels[kk[1]]:
                boundary_segments.append(vor.vertices[ii])
            else:
                finite_segments.append(vor.vertices[ii])
    plt.gca().add_collection(LineCollection(boundary_segments,
                                            colors="k",
                                            lw=class_line_width,
                                            alpha=1,
                                            linestyles="solid"))
    plt.gca().add_collection(LineCollection(finite_segments,
                                            colors=line_colors,
                                            lw=line_width,
                                            alpha=line_alpha,
                                            linestyle="solid"))
    plt.xlim(x_min + 1*norm_dist, x_max - 1*norm_dist)
    plt.ylim(y_min + 1*norm_dist, y_max - 1*norm_dist)
    # also remember to add color bar
    plt.colorbar(pc)
    if fdr:
        titleText = geneID + '\n' + 'fdr: ' + str("{:.2e}".format(p))
    else:
        titleText = geneID + '\n' + 'p_value: ' + str("{:.2e}".format(p))
    titleText=kw.get("set_title",titleText)
    fontsize=kw.get("fontsize",12)
    plt.title(titleText, fontname="Arial", fontsize=fontsize)
    plt.axis('off')
    # plt.xlabel('X coordinate')
    # plt.ylabel('Y coordinate')
    if fileName != None:
        plt.savefig(fileName)
    else:
        print('ERROR! Please supply a file name.')
def subplot_voronoi_boundary(geneID, coord, count, classLabel, p ,ax ,fdr=False, point_size=5,
                             line_colors="k", class_line_width=2.5,
                             line_width=0.5, line_alpha=1.0,**kw):
    '''
    Plot spatial gene expression as a Voronoi tessellation on the given axis,
    highlighting the boundaries between predicted cell classes.

    :param geneID: gene name, used in the subplot title.
    :param coord: spatial coordinates, array of shape (n, 2).
    :param count: normalized gene expression, length n (colors the cells).
    :param classLabel: predicted cell class call per point, length n.
    :param p: p-value (or FDR when fdr=True), shown in the title.
    :param ax: matplotlib axis to draw on.
    :param fdr: if True, label the statistic as "fdr" instead of "p_value".
    :param point_size: marker size for the original points.
    :param line_colors: color of ridges within a class.
    :param class_line_width: width of ridges separating different classes.
    :param line_width: width of within-class ridges.
    :param line_alpha: alpha of within-class ridges.
    :param kw: optional overrides: show_points (bool), set_title (str),
        fontsize (number).
    '''
    points = coord
    count = count
    newLabels =classLabel
    # first estimate mean distance between points;
    # zero self-distances would dominate the min, so replace them first
    p_dist = cdist(points, points)
    p_dist[p_dist == 0] = np.max(p_dist, axis = 0)[0]
    norm_dist = np.mean(np.min(p_dist, axis = 0))
    # find points at edge, add three layers of new points around the data
    x_min = np.min(points, axis = 0)[0] - 3*norm_dist
    y_min = np.min(points, axis = 0)[1] - 3*norm_dist
    x_max = np.max(points, axis = 0)[0] + 3*norm_dist
    y_max = np.max(points, axis = 0)[1] + 3*norm_dist
    n_x = int((x_max - x_min)/norm_dist) + 1
    n_y = int((y_max - y_min)/norm_dist) + 1
    # create a mesh of candidate padding points covering the padded box
    x = np.linspace(x_min, x_max, n_x)
    y = np.linspace(y_min, y_max, n_y)
    xv, yv = np.meshgrid(x, y)
    # keep only mesh points outside the convex hull (and not too close to the
    # data), then merge; the padding closes the outer Voronoi cells so every
    # original point gets a finite, drawable region
    hull = Delaunay(points)
    grid_points = np.hstack((xv.reshape(-1,1), yv.reshape(-1,1)))
    pad_points = grid_points[np.where(hull.find_simplex(grid_points)< 0)[0]]
    pad_dist = cdist(pad_points, points)
    pad_points = pad_points[np.where(np.min(pad_dist, axis = 1) > norm_dist)[0]]
    all_points = np.vstack((points, pad_points))
    ori_len = points.shape[0]
    vor = Voronoi(all_points)
    if kw.get("show_points",True):
        ax.plot(points[0:ori_len,0], points[0:ori_len,1], ".", markersize=point_size)
    patches = []
    # only the original points (not the padding) get filled polygons
    for i in np.arange(ori_len):
        good_ver = vor.vertices[vor.regions[vor.point_region[i]]]
        polygon = Polygon(good_ver, True)
        patches.append(polygon)
    pc = PatchCollection(patches, cmap=cm.PiYG, alpha=1)
    pc.set_array(np.array(count))
    ax.add_collection(pc)
    # for-loop plotting is slow, consider vectorizing to speed it up;
    # doesn't matter for now unless there are many points or genes
    finite_segments=[]
    boundary_segments=[]
    for kk, ii in vor.ridge_dict.items():
        # only ridges between two original points are drawn
        if kk[0] < ori_len and kk[1] < ori_len:
            if newLabels[kk[0]] != newLabels[kk[1]]:
                boundary_segments.append(vor.vertices[ii])
            else:
                finite_segments.append(vor.vertices[ii])
    ax.add_collection(LineCollection(boundary_segments,
                                     colors="k",
                                     lw=class_line_width,
                                     alpha=1,
                                     linestyles="solid"))
    ax.add_collection(LineCollection(finite_segments,
                                     colors=line_colors,
                                     lw=line_width,
                                     alpha=line_alpha,
                                     linestyle="solid"))
    # trim one padding layer so the artificial points stay out of view
    ax.set_xlim(x_min + 1*norm_dist, x_max - 1*norm_dist)
    ax.set_ylim(y_min + 1*norm_dist, y_max - 1*norm_dist)
    # color bar is intentionally left to the caller for subplot layouts
    #plt.colorbar(pc)
    if fdr:
        titleText = geneID + '\n' + 'fdr: ' + str("{:.2e}".format(p))
    else:
        titleText = geneID + '\n' + 'p_value: ' + str("{:.2e}".format(p))
    titleText=kw.get("set_title",titleText)
    fontsize=kw.get("fontsize",8)
    ax.set_title(titleText, fontname="Arial", fontsize=fontsize)
def subplot_voronoi_boundary_12x18(geneID, coord, count,
                                   classLabel, p, ax, fdr=False, point_size = 0.5,
                                   line_colors = 'k', class_line_width = 0.8,
                                   line_width = 0.05, line_alpha = 1.0,**kw):
    '''
    Plot spatial expression as a Voronoi tessellation on one small panel of a
    12x18 grid, highlighting boundaries between predicted cell classes.

    :param geneID: gene name, used in the panel title.
    :param coord: spatial coordinates, array of shape (n, 2).
    :param count: normalized gene expression, length n (colors the cells).
    :param classLabel: predicted cell class call per point, length n.
    :param p: graph-cut p-value (or FDR when fdr=True), shown in the title.
    :param ax: matplotlib axis to draw on.
    :param fdr: if True, the title shows the value without a "p_value" label.
    :param point_size: marker size for the original points.
    :param class_line_width: width of ridges separating different classes.
    :param line_colors, line_width, line_alpha: style of within-class ridges.
    :param kw: optional overrides: show_points (bool), set_title (str),
        fontsize (number).
    '''
    points = coord
    newLabels = classLabel
    # estimate mean spacing between neighbouring points; zero self-distances
    # would dominate the per-column min, so replace them first
    p_dist = cdist(points, points)
    p_dist[p_dist == 0] = np.max(p_dist, axis = 0)[0]
    norm_dist = np.mean(np.min(p_dist, axis = 0))
    # bounding box padded by three layers of artificial points
    x_min = np.min(points, axis = 0)[0] - 3*norm_dist
    y_min = np.min(points, axis = 0)[1] - 3*norm_dist
    x_max = np.max(points, axis = 0)[0] + 3*norm_dist
    y_max = np.max(points, axis = 0)[1] + 3*norm_dist
    n_x = int((x_max - x_min)/norm_dist) + 1
    n_y = int((y_max - y_min)/norm_dist) + 1
    # mesh of candidate padding points
    x = np.linspace(x_min, x_max, n_x)
    y = np.linspace(y_min, y_max, n_y)
    xv, yv = np.meshgrid(x, y)
    # keep mesh points outside the convex hull and not too close to the data;
    # the padding closes the outer Voronoi cells so every original cell is finite
    hull = Delaunay(points)
    grid_points = np.hstack((xv.reshape(-1,1), yv.reshape(-1,1)))
    pad_points = grid_points[np.where(hull.find_simplex(grid_points) < 0)[0]]
    pad_dist = cdist(pad_points, points)
    pad_points = pad_points[np.where(np.min(pad_dist, axis = 1) > norm_dist)[0]]
    all_points = np.vstack((points, pad_points))
    ori_len = points.shape[0]
    vor = Voronoi(all_points)
    if kw.get("show_points",True):
        ax.plot(points[0:ori_len,0], points[0:ori_len,1], ".", markersize=point_size)
    # only the original points (not the padding) get filled polygons
    patches = []
    for i in np.arange(ori_len):
        good_ver = vor.vertices[vor.regions[vor.point_region[i]]]
        patches.append(Polygon(good_ver, closed=True))
    pc = PatchCollection(patches, cmap=cm.PiYG, alpha=1)
    pc.set_array(np.array(count))
    ax.add_collection(pc)
    # classify each ridge between two original points: between-class ridges
    # form the highlighted boundary, the rest are drawn faintly
    finite_segments=[]
    boundary_segments=[]
    for kk, ii in vor.ridge_dict.items():
        if kk[0] < ori_len and kk[1] < ori_len:
            if newLabels[kk[0]] != newLabels[kk[1]]:
                boundary_segments.append(vor.vertices[ii])
            else:
                finite_segments.append(vor.vertices[ii])
    ax.add_collection(LineCollection(boundary_segments,  # class boundary
                                     colors="k",
                                     lw=class_line_width,
                                     alpha=1,
                                     linestyles="solid"))
    ax.add_collection(LineCollection(finite_segments,  # within-class ridges
                                     colors=line_colors,
                                     lw=line_width,
                                     alpha=line_alpha,
                                     linestyle="solid"))
    # trim one padding layer so the artificial points stay out of view
    ax.set_xlim(x_min + 1*norm_dist, x_max - 1*norm_dist)
    ax.set_ylim(y_min + 1*norm_dist, y_max - 1*norm_dist)
    if fdr:
        titleText = geneID + ' ' + '' + str("{0:.1e}".format(p))
    else:
        # BUG FIX: was "{0:1e}" (field width 1, full precision, e.g.
        # "1.000000e-02"); use one decimal place to match the fdr branch
        titleText = geneID + ' ' + 'p_value: ' + str("{0:.1e}".format(p))
    titleText=kw.get("set_title",titleText)
    fontsize=kw.get("fontsize",3.5)
    ax.set_title(titleText, fontname="Arial", fontsize=fontsize, y = 0.85)
def multipage_pdf_visualize_spatial_genes(df, locs, data_norm, cellGraph, fileName,
                                          point_size=0.,**kw):
    '''
    Save spatial expression of every gene in *df* as Voronoi tessellations
    with class boundaries highlighted, 12x18 panels per page, to a
    multi-page PDF.

    :param df: graph-cut results, one gene per row; must expose an 'fdr'
        column, with per-point class labels from column 4 onward.
    :param locs: spatial coordinates, array of shape (n, 2).
    :param data_norm: normalized gene expression, genes as columns.
    :param cellGraph: unused here; kept for interface compatibility.
    :param fileName: output PDF path.
    :param point_size: marker size forwarded to the per-panel plot.
    '''
    # NOTE: the original computed Voronoi(locs) and an unused row count here;
    # both results were never read, so the wasted work is removed.
    nb_plots = int(df.shape[0])
    numCols = 12
    numRows = 18
    nb_plots_per_page = numCols*numRows
    with PdfPages(fileName) as pdf:
        for i in np.arange(df.shape[0]):
            # start a fresh 12x18 page whenever the previous one is full
            if i % nb_plots_per_page == 0:
                fig, axs = plt.subplots(numRows, numCols,
                                        figsize = (8,11))
                fig.subplots_adjust(hspace=0.3, wspace=0.3,
                                    top=0.925, right=0.925, bottom=0.075, left = 0.075)
            geneID = df.index[i]
            exp = data_norm.loc[:,geneID].values
            # genes with no significant cut (NaN fdr) get a single class
            if np.isnan(df.loc[geneID,].fdr):
                best_Labels = np.zeros(data_norm.shape[0])
            else:
                best_Labels = df.loc[geneID,][4:].values.astype(int)
            m = int(i/numCols) % numRows
            n = i % numCols
            subplot_voronoi_boundary_12x18(geneID, locs, exp, best_Labels,
                                           df.loc[geneID,].fdr, ax=axs[m,n], fdr=True,
                                           point_size = point_size,**kw)
            # flush the page when it is full or we are out of genes
            if (i + 1) % nb_plots_per_page == 0 or (i + 1) == nb_plots:
                for ii in np.arange(numRows):
                    for jj in np.arange(numCols):
                        axs[ii,jj].axis('off')
                pdf.savefig(fig)
                fig.clear()
                plt.close(fig)
def add_HE_image(image, ax):
    """Draw an H&E background image onto *ax*.

    The image is flipped vertically so it matches the spatial coordinate
    system, and stretched to the fixed extent [1, 33] x [1, 35] used by
    the spatial-transcriptomics array layout.
    """
    flipped = Image.open(image).transpose(Image.FLIP_TOP_BOTTOM)
    ax.imshow(flipped, extent=[1, 33, 1, 35])
def subplot_boundary(geneID, coord, count, classLabel, p, ax=None,
                     fdr=False, point_size=5,
                     class_line_width=2.5,
                     **kw):
    '''
    Plot only the Voronoi ridges that separate different predicted cell
    classes (no filled cells).

    :param geneID: gene name, used in the subplot title.
    :param coord: spatial coordinates, array of shape (n, 2).
    :param count: normalized expression, length n; unused here, kept for a
        signature consistent with the other subplot_* helpers.
    :param classLabel: predicted cell class call per point, length n.
    :param p: p-value (or FDR when fdr=True), shown in the title.
    :param ax: matplotlib axis to draw on.
    :param fdr: if True, label the statistic as "fdr" instead of "p_value".
    :param point_size: marker size when show_points is requested.
    :param class_line_width: width of the boundary ridges.
    :param kw: optional overrides: show_points (bool), set_title (str),
        fontsize (number).
    '''
    points = coord
    newLabels = classLabel
    # estimate mean spacing between neighbouring points; zero self-distances
    # would dominate the per-column min, so replace them first
    p_dist = cdist(points, points)
    p_dist[p_dist == 0] = np.max(p_dist, axis = 0)[0]
    norm_dist = np.mean(np.min(p_dist, axis = 0))
    # bounding box padded by three layers of artificial points
    x_min = np.min(points, axis = 0)[0] - 3*norm_dist
    y_min = np.min(points, axis = 0)[1] - 3*norm_dist
    x_max = np.max(points, axis = 0)[0] + 3*norm_dist
    y_max = np.max(points, axis = 0)[1] + 3*norm_dist
    n_x = int((x_max - x_min)/norm_dist) + 1
    n_y = int((y_max - y_min)/norm_dist) + 1
    # mesh of candidate padding points
    x = np.linspace(x_min, x_max, n_x)
    y = np.linspace(y_min, y_max, n_y)
    xv, yv = np.meshgrid(x, y)
    # keep mesh points outside the convex hull and not too close to the data;
    # the padding closes the outer Voronoi cells of the original points
    hull = Delaunay(points)
    grid_points = np.hstack((xv.reshape(-1,1), yv.reshape(-1,1)))
    pad_points = grid_points[np.where(hull.find_simplex(grid_points) < 0)[0]]
    pad_dist = cdist(pad_points, points)
    pad_points = pad_points[np.where(np.min(pad_dist, axis = 1) > norm_dist)[0]]
    all_points = np.vstack((points, pad_points))
    ori_len = points.shape[0]
    vor = Voronoi(all_points)
    if kw.get("show_points",False):
        ax.plot(points[0:ori_len,0], points[0:ori_len,1], ".", markersize=point_size)
    # collect only the ridges whose two original points carry different labels
    boundary_segments=[]
    for kk, ii in vor.ridge_dict.items():
        if kk[0] < ori_len and kk[1] < ori_len:
            if newLabels[kk[0]] != newLabels[kk[1]]:
                boundary_segments.append(vor.vertices[ii])
    ax.add_collection(LineCollection(boundary_segments,
                                     colors="k",
                                     lw=class_line_width,
                                     alpha=1,
                                     linestyles="solid"))
    # trim one padding layer so the artificial points stay out of view
    ax.set_xlim(x_min + 1*norm_dist, x_max - 1*norm_dist)
    ax.set_ylim(y_min + 1*norm_dist, y_max - 1*norm_dist)
    if fdr:
        titleText = geneID + '\n' + 'fdr: ' + str("{:.2e}".format(p))
    else:
        titleText = geneID + '\n' + 'p_value: ' + str("{:.2e}".format(p))
    titleText = kw.get("set_title",titleText)
    # BUG FIX: fontsize was read from kw but then hard-coded to 8 in
    # set_title, silently ignoring the caller's "fontsize" keyword
    fontsize = kw.get("fontsize",8)
    ax.set_title(titleText, fontname="Arial", fontsize=fontsize)
def plot_tissue_pattern(locs,data_norm,tissue_mat,image,colors,title,nrows=4,ncols=5,s=15):
    '''
    Plot every tissue pattern in *tissue_mat* as a scatter over the H&E
    image, one panel per pattern on an nrows x ncols grid.

    :param locs: spatial coordinates, array of shape (n, 2).
    :param data_norm: normalized expression; unused here, kept for a
        signature consistent with the surrounding plotting helpers.
    :param tissue_mat: one pattern per row, each of length n; values index
        into *colors*.
    :param image: path to the H&E background image.
    :param colors: discrete color list for the pattern values.
    :param title: title applied to every panel.
    :param nrows, ncols: grid layout; unused trailing panels are hidden.
    :param s: scatter marker size.
    '''
    nb_plots = tissue_mat.shape[0]
    nb_box = nrows*ncols
    fig, ax = plt.subplots(nrows, ncols, figsize=(ncols*3, nrows*3), dpi=180)
    fig.subplots_adjust(hspace=0.3, wspace=0.3,
                        top=0.925, right=0.925, bottom=0.075, left = 0.075)
    # the axis limits depend only on locs, so compute them once instead of
    # redoing the O(n^2) pairwise-distance work inside every panel
    points = locs
    p_dist = cdist(points, points)
    p_dist[p_dist == 0] = np.max(p_dist, axis = 0)[0]
    norm_dist = np.mean(np.min(p_dist, axis = 0))
    x_min = np.min(points, axis = 0)[0] - 3*norm_dist
    y_min = np.min(points, axis = 0)[1] - 3*norm_dist
    x_max = np.max(points, axis = 0)[0] + 3*norm_dist
    y_max = np.max(points, axis = 0)[1] + 3*norm_dist
    for i in range(nb_plots):
        axes = ax[int(i/ncols), i % ncols]
        add_HE_image(image, axes)
        axes.scatter(locs[:,0], locs[:,1], c=tissue_mat[i],
                     cmap=matplotlib.colors.ListedColormap(colors), s=s)
        axes.set_title(title, fontsize=8)
        axes.set_xlim(x_min + 1*norm_dist, x_max - 1*norm_dist)
        axes.set_ylim(y_min + 1*norm_dist, y_max - 1*norm_dist)
    # hide the unused trailing panels of the grid
    for ii in np.arange(nb_plots, nb_box):
        ax[int(ii/ncols), ii % ncols].axis('off')
def subplot_HE_with_labels(locs, labels, image, ax, colors, title, s=30):
    """Scatter per-point class labels over the H&E background image on *ax*.

    The axis limits are derived from the point cloud: the mean
    nearest-neighbour spacing is estimated, the bounding box is padded by
    three spacings, then one spacing is trimmed back on each side.
    """
    add_HE_image(image, ax)
    ax.scatter(locs[:, 0], locs[:, 1], c=labels,
               cmap=matplotlib.colors.ListedColormap(colors), s=s)
    ax.set_title(title, fontsize=8)
    # mean nearest-neighbour distance; self-distances (zero) are masked out
    pairwise = cdist(locs, locs)
    pairwise[pairwise == 0] = np.max(pairwise, axis=0)[0]
    spacing = np.mean(np.min(pairwise, axis=0))
    lower = np.min(locs, axis=0) - 3 * spacing
    upper = np.max(locs, axis=0) + 3 * spacing
    ax.set_xlim(lower[0] + spacing, upper[0] - spacing)
    ax.set_ylim(lower[1] + spacing, upper[1] - spacing)
| 38.338068
| 113
| 0.577992
| 3,811
| 26,990
| 3.934138
| 0.095775
| 0.041619
| 0.019809
| 0.009338
| 0.798373
| 0.780564
| 0.766424
| 0.738144
| 0.726206
| 0.713133
| 0
| 0.026026
| 0.29674
| 26,990
| 704
| 114
| 38.338068
| 0.763869
| 0.148685
| 0
| 0.71097
| 0
| 0
| 0.022711
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.025316
| false
| 0
| 0.033755
| 0
| 0.061181
| 0.00211
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
5d9661ee8a1cad30a17363390c19aeb6a6f8c2f7
| 193
|
py
|
Python
|
python/testData/quickFixes/PyAddCallSuperQuickFixTest/positionalOnlyParameters_after.py
|
tgodzik/intellij-community
|
f5ef4191fc30b69db945633951fb160c1cfb7b6f
|
[
"Apache-2.0"
] | 2
|
2019-04-28T07:48:50.000Z
|
2020-12-11T14:18:08.000Z
|
python/testData/quickFixes/PyAddCallSuperQuickFixTest/positionalOnlyParameters_after.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 2
|
2022-02-19T09:45:05.000Z
|
2022-02-27T20:32:55.000Z
|
python/testData/quickFixes/PyAddCallSuperQuickFixTest/positionalOnlyParameters_after.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 2
|
2020-03-15T08:57:37.000Z
|
2020-04-07T04:48:14.000Z
|
class A:
def __init__(self, a, /, b, *args, c, **kwargs):
pass
class B(A):
def __init__(self, a, /, b, *args, c, **kwargs):
super().__init__(a, b, *args, c=c, **kwargs)
| 27.571429
| 52
| 0.518135
| 30
| 193
| 2.933333
| 0.366667
| 0.068182
| 0.204545
| 0.238636
| 0.568182
| 0.568182
| 0.568182
| 0.568182
| 0.568182
| 0
| 0
| 0
| 0.259067
| 193
| 7
| 53
| 27.571429
| 0.615385
| 0
| 0
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0.166667
| 0
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 6
|
5d9fef922e3a1a44b55cb160a1d8ecf32f992c5b
| 186
|
py
|
Python
|
0x03-python-data_structures/dev/3-main.py
|
johncoleman83/bootcampschool-higher_level_programming
|
a83c3b7092cfe893c87e495f8d8eec9228c9b808
|
[
"MIT"
] | null | null | null |
0x03-python-data_structures/dev/3-main.py
|
johncoleman83/bootcampschool-higher_level_programming
|
a83c3b7092cfe893c87e495f8d8eec9228c9b808
|
[
"MIT"
] | null | null | null |
0x03-python-data_structures/dev/3-main.py
|
johncoleman83/bootcampschool-higher_level_programming
|
a83c3b7092cfe893c87e495f8d8eec9228c9b808
|
[
"MIT"
] | 1
|
2020-09-25T17:54:36.000Z
|
2020-09-25T17:54:36.000Z
|
#!/usr/bin/python3
print_reversed_list_integer = __import__('3-print_reversed_list_integer').print_reversed_list_integer
my_list = [1, 2, 3, 4, 5]
print_reversed_list_integer(my_list)
| 26.571429
| 101
| 0.811828
| 30
| 186
| 4.433333
| 0.466667
| 0.390977
| 0.511278
| 0.721805
| 0.451128
| 0.451128
| 0
| 0
| 0
| 0
| 0
| 0.040698
| 0.075269
| 186
| 6
| 102
| 31
| 0.732558
| 0.091398
| 0
| 0
| 0
| 0
| 0.173653
| 0.173653
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0.666667
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
|
0
| 6
|
5de33b7b48e4d293610f65bcae57bf882d427918
| 32
|
py
|
Python
|
base_processor/__init__.py
|
Pennsieve/base-processor
|
5800e81e15aab6d9f32e799086c68c8a87e4f251
|
[
"Apache-2.0"
] | null | null | null |
base_processor/__init__.py
|
Pennsieve/base-processor
|
5800e81e15aab6d9f32e799086c68c8a87e4f251
|
[
"Apache-2.0"
] | null | null | null |
base_processor/__init__.py
|
Pennsieve/base-processor
|
5800e81e15aab6d9f32e799086c68c8a87e4f251
|
[
"Apache-2.0"
] | null | null | null |
from .base import BaseProcessor
| 16
| 31
| 0.84375
| 4
| 32
| 6.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 32
| 1
| 32
| 32
| 0.964286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f8ffdb16d1922b3ce56c67199e8da6020a31679a
| 78
|
py
|
Python
|
tests/test_app.py
|
harshav7/dscookie
|
c4623165fdd8a95c841f3f79a9f41e3e5f640a51
|
[
"FTL"
] | null | null | null |
tests/test_app.py
|
harshav7/dscookie
|
c4623165fdd8a95c841f3f79a9f41e3e5f640a51
|
[
"FTL"
] | null | null | null |
tests/test_app.py
|
harshav7/dscookie
|
c4623165fdd8a95c841f3f79a9f41e3e5f640a51
|
[
"FTL"
] | null | null | null |
from app import index
def test_index():
assert index() == "Hello, world"
| 15.6
| 36
| 0.666667
| 11
| 78
| 4.636364
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.205128
| 78
| 5
| 36
| 15.6
| 0.822581
| 0
| 0
| 0
| 0
| 0
| 0.151899
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5d2e8aed642f8360e6a87e6ad8e3d5f0f3389934
| 8,023
|
py
|
Python
|
utils/dingding.py
|
huangzp152/CyptoGridStrategy
|
58c2f30dff65ee6a44674a515e257c70bf84fd71
|
[
"MIT"
] | 1
|
2021-07-03T06:08:42.000Z
|
2021-07-03T06:08:42.000Z
|
utils/dingding.py
|
huangzp152/CyptoGridStrategy
|
58c2f30dff65ee6a44674a515e257c70bf84fd71
|
[
"MIT"
] | null | null | null |
utils/dingding.py
|
huangzp152/CyptoGridStrategy
|
58c2f30dff65ee6a44674a515e257c70bf84fd71
|
[
"MIT"
] | 1
|
2022-03-13T07:19:41.000Z
|
2022-03-13T07:19:41.000Z
|
# author-wechat:findpanpan
import requests,json
# windows
# from app.authorization import dingding_token, recv_window,api_secret,api_key
# from app.BinanceAPI import BinanceAPI
# linux
class Message:
def do_buy_limit_msg(self,market, quantity, price, profit_usdt=0):
'''
合约开多,带有钉钉消息的封装
:param market:
:param quantity: 数量
:param rate: 价格
:return:
'''
try:
res = BinanceAPI(api_key,api_secret).limit_future_order("SELL",market, quantity,"LONG", price)
if res['orderId']:
buy_info = "报警:币种为:{cointype}。做多卖单价为:{price}。卖单量为:{num}.".format(cointype=market,price=price,num=quantity)
self.dingding_warn(buy_info)
return res
except BaseException as e:
error_info = "报警:币种为:{cointype},做多多单失败.api返回内容为:{reject}".format(cointype=market,reject=res['msg'])
self.dingding_warn(error_info+str(res))
return res
def open_buy_market_msg(self, market, quantity):
'''
合约开多 市价单
:param market:
:param quantity: 数量
:param price: 价格
:return:
'''
try:
res = BinanceAPI(api_key, api_secret).market_future_order("BUY", market, quantity, "LONG")
if res['orderId']:
buy_info = "报警:币种为:{cointype}。开多买单量为:{num}".format(cointype=market,num=quantity)
self.dingding_warn(buy_info)
return res
except BaseException as e:
error_info = "报警:币种为:{cointype},开多多单失败.api返回内容为:{reject}".format(cointype=market, reject=res['msg'])
def do_buy_market_msg(self, market, quantity,profit_usdt=0):
'''
合约平多 市价单
:param market:
:param quantity: 数量
:param price: 价格
:return:
'''
try:
res = BinanceAPI(api_key, api_secret).market_future_order("SELL", market, quantity, "LONG")
if res['orderId']:
buy_info = "报警:币种为:{cointype}。做多卖单量为:{num}.".format(cointype=market,num=quantity)
self.dingding_warn(buy_info)
return res
except BaseException as e:
error_info = "报警:币种为:{cointype},开多多单失败.api返回内容为:{reject}".format(cointype=market, reject=res['msg'])
def open_sell_market_msg(self, market, quantity):
'''
合约开空 市价单
:param market:
:param quantity: 数量
:param price: 价格
:return:
'''
try:
res = BinanceAPI(api_key, api_secret).market_future_order("SELL", market, quantity, "SHORT")
if res['orderId']:
buy_info = "报警:币种为:{cointype}。开空买单量为:{num}".format(cointype=market,num=quantity)
self.dingding_warn(buy_info)
return res
except BaseException as e:
error_info = "报警:币种为:{cointype},开多多单失败.api返回内容为:{reject}".format(cointype=market, reject=res['msg'])
def do_sell_market_msg(self, market, quantity,profit_usdt=0):
'''
合约平空 市价单
:param market:
:param quantity: 数量
:param price: 价格
:return:
'''
try:
res = BinanceAPI(api_key, api_secret).market_future_order("BUY", market, quantity, "SHORT")
if res['orderId']:
buy_info = "报警:币种为:{cointype}。做空卖单量为:{num}.".format(cointype=market,num=quantity)
self.dingding_warn(buy_info)
return res
except BaseException as e:
error_info = "报警:币种为:{cointype},开多多单失败.api返回内容为:{reject}".format(cointype=market, reject=res['msg'])
# def open_buy_limit_msg(self,market, quantity, price):
# '''
# 合约开多
# :param market:
# :param quantity: 数量
# :param price: 价格
# :return:
# '''
# try:
# res = BinanceAPI(api_key,api_secret).limit_future_order("BUY",market, quantity,"LONG", price)
# if res['orderId']:
# buy_info = "报警:币种为:{cointype}。开多买单价为:{price}。买单量为:{num}".format(cointype=market,price=price,num=quantity)
# self.dingding_warn(buy_info)
# return res
# except BaseException as e:
# error_info = "报警:币种为:{cointype},开多多单失败.api返回内容为:{reject}".format(cointype=market,reject=res['msg'])
# self.dingding_warn(error_info)
# def open_sell_future_msg(self,market, quantity, price):
# '''
# 合约开空单,带有钉钉消息
# :param market: 交易对
# :param quantity: 数量
# :param price: 价格
# :return:
# '''
# try:
# res = BinanceAPI(api_key,api_secret).limit_future_order('SELL', market, quantity,"SHORT", price)
# if res['orderId']:
# buy_info = "报警:币种为:{cointype}。开空买入价格为:{price}。数量为:{num}".format(cointype=market,price=price,num=quantity)
# self.dingding_warn(buy_info)
# return res
# except BaseException as e:
# error_info = "报警:币种为:{cointype},开空空单失败.api返回内容为:{reject}".format(cointype=market,reject=res['msg'])
# self.dingding_warn(error_info+str(res))
# return res
def do_sell_future_msg(self,market, quantity, price,profit_usdt=0):
'''
合约做空单,带有钉钉消息
:param market: 交易对
:param quantity: 数量
:param price: 价格
:return:
'''
try:
res = BinanceAPI(api_key,api_secret).limit_future_order('BUY', market, quantity,"SHORT", price)
if res['orderId']:
buy_info = "报警:币种为:{cointype}。做空卖单价为:{price}。数量为:{num}。".format(cointype=market,price=price,num=quantity)
self.dingding_warn(buy_info)
return res
except BaseException as e:
error_info = "报警:币种为:{cointype},做空空单失败.api返回内容为:{reject}".format(cointype=market,reject=res['msg'])
self.dingding_warn(error_info+str(res))
return res
def buy_market_msg(self, market, quantity):
'''
现货市价买入
:param market:
:param quantity:
:return:
'''
try:
res = BinanceAPI(api_key,api_secret).buy_market(market, quantity)
if res['orderId']:
buy_info = "报警:币种为:{cointype}。买单量为:{num}".format(cointype=market,num=quantity)
self.dingding_warn(buy_info)
return res
except BaseException as e:
error_info = "报警:币种为:{cointype},买单失败.".format(cointype=market)
self.dingding_warn(error_info)
def sell_market_msg(self,market, quantity):
'''
现货市价卖出
:param market:
:param quantity: 数量
:param rate: 价格
:return:
'''
try:
res = BinanceAPI(api_key,api_secret).sell_market(market, quantity)
if res['orderId']:
buy_info = "报警:币种为:{cointype}。卖单量为:{num}".format(cointype=market,num=quantity)
self.dingding_warn(buy_info)
return res
except BaseException as e:
error_info = "报警:币种为:{cointype},卖单失败".format(cointype=market)
self.dingding_warn(error_info)
return res
@staticmethod
def dingding_warn(text):
headers = {'Content-Type': 'application/json;charset=utf-8'}
api_url = "https://api.telegram.org/bot%s/sendMessage?chat_id=%s&text=%s" % ('1858698079:AAEo4iunenZ3mZSVUICqVAKFoiHU4LGnO6U', '1540332281', text)
# json_text = self._msg(text)
requests.post(api_url, headers=headers).content
def _msg(self,text):
json_text = {
"msgtype": "text",
"at": {
"atMobiles": [
"11111"
],
"isAtAll": False
},
"text": {
"content": text
}
}
return json_text
if __name__ == "__main__":
msg = Message()
print(msg.buy_limit_future_msg("EOSUSDT",3,2))
| 36.468182
| 154
| 0.564502
| 890
| 8,023
| 4.924719
| 0.141573
| 0.063883
| 0.041068
| 0.077572
| 0.830253
| 0.826603
| 0.795802
| 0.775724
| 0.712526
| 0.712526
| 0
| 0.006488
| 0.308363
| 8,023
| 220
| 155
| 36.468182
| 0.783384
| 0.260127
| 0
| 0.495146
| 0
| 0.009709
| 0.165385
| 0.11685
| 0
| 0
| 0
| 0
| 0
| 1
| 0.097087
| false
| 0
| 0.009709
| 0
| 0.23301
| 0.009709
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
5d3036704d47d3166704fd2f26ded0878965ca89
| 57
|
py
|
Python
|
katas/kyu_8/removing_elements.py
|
the-zebulan/CodeWars
|
1eafd1247d60955a5dfb63e4882e8ce86019f43a
|
[
"MIT"
] | 40
|
2016-03-09T12:26:20.000Z
|
2022-03-23T08:44:51.000Z
|
katas/kyu_8/removing_elements.py
|
akalynych/CodeWars
|
1eafd1247d60955a5dfb63e4882e8ce86019f43a
|
[
"MIT"
] | null | null | null |
katas/kyu_8/removing_elements.py
|
akalynych/CodeWars
|
1eafd1247d60955a5dfb63e4882e8ce86019f43a
|
[
"MIT"
] | 36
|
2016-11-07T19:59:58.000Z
|
2022-03-31T11:18:27.000Z
|
def remove_every_other(my_list):
    """Return a new list keeping every other element of *my_list*,
    starting with the first one."""
    return [item for index, item in enumerate(my_list) if index % 2 == 0]
| 19
| 32
| 0.736842
| 10
| 57
| 3.8
| 0.8
| 0.315789
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.020408
| 0.140351
| 57
| 2
| 33
| 28.5
| 0.755102
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
5d525706c06d07e70fd7ce1f86b5a9e85f7ff43c
| 31
|
py
|
Python
|
remotejoystick/__init__.py
|
littlecodersh/remotejoysticks
|
10d5e39bd143dafd49ad04b1bb746b58a86228a3
|
[
"MIT"
] | 8
|
2016-09-13T06:36:58.000Z
|
2021-11-18T13:57:44.000Z
|
remotejoystick/__init__.py
|
littlecodersh/RemoteJoystick
|
10d5e39bd143dafd49ad04b1bb746b58a86228a3
|
[
"MIT"
] | 1
|
2016-09-13T06:21:31.000Z
|
2016-09-13T06:21:31.000Z
|
remotejoystick/__init__.py
|
littlecodersh/RemoteJoystick
|
10d5e39bd143dafd49ad04b1bb746b58a86228a3
|
[
"MIT"
] | 6
|
2017-08-07T04:13:59.000Z
|
2019-04-16T01:46:02.000Z
|
from .main import __version__
| 15.5
| 30
| 0.806452
| 4
| 31
| 5.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16129
| 31
| 1
| 31
| 31
| 0.807692
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5392e6a6b92a074003712a8b932b58c33fbf5ad4
| 76
|
py
|
Python
|
PyPunch/__init__.py
|
TheToddLuci0/PyPunch
|
c4d7d092333d3ca365a94944986afb50fb958b82
|
[
"BSD-3-Clause"
] | 3
|
2020-04-29T00:59:27.000Z
|
2021-04-28T19:57:49.000Z
|
PyPunch/__init__.py
|
TheToddLuci0/PyPunch
|
c4d7d092333d3ca365a94944986afb50fb958b82
|
[
"BSD-3-Clause"
] | null | null | null |
PyPunch/__init__.py
|
TheToddLuci0/PyPunch
|
c4d7d092333d3ca365a94944986afb50fb958b82
|
[
"BSD-3-Clause"
] | null | null | null |
from PyPunch.main import build_card
from PyPunch.mappings import MAPPINGS
| 25.333333
| 38
| 0.842105
| 11
| 76
| 5.727273
| 0.636364
| 0.349206
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.131579
| 76
| 2
| 39
| 38
| 0.954545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
53f9f9095e765f769cac3d7f3c2b6d5b4f4605fe
| 14,985
|
py
|
Python
|
tests/api/v3_1_0/test_anc_policy.py
|
CiscoISE/ciscoisesdk
|
860b0fc7cc15d0c2a39c64608195a7ab3d5f4885
|
[
"MIT"
] | 36
|
2021-05-18T16:24:19.000Z
|
2022-03-05T13:44:41.000Z
|
tests/api/v3_1_0/test_anc_policy.py
|
CiscoISE/ciscoisesdk
|
860b0fc7cc15d0c2a39c64608195a7ab3d5f4885
|
[
"MIT"
] | 15
|
2021-06-08T19:03:37.000Z
|
2022-02-25T14:47:33.000Z
|
tests/api/v3_1_0/test_anc_policy.py
|
CiscoISE/ciscoisesdk
|
860b0fc7cc15d0c2a39c64608195a7ab3d5f4885
|
[
"MIT"
] | 6
|
2021-06-10T09:32:01.000Z
|
2022-01-12T08:34:39.000Z
|
# -*- coding: utf-8 -*-
"""IdentityServicesEngineAPI anc_policy API fixtures and tests.
Copyright (c) 2021 Cisco and/or its affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import pytest
from fastjsonschema.exceptions import JsonSchemaException
from ciscoisesdk.exceptions import MalformedRequest
from ciscoisesdk.exceptions import ciscoisesdkException
from tests.environment import IDENTITY_SERVICES_ENGINE_VERSION
pytestmark = pytest.mark.skipif(IDENTITY_SERVICES_ENGINE_VERSION != '3.1.0', reason='version does not match')
def is_valid_get_anc_policy_by_name(json_schema_validate, obj):
if not obj:
return False
assert hasattr(obj, 'headers')
assert hasattr(obj, 'content')
assert hasattr(obj, 'text')
assert hasattr(obj, 'response')
json_schema_validate('jsd_983a095b061f564ebba331f66505b0e3_v3_1_0').validate(obj.response)
return True
def get_anc_policy_by_name(api):
endpoint_result = api.anc_policy.get_anc_policy_by_name(
name='string'
)
return endpoint_result
@pytest.mark.anc_policy
def test_get_anc_policy_by_name(api, validator):
try:
assert is_valid_get_anc_policy_by_name(
validator,
get_anc_policy_by_name(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print("ERROR: {error}".format(error=original_e))
raise original_e
def get_anc_policy_by_name_default(api):
endpoint_result = api.anc_policy.get_anc_policy_by_name(
name='string'
)
return endpoint_result
@pytest.mark.anc_policy
def test_get_anc_policy_by_name_default(api, validator):
try:
assert is_valid_get_anc_policy_by_name(
validator,
get_anc_policy_by_name_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
def is_valid_get_anc_policy_by_id(json_schema_validate, obj):
if not obj:
return False
assert hasattr(obj, 'headers')
assert hasattr(obj, 'content')
assert hasattr(obj, 'text')
assert hasattr(obj, 'response')
json_schema_validate('jsd_f41f77362663580d8cc3e6e88623889d_v3_1_0').validate(obj.response)
return True
def get_anc_policy_by_id(api):
endpoint_result = api.anc_policy.get_anc_policy_by_id(
id='string'
)
return endpoint_result
@pytest.mark.anc_policy
def test_get_anc_policy_by_id(api, validator):
try:
assert is_valid_get_anc_policy_by_id(
validator,
get_anc_policy_by_id(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print("ERROR: {error}".format(error=original_e))
raise original_e
def get_anc_policy_by_id_default(api):
endpoint_result = api.anc_policy.get_anc_policy_by_id(
id='string'
)
return endpoint_result
@pytest.mark.anc_policy
def test_get_anc_policy_by_id_default(api, validator):
try:
assert is_valid_get_anc_policy_by_id(
validator,
get_anc_policy_by_id_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
def is_valid_update_anc_policy_by_id(json_schema_validate, obj):
if not obj:
return False
assert hasattr(obj, 'headers')
assert hasattr(obj, 'content')
assert hasattr(obj, 'text')
assert hasattr(obj, 'response')
json_schema_validate('jsd_1d79b507bda155c180d42f0a67ef64d5_v3_1_0').validate(obj.response)
return True
def update_anc_policy_by_id(api):
endpoint_result = api.anc_policy.update_anc_policy_by_id(
actions=['string'],
active_validation=False,
id='string',
name='string',
payload=None
)
return endpoint_result
@pytest.mark.anc_policy
def test_update_anc_policy_by_id(api, validator):
    """Schema-validate the update_anc_policy_by_id response; tolerate known API errors."""
    try:
        response = update_anc_policy_by_id(api)
        assert is_valid_update_anc_policy_by_id(validator, response)
    except Exception as original_e:
        # Only schema/request errors are acceptable failures for this endpoint.
        with pytest.raises((JsonSchemaException, MalformedRequest)):
            print("ERROR: {error}".format(error=original_e))
            raise original_e
def update_anc_policy_by_id_default(api):
    """Call anc_policy.update_anc_policy_by_id with only the required id; optionals left as None."""
    return api.anc_policy.update_anc_policy_by_id(
        active_validation=False,
        id='string',
        actions=None,
        name=None,
        payload=None,
    )
@pytest.mark.anc_policy
def test_update_anc_policy_by_id_default(api, validator):
    """Schema-validate update_anc_policy_by_id when called with default arguments."""
    try:
        response = update_anc_policy_by_id_default(api)
        assert is_valid_update_anc_policy_by_id(validator, response)
    except Exception as original_e:
        # TypeError is also tolerated: default (None) arguments may be rejected client-side.
        with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
            raise original_e
def is_valid_delete_anc_policy_by_id(json_schema_validate, obj):
    """Return True when *obj* is a REST response object whose body passes schema validation."""
    if not obj:
        return False
    # The wrapper must expose the usual response attributes.
    for attribute in ('headers', 'content', 'text', 'response'):
        assert hasattr(obj, attribute)
    json_schema_validate('jsd_7c6b8dd764e052699d4d7a0d8ba43640_v3_1_0').validate(obj.response)
    return True
def delete_anc_policy_by_id(api):
    """Call the anc_policy.delete_anc_policy_by_id endpoint with a placeholder id."""
    return api.anc_policy.delete_anc_policy_by_id(id='string')
@pytest.mark.anc_policy
def test_delete_anc_policy_by_id(api, validator):
    """Schema-validate the delete_anc_policy_by_id response; tolerate known API errors."""
    try:
        response = delete_anc_policy_by_id(api)
        assert is_valid_delete_anc_policy_by_id(validator, response)
    except Exception as original_e:
        # Only schema/request errors are acceptable failures for this endpoint.
        with pytest.raises((JsonSchemaException, MalformedRequest)):
            print("ERROR: {error}".format(error=original_e))
            raise original_e
def delete_anc_policy_by_id_default(api):
    """Call anc_policy.delete_anc_policy_by_id supplying only the required id."""
    return api.anc_policy.delete_anc_policy_by_id(id='string')
@pytest.mark.anc_policy
def test_delete_anc_policy_by_id_default(api, validator):
    """Schema-validate delete_anc_policy_by_id when called with default arguments."""
    try:
        response = delete_anc_policy_by_id_default(api)
        assert is_valid_delete_anc_policy_by_id(validator, response)
    except Exception as original_e:
        # TypeError is also tolerated: default (None) arguments may be rejected client-side.
        with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
            raise original_e
def is_valid_get_anc_policy(json_schema_validate, obj):
    """Return True when *obj* is a REST response object whose body passes schema validation."""
    if not obj:
        return False
    # The wrapper must expose the usual response attributes.
    for attribute in ('headers', 'content', 'text', 'response'):
        assert hasattr(obj, attribute)
    json_schema_validate('jsd_440813c9722c56108cac8ca50bf8f01c_v3_1_0').validate(obj.response)
    return True
def get_anc_policy(api):
    """Call anc_policy.get_anc_policy with placeholder paging/filter/sort arguments."""
    return api.anc_policy.get_anc_policy(
        filter='value1,value2',
        filter_type='string',
        page=0,
        size=0,
        sortasc='string',
        sortdsc='string',
    )
@pytest.mark.anc_policy
def test_get_anc_policy(api, validator):
    """Schema-validate the get_anc_policy response; tolerate known API errors."""
    try:
        response = get_anc_policy(api)
        assert is_valid_get_anc_policy(validator, response)
    except Exception as original_e:
        # Only schema/request errors are acceptable failures for this endpoint.
        with pytest.raises((JsonSchemaException, MalformedRequest)):
            print("ERROR: {error}".format(error=original_e))
            raise original_e
def get_anc_policy_default(api):
    """Call anc_policy.get_anc_policy with every optional argument left as None."""
    return api.anc_policy.get_anc_policy(
        filter=None,
        filter_type=None,
        page=None,
        size=None,
        sortasc=None,
        sortdsc=None,
    )
@pytest.mark.anc_policy
def test_get_anc_policy_default(api, validator):
    """Schema-validate get_anc_policy when called with default arguments."""
    try:
        response = get_anc_policy_default(api)
        assert is_valid_get_anc_policy(validator, response)
    except Exception as original_e:
        # TypeError is also tolerated: default (None) arguments may be rejected client-side.
        with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
            raise original_e
def is_valid_create_anc_policy(json_schema_validate, obj):
    """Return True when *obj* is a REST response object whose body passes schema validation."""
    if not obj:
        return False
    # The wrapper must expose the usual response attributes.
    for attribute in ('headers', 'content', 'text', 'response'):
        assert hasattr(obj, attribute)
    json_schema_validate('jsd_2acfdb4060de5a1895b383238c205986_v3_1_0').validate(obj.response)
    return True
def create_anc_policy(api):
    """Call anc_policy.create_anc_policy with placeholder values for every field."""
    return api.anc_policy.create_anc_policy(
        actions=['string'],
        active_validation=False,
        name='string',
        payload=None,
    )
@pytest.mark.anc_policy
def test_create_anc_policy(api, validator):
    """Schema-validate the create_anc_policy response; tolerate known API errors."""
    try:
        response = create_anc_policy(api)
        assert is_valid_create_anc_policy(validator, response)
    except Exception as original_e:
        # Only schema/request errors are acceptable failures for this endpoint.
        with pytest.raises((JsonSchemaException, MalformedRequest)):
            print("ERROR: {error}".format(error=original_e))
            raise original_e
def create_anc_policy_default(api):
    """Call anc_policy.create_anc_policy with every optional argument left as None."""
    return api.anc_policy.create_anc_policy(
        active_validation=False,
        actions=None,
        name=None,
        payload=None,
    )
@pytest.mark.anc_policy
def test_create_anc_policy_default(api, validator):
    """Schema-validate create_anc_policy when called with default arguments."""
    try:
        response = create_anc_policy_default(api)
        assert is_valid_create_anc_policy(validator, response)
    except Exception as original_e:
        # TypeError is also tolerated: default (None) arguments may be rejected client-side.
        with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
            raise original_e
def is_valid_get_version(json_schema_validate, obj):
    """Return True when *obj* is a REST response object whose body passes schema validation."""
    if not obj:
        return False
    # The wrapper must expose the usual response attributes.
    for attribute in ('headers', 'content', 'text', 'response'):
        assert hasattr(obj, attribute)
    json_schema_validate('jsd_b01a12e2b55e582084fab915465bf962_v3_1_0').validate(obj.response)
    return True
def get_version(api):
    """Call the anc_policy.get_version endpoint (takes no arguments)."""
    return api.anc_policy.get_version()
@pytest.mark.anc_policy
def test_get_version(api, validator):
    """Schema-validate the get_version response; tolerate known API errors."""
    try:
        response = get_version(api)
        assert is_valid_get_version(validator, response)
    except Exception as original_e:
        # Only schema/request errors are acceptable failures for this endpoint.
        with pytest.raises((JsonSchemaException, MalformedRequest)):
            print("ERROR: {error}".format(error=original_e))
            raise original_e
def get_version_default(api):
    """Call anc_policy.get_version; identical to get_version since it has no parameters."""
    return api.anc_policy.get_version()
@pytest.mark.anc_policy
def test_get_version_default(api, validator):
    """Schema-validate get_version when called with default arguments."""
    try:
        response = get_version_default(api)
        assert is_valid_get_version(validator, response)
    except Exception as original_e:
        # TypeError is also tolerated: default (None) arguments may be rejected client-side.
        with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
            raise original_e
def is_valid_bulk_request_for_anc_policy(json_schema_validate, obj):
    """Return True when *obj* is a REST response object whose body passes schema validation."""
    if not obj:
        return False
    # The wrapper must expose the usual response attributes.
    for attribute in ('headers', 'content', 'text', 'response'):
        assert hasattr(obj, attribute)
    json_schema_validate('jsd_4d67f9f6fba65dcbbcf64ca3e31b39a6_v3_1_0').validate(obj.response)
    return True
def bulk_request_for_anc_policy(api):
    """Call anc_policy.bulk_request_for_anc_policy with placeholder values."""
    return api.anc_policy.bulk_request_for_anc_policy(
        active_validation=False,
        operation_type='string',
        payload=None,
        resource_media_type='string',
    )
@pytest.mark.anc_policy
def test_bulk_request_for_anc_policy(api, validator):
    """Schema-validate the bulk_request_for_anc_policy response; tolerate known API errors."""
    try:
        response = bulk_request_for_anc_policy(api)
        assert is_valid_bulk_request_for_anc_policy(validator, response)
    except Exception as original_e:
        # Only schema/request errors are acceptable failures for this endpoint.
        with pytest.raises((JsonSchemaException, MalformedRequest)):
            print("ERROR: {error}".format(error=original_e))
            raise original_e
def bulk_request_for_anc_policy_default(api):
    """Call anc_policy.bulk_request_for_anc_policy with every optional argument left as None."""
    return api.anc_policy.bulk_request_for_anc_policy(
        active_validation=False,
        operation_type=None,
        payload=None,
        resource_media_type=None,
    )
@pytest.mark.anc_policy
def test_bulk_request_for_anc_policy_default(api, validator):
    """Schema-validate bulk_request_for_anc_policy when called with default arguments."""
    try:
        response = bulk_request_for_anc_policy_default(api)
        assert is_valid_bulk_request_for_anc_policy(validator, response)
    except Exception as original_e:
        # TypeError is also tolerated: default (None) arguments may be rejected client-side.
        with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
            raise original_e
def is_valid_monitor_bulk_status_anc_policy(json_schema_validate, obj):
    """Return True when *obj* is a REST response object whose body passes schema validation."""
    if not obj:
        return False
    # The wrapper must expose the usual response attributes.
    for attribute in ('headers', 'content', 'text', 'response'):
        assert hasattr(obj, attribute)
    json_schema_validate('jsd_10023cdff02b5185b9b54c9e58762704_v3_1_0').validate(obj.response)
    return True
def monitor_bulk_status_anc_policy(api):
    """Call anc_policy.monitor_bulk_status_anc_policy with a placeholder bulk id."""
    return api.anc_policy.monitor_bulk_status_anc_policy(bulkid='string')
@pytest.mark.anc_policy
def test_monitor_bulk_status_anc_policy(api, validator):
    """Schema-validate the monitor_bulk_status_anc_policy response; tolerate known API errors."""
    try:
        response = monitor_bulk_status_anc_policy(api)
        assert is_valid_monitor_bulk_status_anc_policy(validator, response)
    except Exception as original_e:
        # Only schema/request errors are acceptable failures for this endpoint.
        with pytest.raises((JsonSchemaException, MalformedRequest)):
            print("ERROR: {error}".format(error=original_e))
            raise original_e
def monitor_bulk_status_anc_policy_default(api):
    """Call anc_policy.monitor_bulk_status_anc_policy supplying only the required bulk id."""
    return api.anc_policy.monitor_bulk_status_anc_policy(bulkid='string')
@pytest.mark.anc_policy
def test_monitor_bulk_status_anc_policy_default(api, validator):
    """Schema-validate monitor_bulk_status_anc_policy when called with default arguments."""
    try:
        response = monitor_bulk_status_anc_policy_default(api)
        assert is_valid_monitor_bulk_status_anc_policy(validator, response)
    except Exception as original_e:
        # TypeError is also tolerated: default (None) arguments may be rejected client-side.
        with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
            raise original_e
| 29.324853
| 109
| 0.704304
| 1,881
| 14,985
| 5.27698
| 0.105795
| 0.113339
| 0.048761
| 0.04322
| 0.832561
| 0.813016
| 0.808281
| 0.796393
| 0.792263
| 0.758312
| 0
| 0.019616
| 0.220954
| 14,985
| 510
| 110
| 29.382353
| 0.830649
| 0.077144
| 0
| 0.670051
| 0
| 0
| 0.065186
| 0.027999
| 0
| 0
| 0
| 0
| 0.137056
| 1
| 0.114213
| false
| 0
| 0.01269
| 0
| 0.218274
| 0.022843
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
54f63d5622dacf91070079ad5e252adbbd0c4d3a
| 30,468
|
py
|
Python
|
pypykatz/smb/cmdhelper.py
|
ohio813/pypykatz
|
af308f686c7231e566d095492deff06cd12c63f2
|
[
"MIT"
] | null | null | null |
pypykatz/smb/cmdhelper.py
|
ohio813/pypykatz
|
af308f686c7231e566d095492deff06cd12c63f2
|
[
"MIT"
] | null | null | null |
pypykatz/smb/cmdhelper.py
|
ohio813/pypykatz
|
af308f686c7231e566d095492deff06cd12c63f2
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
#
# Author:
# Tamas Jos (@skelsec)
#
import os
import json
import ntpath
import asyncio
import platform
import argparse
import base64
import traceback
from pypykatz import logging
from pypykatz.commons.common import UniversalEncoder
from pypykatz.alsadecryptor.packages.msv.decryptor import LogonSession
"""
This is a wrapper for aiosmb
"""
class SMBCMDArgs:
    """Argument container mimicking the aiosmb smbclient CLI arguments.

    Instances are filled in by the SMB command helpers and handed to
    aiosmb's example client entry point.
    """

    def __init__(self):
        # Fix: the original assigned smb_url = None twice; kept a single assignment.
        self.smb_url = None            # SMB connection string, set by the caller
        self.verbose = 0               # verbosity level, stacked like -vvv
        self.silent = True             # suppress extra client output
        self.no_interactive = False    # allow dropping into the interactive shell
        # Default command sequence: authenticate, then go interactive.
        self.commands = ['login', 'i']
# Epilog shown under the live 'smb' command group (typo "insted" fixed to "instead").
smb_live_epilog = 'FOR AVAILABLE SUBCOMMANDS TYPE "... smb help" instead of "-h" '
class SMBCMDHelper:
def __init__(self):
    """Advertise the command keywords this helper responds to."""
    # The same keyword triggers both the offline and the live code paths.
    self.keywords = ['smb']
    self.live_keywords = ['smb']
def add_args(self, parser, live_parser):
    """Register the 'smb' command tree on both argument parsers.

    parser: the main (offline) subparser collection; gains an 'smb' group
        whose selected subcommand lands in args.smb_module.
    live_parser: the live-command subparser collection; gains an 'smb' group
        built from a shared parent parser, selected subcommand in
        args.livesmbcommand.
    """
    # ---- offline 'smb' command group ---------------------------------
    smb_group = parser.add_parser('smb', help='SMB related commands')
    smb_subparsers = smb_group.add_subparsers()
    smb_subparsers.required = True
    smb_subparsers.dest = 'smb_module'
    # interactive/scripted SMB client
    smb_client_group = smb_subparsers.add_parser('client', help='SMB client. Use "help" instead of "-h" to get the available subcommands')
    smb_client_group.add_argument('-v', '--verbose', action='count', default=0, help='Verbosity, can be stacked')
    smb_client_group.add_argument('url', help="SMB connection string")
    smb_client_group.add_argument('commands', nargs='*', help="!OPTIONAL! Takes a series of commands which will be executed until error encountered. If the command is 'i' is encountered during execution it drops back to interactive shell.")
    # parse a remote LSASS dump file over SMB
    smb_lsassfile_group = smb_subparsers.add_parser('lsassfile', help='Parse a remote LSASS dump file.')
    smb_lsassfile_group.add_argument('url', help="SMB connection string with file in path field. Example: 'smb2+ntlm-password://TEST\\Administrator:QLFbT8zkiFGlJuf0B3Qq@10.10.10.102/C$/Users/victim/Desktop/lsass.DMP'")
    smb_lsassfile_group.add_argument('--json', action='store_true',help = 'Print credentials in JSON format')
    smb_lsassfile_group.add_argument('-o', '--outfile', help = 'Save results to file (you can specify --json for json file, or text format will be written)')
    smb_lsassfile_group.add_argument('-k', '--kerberos-dir', help = 'Save kerberos tickets to a directory.')
    smb_lsassfile_group.add_argument('-g', '--grep', action='store_true', help = 'Print credentials in greppable format')
    smb_lsassfile_group.add_argument('--chunksize', type=int, default=64*1024, help = 'Chunksize for file data retrival')
    smb_lsassfile_group.add_argument('-p','--packages', choices = ['all','msv', 'wdigest', 'tspkg', 'ssp', 'livessp', 'dpapi', 'cloudap'], nargs="+", default = 'all', help = 'LSASS package to parse')
    # remotely dump and parse LSASS
    smb_lsassdump_group = smb_subparsers.add_parser('lsassdump', help='Remotely dumps and parses LSASS')
    smb_lsassdump_group.add_argument('url', help="SMB connection string Example: 'smb2+ntlm-password://TEST\\Administrator:QLFbT8zkiFGlJuf0B3Qq@10.10.10.102'")
    # NOTE(review): this help text looks copy-pasted from --json; presumably it should describe the dump method.
    smb_lsassdump_group.add_argument('-m','--method', choices=['taskexec'] , default = 'taskexec', help = 'Print credentials in JSON format')
    smb_lsassdump_group.add_argument('--json', action='store_true',help = 'Print credentials in JSON format')
    smb_lsassdump_group.add_argument('-o', '--outfile', help = 'Save results to file (you can specify --json for json file, or text format will be written)')
    smb_lsassdump_group.add_argument('-k', '--kerberos-dir', help = 'Save kerberos tickets to a directory.')
    smb_lsassdump_group.add_argument('-g', '--grep', action='store_true', help = 'Print credentials in greppable format')
    smb_lsassdump_group.add_argument('--chunksize', type=int, default=64*1024, help = 'Chunksize for file data retrival')
    smb_lsassdump_group.add_argument('-p','--packages', choices = ['all','msv', 'wdigest', 'tspkg', 'ssp', 'livessp', 'dpapi', 'cloudap'], nargs="+", default = 'all', help = 'LSASS package to parse')
    # parse registry hive dump files stored on a remote share
    smb_regfile_group = smb_subparsers.add_parser('regfile', help='Parse a remote registry hive dumps')
    smb_regfile_group.add_argument('url', help="SMB connection string with folder in path field. Example: 'smb2+ntlm-password://TEST\\Administrator:QLFbT8zkiFGlJuf0B3Qq@10.10.10.102/C$/Users/victim/Desktop/'")
    smb_regfile_group.add_argument('system', help='path to the SYSTEM registry hive')
    smb_regfile_group.add_argument('--sam', help='path to the SAM registry hive')
    smb_regfile_group.add_argument('--security', help='path to the SECURITY registry hive')
    smb_regfile_group.add_argument('--software', help='path to the SOFTWARE registry hive')
    smb_regfile_group.add_argument('-o', '--outfile', help = 'Save results to file (you can specify --json for json file, or text format will be written)')
    smb_regfile_group.add_argument('--json', action='store_true',help = 'Print credentials in JSON format')
    # remotely dump and parse the registry
    smb_regsec_group = smb_subparsers.add_parser('regdump', help='Remotely dumps and parses registry')
    smb_regsec_group.add_argument('url', help="SMB connection string. Example: 'smb2+ntlm-password://TEST\\Administrator:QLFbT8zkiFGlJuf0B3Qq@10.10.10.102'")
    smb_regsec_group.add_argument('-o', '--outfile', help = 'Save results to file (you can specify --json for json file, or text format will be written)')
    smb_regsec_group.add_argument('--json', action='store_true',help = 'Print credentials in JSON format')
    # DCSync (replication-based credential dump)
    smb_dcsync_group = smb_subparsers.add_parser('dcsync', help='DcSync')
    smb_dcsync_group.add_argument('url', help="SMB connection string. Example: 'smb2+ntlm-password://TEST\\Administrator:QLFbT8zkiFGlJuf0B3Qq@10.10.10.2'")
    smb_dcsync_group.add_argument('-u', '--username', help='taget username')
    smb_dcsync_group.add_argument('-o', '--outfile', help = 'Save results to file')
    # combined secretsdump (LSASS + registry + DCSync)
    smb_secretsdump_group = smb_subparsers.add_parser('secretsdump', help='secretsdump')
    smb_secretsdump_group.add_argument('url', help="SMB connection string. Example: 'smb2+ntlm-password://TEST\\Administrator:QLFbT8zkiFGlJuf0B3Qq@10.10.10.102/'")
    smb_secretsdump_group.add_argument('--json', action='store_true',help = 'Print credentials in JSON format')
    smb_secretsdump_group.add_argument('-o', '--outfile', help = 'Save results to file (you can specify --json for json file, or text format will be written)')
    smb_secretsdump_group.add_argument('-k', '--kerberos-dir', help = 'Save kerberos tickets to a directory.')
    smb_secretsdump_group.add_argument('-g', '--grep', action='store_true', help = 'Print credentials in greppable format')
    smb_secretsdump_group.add_argument('--chunksize', type=int, default=64*1024, help = 'Chunksize for file data retrival')
    smb_secretsdump_group.add_argument('-p','--packages', choices = ['all','msv', 'wdigest', 'tspkg', 'ssp', 'livessp', 'dpapi', 'cloudap'], nargs="+", default = 'all', help = 'LSASS package to parse')
    # SMB share enumerator (offline / explicit credentials)
    smb_shareenum_parser = smb_subparsers.add_parser('shareenum', help = 'SMB share enumerator')
    smb_shareenum_parser.add_argument('-v', '--verbose', action='count', default=0, help='Verbosity, can be stacked')
    smb_shareenum_parser.add_argument('--depth', type=int, default =3, help="Maximum level of folders to enum")
    smb_shareenum_parser.add_argument('--maxitems', type=int, default = None, help="Maximum number of items per forlder to enumerate")
    smb_shareenum_parser.add_argument('--dirsd', action='store_true', help="Fetch Security Descriptors for folders")
    smb_shareenum_parser.add_argument('--filesd', action='store_true', help="Fetch Security Descriptors for files")
    smb_shareenum_parser.add_argument('-w', '--worker-count', type=int, default = 10, help="Number of parallell enum workers. Always one worker/host")
    smb_shareenum_parser.add_argument('-l', '--ldap', help="Use LDAP to get a list of machines to enumerate. This will return dns names so be carefule to have a correct DNS server config!")
    smb_shareenum_parser.add_argument('--progress', action='store_true', help="Progress bar. Please use it with output-file set!")
    smb_shareenum_parser.add_argument('-o','--out-file', help="Output file")
    smb_shareenum_parser.add_argument('--json', action='store_true', help="Output format is JSON")
    smb_shareenum_parser.add_argument('--tsv', action='store_true', help="Output format is TSV")
    smb_shareenum_parser.add_argument('-t', '--target', nargs='*', help="Files/IPs/Hostnames for targets. Can be omitted if LDAP is used")
    smb_shareenum_parser.add_argument('--max-runtime', type=int, default = None, help="Maximum runtime per host (in seconds)")
    # the first option string determines the dest: args.es / args.ed / args.et
    smb_shareenum_parser.add_argument('--es', '--exclude-share', nargs='*', help = 'Exclude shares with name specified')
    smb_shareenum_parser.add_argument('--ed', '--exclude-dir', nargs='*', help = 'Exclude directories with name specified')
    smb_shareenum_parser.add_argument('--et', '--exclude-target', nargs='*', help = 'Exclude hosts from enumeration')
    smb_shareenum_parser.add_argument('smb_url', help = 'SMB connection string. Credentials specified here will be used to perform the enumeration')
    # ---- live 'smb' command group (current user context) -------------
    # Built as a standalone parent parser and attached to live_parser below.
    live_subcommand_parser = argparse.ArgumentParser(add_help=False)
    live_smb_subparsers = live_subcommand_parser.add_subparsers(help = 'LIVE SMB commands work under the current user context.')
    live_smb_subparsers.required = True
    live_smb_subparsers.dest = 'livesmbcommand'
    live_client_parser = live_smb_subparsers.add_parser('client', help = 'SMB (live) client. Use "help" instead of "-h" to get the available subcommands')
    live_client_parser.add_argument('--authmethod', choices=['ntlm', 'kerberos'], default = 'ntlm', help= 'Authentication method to use during login')
    live_client_parser.add_argument('--protocol-version', choices=['2', '3'], default = '2', help= 'SMB protocol version. SMB1 is not supported.')
    live_client_parser.add_argument('-v', '--verbose', action='count', default=0, help='Verbosity, can be stacked')
    live_client_parser.add_argument('host', help='Target host to connect to')
    live_client_parser.add_argument('commands', nargs='*', help="!OPTIONAL! Takes a series of commands which will be executed until error encountered. If the command is 'i' is encountered during execution it drops back to interactive shell.")
    live_lsassdump_group = live_smb_subparsers.add_parser('lsassdump', help='Remotely dumps and parses LSASS')
    live_lsassdump_group.add_argument('host', help='Target host to connect to')
    live_lsassdump_group.add_argument('--authmethod', choices=['ntlm', 'kerberos'], default = 'kerberos', help= 'Authentication method to use during login. If kerberos is used, the target must be DNS or hostname, NOT IP address!')
    live_lsassdump_group.add_argument('--protocol-version', choices=['2', '3'], default = '2', help= 'SMB protocol version. SMB1 is not supported.')
    live_lsassdump_group.add_argument('-m','--method', choices=['taskexec'] , default = 'taskexec', help = 'Print credentials in JSON format')
    live_lsassdump_group.add_argument('--json', action='store_true',help = 'Print credentials in JSON format')
    live_lsassdump_group.add_argument('-o', '--outfile', help = 'Save results to file (you can specify --json for json file, or text format will be written)')
    live_lsassdump_group.add_argument('-k', '--kerberos-dir', help = 'Save kerberos tickets to a directory.')
    live_lsassdump_group.add_argument('-g', '--grep', action='store_true', help = 'Print credentials in greppable format')
    live_lsassdump_group.add_argument('--chunksize', type=int, default=64*1024, help = 'Chunksize for file data retrival')
    live_lsassdump_group.add_argument('-p','--packages', choices = ['all','msv', 'wdigest', 'tspkg', 'ssp', 'livessp', 'dpapi', 'cloudap'], nargs="+", default = 'all', help = 'LSASS package to parse')
    live_regsec_group = live_smb_subparsers.add_parser('regdump', help='Remotely dumps and parses registry')
    live_regsec_group.add_argument('host', help='Target host to connect to')
    live_regsec_group.add_argument('--authmethod', choices=['ntlm', 'kerberos'], default = 'kerberos', help= 'Authentication method to use during login. If kerberos is used, the target must be DNS or hostname, NOT IP address!')
    live_regsec_group.add_argument('--protocol-version', choices=['2', '3'], default = '2', help= 'SMB protocol version. SMB1 is not supported.')
    live_regsec_group.add_argument('-o', '--outfile', help = 'Save results to file (you can specify --json for json file, or text format will be written)')
    live_regsec_group.add_argument('--json', action='store_true',help = 'Print credentials in JSON format')
    live_dcsync_group = live_smb_subparsers.add_parser('dcsync', help='DcSync')
    live_dcsync_group.add_argument('host', help='Target host to connect to')
    live_dcsync_group.add_argument('--authmethod', choices=['ntlm', 'kerberos'], default = 'kerberos', help= 'Authentication method to use during login. If kerberos is used, the target must be DNS or hostname, NOT IP address!')
    live_dcsync_group.add_argument('--protocol-version', choices=['2', '3'], default = '2', help= 'SMB protocol version. SMB1 is not supported.')
    live_dcsync_group.add_argument('-u', '--username', help='taget username')
    live_dcsync_group.add_argument('-o', '--outfile', help = 'Save results to file')
    live_secretsdump_group = live_smb_subparsers.add_parser('secretsdump', help='secretsdump')
    live_secretsdump_group.add_argument('host', help='Target host to connect to')
    live_secretsdump_group.add_argument('--authmethod', choices=['ntlm', 'kerberos'], default = 'kerberos', help= 'Authentication method to use during login. If kerberos is used, the target must be DNS or hostname, NOT IP address!')
    live_secretsdump_group.add_argument('--protocol-version', choices=['2', '3'], default = '2', help= 'SMB protocol version. SMB1 is not supported.')
    live_secretsdump_group.add_argument('--json', action='store_true',help = 'Print credentials in JSON format')
    live_secretsdump_group.add_argument('-o', '--outfile', help = 'Save results to file (you can specify --json for json file, or text format will be written)')
    live_secretsdump_group.add_argument('-k', '--kerberos-dir', help = 'Save kerberos tickets to a directory.')
    live_secretsdump_group.add_argument('-g', '--grep', action='store_true', help = 'Print credentials in greppable format')
    live_secretsdump_group.add_argument('--chunksize', type=int, default=64*1024, help = 'Chunksize for file data retrival')
    live_secretsdump_group.add_argument('-p','--packages', choices = ['all','msv', 'wdigest', 'tspkg', 'ssp', 'livessp', 'dpapi', 'cloudap'], nargs="+", default = 'all', help = 'LSASS package to parse')
    live_shareenum_parser = live_smb_subparsers.add_parser('shareenum', help = 'SMB (live) share enumerator. THE DEFAULT SETTINGS ARE OPTIMIZED TO WORK ON DOMAIN-JOINED MACHINES. This will start enumeration using the current user credentials.')
    live_shareenum_parser.add_argument('--authmethod', choices=['ntlm', 'kerberos'], default = 'kerberos', help= 'Authentication method to use during login. If kerberos is used, the target must be DNS or hostname, NOT IP address!')
    live_shareenum_parser.add_argument('--protocol-version', choices=['2', '3'], default = '2', help= 'SMB protocol version. SMB1 is not supported.')
    live_shareenum_parser.add_argument('-v', '--verbose', action='count', default=0, help='Verbosity, can be stacked')
    live_shareenum_parser.add_argument('--depth', type=int, default =3, help="Maximum level of folders to enum")
    live_shareenum_parser.add_argument('--maxitems', type=int, default = None, help="Maximum number of items per forlder to enumerate")
    live_shareenum_parser.add_argument('--dirsd', action='store_true', help="Fetch Security Descriptors for folders")
    live_shareenum_parser.add_argument('--filesd', action='store_true', help="Fetch Security Descriptors for files")
    live_shareenum_parser.add_argument('-w', '--worker-count', type=int, default = 10, help="Number of parallell enum workers. Always one worker/host")
    live_shareenum_parser.add_argument('--skip-ldap', action='store_true', help="Skip fetching target hosts via LDAP")
    live_shareenum_parser.add_argument('--progress', action='store_true', help="Progress bar. Please use it with output-file set!")
    live_shareenum_parser.add_argument('-o','--out-file', help="Output file")
    live_shareenum_parser.add_argument('--json', action='store_true', help="Output format is JSON")
    live_shareenum_parser.add_argument('--tsv', action='store_true', help="Output format is TSV")
    live_shareenum_parser.add_argument('-t', '--target', nargs='*', help="Files/IPs/Hostnames for targets. Can be omitted if LDAP is used")
    live_shareenum_parser.add_argument('--max-runtime', type=int, default = None, help="Maximum runtime per host (in seconds)")
    live_shareenum_parser.add_argument('--es', '--exclude-share', nargs='*', help = 'Exclude shares with name specified')
    live_shareenum_parser.add_argument('--ed', '--exclude-dir', nargs='*', help = 'Exclude directories with name specified')
    live_shareenum_parser.add_argument('--et', '--exclude-target', nargs='*', help = 'Exclude hosts from enumeration')
    # Attach the assembled live subcommand tree under 'live smb'.
    live_group = live_parser.add_parser('smb', help='SMB (live) commands', epilog=smb_live_epilog, parents=[live_subcommand_parser])
def execute(self, args):
    """Route parsed CLI arguments to the matching async runner."""
    # Offline path: 'smb ...'
    if args.command in self.keywords:
        asyncio.run(self.run(args))
    # Live path: 'live smb ...' — short-circuit keeps args.module untouched otherwise.
    if self.live_keywords and args.command == 'live' and args.module in self.live_keywords:
        asyncio.run(self.run_live(args))
async def run_live(self, args):
    """Run a 'live smb' subcommand using the current Windows logon context.

    Builds an sspi-authenticated SMB URL from the logged-on user and
    dispatches on args.livesmbcommand. Windows-only (relies on winacl/sspi).
    """
    if platform.system().lower() != 'windows':
        raise Exception('Live commands only work on Windows!')
    from aiosmb import logger as smblog
    from winacl.functions.highlevel import get_logon_info
    info = get_logon_info()
    # shareenum builds its own URL ('auto'); every other subcommand needs one here.
    if args.livesmbcommand != 'shareenum':
        smb_url = 'smb%s+sspi-%s://%s\\%s@%s' % (args.protocol_version, args.authmethod, info['domain'], info['username'], args.host)
    # Map -v count to aiosmb log level; 100 is above any standard level, i.e. silent.
    if args.verbose == 0:
        smblog.setLevel(100)
    elif args.verbose == 1:
        smblog.setLevel(level=logging.INFO)
    else:
        # more -v flags -> lower numeric level -> more verbose output
        level = 5 - args.verbose
        smblog.setLevel(level=level)
    if args.livesmbcommand == 'client':
        from aiosmb.examples.smbclient import amain
        la = SMBCMDArgs()
        la.smb_url = smb_url
        la.verbose = args.verbose
        if args.commands is not None and len(args.commands) > 0:
            la.commands = []
            if args.commands[0] == 'help':
                la.commands = ['help']
            else:
                # Ensure 'login' runs first unless the user supplied it themselves.
                if args.commands[0] != 'login':
                    la.commands.append('login')
                for command in args.commands:
                    la.commands.append(command)
        await amain(la)
    elif args.livesmbcommand == 'lsassdump':
        from pypykatz.smb.lsassutils import lsassdump
        mimi = await lsassdump(smb_url, chunksize=args.chunksize, packages=args.packages)
        self.process_results({'smbfile':mimi}, [], args)
    elif args.livesmbcommand == 'secretsdump':
        # Best-effort combination of LSASS dump, registry dump and DCSync;
        # each stage logs and continues on failure.
        from pypykatz.smb.lsassutils import lsassdump
        from pypykatz.smb.regutils import regdump
        from pypykatz.smb.dcsync import dcsync
        try:
            mimi = await lsassdump(smb_url, chunksize=args.chunksize, packages=args.packages)
            if mimi is not None:
                self.process_results({'smbfile':mimi}, [], args, file_prefix='_lsass.txt')
        except Exception as e:
            logging.exception('[SECRETSDUMP] Failed to get LSASS secrets')
        try:
            po = await regdump(smb_url)
            if po is not None:
                if args.outfile:
                    po.to_file(args.outfile+'_registry.txt', args.json)
                else:
                    if args.json:
                        print(json.dumps(po.to_dict(), cls = UniversalEncoder, indent=4, sort_keys=True))
                    else:
                        print(str(po))
        except Exception as e:
            logging.exception('[SECRETSDUMP] Failed to get registry secrets')
        try:
            if args.outfile is not None:
                # NOTE(review): if this open() raises, 'outfile' is unbound and the
                # finally block below would hit a NameError — confirm/fix upstream.
                outfile = open(args.outfile+'_dcsync.txt', 'w', newline = '')
            async for secret in dcsync(smb_url):
                if args.outfile is not None:
                    outfile.write(str(secret))
                else:
                    print(str(secret))
        except Exception as e:
            logging.exception('[SECRETSDUMP] Failed to perform DCSYNC')
        finally:
            if args.outfile is not None:
                outfile.close()
    elif args.livesmbcommand == 'dcsync':
        from pypykatz.smb.dcsync import dcsync
        if args.outfile is not None:
            outfile = open(args.outfile, 'w', newline = '')
        # Stream secrets to the outfile when given, otherwise to stdout.
        async for secret in dcsync(smb_url, args.username):
            if args.outfile is not None:
                outfile.write(str(secret))
            else:
                print(str(secret))
        if args.outfile is not None:
            outfile.close()
    elif args.livesmbcommand == 'regdump':
        from pypykatz.smb.regutils import regdump
        po = await regdump(smb_url)
        if po is not None:
            if args.outfile:
                po.to_file(args.outfile, args.json)
            else:
                if args.json:
                    print(json.dumps(po.to_dict(), cls = UniversalEncoder, indent=4, sort_keys=True))
                else:
                    print(str(po))
    elif args.livesmbcommand == 'shareenum':
        from pypykatz.smb.shareenum import shareenum
        # Output format precedence: tsv beats json beats plain str.
        output_type = 'str'
        if args.json is True:
            output_type = 'json'
        if args.tsv is True:
            output_type = 'tsv'
        exclude_share = []
        if args.es is not None:
            exclude_share = args.es
        exclude_dir = []
        if args.ed is not None:
            exclude_dir = args.ed
        # 'auto' lets shareenum discover targets via LDAP unless --skip-ldap was given.
        ldap_url = 'auto'
        if args.skip_ldap is True:
            ldap_url = None
        exclude_target = []
        if args.et is not None:
            exclude_target = args.et
        await shareenum(
            smb_url = 'auto',
            targets = args.target,
            smb_worker_count = args.worker_count,
            depth = args.depth,
            out_file = args.out_file,
            progress = args.progress,
            max_items = args.maxitems,
            dirsd = args.dirsd,
            filesd = args.filesd,
            authmethod = args.authmethod,
            protocol_version = args.protocol_version,
            output_type = output_type,
            max_runtime = args.max_runtime,
            exclude_share = exclude_share,
            exclude_dir = exclude_dir,
            ldap_url = ldap_url,
            exclude_target = exclude_target,
        )
async def run(self, args):
    """Entry point for the SMB command group.

    Configures aiosmb logging verbosity from ``args.verbose``, then
    dispatches on ``args.smb_module`` to one of the SMB helper coroutines
    (lsassfile, lsassdump, secretsdump, dcsync, regdump, regfile,
    shareenum, client).

    :param args: parsed command-line arguments; the attributes read depend
        on the selected submodule.
    """
    from aiosmb import logger as smblog

    # -v count -> aiosmb log level: silent (100) by default, INFO at -v,
    # progressively lower (= more verbose) custom levels beyond that.
    if args.verbose == 0:
        smblog.setLevel(100)
    elif args.verbose == 1:
        smblog.setLevel(level=logging.INFO)
    else:
        smblog.setLevel(level=5 - args.verbose)

    if args.smb_module == 'lsassfile':
        from pypykatz.smb.lsassutils import lsassfile
        mimi = await lsassfile(args.url, chunksize=args.chunksize, packages=args.packages)
        self.process_results({'smbfile':mimi}, [], args)

    elif args.smb_module == 'lsassdump':
        from pypykatz.smb.lsassutils import lsassdump
        mimi = await lsassdump(args.url, chunksize=args.chunksize, packages=args.packages)
        self.process_results({'smbfile':mimi}, [], args)

    elif args.smb_module == 'secretsdump':
        from pypykatz.smb.lsassutils import lsassdump
        from pypykatz.smb.regutils import regdump
        from pypykatz.smb.dcsync import dcsync

        # Each secret source is attempted independently; a failure in one
        # must not prevent the others, hence one try/except per source.
        try:
            mimi = await lsassdump(args.url, chunksize=args.chunksize, packages=args.packages)
            if mimi is not None:
                self.process_results({'smbfile':mimi}, [], args, file_prefix='_lsass.txt')
        except Exception:
            logging.exception('[SECRETSDUMP] Failed to get LSASS secrets')

        try:
            po = await regdump(args.url)
            if po is not None:
                if args.outfile:
                    po.to_file(args.outfile+'_registry.txt', args.json)
                elif args.json:
                    print(json.dumps(po.to_dict(), cls = UniversalEncoder, indent=4, sort_keys=True))
                else:
                    print(str(po))
        except Exception:
            logging.exception('[SECRETSDUMP] Failed to get registry secrets')

        # BUGFIX: pre-bind outfile so the finally clause below cannot raise
        # NameError when open() itself fails.
        outfile = None
        try:
            if args.outfile is not None:
                outfile = open(args.outfile+'_dcsync.txt', 'w', newline = '')
            async for secret in dcsync(args.url):
                if outfile is not None:
                    outfile.write(str(secret))
                else:
                    print(str(secret))
        except Exception:
            logging.exception('[SECRETSDUMP] Failed to perform DCSYNC')
        finally:
            if outfile is not None:
                outfile.close()

    elif args.smb_module == 'dcsync':
        from pypykatz.smb.dcsync import dcsync
        # BUGFIX: the output file used to leak if dcsync() raised mid-stream;
        # it is now closed in a finally clause.
        outfile = None
        try:
            if args.outfile is not None:
                outfile = open(args.outfile, 'w', newline = '')
            async for secret in dcsync(args.url, args.username):
                if outfile is not None:
                    outfile.write(str(secret))
                else:
                    print(str(secret))
        finally:
            if outfile is not None:
                outfile.close()

    elif args.smb_module == 'regdump':
        from pypykatz.smb.regutils import regdump
        po = await regdump(args.url)
        if po is not None:
            if args.outfile:
                po.to_file(args.outfile, args.json)
            elif args.json:
                print(json.dumps(po.to_dict(), cls = UniversalEncoder, indent=4, sort_keys=True))
            else:
                print(str(po))

    elif args.smb_module == 'regfile':
        from pypykatz.smb.regutils import regfile
        po = await regfile(args.url, args.system, sam = args.sam, security = args.security, software = args.software)
        if po is not None:
            if args.outfile:
                po.to_file(args.outfile, args.json)
            elif args.json:
                print(json.dumps(po.to_dict(), cls = UniversalEncoder, indent=4, sort_keys=True))
            else:
                print(str(po))

    elif args.smb_module == 'shareenum':
        from pypykatz.smb.shareenum import shareenum
        # --tsv takes precedence over --json when both are given
        # (same precedence as the original if-order).
        output_type = 'str'
        if args.json is True:
            output_type = 'json'
        if args.tsv is True:
            output_type = 'tsv'
        exclude_share = args.es if args.es is not None else []
        exclude_dir = args.ed if args.ed is not None else []
        exclude_target = args.et if args.et is not None else []
        await shareenum(
            args.smb_url,
            targets = args.target,
            smb_worker_count = args.worker_count,
            depth = args.depth,
            out_file = args.out_file,
            progress = args.progress,
            max_items = args.maxitems,
            dirsd = args.dirsd,
            filesd = args.filesd,
            output_type = output_type,
            max_runtime = args.max_runtime,
            exclude_share = exclude_share,
            exclude_dir = exclude_dir,
            ldap_url = args.ldap,
            exclude_target = exclude_target,
        )

    elif args.smb_module == 'client':
        from aiosmb.examples.smbclient import amain
        la = SMBCMDArgs()
        la.smb_url = args.url
        la.verbose = args.verbose
        if args.commands is not None and len(args.commands) > 0:
            la.commands = []
            if args.commands[0] == 'help':
                la.commands = ['help']
            else:
                # Ensure a 'login' command always precedes user commands.
                if args.commands[0] != 'login':
                    la.commands.append('login')
                for command in args.commands:
                    la.commands.append(command)
        await amain(la)
def process_results(self, results, files_with_error, args, file_prefix = ''):
    """Output parsed credential results.

    Writes to ``args.outfile`` (with ``file_prefix`` appended) when set,
    otherwise to stdout; formatted as JSON (``args.json``), grep-friendly
    colon-separated rows (``args.grep``), or human-readable text.
    When ``args.kerberos_dir`` is set, kerberos ccache/kirbi files are
    additionally written to that directory.

    :param results: maps a source name to a parse-result object (or a plain
        string message); result objects are expected to expose
        ``logon_sessions``, ``orphaned_creds`` and ``errors``
        (assumption from usage below — confirm against callers).
    :param files_with_error: names of input files that failed to parse.
    :param args: output options — ``outfile``, ``json``, ``grep``,
        ``kerberos_dir``.
    :param file_prefix: suffix appended to ``args.outfile``
        (e.g. ``'_lsass.txt'``).
    """
    if args.outfile and args.json:
        with open(args.outfile+file_prefix, 'w') as f:
            json.dump(results, f, cls = UniversalEncoder, indent=4, sort_keys=True)
    elif args.outfile and args.grep:
        with open(args.outfile+file_prefix, 'w', newline = '') as f:
            f.write(':'.join(LogonSession.grep_header) + '\r\n')
            for result in results:
                for luid in results[result].logon_sessions:
                    for row in results[result].logon_sessions[luid].to_grep_rows():
                        f.write(':'.join(row) + '\r\n')
    elif args.outfile:
        with open(args.outfile+file_prefix, 'w') as f:
            for result in results:
                f.write('FILE: ======== %s =======\n' % result)
                for luid in results[result].logon_sessions:
                    f.write('\n'+str(results[result].logon_sessions[luid]))
                if len(results[result].orphaned_creds) > 0:
                    f.write('\n== Orphaned credentials ==\n')
                    for cred in results[result].orphaned_creds:
                        f.write(str(cred))
            if len(files_with_error) > 0:
                f.write('\n== Failed to parse these files:\n')
                for filename in files_with_error:
                    f.write('%s\n' % filename)
    elif args.json:
        print(json.dumps(results, cls = UniversalEncoder, indent=4, sort_keys=True))
    elif args.grep:
        print(':'.join(LogonSession.grep_header))
        for result in results:
            for luid in results[result].logon_sessions:
                for row in results[result].logon_sessions[luid].to_grep_rows():
                    print(':'.join(row))
            for cred in results[result].orphaned_creds:
                t = cred.to_dict()
                if t['credtype'] != 'dpapi':
                    if t['password'] is not None:
                        x = [str(t['credtype']), str(t['domainname']), str(t['username']), '', '', '', '', '', str(t['password'])]
                        print(':'.join(x))
                else:
                    # FIX: reuse t computed above instead of calling
                    # cred.to_dict() a second time.
                    x = [str(t['credtype']), '', '', '', '', '', str(t['masterkey']), str(t['sha1_masterkey']), str(t['key_guid']), '']
                    print(':'.join(x))
            for pkg, err in results[result].errors:
                err_str = str(err) +'\r\n' + '\r\n'.join(traceback.format_tb(err.__traceback__))
                # base64-encode so the traceback stays on one grep-able line
                err_str = base64.b64encode(err_str.encode()).decode()
                x = [pkg+'_exception_please_report', '', '', '', '', '', '', '', '', err_str]
                print(':'.join(x) + '\r\n')
    else:
        for result in results:
            print('FILE: ======== %s =======' % result)
            if isinstance(results[result], str):
                # Plain status/message entry, no structured secrets to show.
                print(results[result])
            else:
                for luid in results[result].logon_sessions:
                    print(str(results[result].logon_sessions[luid]))
                if len(results[result].orphaned_creds) > 0:
                    print('== Orphaned credentials ==')
                    for cred in results[result].orphaned_creds:
                        print(str(cred))
                if len(results[result].errors) > 0:
                    print('== Errors ==')
                    for pkg, err in results[result].errors:
                        err_str = str(err) +'\r\n' + '\r\n'.join(traceback.format_tb(err.__traceback__))
                        err_str = base64.b64encode(err_str.encode()).decode()
                        print('%s %s' % (pkg+'_exception_please_report',err_str))
        if len(files_with_error) > 0:
            print('\n==== Parsing errors:')
            for filename in files_with_error:
                print(filename)
    if args.kerberos_dir:
        # FIX: renamed local 'dir' -> 'ccache_dir' to stop shadowing the
        # builtin dir().
        ccache_dir = os.path.abspath(args.kerberos_dir)
        logging.info('Writing kerberos tickets to %s' % ccache_dir)
        for filename in results:
            base_filename = ntpath.basename(filename)
            ccache_filename = '%s_%s.ccache' % (base_filename, os.urandom(4).hex()) #to avoid collisions
            results[filename].kerberos_ccache.to_file(os.path.join(ccache_dir, ccache_filename))
            for luid in results[filename].logon_sessions:
                for kcred in results[filename].logon_sessions[luid].kerberos_creds:
                    for ticket in kcred.tickets:
                        ticket.to_kirbi(ccache_dir)
            for cred in results[filename].orphaned_creds:
                if cred.credtype == 'kerberos':
                    for ticket in cred.tickets:
                        ticket.to_kirbi(ccache_dir)
| 47.830455
| 242
| 0.702672
| 4,199
| 30,468
| 4.944987
| 0.09407
| 0.056685
| 0.051628
| 0.043826
| 0.827345
| 0.783375
| 0.765459
| 0.731795
| 0.726257
| 0.708919
| 0
| 0.007222
| 0.159282
| 30,468
| 637
| 243
| 47.830455
| 0.803396
| 0.002297
| 0
| 0.510848
| 0
| 0.045365
| 0.298303
| 0.019338
| 0
| 0
| 0
| 0
| 0
| 1
| 0.009862
| false
| 0.015779
| 0.063116
| 0
| 0.076923
| 0.057199
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.