Dataset schema (one row per source file):

| column | dtype | observed values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 2 to 1.02M |
| ext | string | 10 classes |
| lang | string | 1 value |
| max_stars_repo_path | string | length 4 to 245 |
| max_stars_repo_name | string | length 6 to 130 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 4 to 245 |
| max_issues_repo_name | string | length 6 to 130 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 4 to 245 |
| max_forks_repo_name | string | length 6 to 130 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 2 to 1.02M |
| avg_line_length | float64 | 1 to 417k |
| max_line_length | int64 | 1 to 987k |
| alphanum_fraction | float64 | 0 to 1 |
| content_no_comment | string | length 0 to 1.01M |
| is_comment_constant_removed | bool | 1 class |
| is_sharp_comment_removed | bool | 1 class |
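A minimal sketch of how rows with this schema might be loaded and filtered, assuming the dump is stored as a Hugging Face-style Parquet shard (the file name `sample.parquet` is a placeholder, not part of this dataset):

```python
# Sketch: load one Parquet shard with the schema above and inspect it.
# Requires pandas + pyarrow; "sample.parquet" is a hypothetical path.
import pandas as pd

df = pd.read_parquet("sample.parquet")

# Columns marked "nullable" above contain missing values,
# so drop them before ranking by star count.
starred = df.dropna(subset=["max_stars_count"])
top = starred.sort_values("max_stars_count", ascending=False)
print(top[["max_stars_repo_name", "max_stars_count", "size"]].head())

# alphanum_fraction is bounded in [0, 1]; keep only higher-signal files.
print(len(df[df["alphanum_fraction"] > 0.5]), "of", len(df), "rows kept")
```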
hexsha: 1c448be188fe77732f9991217edb9ad012e025a7 | size: 20 | ext: py | lang: Python
max_stars: path=tvlibre/settings/prod.py, repo=mjroson/tvlibre, head=b0ac862710c7f22242e2adb29c4fef32d604daf3, licenses=["MIT"], count=null, min_datetime=null, max_datetime=null
max_issues: path=tvlibre/settings/prod.py, repo=mjroson/tvlibre, head=b0ac862710c7f22242e2adb29c4fef32d604daf3, licenses=["MIT"], count=3, min_datetime=2016-04-01T20:48:54.000Z, max_datetime=2016-04-02T16:06:20.000Z
max_forks: path=tvlibre/settings/__init__.py, repo=mjroson/tvlibre, head=b0ac862710c7f22242e2adb29c4fef32d604daf3, licenses=["MIT"], count=1, min_datetime=2019-05-07T20:34:07.000Z, max_datetime=2019-05-07T20:34:07.000Z
content:
__author__ = 'docn'
avg_line_length: 10 | max_line_length: 19 | alphanum_fraction: 0.7
content_no_comment:
__author__ = 'docn'
is_comment_constant_removed: true | is_sharp_comment_removed: true

hexsha: 1c448c7c722c4c7389efd5f9e82ed4dd6eb3774d | size: 538 | ext: py | lang: Python
max_stars: path=manage.py, repo=kirega/stocks, head=11b0d86054bb54feca5a59d40ddd50f11ce216da, licenses=["MIT"], count=1, min_datetime=2020-07-16T08:12:27.000Z, max_datetime=2020-07-16T08:12:27.000Z
max_issues: path=manage.py, repo=pankleshwaria/Django-REST-API, head=3844234036e3d6906f0ca8656d559be3dd8bcc95, licenses=["MIT"], count=6, min_datetime=2019-03-19T12:16:29.000Z, max_datetime=2020-06-05T20:08:39.000Z
max_forks: path=manage.py, repo=kirega/stocks, head=11b0d86054bb54feca5a59d40ddd50f11ce216da, licenses=["MIT"], count=null, min_datetime=null, max_datetime=null
content:
#!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'stocks.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
avg_line_length: 33.625 | max_line_length: 73 | alphanum_fraction: 0.685874
content_no_comment:
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'stocks.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
is_comment_constant_removed: true | is_sharp_comment_removed: true

hexsha: 1c448ce9fdfe3ff6f29e0154046191dddead72a0 | size: 381 | ext: py | lang: Python
max_stars: path=tests/test_vis.py, repo=scott-trinkle/fiberorient, head=306cf2741008eb46a97cfccdcf81e9ec33189a8d, licenses=["MIT"], count=null, min_datetime=null, max_datetime=null
max_issues: path=tests/test_vis.py, repo=scott-trinkle/fiberorient, head=306cf2741008eb46a97cfccdcf81e9ec33189a8d, licenses=["MIT"], count=null, min_datetime=null, max_datetime=null
max_forks: path=tests/test_vis.py, repo=scott-trinkle/fiberorient, head=306cf2741008eb46a97cfccdcf81e9ec33189a8d, licenses=["MIT"], count=null, min_datetime=null, max_datetime=null
content:
import pytest
import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal
from context import fiberorient as fo
def test_img_to_dec(img, vectors):
true_dec = np.zeros_like(vectors)
true_dec[..., 0] = fo.util.rescale(img, scale=255).astype(np.uint8)
test_dec = fo.vis.img_to_dec(img, vectors)
assert_array_equal(true_dec, test_dec)
avg_line_length: 29.307692 | max_line_length: 71 | alphanum_fraction: 0.766404
content_no_comment:
import pytest
import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal
from context import fiberorient as fo
def test_img_to_dec(img, vectors):
true_dec = np.zeros_like(vectors)
true_dec[..., 0] = fo.util.rescale(img, scale=255).astype(np.uint8)
test_dec = fo.vis.img_to_dec(img, vectors)
assert_array_equal(true_dec, test_dec)
is_comment_constant_removed: true | is_sharp_comment_removed: true

hexsha: 1c448d527ba265f27954470348b0ff3bc8772d49 | size: 25,215 | ext: py | lang: Python
max_stars: path=ansible/modules/network/nxos/nxos_igmp_interface.py, repo=EnjoyLifeFund/py36pkgs, head=0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2, licenses=["MIT", "BSD-2-Clause", "BSD-3-Clause"], count=null, min_datetime=null, max_datetime=null
max_issues: path=ansible/modules/network/nxos/nxos_igmp_interface.py, repo=EnjoyLifeFund/py36pkgs, head=0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2, licenses=["MIT", "BSD-2-Clause", "BSD-3-Clause"], count=null, min_datetime=null, max_datetime=null
max_forks: path=ansible/modules/network/nxos/nxos_igmp_interface.py, repo=EnjoyLifeFund/py36pkgs, head=0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2, licenses=["MIT", "BSD-2-Clause", "BSD-3-Clause"], count=null, min_datetime=null, max_datetime=null
content:
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: nxos_igmp_interface
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages IGMP interface configuration.
description:
- Manages IGMP interface configuration settings.
author:
- Jason Edelman (@jedelman8)
- Gabriele Gerbino (@GGabriele)
notes:
- When C(state=default), supported params will be reset to a default state.
These include C(version), C(startup_query_interval),
C(startup_query_count), C(robustness), C(querier_timeout), C(query_mrt),
C(query_interval), C(last_member_qrt), C(last_member_query_count),
C(group_timeout), C(report_llg), and C(immediate_leave).
- When C(state=absent), all configs for C(oif_prefix), C(oif_source), and
C(oif_routemap) will be removed.
- PIM must be enabled to use this module.
- This module is for Layer 3 interfaces.
    - A route-map check is not performed (same as the CLI) when configuring a
      route-map with 'static-oif'.
- If restart is set to true with other params set, the restart will happen
last, i.e. after the configuration takes place.
options:
interface:
description:
- The full interface name for IGMP configuration.
e.g. I(Ethernet1/2).
required: true
version:
description:
- IGMP version. It can be 2 or 3.
required: false
default: null
choices: ['2', '3']
startup_query_interval:
description:
- Query interval used when the IGMP process starts up.
The range is from 1 to 18000. The default is 31.
required: false
default: null
startup_query_count:
description:
- Query count used when the IGMP process starts up.
The range is from 1 to 10. The default is 2.
required: false
default: null
robustness:
description:
- Sets the robustness variable. Values can range from 1 to 7.
The default is 2.
required: false
default: null
querier_timeout:
description:
- Sets the querier timeout that the software uses when deciding
to take over as the querier. Values can range from 1 to 65535
seconds. The default is 255 seconds.
required: false
default: null
query_mrt:
description:
- Sets the response time advertised in IGMP queries.
Values can range from 1 to 25 seconds. The default is 10 seconds.
required: false
default: null
query_interval:
description:
- Sets the frequency at which the software sends IGMP host query
messages. Values can range from 1 to 18000 seconds.
        The default is 125 seconds.
required: false
default: null
last_member_qrt:
description:
- Sets the query interval waited after sending membership reports
before the software deletes the group state. Values can range
from 1 to 25 seconds. The default is 1 second.
required: false
default: null
last_member_query_count:
description:
- Sets the number of times that the software sends an IGMP query
in response to a host leave message.
Values can range from 1 to 5. The default is 2.
required: false
default: null
group_timeout:
description:
- Sets the group membership timeout for IGMPv2.
Values can range from 3 to 65,535 seconds.
The default is 260 seconds.
required: false
default: null
report_llg:
description:
- Configures report-link-local-groups.
Enables sending reports for groups in 224.0.0.0/24.
        Reports are always sent for non-link-local groups.
By default, reports are not sent for link local groups.
required: false
choices: ['true', 'false']
default: false
immediate_leave:
description:
- Enables the device to remove the group entry from the multicast
routing table immediately upon receiving a leave message for
the group. Use this command to minimize the leave latency of
IGMPv2 group memberships on a given IGMP interface because the
device does not send group-specific queries.
The default is disabled.
required: false
choices: ['true', 'false']
default: false
oif_routemap:
description:
- Configure a routemap for static outgoing interface (OIF).
required: false
default: null
oif_prefix:
description:
- Configure a prefix for static outgoing interface (OIF).
required: false
default: null
oif_source:
description:
- Configure a source for static outgoing interface (OIF).
required: false
default: null
restart:
description:
- Restart IGMP.
required: false
choices: ['true', 'false']
default: null
state:
description:
- Manages desired state of the resource.
required: false
default: present
    choices: ['present', 'absent', 'default']
'''
EXAMPLES = '''
- nxos_igmp_interface:
interface: ethernet1/32
startup_query_interval: 30
state: present
username: "{{ un }}"
password: "{{ pwd }}"
host: "{{ inventory_hostname }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"asn": "65535", "router_id": "1.1.1.1", "vrf": "test"}
existing:
description: k/v pairs of existing BGP configuration
returned: always
type: dict
sample: {"asn": "65535", "bestpath_always_compare_med": false,
"bestpath_aspath_multipath_relax": false,
"bestpath_compare_neighborid": false,
"bestpath_compare_routerid": false,
"bestpath_cost_community_ignore": false,
"bestpath_med_confed": false,
"bestpath_med_missing_as_worst": false,
"bestpath_med_non_deterministic": false, "cluster_id": "",
"confederation_id": "", "confederation_peers": "",
"graceful_restart": true, "graceful_restart_helper": false,
"graceful_restart_timers_restart": "120",
"graceful_restart_timers_stalepath_time": "300", "local_as": "",
"log_neighbor_changes": false, "maxas_limit": "",
"neighbor_down_fib_accelerate": false, "reconnect_interval": "60",
"router_id": "11.11.11.11", "suppress_fib_pending": false,
"timer_bestpath_limit": "", "timer_bgp_hold": "180",
"timer_bgp_keepalive": "60", "vrf": "test"}
end_state:
description: k/v pairs of BGP configuration after module execution
returned: always
type: dict
sample: {"asn": "65535", "bestpath_always_compare_med": false,
"bestpath_aspath_multipath_relax": false,
"bestpath_compare_neighborid": false,
"bestpath_compare_routerid": false,
"bestpath_cost_community_ignore": false,
"bestpath_med_confed": false,
"bestpath_med_missing_as_worst": false,
"bestpath_med_non_deterministic": false, "cluster_id": "",
"confederation_id": "", "confederation_peers": "",
"graceful_restart": true, "graceful_restart_helper": false,
"graceful_restart_timers_restart": "120",
"graceful_restart_timers_stalepath_time": "300", "local_as": "",
"log_neighbor_changes": false, "maxas_limit": "",
"neighbor_down_fib_accelerate": false, "reconnect_interval": "60",
"router_id": "1.1.1.1", "suppress_fib_pending": false,
"timer_bestpath_limit": "", "timer_bgp_hold": "180",
"timer_bgp_keepalive": "60", "vrf": "test"}
updates:
description: commands sent to the device
returned: always
type: list
sample: ["router bgp 65535", "vrf test", "router-id 1.1.1.1"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
from ansible.module_utils.nxos import get_config, load_config, run_commands
from ansible.module_utils.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
import re
def execute_show_command(command, module, command_type='cli_show'):
if module.params['transport'] == 'cli':
command += ' | json'
cmds = [command]
body = run_commands(module, cmds)
elif module.params['transport'] == 'nxapi':
cmds = [command]
body = run_commands(module, cmds)
return body
def get_interface_mode(interface, intf_type, module):
command = 'show interface {0}'.format(interface)
interface = {}
mode = 'unknown'
if intf_type in ['ethernet', 'portchannel']:
body = execute_show_command(command, module)[0]
interface_table = body['TABLE_interface']['ROW_interface']
mode = str(interface_table.get('eth_mode', 'layer3'))
if mode == 'access' or mode == 'trunk':
mode = 'layer2'
elif intf_type == 'loopback' or intf_type == 'svi':
mode = 'layer3'
return mode
def get_interface_type(interface):
if interface.upper().startswith('ET'):
return 'ethernet'
elif interface.upper().startswith('VL'):
return 'svi'
elif interface.upper().startswith('LO'):
return 'loopback'
elif interface.upper().startswith('MG'):
return 'management'
elif interface.upper().startswith('MA'):
return 'management'
elif interface.upper().startswith('PO'):
return 'portchannel'
else:
return 'unknown'
def apply_key_map(key_map, table):
new_dict = {}
for key, value in table.items():
new_key = key_map.get(key)
if new_key:
value = table.get(key)
if value:
new_dict[new_key] = value
else:
new_dict[new_key] = value
return new_dict
def flatten_list(command_lists):
flat_command_list = []
for command in command_lists:
if isinstance(command, list):
flat_command_list.extend(command)
else:
flat_command_list.append(command)
return flat_command_list
def get_igmp_interface(module, interface):
command = 'show ip igmp interface {0}'.format(interface)
igmp = {}
key_map = {
'IGMPVersion': 'version',
'ConfiguredStartupQueryInterval': 'startup_query_interval',
'StartupQueryCount': 'startup_query_count',
'RobustnessVariable': 'robustness',
'QuerierTimeout': 'querier_timeout',
'ConfiguredMaxResponseTime': 'query_mrt',
'ConfiguredQueryInterval': 'query_interval',
'LastMemberMTR': 'last_member_qrt',
'LastMemberQueryCount': 'last_member_query_count',
'ConfiguredGroupTimeout': 'group_timeout'
}
body = execute_show_command(command, module)[0]
if body:
resource = body['TABLE_vrf']['ROW_vrf']['TABLE_if']['ROW_if']
igmp = apply_key_map(key_map, resource)
report_llg = str(resource['ReportingForLinkLocal'])
if report_llg == 'true':
igmp['report_llg'] = True
elif report_llg == 'false':
igmp['report_llg'] = False
immediate_leave = str(resource['ImmediateLeave']) # returns en or dis
if immediate_leave == 'en':
igmp['immediate_leave'] = True
elif immediate_leave == 'dis':
igmp['immediate_leave'] = False
# the next block of code is used to retrieve anything with:
    # ip igmp static-oif *** i.e. could be route-map ROUTEMAP
# or PREFIX source <ip>, etc.
command = 'show run interface {0} | inc oif'.format(interface)
body = execute_show_command(
command, module, command_type='cli_show_ascii')[0]
staticoif = []
if body:
split_body = body.split('\n')
        route_map_regex = (r'.*ip igmp static-oif route-map\s+'
                           r'(?P<route_map>\S+).*')
        prefix_source_regex = (r'.*ip igmp static-oif\s+(?P<prefix>'
                               r'((\d+\.){3}\d+))(\ssource\s'
                               r'(?P<source>\S+))?.*')
for line in split_body:
temp = {}
try:
match_route_map = re.match(route_map_regex, line, re.DOTALL)
route_map = match_route_map.groupdict()['route_map']
except AttributeError:
route_map = ''
try:
match_prefix_source = re.match(
prefix_source_regex, line, re.DOTALL)
prefix_source_group = match_prefix_source.groupdict()
prefix = prefix_source_group['prefix']
source = prefix_source_group['source']
except AttributeError:
prefix = ''
source = ''
if route_map:
temp['route_map'] = route_map
if prefix:
temp['prefix'] = prefix
if source:
temp['source'] = source
if temp:
staticoif.append(temp)
igmp['oif_routemap'] = None
igmp['oif_prefix_source'] = []
if staticoif:
if len(staticoif) == 1 and staticoif[0].get('route_map'):
igmp['oif_routemap'] = staticoif[0]['route_map']
else:
igmp['oif_prefix_source'] = staticoif
return igmp
def config_igmp_interface(delta, found_both, found_prefix):
CMDS = {
'version': 'ip igmp version {0}',
'startup_query_interval': 'ip igmp startup-query-interval {0}',
'startup_query_count': 'ip igmp startup-query-count {0}',
'robustness': 'ip igmp robustness-variable {0}',
'querier_timeout': 'ip igmp querier-timeout {0}',
'query_mrt': 'ip igmp query-max-response-time {0}',
'query_interval': 'ip igmp query-interval {0}',
'last_member_qrt': 'ip igmp last-member-query-response-time {0}',
'last_member_query_count': 'ip igmp last-member-query-count {0}',
'group_timeout': 'ip igmp group-timeout {0}',
'report_llg': 'ip igmp report-link-local-groups',
'immediate_leave': 'ip igmp immediate-leave',
'oif_prefix_source': 'ip igmp static-oif {0} source {1} ',
'oif_routemap': 'ip igmp static-oif route-map {0}',
'oif_prefix': 'ip igmp static-oif {0}',
}
commands = []
command = None
for key, value in delta.items():
if key == 'oif_source' or found_both or found_prefix:
pass
elif key == 'oif_prefix':
if delta.get('oif_source'):
command = CMDS.get('oif_prefix_source').format(
delta.get('oif_prefix'), delta.get('oif_source'))
else:
command = CMDS.get('oif_prefix').format(
delta.get('oif_prefix'))
elif value:
command = CMDS.get(key).format(value)
elif not value:
command = 'no {0}'.format(CMDS.get(key).format(value))
if command:
if command not in commands:
commands.append(command)
command = None
return commands
def get_igmp_interface_defaults():
version = '2'
startup_query_interval = '31'
startup_query_count = '2'
robustness = '2'
querier_timeout = '255'
query_mrt = '10'
query_interval = '125'
last_member_qrt = '1'
last_member_query_count = '2'
group_timeout = '260'
report_llg = False
immediate_leave = False
args = dict(version=version, startup_query_interval=startup_query_interval,
startup_query_count=startup_query_count, robustness=robustness,
querier_timeout=querier_timeout, query_mrt=query_mrt,
query_interval=query_interval, last_member_qrt=last_member_qrt,
last_member_query_count=last_member_query_count,
group_timeout=group_timeout, report_llg=report_llg,
immediate_leave=immediate_leave)
default = dict((param, value) for (param, value) in args.items()
if value is not None)
return default
def config_default_igmp_interface(existing, delta, found_both, found_prefix):
commands = []
proposed = get_igmp_interface_defaults()
delta = dict(set(proposed.items()).difference(existing.items()))
if delta:
command = config_igmp_interface(delta, found_both, found_prefix)
if command:
for each in command:
commands.append(each)
return commands
def config_remove_oif(existing, existing_oif_prefix_source):
commands = []
command = None
    if existing.get('oif_routemap'):
        command = 'no ip igmp static-oif route-map {0}'.format(
            existing.get('oif_routemap'))
if existing_oif_prefix_source:
for each in existing_oif_prefix_source:
if each.get('prefix') and each.get('source'):
command = 'no ip igmp static-oif {0} source {1} '.format(
each.get('prefix'), each.get('source')
)
elif each.get('prefix'):
command = 'no ip igmp static-oif {0}'.format(
each.get('prefix')
)
if command:
commands.append(command)
command = None
return commands
def main():
argument_spec = dict(
interface=dict(required=True, type='str'),
version=dict(required=False, type='str'),
startup_query_interval=dict(required=False, type='str'),
startup_query_count=dict(required=False, type='str'),
robustness=dict(required=False, type='str'),
querier_timeout=dict(required=False, type='str'),
query_mrt=dict(required=False, type='str'),
query_interval=dict(required=False, type='str'),
last_member_qrt=dict(required=False, type='str'),
last_member_query_count=dict(required=False, type='str'),
group_timeout=dict(required=False, type='str'),
report_llg=dict(type='bool'),
immediate_leave=dict(type='bool'),
oif_routemap=dict(required=False, type='str'),
oif_prefix=dict(required=False, type='str'),
oif_source=dict(required=False, type='str'),
restart=dict(type='bool', default=False),
state=dict(choices=['present', 'absent', 'default'],
default='present'),
include_defaults=dict(default=True),
config=dict(),
save=dict(type='bool', default=False)
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
state = module.params['state']
interface = module.params['interface']
oif_prefix = module.params['oif_prefix']
oif_source = module.params['oif_source']
oif_routemap = module.params['oif_routemap']
if oif_source:
if not oif_prefix:
module.fail_json(msg='oif_prefix required when setting oif_source')
intf_type = get_interface_type(interface)
if get_interface_mode(interface, intf_type, module) == 'layer2':
module.fail_json(msg='this module only works on Layer 3 interfaces')
if oif_prefix and oif_routemap:
module.fail_json(msg='cannot use oif_prefix AND oif_routemap.'
' select one.')
existing = get_igmp_interface(module, interface)
existing_copy = existing.copy()
end_state = existing_copy
if not existing.get('version'):
module.fail_json(msg='pim needs to be enabled on the interface')
existing_oif_prefix_source = existing.get('oif_prefix_source')
# not json serializable
existing.pop('oif_prefix_source')
if oif_routemap and existing_oif_prefix_source:
module.fail_json(msg='Delete static-oif configurations on this '
'interface if you want to use a routemap')
if oif_prefix and existing.get('oif_routemap'):
module.fail_json(msg='Delete static-oif route-map configuration '
'on this interface if you want to config '
'static entries')
args = [
'version',
'startup_query_interval',
'startup_query_count',
'robustness',
'querier_timeout',
'query_mrt',
'query_interval',
'last_member_qrt',
'last_member_query_count',
'group_timeout',
'report_llg',
'immediate_leave',
'oif_routemap',
'oif_prefix',
'oif_source'
]
changed = False
commands = []
proposed = dict((k, v) for k, v in module.params.items()
if v is not None and k in args)
CANNOT_ABSENT = ['version', 'startup_query_interval',
'startup_query_count', 'robustness', 'querier_timeout',
'query_mrt', 'query_interval', 'last_member_qrt',
'last_member_query_count', 'group_timeout', 'report_llg',
'immediate_leave']
if state == 'absent':
for each in CANNOT_ABSENT:
if each in proposed:
module.fail_json(msg='only params: oif_prefix, oif_source, '
'oif_routemap can be used when '
'state=absent')
# delta check for all params except oif_prefix and oif_source
delta = dict(set(proposed.items()).difference(existing.items()))
# now check to see there is a delta for prefix and source command option
found_both = False
found_prefix = False
if existing_oif_prefix_source:
if oif_prefix and oif_source:
for each in existing_oif_prefix_source:
if (oif_prefix == each.get('prefix') and
oif_source == each.get('source')):
found_both = True
if not found_both:
delta['prefix'] = oif_prefix
delta['source'] = oif_source
elif oif_prefix:
for each in existing_oif_prefix_source:
if oif_prefix == each.get('prefix') and not each.get('source'):
found_prefix = True
if not found_prefix:
delta['prefix'] = oif_prefix
if state == 'present':
if delta:
command = config_igmp_interface(delta, found_both, found_prefix)
if command:
commands.append(command)
elif state == 'default':
command = config_default_igmp_interface(existing, delta,
found_both, found_prefix)
if command:
commands.append(command)
elif state == 'absent':
command = None
if existing.get('oif_routemap') or existing_oif_prefix_source:
command = config_remove_oif(existing, existing_oif_prefix_source)
if command:
commands.append(command)
command = config_default_igmp_interface(existing, delta,
found_both, found_prefix)
if command:
commands.append(command)
if module.params['restart']:
commands.append('restart igmp')
cmds = []
results = {}
if commands:
commands.insert(0, ['interface {0}'.format(interface)])
cmds = flatten_list(commands)
if module.check_mode:
module.exit_json(changed=True, commands=cmds)
else:
load_config(module, cmds)
changed = True
end_state = get_igmp_interface(module, interface)
if 'configure' in cmds:
cmds.pop(0)
results['proposed'] = proposed
results['existing'] = existing_copy
results['updates'] = cmds
results['changed'] = changed
results['warnings'] = warnings
results['end_state'] = end_state
module.exit_json(**results)
if __name__ == '__main__':
main()
avg_line_length: 36.072961 | max_line_length: 79 | alphanum_fraction: 0.608209
content_no_comment:
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: nxos_igmp_interface
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages IGMP interface configuration.
description:
- Manages IGMP interface configuration settings.
author:
- Jason Edelman (@jedelman8)
- Gabriele Gerbino (@GGabriele)
notes:
- When C(state=default), supported params will be reset to a default state.
These include C(version), C(startup_query_interval),
C(startup_query_count), C(robustness), C(querier_timeout), C(query_mrt),
C(query_interval), C(last_member_qrt), C(last_member_query_count),
C(group_timeout), C(report_llg), and C(immediate_leave).
- When C(state=absent), all configs for C(oif_prefix), C(oif_source), and
C(oif_routemap) will be removed.
- PIM must be enabled to use this module.
- This module is for Layer 3 interfaces.
    - A route-map check is not performed (same as the CLI) when configuring a
      route-map with 'static-oif'.
- If restart is set to true with other params set, the restart will happen
last, i.e. after the configuration takes place.
options:
interface:
description:
- The full interface name for IGMP configuration.
e.g. I(Ethernet1/2).
required: true
version:
description:
- IGMP version. It can be 2 or 3.
required: false
default: null
choices: ['2', '3']
startup_query_interval:
description:
- Query interval used when the IGMP process starts up.
The range is from 1 to 18000. The default is 31.
required: false
default: null
startup_query_count:
description:
- Query count used when the IGMP process starts up.
The range is from 1 to 10. The default is 2.
required: false
default: null
robustness:
description:
- Sets the robustness variable. Values can range from 1 to 7.
The default is 2.
required: false
default: null
querier_timeout:
description:
- Sets the querier timeout that the software uses when deciding
to take over as the querier. Values can range from 1 to 65535
seconds. The default is 255 seconds.
required: false
default: null
query_mrt:
description:
- Sets the response time advertised in IGMP queries.
Values can range from 1 to 25 seconds. The default is 10 seconds.
required: false
default: null
query_interval:
description:
- Sets the frequency at which the software sends IGMP host query
messages. Values can range from 1 to 18000 seconds.
        The default is 125 seconds.
required: false
default: null
last_member_qrt:
description:
- Sets the query interval waited after sending membership reports
before the software deletes the group state. Values can range
from 1 to 25 seconds. The default is 1 second.
required: false
default: null
last_member_query_count:
description:
- Sets the number of times that the software sends an IGMP query
in response to a host leave message.
Values can range from 1 to 5. The default is 2.
required: false
default: null
group_timeout:
description:
- Sets the group membership timeout for IGMPv2.
Values can range from 3 to 65,535 seconds.
The default is 260 seconds.
required: false
default: null
report_llg:
description:
- Configures report-link-local-groups.
Enables sending reports for groups in 224.0.0.0/24.
        Reports are always sent for non-link-local groups.
By default, reports are not sent for link local groups.
required: false
choices: ['true', 'false']
default: false
immediate_leave:
description:
- Enables the device to remove the group entry from the multicast
routing table immediately upon receiving a leave message for
the group. Use this command to minimize the leave latency of
IGMPv2 group memberships on a given IGMP interface because the
device does not send group-specific queries.
The default is disabled.
required: false
choices: ['true', 'false']
default: false
oif_routemap:
description:
- Configure a routemap for static outgoing interface (OIF).
required: false
default: null
oif_prefix:
description:
- Configure a prefix for static outgoing interface (OIF).
required: false
default: null
oif_source:
description:
- Configure a source for static outgoing interface (OIF).
required: false
default: null
restart:
description:
- Restart IGMP.
required: false
choices: ['true', 'false']
default: null
state:
description:
- Manages desired state of the resource.
required: false
default: present
    choices: ['present', 'absent', 'default']
'''
EXAMPLES = '''
- nxos_igmp_interface:
interface: ethernet1/32
startup_query_interval: 30
state: present
username: "{{ un }}"
password: "{{ pwd }}"
host: "{{ inventory_hostname }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"asn": "65535", "router_id": "1.1.1.1", "vrf": "test"}
existing:
description: k/v pairs of existing BGP configuration
returned: always
type: dict
sample: {"asn": "65535", "bestpath_always_compare_med": false,
"bestpath_aspath_multipath_relax": false,
"bestpath_compare_neighborid": false,
"bestpath_compare_routerid": false,
"bestpath_cost_community_ignore": false,
"bestpath_med_confed": false,
"bestpath_med_missing_as_worst": false,
"bestpath_med_non_deterministic": false, "cluster_id": "",
"confederation_id": "", "confederation_peers": "",
"graceful_restart": true, "graceful_restart_helper": false,
"graceful_restart_timers_restart": "120",
"graceful_restart_timers_stalepath_time": "300", "local_as": "",
"log_neighbor_changes": false, "maxas_limit": "",
"neighbor_down_fib_accelerate": false, "reconnect_interval": "60",
"router_id": "11.11.11.11", "suppress_fib_pending": false,
"timer_bestpath_limit": "", "timer_bgp_hold": "180",
"timer_bgp_keepalive": "60", "vrf": "test"}
end_state:
description: k/v pairs of BGP configuration after module execution
returned: always
type: dict
sample: {"asn": "65535", "bestpath_always_compare_med": false,
"bestpath_aspath_multipath_relax": false,
"bestpath_compare_neighborid": false,
"bestpath_compare_routerid": false,
"bestpath_cost_community_ignore": false,
"bestpath_med_confed": false,
"bestpath_med_missing_as_worst": false,
"bestpath_med_non_deterministic": false, "cluster_id": "",
"confederation_id": "", "confederation_peers": "",
"graceful_restart": true, "graceful_restart_helper": false,
"graceful_restart_timers_restart": "120",
"graceful_restart_timers_stalepath_time": "300", "local_as": "",
"log_neighbor_changes": false, "maxas_limit": "",
"neighbor_down_fib_accelerate": false, "reconnect_interval": "60",
"router_id": "1.1.1.1", "suppress_fib_pending": false,
"timer_bestpath_limit": "", "timer_bgp_hold": "180",
"timer_bgp_keepalive": "60", "vrf": "test"}
updates:
description: commands sent to the device
returned: always
type: list
sample: ["router bgp 65535", "vrf test", "router-id 1.1.1.1"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
from ansible.module_utils.nxos import get_config, load_config, run_commands
from ansible.module_utils.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
import re
def execute_show_command(command, module, command_type='cli_show'):
if module.params['transport'] == 'cli':
command += ' | json'
cmds = [command]
body = run_commands(module, cmds)
elif module.params['transport'] == 'nxapi':
cmds = [command]
body = run_commands(module, cmds)
return body
def get_interface_mode(interface, intf_type, module):
command = 'show interface {0}'.format(interface)
interface = {}
mode = 'unknown'
if intf_type in ['ethernet', 'portchannel']:
body = execute_show_command(command, module)[0]
interface_table = body['TABLE_interface']['ROW_interface']
mode = str(interface_table.get('eth_mode', 'layer3'))
if mode == 'access' or mode == 'trunk':
mode = 'layer2'
elif intf_type == 'loopback' or intf_type == 'svi':
mode = 'layer3'
return mode
def get_interface_type(interface):
if interface.upper().startswith('ET'):
return 'ethernet'
elif interface.upper().startswith('VL'):
return 'svi'
elif interface.upper().startswith('LO'):
return 'loopback'
elif interface.upper().startswith('MG'):
return 'management'
elif interface.upper().startswith('MA'):
return 'management'
elif interface.upper().startswith('PO'):
return 'portchannel'
else:
return 'unknown'
def apply_key_map(key_map, table):
new_dict = {}
for key, value in table.items():
new_key = key_map.get(key)
if new_key:
value = table.get(key)
if value:
new_dict[new_key] = value
else:
new_dict[new_key] = value
return new_dict
def flatten_list(command_lists):
flat_command_list = []
for command in command_lists:
if isinstance(command, list):
flat_command_list.extend(command)
else:
flat_command_list.append(command)
return flat_command_list
def get_igmp_interface(module, interface):
command = 'show ip igmp interface {0}'.format(interface)
igmp = {}
key_map = {
'IGMPVersion': 'version',
'ConfiguredStartupQueryInterval': 'startup_query_interval',
'StartupQueryCount': 'startup_query_count',
'RobustnessVariable': 'robustness',
'QuerierTimeout': 'querier_timeout',
'ConfiguredMaxResponseTime': 'query_mrt',
'ConfiguredQueryInterval': 'query_interval',
'LastMemberMTR': 'last_member_qrt',
'LastMemberQueryCount': 'last_member_query_count',
'ConfiguredGroupTimeout': 'group_timeout'
}
body = execute_show_command(command, module)[0]
if body:
resource = body['TABLE_vrf']['ROW_vrf']['TABLE_if']['ROW_if']
igmp = apply_key_map(key_map, resource)
report_llg = str(resource['ReportingForLinkLocal'])
if report_llg == 'true':
igmp['report_llg'] = True
elif report_llg == 'false':
igmp['report_llg'] = False
immediate_leave = str(resource['ImmediateLeave'])
if immediate_leave == 'en':
igmp['immediate_leave'] = True
elif immediate_leave == 'dis':
igmp['immediate_leave'] = False
command = 'show run interface {0} | inc oif'.format(interface)
body = execute_show_command(
command, module, command_type='cli_show_ascii')[0]
staticoif = []
if body:
split_body = body.split('\n')
        route_map_regex = (r'.*ip igmp static-oif route-map\s+'
                           r'(?P<route_map>\S+).*')
        prefix_source_regex = (r'.*ip igmp static-oif\s+(?P<prefix>'
                               r'((\d+\.){3}\d+))(\ssource\s'
                               r'(?P<source>\S+))?.*')
for line in split_body:
temp = {}
try:
match_route_map = re.match(route_map_regex, line, re.DOTALL)
route_map = match_route_map.groupdict()['route_map']
except AttributeError:
route_map = ''
try:
match_prefix_source = re.match(
prefix_source_regex, line, re.DOTALL)
prefix_source_group = match_prefix_source.groupdict()
prefix = prefix_source_group['prefix']
source = prefix_source_group['source']
except AttributeError:
prefix = ''
source = ''
if route_map:
temp['route_map'] = route_map
if prefix:
temp['prefix'] = prefix
if source:
temp['source'] = source
if temp:
staticoif.append(temp)
igmp['oif_routemap'] = None
igmp['oif_prefix_source'] = []
if staticoif:
if len(staticoif) == 1 and staticoif[0].get('route_map'):
igmp['oif_routemap'] = staticoif[0]['route_map']
else:
igmp['oif_prefix_source'] = staticoif
return igmp
def config_igmp_interface(delta, found_both, found_prefix):
CMDS = {
'version': 'ip igmp version {0}',
'startup_query_interval': 'ip igmp startup-query-interval {0}',
'startup_query_count': 'ip igmp startup-query-count {0}',
'robustness': 'ip igmp robustness-variable {0}',
'querier_timeout': 'ip igmp querier-timeout {0}',
'query_mrt': 'ip igmp query-max-response-time {0}',
'query_interval': 'ip igmp query-interval {0}',
'last_member_qrt': 'ip igmp last-member-query-response-time {0}',
'last_member_query_count': 'ip igmp last-member-query-count {0}',
'group_timeout': 'ip igmp group-timeout {0}',
'report_llg': 'ip igmp report-link-local-groups',
'immediate_leave': 'ip igmp immediate-leave',
'oif_prefix_source': 'ip igmp static-oif {0} source {1} ',
'oif_routemap': 'ip igmp static-oif route-map {0}',
'oif_prefix': 'ip igmp static-oif {0}',
}
commands = []
command = None
for key, value in delta.items():
if key == 'oif_source' or found_both or found_prefix:
pass
elif key == 'oif_prefix':
if delta.get('oif_source'):
command = CMDS.get('oif_prefix_source').format(
delta.get('oif_prefix'), delta.get('oif_source'))
else:
command = CMDS.get('oif_prefix').format(
delta.get('oif_prefix'))
elif value:
command = CMDS.get(key).format(value)
elif not value:
command = 'no {0}'.format(CMDS.get(key).format(value))
if command:
if command not in commands:
commands.append(command)
command = None
return commands
def get_igmp_interface_defaults():
version = '2'
startup_query_interval = '31'
startup_query_count = '2'
robustness = '2'
querier_timeout = '255'
query_mrt = '10'
query_interval = '125'
last_member_qrt = '1'
last_member_query_count = '2'
group_timeout = '260'
report_llg = False
immediate_leave = False
args = dict(version=version, startup_query_interval=startup_query_interval,
startup_query_count=startup_query_count, robustness=robustness,
querier_timeout=querier_timeout, query_mrt=query_mrt,
query_interval=query_interval, last_member_qrt=last_member_qrt,
last_member_query_count=last_member_query_count,
group_timeout=group_timeout, report_llg=report_llg,
immediate_leave=immediate_leave)
default = dict((param, value) for (param, value) in args.items()
if value is not None)
return default
def config_default_igmp_interface(existing, delta, found_both, found_prefix):
commands = []
proposed = get_igmp_interface_defaults()
delta = dict(set(proposed.items()).difference(existing.items()))
if delta:
command = config_igmp_interface(delta, found_both, found_prefix)
if command:
for each in command:
commands.append(each)
return commands
def config_remove_oif(existing, existing_oif_prefix_source):
commands = []
command = None
    if existing.get('oif_routemap'):
        command = 'no ip igmp static-oif route-map {0}'.format(
            existing.get('oif_routemap'))
if existing_oif_prefix_source:
for each in existing_oif_prefix_source:
if each.get('prefix') and each.get('source'):
command = 'no ip igmp static-oif {0} source {1} '.format(
each.get('prefix'), each.get('source')
)
elif each.get('prefix'):
command = 'no ip igmp static-oif {0}'.format(
each.get('prefix')
)
if command:
commands.append(command)
command = None
return commands
def main():
argument_spec = dict(
interface=dict(required=True, type='str'),
version=dict(required=False, type='str'),
startup_query_interval=dict(required=False, type='str'),
startup_query_count=dict(required=False, type='str'),
robustness=dict(required=False, type='str'),
querier_timeout=dict(required=False, type='str'),
query_mrt=dict(required=False, type='str'),
query_interval=dict(required=False, type='str'),
last_member_qrt=dict(required=False, type='str'),
last_member_query_count=dict(required=False, type='str'),
group_timeout=dict(required=False, type='str'),
report_llg=dict(type='bool'),
immediate_leave=dict(type='bool'),
oif_routemap=dict(required=False, type='str'),
oif_prefix=dict(required=False, type='str'),
oif_source=dict(required=False, type='str'),
restart=dict(type='bool', default=False),
state=dict(choices=['present', 'absent', 'default'],
default='present'),
include_defaults=dict(default=True),
config=dict(),
save=dict(type='bool', default=False)
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
state = module.params['state']
interface = module.params['interface']
oif_prefix = module.params['oif_prefix']
oif_source = module.params['oif_source']
oif_routemap = module.params['oif_routemap']
if oif_source:
if not oif_prefix:
module.fail_json(msg='oif_prefix required when setting oif_source')
intf_type = get_interface_type(interface)
if get_interface_mode(interface, intf_type, module) == 'layer2':
module.fail_json(msg='this module only works on Layer 3 interfaces')
if oif_prefix and oif_routemap:
module.fail_json(msg='cannot use oif_prefix AND oif_routemap.'
' select one.')
existing = get_igmp_interface(module, interface)
existing_copy = existing.copy()
end_state = existing_copy
if not existing.get('version'):
module.fail_json(msg='pim needs to be enabled on the interface')
existing_oif_prefix_source = existing.get('oif_prefix_source')
existing.pop('oif_prefix_source')
if oif_routemap and existing_oif_prefix_source:
module.fail_json(msg='Delete static-oif configurations on this '
'interface if you want to use a routemap')
if oif_prefix and existing.get('oif_routemap'):
module.fail_json(msg='Delete static-oif route-map configuration '
'on this interface if you want to config '
'static entries')
args = [
'version',
'startup_query_interval',
'startup_query_count',
'robustness',
'querier_timeout',
'query_mrt',
'query_interval',
'last_member_qrt',
'last_member_query_count',
'group_timeout',
'report_llg',
'immediate_leave',
'oif_routemap',
'oif_prefix',
'oif_source'
]
changed = False
commands = []
proposed = dict((k, v) for k, v in module.params.items()
if v is not None and k in args)
CANNOT_ABSENT = ['version', 'startup_query_interval',
'startup_query_count', 'robustness', 'querier_timeout',
'query_mrt', 'query_interval', 'last_member_qrt',
'last_member_query_count', 'group_timeout', 'report_llg',
'immediate_leave']
if state == 'absent':
for each in CANNOT_ABSENT:
if each in proposed:
module.fail_json(msg='only params: oif_prefix, oif_source, '
'oif_routemap can be used when '
'state=absent')
delta = dict(set(proposed.items()).difference(existing.items()))
found_both = False
found_prefix = False
if existing_oif_prefix_source:
if oif_prefix and oif_source:
for each in existing_oif_prefix_source:
if (oif_prefix == each.get('prefix') and
oif_source == each.get('source')):
found_both = True
if not found_both:
delta['prefix'] = oif_prefix
delta['source'] = oif_source
elif oif_prefix:
for each in existing_oif_prefix_source:
if oif_prefix == each.get('prefix') and not each.get('source'):
found_prefix = True
if not found_prefix:
delta['prefix'] = oif_prefix
if state == 'present':
if delta:
command = config_igmp_interface(delta, found_both, found_prefix)
if command:
commands.append(command)
elif state == 'default':
command = config_default_igmp_interface(existing, delta,
found_both, found_prefix)
if command:
commands.append(command)
elif state == 'absent':
command = None
if existing.get('oif_routemap') or existing_oif_prefix_source:
command = config_remove_oif(existing, existing_oif_prefix_source)
if command:
commands.append(command)
command = config_default_igmp_interface(existing, delta,
found_both, found_prefix)
if command:
commands.append(command)
if module.params['restart']:
commands.append('restart igmp')
cmds = []
results = {}
if commands:
commands.insert(0, ['interface {0}'.format(interface)])
cmds = flatten_list(commands)
if module.check_mode:
module.exit_json(changed=True, commands=cmds)
else:
load_config(module, cmds)
changed = True
end_state = get_igmp_interface(module, interface)
if 'configure' in cmds:
cmds.pop(0)
results['proposed'] = proposed
results['existing'] = existing_copy
results['updates'] = cmds
results['changed'] = changed
results['warnings'] = warnings
results['end_state'] = end_state
module.exit_json(**results)
if __name__ == '__main__':
main()
is_comment_constant_removed: true | is_sharp_comment_removed: true

hexsha: 1c448dbcb82a77112b5b4abec69896d7f3d2a467 | size: 664 | ext: py | lang: Python
max_stars: path=tests/pymath/test_expanded_form.py, repo=JASTYN/pythonmaster, head=46638ab09d28b65ce5431cd0759fe6df272fb85d, licenses=["Apache-2.0", "MIT"], count=3, min_datetime=2017-05-02T10:28:13.000Z, max_datetime=2019-02-06T09:10:11.000Z
max_issues: path=tests/pymath/test_expanded_form.py, repo=JASTYN/pythonmaster, head=46638ab09d28b65ce5431cd0759fe6df272fb85d, licenses=["Apache-2.0", "MIT"], count=2, min_datetime=2017-06-21T20:39:14.000Z, max_datetime=2020-02-25T10:28:57.000Z
max_forks: path=tests/pymath/test_expanded_form.py, repo=JASTYN/pythonmaster, head=46638ab09d28b65ce5431cd0759fe6df272fb85d, licenses=["Apache-2.0", "MIT"], count=2, min_datetime=2016-07-29T04:35:22.000Z, max_datetime=2017-01-18T17:05:36.000Z
content:
import unittest
from pymath.expanded_form import expanded_form, expanded_form_2
class ExpandedFormTests(unittest.TestCase):
def test_12(self):
self.assertEqual("10 + 2", expanded_form(12))
def test_42(self):
self.assertEqual('40 + 2', expanded_form(42))
def test_70304(self):
self.assertEqual('70000 + 300 + 4', expanded_form(70304))
def test_12_expanded_2(self):
self.assertEqual("10 + 2", expanded_form_2(12))
def test_42_expanded_2(self):
self.assertEqual('40 + 2', expanded_form_2(42))
def test_70304_expanded_2(self):
self.assertEqual('70000 + 300 + 4', expanded_form_2(70304))
avg_line_length: 26.56 | max_line_length: 67 | alphanum_fraction: 0.683735
content_no_comment:
import unittest
from pymath.expanded_form import expanded_form, expanded_form_2
class ExpandedFormTests(unittest.TestCase):
def test_12(self):
self.assertEqual("10 + 2", expanded_form(12))
def test_42(self):
self.assertEqual('40 + 2', expanded_form(42))
def test_70304(self):
self.assertEqual('70000 + 300 + 4', expanded_form(70304))
def test_12_expanded_2(self):
self.assertEqual("10 + 2", expanded_form_2(12))
def test_42_expanded_2(self):
self.assertEqual('40 + 2', expanded_form_2(42))
def test_70304_expanded_2(self):
self.assertEqual('70000 + 300 + 4', expanded_form_2(70304))
is_comment_constant_removed: true | is_sharp_comment_removed: true
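The test file above fully pins down the contract of `expanded_form` (e.g. 70304 becomes "70000 + 300 + 4"). The `pymath` implementation itself is not included in this dump; a minimal sketch that satisfies these tests:

```python
def expanded_form(num: int) -> str:
    # Keep each nonzero digit, scaled by its place value, joined with " + ".
    digits = str(num)
    return " + ".join(
        d + "0" * (len(digits) - i - 1)
        for i, d in enumerate(digits)
        if d != "0"
    )

# Matches the expectations in ExpandedFormTests above.
assert expanded_form(12) == "10 + 2"
assert expanded_form(42) == "40 + 2"
assert expanded_form(70304) == "70000 + 300 + 4"
```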

hexsha: 1c448e3b094d67eacf7c5e088b00bbf10ceaeef8 | size: 479 | ext: py | lang: Python
max_stars: path=scatterplot.py, repo=daithimarkham/pands-project, head=f3d6dcb82fda1db851a3d78571a9d4a48f908eba, licenses=["Apache-2.0"], count=null, min_datetime=null, max_datetime=null
max_issues: path=scatterplot.py, repo=daithimarkham/pands-project, head=f3d6dcb82fda1db851a3d78571a9d4a48f908eba, licenses=["Apache-2.0"], count=null, min_datetime=null, max_datetime=null
max_forks: path=scatterplot.py, repo=daithimarkham/pands-project, head=f3d6dcb82fda1db851a3d78571a9d4a48f908eba, licenses=["Apache-2.0"], count=1, min_datetime=2021-01-24T01:21:57.000Z, max_datetime=2021-01-24T01:21:57.000Z
content:
# David Markham
# Fisher Iris Data set
# Use a Multivariate scatter-plot to distinguish the relationship between the flowers.
# Import Libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sn
from pandas.plotting import scatter_matrix
# Load dataset
data = ("iris.csv")
names = ['sepal-length', 'sepal-width', 'petal-length', 'petal-width', 'species']
dataset = pd.read_csv(data, header = 0)
scatter_matrix(dataset)
plt.show()
avg_line_length: 21.772727 | max_line_length: 86 | alphanum_fraction: 0.753653
content_no_comment:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sn
from pandas.plotting import scatter_matrix
data = ("iris.csv")
names = ['sepal-length', 'sepal-width', 'petal-length', 'petal-width', 'species']
dataset = pd.read_csv(data, header = 0)
scatter_matrix(dataset)
plt.show()
is_comment_constant_removed: true | is_sharp_comment_removed: true
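The row above shows what the `content_no_comment` column does: the `#`-prefixed header comments from `scatterplot.py` are gone, and `is_sharp_comment_removed` is true. A minimal sketch of that kind of sharp-comment stripping, using the standard `tokenize` module (an assumption about how such a column could be produced, not this dataset's actual pipeline; stripping comment-like string constants would be an analogous pass):

```python
import io
import tokenize

def strip_sharp_comments(source: str) -> str:
    # tokenize is string-aware, so a '#' inside a string literal is kept;
    # only real COMMENT tokens are blanked out of their lines.
    lines = source.splitlines(keepends=True)
    for tok in tokenize.generate_tokens(io.StringIO(source).readline):
        if tok.type == tokenize.COMMENT:
            row, col = tok.start  # 1-indexed row, 0-indexed column
            lines[row - 1] = lines[row - 1][:col].rstrip() + "\n"
    # Drop lines that became empty, mirroring the compact no-comment copies above.
    return "".join(line for line in lines if line.strip())
```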

hexsha: 1c448fbc518db24521418d3b53a5690ab14edbbd | size: 583 | ext: py | lang: Python
max_stars: path=docassemble/setup.py, repo=ttamg/docassemble, head=1429fbbddfeb60b9f8fe74c928a479236d6a6113, licenses=["MIT"], count=1, min_datetime=2020-06-01T15:46:11.000Z, max_datetime=2020-06-01T15:46:11.000Z
max_issues: path=docassemble/setup.py, repo=ttamg/docassemble, head=1429fbbddfeb60b9f8fe74c928a479236d6a6113, licenses=["MIT"], count=null, min_datetime=null, max_datetime=null
max_forks: path=docassemble/setup.py, repo=ttamg/docassemble, head=1429fbbddfeb60b9f8fe74c928a479236d6a6113, licenses=["MIT"], count=null, min_datetime=null, max_datetime=null
content:
import os
from setuptools import setup, find_packages
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(name='docassemble',
version='1.1.15',
python_requires='>=3.5',
description=('The namespace package for the docassemble system.'),
long_description=read("README.md"),
long_description_content_type='text/markdown',
author='Jonathan Pyle',
author_email='jhpyle@gmail.com',
license='MIT',
url='https://docassemble.org',
packages=find_packages(),
zip_safe = False,
)
avg_line_length: 29.15 | max_line_length: 72 | alphanum_fraction: 0.665523
content_no_comment:
import os
from setuptools import setup, find_packages
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(name='docassemble',
version='1.1.15',
python_requires='>=3.5',
description=('The namespace package for the docassemble system.'),
long_description=read("README.md"),
long_description_content_type='text/markdown',
author='Jonathan Pyle',
author_email='jhpyle@gmail.com',
license='MIT',
url='https://docassemble.org',
packages=find_packages(),
zip_safe = False,
)
is_comment_constant_removed: true | is_sharp_comment_removed: true

hexsha: 1c4490d3bc0bfdfa9fc91679f196a39d1ed17257 | size: 81,315 | ext: py | lang: Python
max_stars: path=python/pyspark/tests.py, repo=bopopescu/wso2-spark, head=6982456ded39a8fef0ad26600218f8f575aac2a5, licenses=["Apache-2.0", "MIT"], count=11, min_datetime=2016-05-26T12:06:38.000Z, max_datetime=2020-07-06T20:37:07.000Z
max_issues: path=python/pyspark/tests.py, repo=bopopescu/wso2-spark, head=6982456ded39a8fef0ad26600218f8f575aac2a5, licenses=["Apache-2.0", "MIT"], count=null, min_datetime=null, max_datetime=null
max_forks: path=python/pyspark/tests.py, repo=bopopescu/wso2-spark, head=6982456ded39a8fef0ad26600218f8f575aac2a5, licenses=["Apache-2.0", "MIT"], count=9, min_datetime=2016-07-29T01:13:50.000Z, max_datetime=2020-07-23T16:16:17.000Z
content:
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Unit tests for PySpark; additional tests are implemented as doctests in
individual modules.
"""
from array import array
from glob import glob
import os
import re
import shutil
import subprocess
import sys
import tempfile
import time
import zipfile
import random
import threading
import hashlib
from py4j.protocol import Py4JJavaError
if sys.version_info[:2] <= (2, 6):
try:
import unittest2 as unittest
except ImportError:
sys.stderr.write('Please install unittest2 to test with Python 2.6 or earlier')
sys.exit(1)
else:
import unittest
if sys.version_info[0] >= 3:
xrange = range
basestring = str
if sys.version >= "3":
from io import StringIO
else:
from StringIO import StringIO
from pyspark.conf import SparkConf
from pyspark.context import SparkContext
from pyspark.rdd import RDD
from pyspark.files import SparkFiles
from pyspark.serializers import read_int, BatchedSerializer, MarshalSerializer, PickleSerializer, \
CloudPickleSerializer, CompressedSerializer, UTF8Deserializer, NoOpSerializer, \
PairDeserializer, CartesianDeserializer, AutoBatchedSerializer, AutoSerializer, \
FlattenedValuesSerializer
from pyspark.shuffle import Aggregator, InMemoryMerger, ExternalMerger, ExternalSorter
from pyspark import shuffle
from pyspark.profiler import BasicProfiler
_have_scipy = False
_have_numpy = False
try:
import scipy.sparse
_have_scipy = True
except:
# No SciPy, but that's okay, we'll skip those tests
pass
try:
import numpy as np
_have_numpy = True
except:
# No NumPy, but that's okay, we'll skip those tests
pass
SPARK_HOME = os.environ["SPARK_HOME"]
class MergerTests(unittest.TestCase):
def setUp(self):
self.N = 1 << 12
self.l = [i for i in xrange(self.N)]
self.data = list(zip(self.l, self.l))
self.agg = Aggregator(lambda x: [x],
lambda x, y: x.append(y) or x,
lambda x, y: x.extend(y) or x)
def test_in_memory(self):
m = InMemoryMerger(self.agg)
m.mergeValues(self.data)
self.assertEqual(sum(sum(v) for k, v in m.items()),
sum(xrange(self.N)))
m = InMemoryMerger(self.agg)
m.mergeCombiners(map(lambda x_y: (x_y[0], [x_y[1]]), self.data))
self.assertEqual(sum(sum(v) for k, v in m.items()),
sum(xrange(self.N)))
def test_small_dataset(self):
m = ExternalMerger(self.agg, 1000)
m.mergeValues(self.data)
self.assertEqual(m.spills, 0)
self.assertEqual(sum(sum(v) for k, v in m.items()),
sum(xrange(self.N)))
m = ExternalMerger(self.agg, 1000)
m.mergeCombiners(map(lambda x_y1: (x_y1[0], [x_y1[1]]), self.data))
self.assertEqual(m.spills, 0)
self.assertEqual(sum(sum(v) for k, v in m.items()),
sum(xrange(self.N)))
def test_medium_dataset(self):
m = ExternalMerger(self.agg, 20)
m.mergeValues(self.data)
self.assertTrue(m.spills >= 1)
self.assertEqual(sum(sum(v) for k, v in m.items()),
sum(xrange(self.N)))
m = ExternalMerger(self.agg, 10)
m.mergeCombiners(map(lambda x_y2: (x_y2[0], [x_y2[1]]), self.data * 3))
self.assertTrue(m.spills >= 1)
self.assertEqual(sum(sum(v) for k, v in m.items()),
sum(xrange(self.N)) * 3)
def test_huge_dataset(self):
m = ExternalMerger(self.agg, 5, partitions=3)
m.mergeCombiners(map(lambda k_v: (k_v[0], [str(k_v[1])]), self.data * 10))
self.assertTrue(m.spills >= 1)
self.assertEqual(sum(len(v) for k, v in m.items()),
self.N * 10)
m._cleanup()
def test_group_by_key(self):
def gen_data(N, step):
for i in range(1, N + 1, step):
for j in range(i):
yield (i, [j])
def gen_gs(N, step=1):
return shuffle.GroupByKey(gen_data(N, step))
self.assertEqual(1, len(list(gen_gs(1))))
self.assertEqual(2, len(list(gen_gs(2))))
self.assertEqual(100, len(list(gen_gs(100))))
self.assertEqual(list(range(1, 101)), [k for k, _ in gen_gs(100)])
self.assertTrue(all(list(range(k)) == list(vs) for k, vs in gen_gs(100)))
for k, vs in gen_gs(50002, 10000):
self.assertEqual(k, len(vs))
self.assertEqual(list(range(k)), list(vs))
ser = PickleSerializer()
l = ser.loads(ser.dumps(list(gen_gs(50002, 30000))))
for k, vs in l:
self.assertEqual(k, len(vs))
self.assertEqual(list(range(k)), list(vs))
class SorterTests(unittest.TestCase):
def test_in_memory_sort(self):
l = list(range(1024))
random.shuffle(l)
sorter = ExternalSorter(1024)
self.assertEqual(sorted(l), list(sorter.sorted(l)))
self.assertEqual(sorted(l, reverse=True), list(sorter.sorted(l, reverse=True)))
self.assertEqual(sorted(l, key=lambda x: -x), list(sorter.sorted(l, key=lambda x: -x)))
self.assertEqual(sorted(l, key=lambda x: -x, reverse=True),
list(sorter.sorted(l, key=lambda x: -x, reverse=True)))
def test_external_sort(self):
class CustomizedSorter(ExternalSorter):
def _next_limit(self):
return self.memory_limit
l = list(range(1024))
random.shuffle(l)
sorter = CustomizedSorter(1)
self.assertEqual(sorted(l), list(sorter.sorted(l)))
self.assertGreater(shuffle.DiskBytesSpilled, 0)
last = shuffle.DiskBytesSpilled
self.assertEqual(sorted(l, reverse=True), list(sorter.sorted(l, reverse=True)))
self.assertGreater(shuffle.DiskBytesSpilled, last)
last = shuffle.DiskBytesSpilled
self.assertEqual(sorted(l, key=lambda x: -x), list(sorter.sorted(l, key=lambda x: -x)))
self.assertGreater(shuffle.DiskBytesSpilled, last)
last = shuffle.DiskBytesSpilled
self.assertEqual(sorted(l, key=lambda x: -x, reverse=True),
list(sorter.sorted(l, key=lambda x: -x, reverse=True)))
self.assertGreater(shuffle.DiskBytesSpilled, last)
def test_external_sort_in_rdd(self):
conf = SparkConf().set("spark.python.worker.memory", "1m")
sc = SparkContext(conf=conf)
l = list(range(10240))
random.shuffle(l)
rdd = sc.parallelize(l, 4)
self.assertEqual(sorted(l), rdd.sortBy(lambda x: x).collect())
sc.stop()
class SerializationTestCase(unittest.TestCase):
def test_namedtuple(self):
from collections import namedtuple
from pickle import dumps, loads
P = namedtuple("P", "x y")
p1 = P(1, 3)
p2 = loads(dumps(p1, 2))
self.assertEqual(p1, p2)
def test_itemgetter(self):
from operator import itemgetter
ser = CloudPickleSerializer()
d = range(10)
getter = itemgetter(1)
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
getter = itemgetter(0, 3)
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
def test_function_module_name(self):
ser = CloudPickleSerializer()
func = lambda x: x
func2 = ser.loads(ser.dumps(func))
self.assertEqual(func.__module__, func2.__module__)
def test_attrgetter(self):
from operator import attrgetter
ser = CloudPickleSerializer()
class C(object):
def __getattr__(self, item):
return item
d = C()
getter = attrgetter("a")
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
getter = attrgetter("a", "b")
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
d.e = C()
getter = attrgetter("e.a")
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
getter = attrgetter("e.a", "e.b")
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
# Regression test for SPARK-3415
def test_pickling_file_handles(self):
ser = CloudPickleSerializer()
out1 = sys.stderr
out2 = ser.loads(ser.dumps(out1))
self.assertEqual(out1, out2)
def test_func_globals(self):
class Unpicklable(object):
def __reduce__(self):
raise Exception("not picklable")
global exit
exit = Unpicklable()
ser = CloudPickleSerializer()
self.assertRaises(Exception, lambda: ser.dumps(exit))
def foo():
sys.exit(0)
self.assertTrue("exit" in foo.__code__.co_names)
ser.dumps(foo)
def test_compressed_serializer(self):
ser = CompressedSerializer(PickleSerializer())
try:
from StringIO import StringIO
except ImportError:
from io import BytesIO as StringIO
io = StringIO()
ser.dump_stream(["abc", u"123", range(5)], io)
io.seek(0)
self.assertEqual(["abc", u"123", range(5)], list(ser.load_stream(io)))
ser.dump_stream(range(1000), io)
io.seek(0)
self.assertEqual(["abc", u"123", range(5)] + list(range(1000)), list(ser.load_stream(io)))
io.close()
def test_hash_serializer(self):
hash(NoOpSerializer())
hash(UTF8Deserializer())
hash(PickleSerializer())
hash(MarshalSerializer())
hash(AutoSerializer())
hash(BatchedSerializer(PickleSerializer()))
hash(AutoBatchedSerializer(MarshalSerializer()))
hash(PairDeserializer(NoOpSerializer(), UTF8Deserializer()))
hash(CartesianDeserializer(NoOpSerializer(), UTF8Deserializer()))
hash(CompressedSerializer(PickleSerializer()))
hash(FlattenedValuesSerializer(PickleSerializer()))
class QuietTest(object):
def __init__(self, sc):
self.log4j = sc._jvm.org.apache.log4j
def __enter__(self):
self.old_level = self.log4j.LogManager.getRootLogger().getLevel()
self.log4j.LogManager.getRootLogger().setLevel(self.log4j.Level.FATAL)
def __exit__(self, exc_type, exc_val, exc_tb):
self.log4j.LogManager.getRootLogger().setLevel(self.old_level)
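# Note: QuietTest is used below as `with QuietTest(self.sc): ...` around
# assertions that intentionally trigger JVM-side failures; it raises the log4j
# root logger to FATAL on entry and restores the previous level on exit, so
# the expected stack traces do not clutter the test output.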
class PySparkTestCase(unittest.TestCase):
def setUp(self):
self._old_sys_path = list(sys.path)
class_name = self.__class__.__name__
self.sc = SparkContext('local[4]', class_name)
def tearDown(self):
self.sc.stop()
sys.path = self._old_sys_path
class ReusedPySparkTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.sc = SparkContext('local[4]', cls.__name__)
@classmethod
def tearDownClass(cls):
cls.sc.stop()
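# Note: ReusedPySparkTestCase shares a single SparkContext across every test
# in a subclass (created once in setUpClass, stopped in tearDownClass), which
# avoids paying the JVM gateway startup cost per test; PySparkTestCase above
# trades that speed for per-test isolation instead.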
class CheckpointTests(ReusedPySparkTestCase):
def setUp(self):
self.checkpointDir = tempfile.NamedTemporaryFile(delete=False)
os.unlink(self.checkpointDir.name)
self.sc.setCheckpointDir(self.checkpointDir.name)
def tearDown(self):
shutil.rmtree(self.checkpointDir.name)
def test_basic_checkpointing(self):
parCollection = self.sc.parallelize([1, 2, 3, 4])
flatMappedRDD = parCollection.flatMap(lambda x: range(1, x + 1))
self.assertFalse(flatMappedRDD.isCheckpointed())
self.assertTrue(flatMappedRDD.getCheckpointFile() is None)
flatMappedRDD.checkpoint()
result = flatMappedRDD.collect()
time.sleep(1) # 1 second
self.assertTrue(flatMappedRDD.isCheckpointed())
self.assertEqual(flatMappedRDD.collect(), result)
self.assertEqual("file:" + self.checkpointDir.name,
os.path.dirname(os.path.dirname(flatMappedRDD.getCheckpointFile())))
def test_checkpoint_and_restore(self):
parCollection = self.sc.parallelize([1, 2, 3, 4])
flatMappedRDD = parCollection.flatMap(lambda x: [x])
self.assertFalse(flatMappedRDD.isCheckpointed())
self.assertTrue(flatMappedRDD.getCheckpointFile() is None)
flatMappedRDD.checkpoint()
flatMappedRDD.count() # forces a checkpoint to be computed
time.sleep(1) # 1 second
self.assertTrue(flatMappedRDD.getCheckpointFile() is not None)
recovered = self.sc._checkpointFile(flatMappedRDD.getCheckpointFile(),
flatMappedRDD._jrdd_deserializer)
self.assertEqual([1, 2, 3, 4], recovered.collect())
class AddFileTests(PySparkTestCase):
def test_add_py_file(self):
        # To ensure that we're actually testing addPyFile's effects, check that
        # this job fails because `userlibrary` is not yet on the Python path
        # (QuietTest below silences the expected log4j error output):
def func(x):
from userlibrary import UserClass
return UserClass().hello()
with QuietTest(self.sc):
self.assertRaises(Exception, self.sc.parallelize(range(2)).map(func).first)
# Add the file, so the job should now succeed:
path = os.path.join(SPARK_HOME, "python/test_support/userlibrary.py")
self.sc.addPyFile(path)
res = self.sc.parallelize(range(2)).map(func).first()
self.assertEqual("Hello World!", res)
def test_add_file_locally(self):
path = os.path.join(SPARK_HOME, "python/test_support/hello.txt")
self.sc.addFile(path)
download_path = SparkFiles.get("hello.txt")
self.assertNotEqual(path, download_path)
with open(download_path) as test_file:
self.assertEqual("Hello World!\n", test_file.readline())
def test_add_py_file_locally(self):
# To ensure that we're actually testing addPyFile's effects, check that
# this fails due to `userlibrary` not being on the Python path:
def func():
from userlibrary import UserClass
self.assertRaises(ImportError, func)
path = os.path.join(SPARK_HOME, "python/test_support/userlibrary.py")
self.sc.addPyFile(path)
from userlibrary import UserClass
self.assertEqual("Hello World!", UserClass().hello())
def test_add_egg_file_locally(self):
# To ensure that we're actually testing addPyFile's effects, check that
# this fails due to `userlibrary` not being on the Python path:
def func():
from userlib import UserClass
self.assertRaises(ImportError, func)
path = os.path.join(SPARK_HOME, "python/test_support/userlib-0.1.zip")
self.sc.addPyFile(path)
from userlib import UserClass
self.assertEqual("Hello World from inside a package!", UserClass().hello())
def test_overwrite_system_module(self):
self.sc.addPyFile(os.path.join(SPARK_HOME, "python/test_support/SimpleHTTPServer.py"))
import SimpleHTTPServer
self.assertEqual("My Server", SimpleHTTPServer.__name__)
def func(x):
import SimpleHTTPServer
return SimpleHTTPServer.__name__
self.assertEqual(["My Server"], self.sc.parallelize(range(1)).map(func).collect())
class RDDTests(ReusedPySparkTestCase):
def test_range(self):
self.assertEqual(self.sc.range(1, 1).count(), 0)
self.assertEqual(self.sc.range(1, 0, -1).count(), 1)
self.assertEqual(self.sc.range(0, 1 << 40, 1 << 39).count(), 2)
def test_id(self):
rdd = self.sc.parallelize(range(10))
id = rdd.id()
self.assertEqual(id, rdd.id())
rdd2 = rdd.map(str).filter(bool)
id2 = rdd2.id()
self.assertEqual(id + 1, id2)
self.assertEqual(id2, rdd2.id())
def test_empty_rdd(self):
rdd = self.sc.emptyRDD()
self.assertTrue(rdd.isEmpty())
def test_sum(self):
self.assertEqual(0, self.sc.emptyRDD().sum())
self.assertEqual(6, self.sc.parallelize([1, 2, 3]).sum())
def test_save_as_textfile_with_unicode(self):
# Regression test for SPARK-970
x = u"\u00A1Hola, mundo!"
data = self.sc.parallelize([x])
tempFile = tempfile.NamedTemporaryFile(delete=True)
tempFile.close()
data.saveAsTextFile(tempFile.name)
raw_contents = b''.join(open(p, 'rb').read()
for p in glob(tempFile.name + "/part-0000*"))
self.assertEqual(x, raw_contents.strip().decode("utf-8"))
def test_save_as_textfile_with_utf8(self):
x = u"\u00A1Hola, mundo!"
data = self.sc.parallelize([x.encode("utf-8")])
tempFile = tempfile.NamedTemporaryFile(delete=True)
tempFile.close()
data.saveAsTextFile(tempFile.name)
raw_contents = b''.join(open(p, 'rb').read()
for p in glob(tempFile.name + "/part-0000*"))
self.assertEqual(x, raw_contents.strip().decode('utf8'))
def test_transforming_cartesian_result(self):
# Regression test for SPARK-1034
rdd1 = self.sc.parallelize([1, 2])
rdd2 = self.sc.parallelize([3, 4])
cart = rdd1.cartesian(rdd2)
result = cart.map(lambda x_y3: x_y3[0] + x_y3[1]).collect()
def test_transforming_pickle_file(self):
# Regression test for SPARK-2601
data = self.sc.parallelize([u"Hello", u"World!"])
tempFile = tempfile.NamedTemporaryFile(delete=True)
tempFile.close()
data.saveAsPickleFile(tempFile.name)
pickled_file = self.sc.pickleFile(tempFile.name)
pickled_file.map(lambda x: x).collect()
def test_cartesian_on_textfile(self):
# Regression test for
path = os.path.join(SPARK_HOME, "python/test_support/hello.txt")
a = self.sc.textFile(path)
result = a.cartesian(a).collect()
(x, y) = result[0]
self.assertEqual(u"Hello World!", x.strip())
self.assertEqual(u"Hello World!", y.strip())
def test_deleting_input_files(self):
# Regression test for SPARK-1025
tempFile = tempfile.NamedTemporaryFile(delete=False)
tempFile.write(b"Hello World!")
tempFile.close()
data = self.sc.textFile(tempFile.name)
filtered_data = data.filter(lambda x: True)
self.assertEqual(1, filtered_data.count())
os.unlink(tempFile.name)
with QuietTest(self.sc):
self.assertRaises(Exception, lambda: filtered_data.count())
def test_sampling_default_seed(self):
# Test for SPARK-3995 (default seed setting)
data = self.sc.parallelize(xrange(1000), 1)
subset = data.takeSample(False, 10)
self.assertEqual(len(subset), 10)
def test_aggregate_mutable_zero_value(self):
# Test for SPARK-9021; uses aggregate and treeAggregate to build dict
# representing a counter of ints
# NOTE: dict is used instead of collections.Counter for Python 2.6
# compatibility
from collections import defaultdict
# Show that single or multiple partitions work
data1 = self.sc.range(10, numSlices=1)
data2 = self.sc.range(10, numSlices=2)
def seqOp(x, y):
x[y] += 1
return x
def comboOp(x, y):
for key, val in y.items():
x[key] += val
return x
counts1 = data1.aggregate(defaultdict(int), seqOp, comboOp)
counts2 = data2.aggregate(defaultdict(int), seqOp, comboOp)
counts3 = data1.treeAggregate(defaultdict(int), seqOp, comboOp, 2)
counts4 = data2.treeAggregate(defaultdict(int), seqOp, comboOp, 2)
ground_truth = defaultdict(int, dict((i, 1) for i in range(10)))
self.assertEqual(counts1, ground_truth)
self.assertEqual(counts2, ground_truth)
self.assertEqual(counts3, ground_truth)
self.assertEqual(counts4, ground_truth)
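        # The point of SPARK-9021, as these assertions suggest: a mutable zero
        # value (here a defaultdict) must be handed to each partition
        # independently, so updates made while aggregating one partition
        # cannot leak into another; hence the 1- and 2-partition results agree.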
def test_aggregate_by_key_mutable_zero_value(self):
# Test for SPARK-9021; uses aggregateByKey to make a pair RDD that
# contains lists of all values for each key in the original RDD
# list(range(...)) for Python 3.x compatibility (can't use * operator
# on a range object)
# list(zip(...)) for Python 3.x compatibility (want to parallelize a
# collection, not a zip object)
tuples = list(zip(list(range(10))*2, [1]*20))
# Show that single or multiple partitions work
data1 = self.sc.parallelize(tuples, 1)
data2 = self.sc.parallelize(tuples, 2)
def seqOp(x, y):
x.append(y)
return x
def comboOp(x, y):
x.extend(y)
return x
values1 = data1.aggregateByKey([], seqOp, comboOp).collect()
values2 = data2.aggregateByKey([], seqOp, comboOp).collect()
# Sort lists to ensure clean comparison with ground_truth
values1.sort()
values2.sort()
ground_truth = [(i, [1]*2) for i in range(10)]
self.assertEqual(values1, ground_truth)
self.assertEqual(values2, ground_truth)
def test_fold_mutable_zero_value(self):
# Test for SPARK-9021; uses fold to merge an RDD of dict counters into
# a single dict
# NOTE: dict is used instead of collections.Counter for Python 2.6
# compatibility
from collections import defaultdict
counts1 = defaultdict(int, dict((i, 1) for i in range(10)))
counts2 = defaultdict(int, dict((i, 1) for i in range(3, 8)))
counts3 = defaultdict(int, dict((i, 1) for i in range(4, 7)))
counts4 = defaultdict(int, dict((i, 1) for i in range(5, 6)))
all_counts = [counts1, counts2, counts3, counts4]
# Show that single or multiple partitions work
data1 = self.sc.parallelize(all_counts, 1)
data2 = self.sc.parallelize(all_counts, 2)
def comboOp(x, y):
for key, val in y.items():
x[key] += val
return x
fold1 = data1.fold(defaultdict(int), comboOp)
fold2 = data2.fold(defaultdict(int), comboOp)
ground_truth = defaultdict(int)
for counts in all_counts:
for key, val in counts.items():
ground_truth[key] += val
self.assertEqual(fold1, ground_truth)
self.assertEqual(fold2, ground_truth)
def test_fold_by_key_mutable_zero_value(self):
# Test for SPARK-9021; uses foldByKey to make a pair RDD that contains
# lists of all values for each key in the original RDD
tuples = [(i, range(i)) for i in range(10)]*2
# Show that single or multiple partitions work
data1 = self.sc.parallelize(tuples, 1)
data2 = self.sc.parallelize(tuples, 2)
def comboOp(x, y):
x.extend(y)
return x
values1 = data1.foldByKey([], comboOp).collect()
values2 = data2.foldByKey([], comboOp).collect()
# Sort lists to ensure clean comparison with ground_truth
values1.sort()
values2.sort()
# list(range(...)) for Python 3.x compatibility
ground_truth = [(i, list(range(i))*2) for i in range(10)]
self.assertEqual(values1, ground_truth)
self.assertEqual(values2, ground_truth)
def test_aggregate_by_key(self):
data = self.sc.parallelize([(1, 1), (1, 1), (3, 2), (5, 1), (5, 3)], 2)
def seqOp(x, y):
x.add(y)
return x
def combOp(x, y):
x |= y
return x
sets = dict(data.aggregateByKey(set(), seqOp, combOp).collect())
self.assertEqual(3, len(sets))
self.assertEqual(set([1]), sets[1])
self.assertEqual(set([2]), sets[3])
self.assertEqual(set([1, 3]), sets[5])
def test_itemgetter(self):
rdd = self.sc.parallelize([range(10)])
from operator import itemgetter
self.assertEqual([1], rdd.map(itemgetter(1)).collect())
self.assertEqual([(2, 3)], rdd.map(itemgetter(2, 3)).collect())
def test_namedtuple_in_rdd(self):
from collections import namedtuple
Person = namedtuple("Person", "id firstName lastName")
jon = Person(1, "Jon", "Doe")
jane = Person(2, "Jane", "Doe")
theDoes = self.sc.parallelize([jon, jane])
self.assertEqual([jon, jane], theDoes.collect())
def test_large_broadcast(self):
N = 10000
data = [[float(i) for i in range(300)] for i in range(N)]
bdata = self.sc.broadcast(data) # 27MB
m = self.sc.parallelize(range(1), 1).map(lambda x: len(bdata.value)).sum()
self.assertEqual(N, m)
def test_multiple_broadcasts(self):
N = 1 << 21
b1 = self.sc.broadcast(set(range(N))) # multiple blocks in JVM
r = list(range(1 << 15))
random.shuffle(r)
s = str(r).encode()
checksum = hashlib.md5(s).hexdigest()
b2 = self.sc.broadcast(s)
r = list(set(self.sc.parallelize(range(10), 10).map(
lambda x: (len(b1.value), hashlib.md5(b2.value).hexdigest())).collect()))
self.assertEqual(1, len(r))
size, csum = r[0]
self.assertEqual(N, size)
self.assertEqual(checksum, csum)
random.shuffle(r)
s = str(r).encode()
checksum = hashlib.md5(s).hexdigest()
b2 = self.sc.broadcast(s)
r = list(set(self.sc.parallelize(range(10), 10).map(
lambda x: (len(b1.value), hashlib.md5(b2.value).hexdigest())).collect()))
self.assertEqual(1, len(r))
size, csum = r[0]
self.assertEqual(N, size)
self.assertEqual(checksum, csum)
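        # Rationale, as the setup suggests: N = 1 << 21 makes b1 large enough
        # to span multiple broadcast blocks in the JVM, and the md5 checksum
        # verifies that b2's bytes survive the driver -> executor round trip
        # unchanged, including after b2 is rebuilt from reshuffled data.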
def test_large_closure(self):
N = 200000
data = [float(i) for i in xrange(N)]
rdd = self.sc.parallelize(range(1), 1).map(lambda x: len(data))
self.assertEqual(N, rdd.first())
# regression test for SPARK-6886
self.assertEqual(1, rdd.map(lambda x: (x, 1)).groupByKey().count())
def test_zip_with_different_serializers(self):
a = self.sc.parallelize(range(5))
b = self.sc.parallelize(range(100, 105))
self.assertEqual(a.zip(b).collect(), [(0, 100), (1, 101), (2, 102), (3, 103), (4, 104)])
a = a._reserialize(BatchedSerializer(PickleSerializer(), 2))
b = b._reserialize(MarshalSerializer())
self.assertEqual(a.zip(b).collect(), [(0, 100), (1, 101), (2, 102), (3, 103), (4, 104)])
# regression test for SPARK-4841
path = os.path.join(SPARK_HOME, "python/test_support/hello.txt")
t = self.sc.textFile(path)
cnt = t.count()
self.assertEqual(cnt, t.zip(t).count())
rdd = t.map(str)
self.assertEqual(cnt, t.zip(rdd).count())
# regression test for bug in _reserializer()
self.assertEqual(cnt, t.zip(rdd).count())
def test_zip_with_different_object_sizes(self):
        # regression test for SPARK-5973
a = self.sc.parallelize(xrange(10000)).map(lambda i: '*' * i)
b = self.sc.parallelize(xrange(10000, 20000)).map(lambda i: '*' * i)
self.assertEqual(10000, a.zip(b).count())
def test_zip_with_different_number_of_items(self):
a = self.sc.parallelize(range(5), 2)
# different number of partitions
b = self.sc.parallelize(range(100, 106), 3)
self.assertRaises(ValueError, lambda: a.zip(b))
with QuietTest(self.sc):
# different number of batched items in JVM
b = self.sc.parallelize(range(100, 104), 2)
self.assertRaises(Exception, lambda: a.zip(b).count())
# different number of items in one pair
b = self.sc.parallelize(range(100, 106), 2)
self.assertRaises(Exception, lambda: a.zip(b).count())
# same total number of items, but different distributions
a = self.sc.parallelize([2, 3], 2).flatMap(range)
b = self.sc.parallelize([3, 2], 2).flatMap(range)
self.assertEqual(a.count(), b.count())
self.assertRaises(Exception, lambda: a.zip(b).count())
def test_count_approx_distinct(self):
rdd = self.sc.parallelize(xrange(1000))
self.assertTrue(950 < rdd.countApproxDistinct(0.03) < 1050)
self.assertTrue(950 < rdd.map(float).countApproxDistinct(0.03) < 1050)
self.assertTrue(950 < rdd.map(str).countApproxDistinct(0.03) < 1050)
self.assertTrue(950 < rdd.map(lambda x: (x, -x)).countApproxDistinct(0.03) < 1050)
rdd = self.sc.parallelize([i % 20 for i in range(1000)], 7)
self.assertTrue(18 < rdd.countApproxDistinct() < 22)
self.assertTrue(18 < rdd.map(float).countApproxDistinct() < 22)
self.assertTrue(18 < rdd.map(str).countApproxDistinct() < 22)
self.assertTrue(18 < rdd.map(lambda x: (x, -x)).countApproxDistinct() < 22)
self.assertRaises(ValueError, lambda: rdd.countApproxDistinct(0.00000001))
def test_histogram(self):
# empty
rdd = self.sc.parallelize([])
self.assertEqual([0], rdd.histogram([0, 10])[1])
self.assertEqual([0, 0], rdd.histogram([0, 4, 10])[1])
self.assertRaises(ValueError, lambda: rdd.histogram(1))
# out of range
rdd = self.sc.parallelize([10.01, -0.01])
self.assertEqual([0], rdd.histogram([0, 10])[1])
self.assertEqual([0, 0], rdd.histogram((0, 4, 10))[1])
# in range with one bucket
rdd = self.sc.parallelize(range(1, 5))
self.assertEqual([4], rdd.histogram([0, 10])[1])
self.assertEqual([3, 1], rdd.histogram([0, 4, 10])[1])
# in range with one bucket exact match
self.assertEqual([4], rdd.histogram([1, 4])[1])
# out of range with two buckets
rdd = self.sc.parallelize([10.01, -0.01])
self.assertEqual([0, 0], rdd.histogram([0, 5, 10])[1])
# out of range with two uneven buckets
rdd = self.sc.parallelize([10.01, -0.01])
self.assertEqual([0, 0], rdd.histogram([0, 4, 10])[1])
# in range with two buckets
rdd = self.sc.parallelize([1, 2, 3, 5, 6])
self.assertEqual([3, 2], rdd.histogram([0, 5, 10])[1])
        # in range with two buckets and None
rdd = self.sc.parallelize([1, 2, 3, 5, 6, None, float('nan')])
self.assertEqual([3, 2], rdd.histogram([0, 5, 10])[1])
# in range with two uneven buckets
rdd = self.sc.parallelize([1, 2, 3, 5, 6])
self.assertEqual([3, 2], rdd.histogram([0, 5, 11])[1])
# mixed range with two uneven buckets
rdd = self.sc.parallelize([-0.01, 0.0, 1, 2, 3, 5, 6, 11.0, 11.01])
self.assertEqual([4, 3], rdd.histogram([0, 5, 11])[1])
# mixed range with four uneven buckets
rdd = self.sc.parallelize([-0.01, 0.0, 1, 2, 3, 5, 6, 11.01, 12.0, 199.0, 200.0, 200.1])
self.assertEqual([4, 2, 1, 3], rdd.histogram([0.0, 5.0, 11.0, 12.0, 200.0])[1])
# mixed range with uneven buckets and NaN
rdd = self.sc.parallelize([-0.01, 0.0, 1, 2, 3, 5, 6, 11.01, 12.0,
199.0, 200.0, 200.1, None, float('nan')])
self.assertEqual([4, 2, 1, 3], rdd.histogram([0.0, 5.0, 11.0, 12.0, 200.0])[1])
# out of range with infinite buckets
rdd = self.sc.parallelize([10.01, -0.01, float('nan'), float("inf")])
self.assertEqual([1, 2], rdd.histogram([float('-inf'), 0, float('inf')])[1])
# invalid buckets
self.assertRaises(ValueError, lambda: rdd.histogram([]))
self.assertRaises(ValueError, lambda: rdd.histogram([1]))
self.assertRaises(ValueError, lambda: rdd.histogram(0))
self.assertRaises(TypeError, lambda: rdd.histogram({}))
# without buckets
rdd = self.sc.parallelize(range(1, 5))
self.assertEqual(([1, 4], [4]), rdd.histogram(1))
# without buckets single element
rdd = self.sc.parallelize([1])
self.assertEqual(([1, 1], [1]), rdd.histogram(1))
        # without buckets and no range (all elements equal)
rdd = self.sc.parallelize([1] * 4)
self.assertEqual(([1, 1], [4]), rdd.histogram(1))
# without buckets basic two
rdd = self.sc.parallelize(range(1, 5))
self.assertEqual(([1, 2.5, 4], [2, 2]), rdd.histogram(2))
# without buckets with more requested than elements
rdd = self.sc.parallelize([1, 2])
buckets = [1 + 0.2 * i for i in range(6)]
hist = [1, 0, 0, 0, 1]
self.assertEqual((buckets, hist), rdd.histogram(5))
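        # Worked example for the assertion above: min=1, max=2, 5 buckets, so
        # the bucket width is (2 - 1) / 5 = 0.2, giving boundaries
        # [1.0, 1.2, 1.4, 1.6, 1.8, 2.0]; 1 lands in the first bucket and 2
        # (the maximum) is counted in the last, hence [1, 0, 0, 0, 1].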
# invalid RDDs
rdd = self.sc.parallelize([1, float('inf')])
self.assertRaises(ValueError, lambda: rdd.histogram(2))
rdd = self.sc.parallelize([float('nan')])
self.assertRaises(ValueError, lambda: rdd.histogram(2))
# string
rdd = self.sc.parallelize(["ab", "ac", "b", "bd", "ef"], 2)
self.assertEqual([2, 2], rdd.histogram(["a", "b", "c"])[1])
self.assertEqual((["ab", "ef"], [5]), rdd.histogram(1))
self.assertRaises(TypeError, lambda: rdd.histogram(2))
def test_repartitionAndSortWithinPartitions(self):
rdd = self.sc.parallelize([(0, 5), (3, 8), (2, 6), (0, 8), (3, 8), (1, 3)], 2)
repartitioned = rdd.repartitionAndSortWithinPartitions(2, lambda key: key % 2)
partitions = repartitioned.glom().collect()
self.assertEqual(partitions[0], [(0, 5), (0, 8), (2, 6)])
self.assertEqual(partitions[1], [(1, 3), (3, 8), (3, 8)])
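        # The lambda above is the partition function: keys 0 and 2 map to
        # partition 0 and keys 1 and 3 to partition 1 (key % 2), and each
        # partition is then sorted by key, which is exactly what the two
        # glom()'ed partitions assert.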
def test_distinct(self):
rdd = self.sc.parallelize((1, 2, 3)*10, 10)
self.assertEqual(rdd.getNumPartitions(), 10)
self.assertEqual(rdd.distinct().count(), 3)
result = rdd.distinct(5)
self.assertEqual(result.getNumPartitions(), 5)
self.assertEqual(result.count(), 3)
def test_external_group_by_key(self):
self.sc._conf.set("spark.python.worker.memory", "1m")
N = 200001
kv = self.sc.parallelize(xrange(N)).map(lambda x: (x % 3, x))
gkv = kv.groupByKey().cache()
self.assertEqual(3, gkv.count())
filtered = gkv.filter(lambda kv: kv[0] == 1)
self.assertEqual(1, filtered.count())
self.assertEqual([(1, N // 3)], filtered.mapValues(len).collect())
self.assertEqual([(N // 3, N // 3)],
filtered.values().map(lambda x: (len(x), len(list(x)))).collect())
result = filtered.collect()[0][1]
self.assertEqual(N // 3, len(result))
self.assertTrue(isinstance(result.data, shuffle.ExternalListOfList))
def test_sort_on_empty_rdd(self):
self.assertEqual([], self.sc.parallelize(zip([], [])).sortByKey().collect())
def test_sample(self):
rdd = self.sc.parallelize(range(0, 100), 4)
wo = rdd.sample(False, 0.1, 2).collect()
wo_dup = rdd.sample(False, 0.1, 2).collect()
self.assertSetEqual(set(wo), set(wo_dup))
wr = rdd.sample(True, 0.2, 5).collect()
wr_dup = rdd.sample(True, 0.2, 5).collect()
self.assertSetEqual(set(wr), set(wr_dup))
wo_s10 = rdd.sample(False, 0.3, 10).collect()
wo_s20 = rdd.sample(False, 0.3, 20).collect()
self.assertNotEqual(set(wo_s10), set(wo_s20))
wr_s11 = rdd.sample(True, 0.4, 11).collect()
wr_s21 = rdd.sample(True, 0.4, 21).collect()
self.assertNotEqual(set(wr_s11), set(wr_s21))
def test_null_in_rdd(self):
jrdd = self.sc._jvm.PythonUtils.generateRDDWithNull(self.sc._jsc)
rdd = RDD(jrdd, self.sc, UTF8Deserializer())
self.assertEqual([u"a", None, u"b"], rdd.collect())
rdd = RDD(jrdd, self.sc, NoOpSerializer())
self.assertEqual([b"a", None, b"b"], rdd.collect())
def test_multiple_python_java_RDD_conversions(self):
# Regression test for SPARK-5361
data = [
(u'1', {u'director': u'David Lean'}),
(u'2', {u'director': u'Andrew Dominik'})
]
data_rdd = self.sc.parallelize(data)
data_java_rdd = data_rdd._to_java_object_rdd()
data_python_rdd = self.sc._jvm.SerDe.javaToPython(data_java_rdd)
converted_rdd = RDD(data_python_rdd, self.sc)
self.assertEqual(2, converted_rdd.count())
        # a second conversion between Python and Java RDDs used to throw exceptions
data_java_rdd = converted_rdd._to_java_object_rdd()
data_python_rdd = self.sc._jvm.SerDe.javaToPython(data_java_rdd)
converted_rdd = RDD(data_python_rdd, self.sc)
self.assertEqual(2, converted_rdd.count())
def test_narrow_dependency_in_join(self):
rdd = self.sc.parallelize(range(10)).map(lambda x: (x, x))
parted = rdd.partitionBy(2)
self.assertEqual(2, parted.union(parted).getNumPartitions())
self.assertEqual(rdd.getNumPartitions() + 2, parted.union(rdd).getNumPartitions())
self.assertEqual(rdd.getNumPartitions() + 2, rdd.union(parted).getNumPartitions())
tracker = self.sc.statusTracker()
self.sc.setJobGroup("test1", "test", True)
d = sorted(parted.join(parted).collect())
self.assertEqual(10, len(d))
self.assertEqual((0, (0, 0)), d[0])
jobId = tracker.getJobIdsForGroup("test1")[0]
self.assertEqual(2, len(tracker.getJobInfo(jobId).stageIds))
self.sc.setJobGroup("test2", "test", True)
d = sorted(parted.join(rdd).collect())
self.assertEqual(10, len(d))
self.assertEqual((0, (0, 0)), d[0])
jobId = tracker.getJobIdsForGroup("test2")[0]
self.assertEqual(3, len(tracker.getJobInfo(jobId).stageIds))
self.sc.setJobGroup("test3", "test", True)
d = sorted(parted.cogroup(parted).collect())
self.assertEqual(10, len(d))
self.assertEqual([[0], [0]], list(map(list, d[0][1])))
jobId = tracker.getJobIdsForGroup("test3")[0]
self.assertEqual(2, len(tracker.getJobInfo(jobId).stageIds))
self.sc.setJobGroup("test4", "test", True)
d = sorted(parted.cogroup(rdd).collect())
self.assertEqual(10, len(d))
self.assertEqual([[0], [0]], list(map(list, d[0][1])))
jobId = tracker.getJobIdsForGroup("test4")[0]
self.assertEqual(3, len(tracker.getJobInfo(jobId).stageIds))
# Regression test for SPARK-6294
def test_take_on_jrdd(self):
rdd = self.sc.parallelize(xrange(1 << 20)).map(lambda x: str(x))
rdd._jrdd.first()
def test_sortByKey_uses_all_partitions_not_only_first_and_last(self):
# Regression test for SPARK-5969
seq = [(i * 59 % 101, i) for i in range(101)] # unsorted sequence
rdd = self.sc.parallelize(seq)
for ascending in [True, False]:
sort = rdd.sortByKey(ascending=ascending, numPartitions=5)
self.assertEqual(sort.collect(), sorted(seq, reverse=not ascending))
sizes = sort.glom().map(len).collect()
for size in sizes:
self.assertGreater(size, 0)
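        # SPARK-5969: sortByKey derives its range-partition boundaries from
        # samples of every input partition, not just the first and last;
        # requiring all 5 output partitions to be non-empty guards against a
        # regression where the middle partitions ended up empty.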
class ProfilerTests(PySparkTestCase):
def setUp(self):
self._old_sys_path = list(sys.path)
class_name = self.__class__.__name__
conf = SparkConf().set("spark.python.profile", "true")
self.sc = SparkContext('local[4]', class_name, conf=conf)
def test_profiler(self):
self.do_computation()
profilers = self.sc.profiler_collector.profilers
self.assertEqual(1, len(profilers))
id, profiler, _ = profilers[0]
stats = profiler.stats()
self.assertTrue(stats is not None)
width, stat_list = stats.get_print_list([])
func_names = [func_name for fname, n, func_name in stat_list]
self.assertTrue("heavy_foo" in func_names)
old_stdout = sys.stdout
sys.stdout = io = StringIO()
self.sc.show_profiles()
self.assertTrue("heavy_foo" in io.getvalue())
sys.stdout = old_stdout
d = tempfile.gettempdir()
self.sc.dump_profiles(d)
self.assertTrue("rdd_%d.pstats" % id in os.listdir(d))
def test_custom_profiler(self):
class TestCustomProfiler(BasicProfiler):
def show(self, id):
self.result = "Custom formatting"
self.sc.profiler_collector.profiler_cls = TestCustomProfiler
self.do_computation()
profilers = self.sc.profiler_collector.profilers
self.assertEqual(1, len(profilers))
_, profiler, _ = profilers[0]
self.assertTrue(isinstance(profiler, TestCustomProfiler))
self.sc.show_profiles()
self.assertEqual("Custom formatting", profiler.result)
def do_computation(self):
def heavy_foo(x):
for i in range(1 << 18):
x = 1
rdd = self.sc.parallelize(range(100))
rdd.foreach(heavy_foo)
class InputFormatTests(ReusedPySparkTestCase):
@classmethod
def setUpClass(cls):
ReusedPySparkTestCase.setUpClass()
cls.tempdir = tempfile.NamedTemporaryFile(delete=False)
os.unlink(cls.tempdir.name)
cls.sc._jvm.WriteInputFormatTestDataGenerator.generateData(cls.tempdir.name, cls.sc._jsc)
@classmethod
def tearDownClass(cls):
ReusedPySparkTestCase.tearDownClass()
shutil.rmtree(cls.tempdir.name)
    @unittest.skipIf(sys.version >= "3", "serializing arrays of bytes is Python 2 only")
def test_sequencefiles(self):
basepath = self.tempdir.name
ints = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfint/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text").collect())
ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
self.assertEqual(ints, ei)
doubles = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfdouble/",
"org.apache.hadoop.io.DoubleWritable",
"org.apache.hadoop.io.Text").collect())
ed = [(1.0, u'aa'), (1.0, u'aa'), (2.0, u'aa'), (2.0, u'bb'), (2.0, u'bb'), (3.0, u'cc')]
self.assertEqual(doubles, ed)
bytes = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfbytes/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.BytesWritable").collect())
ebs = [(1, bytearray('aa', 'utf-8')),
(1, bytearray('aa', 'utf-8')),
(2, bytearray('aa', 'utf-8')),
(2, bytearray('bb', 'utf-8')),
(2, bytearray('bb', 'utf-8')),
(3, bytearray('cc', 'utf-8'))]
self.assertEqual(bytes, ebs)
text = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sftext/",
"org.apache.hadoop.io.Text",
"org.apache.hadoop.io.Text").collect())
et = [(u'1', u'aa'),
(u'1', u'aa'),
(u'2', u'aa'),
(u'2', u'bb'),
(u'2', u'bb'),
(u'3', u'cc')]
self.assertEqual(text, et)
bools = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfbool/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.BooleanWritable").collect())
eb = [(1, False), (1, True), (2, False), (2, False), (2, True), (3, True)]
self.assertEqual(bools, eb)
nulls = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfnull/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.BooleanWritable").collect())
en = [(1, None), (1, None), (2, None), (2, None), (2, None), (3, None)]
self.assertEqual(nulls, en)
maps = self.sc.sequenceFile(basepath + "/sftestdata/sfmap/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable").collect()
em = [(1, {}),
(1, {3.0: u'bb'}),
(2, {1.0: u'aa'}),
(2, {1.0: u'cc'}),
(3, {2.0: u'dd'})]
for v in maps:
self.assertTrue(v in em)
# arrays get pickled to tuples by default
tuples = sorted(self.sc.sequenceFile(
basepath + "/sftestdata/sfarray/",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable").collect())
et = [(1, ()),
(2, (3.0, 4.0, 5.0)),
(3, (4.0, 5.0, 6.0))]
self.assertEqual(tuples, et)
# with custom converters, primitive arrays can stay as arrays
arrays = sorted(self.sc.sequenceFile(
basepath + "/sftestdata/sfarray/",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable",
valueConverter="org.apache.spark.api.python.WritableToDoubleArrayConverter").collect())
ea = [(1, array('d')),
(2, array('d', [3.0, 4.0, 5.0])),
(3, array('d', [4.0, 5.0, 6.0]))]
self.assertEqual(arrays, ea)
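        # Contrast with the `tuples` read above: without a value converter the
        # JVM-side DoubleArrayWritable is pickled into a plain tuple, while
        # WritableToDoubleArrayConverter preserves it as a Python array('d').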
clazz = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfclass/",
"org.apache.hadoop.io.Text",
"org.apache.spark.api.python.TestWritable").collect())
cname = u'org.apache.spark.api.python.TestWritable'
ec = [(u'1', {u'__class__': cname, u'double': 1.0, u'int': 1, u'str': u'test1'}),
(u'2', {u'__class__': cname, u'double': 2.3, u'int': 2, u'str': u'test2'}),
(u'3', {u'__class__': cname, u'double': 3.1, u'int': 3, u'str': u'test3'}),
(u'4', {u'__class__': cname, u'double': 4.2, u'int': 4, u'str': u'test4'}),
(u'5', {u'__class__': cname, u'double': 5.5, u'int': 5, u'str': u'test56'})]
self.assertEqual(clazz, ec)
unbatched_clazz = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfclass/",
"org.apache.hadoop.io.Text",
"org.apache.spark.api.python.TestWritable",
).collect())
self.assertEqual(unbatched_clazz, ec)
def test_oldhadoop(self):
basepath = self.tempdir.name
ints = sorted(self.sc.hadoopFile(basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapred.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text").collect())
ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
self.assertEqual(ints, ei)
hellopath = os.path.join(SPARK_HOME, "python/test_support/hello.txt")
oldconf = {"mapred.input.dir": hellopath}
hello = self.sc.hadoopRDD("org.apache.hadoop.mapred.TextInputFormat",
"org.apache.hadoop.io.LongWritable",
"org.apache.hadoop.io.Text",
conf=oldconf).collect()
result = [(0, u'Hello World!')]
self.assertEqual(hello, result)
def test_newhadoop(self):
basepath = self.tempdir.name
ints = sorted(self.sc.newAPIHadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text").collect())
ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
self.assertEqual(ints, ei)
hellopath = os.path.join(SPARK_HOME, "python/test_support/hello.txt")
newconf = {"mapred.input.dir": hellopath}
hello = self.sc.newAPIHadoopRDD("org.apache.hadoop.mapreduce.lib.input.TextInputFormat",
"org.apache.hadoop.io.LongWritable",
"org.apache.hadoop.io.Text",
conf=newconf).collect()
result = [(0, u'Hello World!')]
self.assertEqual(hello, result)
def test_newolderror(self):
basepath = self.tempdir.name
self.assertRaises(Exception, lambda: self.sc.hadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text"))
self.assertRaises(Exception, lambda: self.sc.newAPIHadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapred.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text"))
def test_bad_inputs(self):
basepath = self.tempdir.name
self.assertRaises(Exception, lambda: self.sc.sequenceFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.io.NotValidWritable",
"org.apache.hadoop.io.Text"))
self.assertRaises(Exception, lambda: self.sc.hadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapred.NotValidInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text"))
self.assertRaises(Exception, lambda: self.sc.newAPIHadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapreduce.lib.input.NotValidInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text"))
def test_converters(self):
# use of custom converters
basepath = self.tempdir.name
maps = sorted(self.sc.sequenceFile(
basepath + "/sftestdata/sfmap/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable",
keyConverter="org.apache.spark.api.python.TestInputKeyConverter",
valueConverter="org.apache.spark.api.python.TestInputValueConverter").collect())
em = [(u'\x01', []),
(u'\x01', [3.0]),
(u'\x02', [1.0]),
(u'\x02', [1.0]),
(u'\x03', [2.0])]
self.assertEqual(maps, em)
def test_binary_files(self):
path = os.path.join(self.tempdir.name, "binaryfiles")
os.mkdir(path)
data = b"short binary data"
with open(os.path.join(path, "part-0000"), 'wb') as f:
f.write(data)
[(p, d)] = self.sc.binaryFiles(path).collect()
self.assertTrue(p.endswith("part-0000"))
self.assertEqual(d, data)
def test_binary_records(self):
path = os.path.join(self.tempdir.name, "binaryrecords")
os.mkdir(path)
with open(os.path.join(path, "part-0000"), 'w') as f:
for i in range(100):
f.write('%04d' % i)
result = self.sc.binaryRecords(path, 4).map(int).collect()
self.assertEqual(list(range(100)), result)
class OutputFormatTests(ReusedPySparkTestCase):
def setUp(self):
self.tempdir = tempfile.NamedTemporaryFile(delete=False)
os.unlink(self.tempdir.name)
def tearDown(self):
shutil.rmtree(self.tempdir.name, ignore_errors=True)
    @unittest.skipIf(sys.version >= "3", "serializing arrays of bytes is Python 2 only")
def test_sequencefiles(self):
basepath = self.tempdir.name
ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
self.sc.parallelize(ei).saveAsSequenceFile(basepath + "/sfint/")
ints = sorted(self.sc.sequenceFile(basepath + "/sfint/").collect())
self.assertEqual(ints, ei)
ed = [(1.0, u'aa'), (1.0, u'aa'), (2.0, u'aa'), (2.0, u'bb'), (2.0, u'bb'), (3.0, u'cc')]
self.sc.parallelize(ed).saveAsSequenceFile(basepath + "/sfdouble/")
doubles = sorted(self.sc.sequenceFile(basepath + "/sfdouble/").collect())
self.assertEqual(doubles, ed)
ebs = [(1, bytearray(b'\x00\x07spam\x08')), (2, bytearray(b'\x00\x07spam\x08'))]
self.sc.parallelize(ebs).saveAsSequenceFile(basepath + "/sfbytes/")
bytes = sorted(self.sc.sequenceFile(basepath + "/sfbytes/").collect())
self.assertEqual(bytes, ebs)
et = [(u'1', u'aa'),
(u'2', u'bb'),
(u'3', u'cc')]
self.sc.parallelize(et).saveAsSequenceFile(basepath + "/sftext/")
text = sorted(self.sc.sequenceFile(basepath + "/sftext/").collect())
self.assertEqual(text, et)
eb = [(1, False), (1, True), (2, False), (2, False), (2, True), (3, True)]
self.sc.parallelize(eb).saveAsSequenceFile(basepath + "/sfbool/")
bools = sorted(self.sc.sequenceFile(basepath + "/sfbool/").collect())
self.assertEqual(bools, eb)
en = [(1, None), (1, None), (2, None), (2, None), (2, None), (3, None)]
self.sc.parallelize(en).saveAsSequenceFile(basepath + "/sfnull/")
nulls = sorted(self.sc.sequenceFile(basepath + "/sfnull/").collect())
self.assertEqual(nulls, en)
em = [(1, {}),
(1, {3.0: u'bb'}),
(2, {1.0: u'aa'}),
(2, {1.0: u'cc'}),
(3, {2.0: u'dd'})]
self.sc.parallelize(em).saveAsSequenceFile(basepath + "/sfmap/")
maps = self.sc.sequenceFile(basepath + "/sfmap/").collect()
for v in maps:
            self.assertTrue(v in em)
def test_oldhadoop(self):
basepath = self.tempdir.name
dict_data = [(1, {}),
(1, {"row1": 1.0}),
(2, {"row2": 2.0})]
self.sc.parallelize(dict_data).saveAsHadoopFile(
basepath + "/oldhadoop/",
"org.apache.hadoop.mapred.SequenceFileOutputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable")
result = self.sc.hadoopFile(
basepath + "/oldhadoop/",
"org.apache.hadoop.mapred.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable").collect()
for v in result:
            self.assertTrue(v in dict_data)
conf = {
"mapred.output.format.class": "org.apache.hadoop.mapred.SequenceFileOutputFormat",
"mapred.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapred.output.value.class": "org.apache.hadoop.io.MapWritable",
"mapred.output.dir": basepath + "/olddataset/"
}
self.sc.parallelize(dict_data).saveAsHadoopDataset(conf)
input_conf = {"mapred.input.dir": basepath + "/olddataset/"}
result = self.sc.hadoopRDD(
"org.apache.hadoop.mapred.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable",
conf=input_conf).collect()
for v in result:
            self.assertTrue(v in dict_data)
def test_newhadoop(self):
basepath = self.tempdir.name
data = [(1, ""),
(1, "a"),
(2, "bcdf")]
self.sc.parallelize(data).saveAsNewAPIHadoopFile(
basepath + "/newhadoop/",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text")
result = sorted(self.sc.newAPIHadoopFile(
basepath + "/newhadoop/",
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text").collect())
self.assertEqual(result, data)
conf = {
"mapreduce.outputformat.class":
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"mapred.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapred.output.value.class": "org.apache.hadoop.io.Text",
"mapred.output.dir": basepath + "/newdataset/"
}
self.sc.parallelize(data).saveAsNewAPIHadoopDataset(conf)
input_conf = {"mapred.input.dir": basepath + "/newdataset/"}
new_dataset = sorted(self.sc.newAPIHadoopRDD(
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text",
conf=input_conf).collect())
self.assertEqual(new_dataset, data)
    @unittest.skipIf(sys.version >= "3", "serializing arrays is Python 2 only")
def test_newhadoop_with_array(self):
basepath = self.tempdir.name
# use custom ArrayWritable types and converters to handle arrays
array_data = [(1, array('d')),
(1, array('d', [1.0, 2.0, 3.0])),
(2, array('d', [3.0, 4.0, 5.0]))]
self.sc.parallelize(array_data).saveAsNewAPIHadoopFile(
basepath + "/newhadoop/",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable",
valueConverter="org.apache.spark.api.python.DoubleArrayToWritableConverter")
result = sorted(self.sc.newAPIHadoopFile(
basepath + "/newhadoop/",
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable",
valueConverter="org.apache.spark.api.python.WritableToDoubleArrayConverter").collect())
self.assertEqual(result, array_data)
conf = {
"mapreduce.outputformat.class":
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"mapred.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapred.output.value.class": "org.apache.spark.api.python.DoubleArrayWritable",
"mapred.output.dir": basepath + "/newdataset/"
}
self.sc.parallelize(array_data).saveAsNewAPIHadoopDataset(
conf,
valueConverter="org.apache.spark.api.python.DoubleArrayToWritableConverter")
input_conf = {"mapred.input.dir": basepath + "/newdataset/"}
new_dataset = sorted(self.sc.newAPIHadoopRDD(
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable",
valueConverter="org.apache.spark.api.python.WritableToDoubleArrayConverter",
conf=input_conf).collect())
self.assertEqual(new_dataset, array_data)
def test_newolderror(self):
basepath = self.tempdir.name
rdd = self.sc.parallelize(range(1, 4)).map(lambda x: (x, "a" * x))
self.assertRaises(Exception, lambda: rdd.saveAsHadoopFile(
basepath + "/newolderror/saveAsHadoopFile/",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat"))
self.assertRaises(Exception, lambda: rdd.saveAsNewAPIHadoopFile(
basepath + "/newolderror/saveAsNewAPIHadoopFile/",
"org.apache.hadoop.mapred.SequenceFileOutputFormat"))
def test_bad_inputs(self):
basepath = self.tempdir.name
rdd = self.sc.parallelize(range(1, 4)).map(lambda x: (x, "a" * x))
self.assertRaises(Exception, lambda: rdd.saveAsHadoopFile(
basepath + "/badinputs/saveAsHadoopFile/",
"org.apache.hadoop.mapred.NotValidOutputFormat"))
self.assertRaises(Exception, lambda: rdd.saveAsNewAPIHadoopFile(
basepath + "/badinputs/saveAsNewAPIHadoopFile/",
"org.apache.hadoop.mapreduce.lib.output.NotValidOutputFormat"))
def test_converters(self):
# use of custom converters
basepath = self.tempdir.name
data = [(1, {3.0: u'bb'}),
(2, {1.0: u'aa'}),
(3, {2.0: u'dd'})]
self.sc.parallelize(data).saveAsNewAPIHadoopFile(
basepath + "/converters/",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
keyConverter="org.apache.spark.api.python.TestOutputKeyConverter",
valueConverter="org.apache.spark.api.python.TestOutputValueConverter")
converted = sorted(self.sc.sequenceFile(basepath + "/converters/").collect())
expected = [(u'1', 3.0),
(u'2', 1.0),
(u'3', 2.0)]
self.assertEqual(converted, expected)
def test_reserialization(self):
basepath = self.tempdir.name
x = range(1, 5)
y = range(1001, 1005)
data = list(zip(x, y))
rdd = self.sc.parallelize(x).zip(self.sc.parallelize(y))
rdd.saveAsSequenceFile(basepath + "/reserialize/sequence")
result1 = sorted(self.sc.sequenceFile(basepath + "/reserialize/sequence").collect())
self.assertEqual(result1, data)
rdd.saveAsHadoopFile(
basepath + "/reserialize/hadoop",
"org.apache.hadoop.mapred.SequenceFileOutputFormat")
result2 = sorted(self.sc.sequenceFile(basepath + "/reserialize/hadoop").collect())
self.assertEqual(result2, data)
rdd.saveAsNewAPIHadoopFile(
basepath + "/reserialize/newhadoop",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat")
result3 = sorted(self.sc.sequenceFile(basepath + "/reserialize/newhadoop").collect())
self.assertEqual(result3, data)
conf4 = {
"mapred.output.format.class": "org.apache.hadoop.mapred.SequenceFileOutputFormat",
"mapred.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapred.output.value.class": "org.apache.hadoop.io.IntWritable",
"mapred.output.dir": basepath + "/reserialize/dataset"}
rdd.saveAsHadoopDataset(conf4)
result4 = sorted(self.sc.sequenceFile(basepath + "/reserialize/dataset").collect())
self.assertEqual(result4, data)
conf5 = {"mapreduce.outputformat.class":
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"mapred.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapred.output.value.class": "org.apache.hadoop.io.IntWritable",
"mapred.output.dir": basepath + "/reserialize/newdataset"}
rdd.saveAsNewAPIHadoopDataset(conf5)
result5 = sorted(self.sc.sequenceFile(basepath + "/reserialize/newdataset").collect())
self.assertEqual(result5, data)
def test_malformed_RDD(self):
basepath = self.tempdir.name
# non-batch-serialized RDD[[(K, V)]] should be rejected
data = [[(1, "a")], [(2, "aa")], [(3, "aaa")]]
rdd = self.sc.parallelize(data, len(data))
self.assertRaises(Exception, lambda: rdd.saveAsSequenceFile(
basepath + "/malformed/sequence"))
class DaemonTests(unittest.TestCase):
def connect(self, port):
from socket import socket, AF_INET, SOCK_STREAM
sock = socket(AF_INET, SOCK_STREAM)
sock.connect(('127.0.0.1', port))
        # send a split index of -1 to shut down the worker
sock.send(b"\xFF\xFF\xFF\xFF")
sock.close()
return True
def do_termination_test(self, terminator):
from subprocess import Popen, PIPE
from errno import ECONNREFUSED
# start daemon
daemon_path = os.path.join(os.path.dirname(__file__), "daemon.py")
daemon = Popen([sys.executable, daemon_path], stdin=PIPE, stdout=PIPE)
# read the port number
port = read_int(daemon.stdout)
# daemon should accept connections
self.assertTrue(self.connect(port))
# request shutdown
terminator(daemon)
time.sleep(1)
# daemon should no longer accept connections
try:
self.connect(port)
except EnvironmentError as exception:
self.assertEqual(exception.errno, ECONNREFUSED)
else:
self.fail("Expected EnvironmentError to be raised")
def test_termination_stdin(self):
"""Ensure that daemon and workers terminate when stdin is closed."""
self.do_termination_test(lambda daemon: daemon.stdin.close())
def test_termination_sigterm(self):
"""Ensure that daemon and workers terminate on SIGTERM."""
from signal import SIGTERM
self.do_termination_test(lambda daemon: os.kill(daemon.pid, SIGTERM))
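# Note on the daemon wire protocol, as far as these tests exercise it: the
# daemon announces its listening port as a binary int on stdout (hence
# read_int above), and a worker connection that receives the split index -1
# (b"\xFF\xFF\xFF\xFF") shuts the worker down.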
class WorkerTests(ReusedPySparkTestCase):
def test_cancel_task(self):
temp = tempfile.NamedTemporaryFile(delete=True)
temp.close()
path = temp.name
def sleep(x):
import os
import time
with open(path, 'w') as f:
f.write("%d %d" % (os.getppid(), os.getpid()))
time.sleep(100)
# start job in background thread
def run():
try:
self.sc.parallelize(range(1), 1).foreach(sleep)
except Exception:
pass
import threading
t = threading.Thread(target=run)
t.daemon = True
t.start()
daemon_pid, worker_pid = 0, 0
while True:
if os.path.exists(path):
with open(path) as f:
data = f.read().split(' ')
daemon_pid, worker_pid = map(int, data)
break
time.sleep(0.1)
# cancel jobs
self.sc.cancelAllJobs()
t.join()
for i in range(50):
try:
os.kill(worker_pid, 0)
time.sleep(0.1)
except OSError:
break # worker was killed
else:
self.fail("worker has not been killed after 5 seconds")
try:
os.kill(daemon_pid, 0)
except OSError:
self.fail("daemon had been killed")
# run a normal job
rdd = self.sc.parallelize(xrange(100), 1)
self.assertEqual(100, rdd.map(str).count())
def test_after_exception(self):
def raise_exception(_):
raise Exception()
rdd = self.sc.parallelize(xrange(100), 1)
with QuietTest(self.sc):
self.assertRaises(Exception, lambda: rdd.foreach(raise_exception))
self.assertEqual(100, rdd.map(str).count())
def test_after_jvm_exception(self):
tempFile = tempfile.NamedTemporaryFile(delete=False)
tempFile.write(b"Hello World!")
tempFile.close()
data = self.sc.textFile(tempFile.name, 1)
filtered_data = data.filter(lambda x: True)
self.assertEqual(1, filtered_data.count())
os.unlink(tempFile.name)
with QuietTest(self.sc):
self.assertRaises(Exception, lambda: filtered_data.count())
rdd = self.sc.parallelize(xrange(100), 1)
self.assertEqual(100, rdd.map(str).count())
def test_accumulator_when_reuse_worker(self):
from pyspark.accumulators import INT_ACCUMULATOR_PARAM
acc1 = self.sc.accumulator(0, INT_ACCUMULATOR_PARAM)
self.sc.parallelize(xrange(100), 20).foreach(lambda x: acc1.add(x))
self.assertEqual(sum(range(100)), acc1.value)
acc2 = self.sc.accumulator(0, INT_ACCUMULATOR_PARAM)
self.sc.parallelize(xrange(100), 20).foreach(lambda x: acc2.add(x))
self.assertEqual(sum(range(100)), acc2.value)
self.assertEqual(sum(range(100)), acc1.value)
def test_reuse_worker_after_take(self):
rdd = self.sc.parallelize(xrange(100000), 1)
self.assertEqual(0, rdd.first())
def count():
try:
rdd.count()
except Exception:
pass
t = threading.Thread(target=count)
t.daemon = True
t.start()
t.join(5)
        self.assertFalse(t.is_alive())
self.assertEqual(100000, rdd.count())
def test_with_different_versions_of_python(self):
rdd = self.sc.parallelize(range(10))
rdd.count()
version = self.sc.pythonVer
self.sc.pythonVer = "2.0"
try:
with QuietTest(self.sc):
self.assertRaises(Py4JJavaError, lambda: rdd.count())
finally:
self.sc.pythonVer = version
class SparkSubmitTests(unittest.TestCase):
def setUp(self):
self.programDir = tempfile.mkdtemp()
self.sparkSubmit = os.path.join(os.environ.get("SPARK_HOME"), "bin", "spark-submit")
def tearDown(self):
shutil.rmtree(self.programDir)
def createTempFile(self, name, content, dir=None):
"""
Create a temp file with the given name and content and return its path.
Strips leading spaces from content up to the first '|' in each line.
"""
pattern = re.compile(r'^ *\|', re.MULTILINE)
content = re.sub(pattern, '', content.strip())
if dir is None:
path = os.path.join(self.programDir, name)
else:
os.makedirs(os.path.join(self.programDir, dir))
path = os.path.join(self.programDir, dir, name)
with open(path, "w") as f:
f.write(content)
return path
def createFileInZip(self, name, content, ext=".zip", dir=None, zip_name=None):
"""
Create a zip archive containing a file with the given content and return its path.
Strips leading spaces from content up to the first '|' in each line.
"""
pattern = re.compile(r'^ *\|', re.MULTILINE)
content = re.sub(pattern, '', content.strip())
if dir is None:
path = os.path.join(self.programDir, name + ext)
else:
path = os.path.join(self.programDir, dir, zip_name + ext)
zip = zipfile.ZipFile(path, 'w')
zip.writestr(name, content)
zip.close()
return path
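    # Hypothetical usage sketch for the two helpers above: the `|` margin lets
    # heredoc-style content stay indented with the test code, e.g.
    #
    #     self.createTempFile("demo.py", """
    #         |print("hello")
    #         """)
    #
    # writes a file containing exactly 'print("hello")'.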
def create_spark_package(self, artifact_name):
group_id, artifact_id, version = artifact_name.split(":")
self.createTempFile("%s-%s.pom" % (artifact_id, version), ("""
|<?xml version="1.0" encoding="UTF-8"?>
|<project xmlns="http://maven.apache.org/POM/4.0.0"
| xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
| xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
| http://maven.apache.org/xsd/maven-4.0.0.xsd">
| <modelVersion>4.0.0</modelVersion>
| <groupId>%s</groupId>
| <artifactId>%s</artifactId>
| <version>%s</version>
|</project>
""" % (group_id, artifact_id, version)).lstrip(),
os.path.join(group_id, artifact_id, version))
self.createFileInZip("%s.py" % artifact_id, """
|def myfunc(x):
| return x + 1
""", ".jar", os.path.join(group_id, artifact_id, version),
"%s-%s" % (artifact_id, version))
def test_single_script(self):
"""Submit and test a single script file"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|
|sc = SparkContext()
|print(sc.parallelize([1, 2, 3]).map(lambda x: x * 2).collect())
""")
proc = subprocess.Popen([self.sparkSubmit, script], stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 4, 6]", out.decode('utf-8'))
def test_script_with_local_functions(self):
"""Submit and test a single script file calling a global function"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|
|def foo(x):
| return x * 3
|
|sc = SparkContext()
|print(sc.parallelize([1, 2, 3]).map(foo).collect())
""")
proc = subprocess.Popen([self.sparkSubmit, script], stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[3, 6, 9]", out.decode('utf-8'))
def test_module_dependency(self):
"""Submit and test a script with a dependency on another module"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|from mylib import myfunc
|
|sc = SparkContext()
|print(sc.parallelize([1, 2, 3]).map(myfunc).collect())
""")
zip = self.createFileInZip("mylib.py", """
|def myfunc(x):
| return x + 1
""")
proc = subprocess.Popen([self.sparkSubmit, "--py-files", zip, script],
stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 3, 4]", out.decode('utf-8'))
def test_module_dependency_on_cluster(self):
"""Submit and test a script with a dependency on another module on a cluster"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|from mylib import myfunc
|
|sc = SparkContext()
|print(sc.parallelize([1, 2, 3]).map(myfunc).collect())
""")
zip = self.createFileInZip("mylib.py", """
|def myfunc(x):
| return x + 1
""")
proc = subprocess.Popen([self.sparkSubmit, "--py-files", zip, "--master",
"local-cluster[1,1,512]", script],
stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 3, 4]", out.decode('utf-8'))
def test_package_dependency(self):
"""Submit and test a script with a dependency on a Spark Package"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|from mylib import myfunc
|
|sc = SparkContext()
|print(sc.parallelize([1, 2, 3]).map(myfunc).collect())
""")
self.create_spark_package("a:mylib:0.1")
from array import array
from glob import glob
import os
import re
import shutil
import subprocess
import sys
import tempfile
import time
import zipfile
import random
import threading
import hashlib
from py4j.protocol import Py4JJavaError
if sys.version_info[:2] <= (2, 6):
try:
import unittest2 as unittest
except ImportError:
sys.stderr.write('Please install unittest2 to test with Python 2.6 or earlier')
sys.exit(1)
else:
import unittest
if sys.version_info[0] >= 3:
xrange = range
basestring = str
if sys.version >= "3":
from io import StringIO
else:
from StringIO import StringIO
from pyspark.conf import SparkConf
from pyspark.context import SparkContext
from pyspark.rdd import RDD
from pyspark.files import SparkFiles
from pyspark.serializers import read_int, BatchedSerializer, MarshalSerializer, PickleSerializer, \
CloudPickleSerializer, CompressedSerializer, UTF8Deserializer, NoOpSerializer, \
PairDeserializer, CartesianDeserializer, AutoBatchedSerializer, AutoSerializer, \
FlattenedValuesSerializer
from pyspark.shuffle import Aggregator, InMemoryMerger, ExternalMerger, ExternalSorter
from pyspark import shuffle
from pyspark.profiler import BasicProfiler
_have_scipy = False
_have_numpy = False
try:
import scipy.sparse
_have_scipy = True
except:
pass
try:
import numpy as np
_have_numpy = True
except:
pass
SPARK_HOME = os.environ["SPARK_HOME"]
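# Tests for the map-side aggregation mergers in pyspark.shuffle, covering both
# the in-memory path and spilling to disk.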
class MergerTests(unittest.TestCase):
def setUp(self):
self.N = 1 << 12
self.l = [i for i in xrange(self.N)]
self.data = list(zip(self.l, self.l))
self.agg = Aggregator(lambda x: [x],
lambda x, y: x.append(y) or x,
lambda x, y: x.extend(y) or x)
def test_in_memory(self):
m = InMemoryMerger(self.agg)
m.mergeValues(self.data)
self.assertEqual(sum(sum(v) for k, v in m.items()),
sum(xrange(self.N)))
m = InMemoryMerger(self.agg)
m.mergeCombiners(map(lambda x_y: (x_y[0], [x_y[1]]), self.data))
self.assertEqual(sum(sum(v) for k, v in m.items()),
sum(xrange(self.N)))
def test_small_dataset(self):
m = ExternalMerger(self.agg, 1000)
m.mergeValues(self.data)
self.assertEqual(m.spills, 0)
self.assertEqual(sum(sum(v) for k, v in m.items()),
sum(xrange(self.N)))
m = ExternalMerger(self.agg, 1000)
m.mergeCombiners(map(lambda x_y1: (x_y1[0], [x_y1[1]]), self.data))
self.assertEqual(m.spills, 0)
self.assertEqual(sum(sum(v) for k, v in m.items()),
sum(xrange(self.N)))
def test_medium_dataset(self):
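        # a small memory limit should force at least one spill to disk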
m = ExternalMerger(self.agg, 20)
m.mergeValues(self.data)
self.assertTrue(m.spills >= 1)
self.assertEqual(sum(sum(v) for k, v in m.items()),
sum(xrange(self.N)))
m = ExternalMerger(self.agg, 10)
m.mergeCombiners(map(lambda x_y2: (x_y2[0], [x_y2[1]]), self.data * 3))
self.assertTrue(m.spills >= 1)
self.assertEqual(sum(sum(v) for k, v in m.items()),
sum(xrange(self.N)) * 3)
def test_huge_dataset(self):
m = ExternalMerger(self.agg, 5, partitions=3)
m.mergeCombiners(map(lambda k_v: (k_v[0], [str(k_v[1])]), self.data * 10))
self.assertTrue(m.spills >= 1)
self.assertEqual(sum(len(v) for k, v in m.items()),
self.N * 10)
m._cleanup()
def test_group_by_key(self):
def gen_data(N, step):
for i in range(1, N + 1, step):
for j in range(i):
yield (i, [j])
def gen_gs(N, step=1):
return shuffle.GroupByKey(gen_data(N, step))
self.assertEqual(1, len(list(gen_gs(1))))
self.assertEqual(2, len(list(gen_gs(2))))
self.assertEqual(100, len(list(gen_gs(100))))
self.assertEqual(list(range(1, 101)), [k for k, _ in gen_gs(100)])
self.assertTrue(all(list(range(k)) == list(vs) for k, vs in gen_gs(100)))
for k, vs in gen_gs(50002, 10000):
self.assertEqual(k, len(vs))
self.assertEqual(list(range(k)), list(vs))
ser = PickleSerializer()
l = ser.loads(ser.dumps(list(gen_gs(50002, 30000))))
for k, vs in l:
self.assertEqual(k, len(vs))
self.assertEqual(list(range(k)), list(vs))
class SorterTests(unittest.TestCase):
def test_in_memory_sort(self):
l = list(range(1024))
random.shuffle(l)
sorter = ExternalSorter(1024)
self.assertEqual(sorted(l), list(sorter.sorted(l)))
self.assertEqual(sorted(l, reverse=True), list(sorter.sorted(l, reverse=True)))
self.assertEqual(sorted(l, key=lambda x: -x), list(sorter.sorted(l, key=lambda x: -x)))
self.assertEqual(sorted(l, key=lambda x: -x, reverse=True),
list(sorter.sorted(l, key=lambda x: -x, reverse=True)))
def test_external_sort(self):
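        # override _next_limit so the sorter never grows its memory budget and must spill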
class CustomizedSorter(ExternalSorter):
def _next_limit(self):
return self.memory_limit
l = list(range(1024))
random.shuffle(l)
sorter = CustomizedSorter(1)
self.assertEqual(sorted(l), list(sorter.sorted(l)))
self.assertGreater(shuffle.DiskBytesSpilled, 0)
last = shuffle.DiskBytesSpilled
self.assertEqual(sorted(l, reverse=True), list(sorter.sorted(l, reverse=True)))
self.assertGreater(shuffle.DiskBytesSpilled, last)
last = shuffle.DiskBytesSpilled
self.assertEqual(sorted(l, key=lambda x: -x), list(sorter.sorted(l, key=lambda x: -x)))
self.assertGreater(shuffle.DiskBytesSpilled, last)
last = shuffle.DiskBytesSpilled
self.assertEqual(sorted(l, key=lambda x: -x, reverse=True),
list(sorter.sorted(l, key=lambda x: -x, reverse=True)))
self.assertGreater(shuffle.DiskBytesSpilled, last)
def test_external_sort_in_rdd(self):
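        # with only 1m of worker memory, sortBy has to fall back to external sorting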
conf = SparkConf().set("spark.python.worker.memory", "1m")
sc = SparkContext(conf=conf)
l = list(range(10240))
random.shuffle(l)
rdd = sc.parallelize(l, 4)
self.assertEqual(sorted(l), rdd.sortBy(lambda x: x).collect())
sc.stop()
class SerializationTestCase(unittest.TestCase):
def test_namedtuple(self):
from collections import namedtuple
from pickle import dumps, loads
P = namedtuple("P", "x y")
p1 = P(1, 3)
p2 = loads(dumps(p1, 2))
self.assertEqual(p1, p2)
def test_itemgetter(self):
from operator import itemgetter
ser = CloudPickleSerializer()
d = range(10)
getter = itemgetter(1)
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
getter = itemgetter(0, 3)
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
def test_function_module_name(self):
ser = CloudPickleSerializer()
func = lambda x: x
func2 = ser.loads(ser.dumps(func))
self.assertEqual(func.__module__, func2.__module__)
def test_attrgetter(self):
from operator import attrgetter
ser = CloudPickleSerializer()
class C(object):
def __getattr__(self, item):
return item
d = C()
getter = attrgetter("a")
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
getter = attrgetter("a", "b")
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
d.e = C()
getter = attrgetter("e.a")
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
getter = attrgetter("e.a", "e.b")
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
def test_pickling_file_handles(self):
ser = CloudPickleSerializer()
out1 = sys.stderr
out2 = ser.loads(ser.dumps(out1))
self.assertEqual(out1, out2)
def test_func_globals(self):
class Unpicklable(object):
def __reduce__(self):
raise Exception("not picklable")
global exit
exit = Unpicklable()
ser = CloudPickleSerializer()
self.assertRaises(Exception, lambda: ser.dumps(exit))
def foo():
sys.exit(0)
self.assertTrue("exit" in foo.__code__.co_names)
ser.dumps(foo)
def test_compressed_serializer(self):
ser = CompressedSerializer(PickleSerializer())
try:
from StringIO import StringIO
except ImportError:
from io import BytesIO as StringIO
io = StringIO()
ser.dump_stream(["abc", u"123", range(5)], io)
io.seek(0)
self.assertEqual(["abc", u"123", range(5)], list(ser.load_stream(io)))
ser.dump_stream(range(1000), io)
io.seek(0)
self.assertEqual(["abc", u"123", range(5)] + list(range(1000)), list(ser.load_stream(io)))
io.close()
def test_hash_serializer(self):
hash(NoOpSerializer())
hash(UTF8Deserializer())
hash(PickleSerializer())
hash(MarshalSerializer())
hash(AutoSerializer())
hash(BatchedSerializer(PickleSerializer()))
hash(AutoBatchedSerializer(MarshalSerializer()))
hash(PairDeserializer(NoOpSerializer(), UTF8Deserializer()))
hash(CartesianDeserializer(NoOpSerializer(), UTF8Deserializer()))
hash(CompressedSerializer(PickleSerializer()))
hash(FlattenedValuesSerializer(PickleSerializer()))
class QuietTest(object):
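    """Context manager that silences log4j output while jobs that are expected to fail run."""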
def __init__(self, sc):
self.log4j = sc._jvm.org.apache.log4j
def __enter__(self):
self.old_level = self.log4j.LogManager.getRootLogger().getLevel()
self.log4j.LogManager.getRootLogger().setLevel(self.log4j.Level.FATAL)
def __exit__(self, exc_type, exc_val, exc_tb):
self.log4j.LogManager.getRootLogger().setLevel(self.old_level)
class PySparkTestCase(unittest.TestCase):
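    """Base class that creates a fresh local[4] SparkContext for every test."""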
def setUp(self):
self._old_sys_path = list(sys.path)
class_name = self.__class__.__name__
self.sc = SparkContext('local[4]', class_name)
def tearDown(self):
self.sc.stop()
sys.path = self._old_sys_path
class ReusedPySparkTestCase(unittest.TestCase):
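    """Base class that shares a single SparkContext across all tests in the class."""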
@classmethod
def setUpClass(cls):
cls.sc = SparkContext('local[4]', cls.__name__)
@classmethod
def tearDownClass(cls):
cls.sc.stop()
class CheckpointTests(ReusedPySparkTestCase):
def setUp(self):
self.checkpointDir = tempfile.NamedTemporaryFile(delete=False)
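        # we only want a unique name here: delete the file so Spark can create a directory there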
os.unlink(self.checkpointDir.name)
self.sc.setCheckpointDir(self.checkpointDir.name)
def tearDown(self):
shutil.rmtree(self.checkpointDir.name)
def test_basic_checkpointing(self):
parCollection = self.sc.parallelize([1, 2, 3, 4])
flatMappedRDD = parCollection.flatMap(lambda x: range(1, x + 1))
self.assertFalse(flatMappedRDD.isCheckpointed())
self.assertTrue(flatMappedRDD.getCheckpointFile() is None)
flatMappedRDD.checkpoint()
result = flatMappedRDD.collect()
time.sleep(1)
self.assertTrue(flatMappedRDD.isCheckpointed())
self.assertEqual(flatMappedRDD.collect(), result)
self.assertEqual("file:" + self.checkpointDir.name,
os.path.dirname(os.path.dirname(flatMappedRDD.getCheckpointFile())))
def test_checkpoint_and_restore(self):
parCollection = self.sc.parallelize([1, 2, 3, 4])
flatMappedRDD = parCollection.flatMap(lambda x: [x])
self.assertFalse(flatMappedRDD.isCheckpointed())
self.assertTrue(flatMappedRDD.getCheckpointFile() is None)
flatMappedRDD.checkpoint()
flatMappedRDD.count()
time.sleep(1)
self.assertTrue(flatMappedRDD.getCheckpointFile() is not None)
recovered = self.sc._checkpointFile(flatMappedRDD.getCheckpointFile(),
flatMappedRDD._jrdd_deserializer)
self.assertEqual([1, 2, 3, 4], recovered.collect())
class AddFileTests(PySparkTestCase):
def test_add_py_file(self):
def func(x):
from userlibrary import UserClass
return UserClass().hello()
with QuietTest(self.sc):
self.assertRaises(Exception, self.sc.parallelize(range(2)).map(func).first)
path = os.path.join(SPARK_HOME, "python/test_support/userlibrary.py")
self.sc.addPyFile(path)
res = self.sc.parallelize(range(2)).map(func).first()
self.assertEqual("Hello World!", res)
def test_add_file_locally(self):
path = os.path.join(SPARK_HOME, "python/test_support/hello.txt")
self.sc.addFile(path)
download_path = SparkFiles.get("hello.txt")
self.assertNotEqual(path, download_path)
with open(download_path) as test_file:
self.assertEqual("Hello World!\n", test_file.readline())
def test_add_py_file_locally(self):
def func():
from userlibrary import UserClass
self.assertRaises(ImportError, func)
path = os.path.join(SPARK_HOME, "python/test_support/userlibrary.py")
self.sc.addPyFile(path)
from userlibrary import UserClass
self.assertEqual("Hello World!", UserClass().hello())
def test_add_egg_file_locally(self):
def func():
from userlib import UserClass
self.assertRaises(ImportError, func)
path = os.path.join(SPARK_HOME, "python/test_support/userlib-0.1.zip")
self.sc.addPyFile(path)
from userlib import UserClass
self.assertEqual("Hello World from inside a package!", UserClass().hello())
def test_overwrite_system_module(self):
self.sc.addPyFile(os.path.join(SPARK_HOME, "python/test_support/SimpleHTTPServer.py"))
import SimpleHTTPServer
self.assertEqual("My Server", SimpleHTTPServer.__name__)
def func(x):
import SimpleHTTPServer
return SimpleHTTPServer.__name__
self.assertEqual(["My Server"], self.sc.parallelize(range(1)).map(func).collect())
class RDDTests(ReusedPySparkTestCase):
def test_range(self):
self.assertEqual(self.sc.range(1, 1).count(), 0)
self.assertEqual(self.sc.range(1, 0, -1).count(), 1)
self.assertEqual(self.sc.range(0, 1 << 40, 1 << 39).count(), 2)
def test_id(self):
rdd = self.sc.parallelize(range(10))
id = rdd.id()
self.assertEqual(id, rdd.id())
rdd2 = rdd.map(str).filter(bool)
id2 = rdd2.id()
self.assertEqual(id + 1, id2)
self.assertEqual(id2, rdd2.id())
def test_empty_rdd(self):
rdd = self.sc.emptyRDD()
self.assertTrue(rdd.isEmpty())
def test_sum(self):
self.assertEqual(0, self.sc.emptyRDD().sum())
self.assertEqual(6, self.sc.parallelize([1, 2, 3]).sum())
def test_save_as_textfile_with_unicode(self):
x = u"\u00A1Hola, mundo!"
data = self.sc.parallelize([x])
tempFile = tempfile.NamedTemporaryFile(delete=True)
tempFile.close()
data.saveAsTextFile(tempFile.name)
raw_contents = b''.join(open(p, 'rb').read()
for p in glob(tempFile.name + "/part-0000*"))
self.assertEqual(x, raw_contents.strip().decode("utf-8"))
def test_save_as_textfile_with_utf8(self):
x = u"\u00A1Hola, mundo!"
data = self.sc.parallelize([x.encode("utf-8")])
tempFile = tempfile.NamedTemporaryFile(delete=True)
tempFile.close()
data.saveAsTextFile(tempFile.name)
raw_contents = b''.join(open(p, 'rb').read()
for p in glob(tempFile.name + "/part-0000*"))
self.assertEqual(x, raw_contents.strip().decode('utf8'))
def test_transforming_cartesian_result(self):
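        # smoke test: mapping over a cartesian product must not fail; the result itself is not checked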
rdd1 = self.sc.parallelize([1, 2])
rdd2 = self.sc.parallelize([3, 4])
cart = rdd1.cartesian(rdd2)
result = cart.map(lambda x_y3: x_y3[0] + x_y3[1]).collect()
def test_transforming_pickle_file(self):
data = self.sc.parallelize([u"Hello", u"World!"])
tempFile = tempfile.NamedTemporaryFile(delete=True)
tempFile.close()
data.saveAsPickleFile(tempFile.name)
pickled_file = self.sc.pickleFile(tempFile.name)
pickled_file.map(lambda x: x).collect()
def test_cartesian_on_textfile(self):
path = os.path.join(SPARK_HOME, "python/test_support/hello.txt")
a = self.sc.textFile(path)
result = a.cartesian(a).collect()
(x, y) = result[0]
self.assertEqual(u"Hello World!", x.strip())
self.assertEqual(u"Hello World!", y.strip())
def test_deleting_input_files(self):
tempFile = tempfile.NamedTemporaryFile(delete=False)
tempFile.write(b"Hello World!")
tempFile.close()
data = self.sc.textFile(tempFile.name)
filtered_data = data.filter(lambda x: True)
self.assertEqual(1, filtered_data.count())
os.unlink(tempFile.name)
with QuietTest(self.sc):
self.assertRaises(Exception, lambda: filtered_data.count())
def test_sampling_default_seed(self):
data = self.sc.parallelize(xrange(1000), 1)
subset = data.takeSample(False, 10)
self.assertEqual(len(subset), 10)
def test_aggregate_mutable_zero_value(self):
from collections import defaultdict
data1 = self.sc.range(10, numSlices=1)
data2 = self.sc.range(10, numSlices=2)
def seqOp(x, y):
x[y] += 1
return x
def comboOp(x, y):
for key, val in y.items():
x[key] += val
return x
counts1 = data1.aggregate(defaultdict(int), seqOp, comboOp)
counts2 = data2.aggregate(defaultdict(int), seqOp, comboOp)
counts3 = data1.treeAggregate(defaultdict(int), seqOp, comboOp, 2)
counts4 = data2.treeAggregate(defaultdict(int), seqOp, comboOp, 2)
ground_truth = defaultdict(int, dict((i, 1) for i in range(10)))
self.assertEqual(counts1, ground_truth)
self.assertEqual(counts2, ground_truth)
self.assertEqual(counts3, ground_truth)
self.assertEqual(counts4, ground_truth)
def test_aggregate_by_key_mutable_zero_value(self):
        # Test for SPARK-9021; uses aggregateByKey to collect, per key, a list of all values
# list(zip(...)) for Python 3.x compatibility (want to parallelize a
# collection, not a zip object)
tuples = list(zip(list(range(10))*2, [1]*20))
# Show that single or multiple partitions work
data1 = self.sc.parallelize(tuples, 1)
data2 = self.sc.parallelize(tuples, 2)
def seqOp(x, y):
x.append(y)
return x
def comboOp(x, y):
x.extend(y)
return x
values1 = data1.aggregateByKey([], seqOp, comboOp).collect()
values2 = data2.aggregateByKey([], seqOp, comboOp).collect()
# Sort lists to ensure clean comparison with ground_truth
values1.sort()
values2.sort()
ground_truth = [(i, [1]*2) for i in range(10)]
self.assertEqual(values1, ground_truth)
self.assertEqual(values2, ground_truth)
def test_fold_mutable_zero_value(self):
# Test for SPARK-9021; uses fold to merge an RDD of dict counters into
# a single dict
# NOTE: dict is used instead of collections.Counter for Python 2.6
# compatibility
from collections import defaultdict
counts1 = defaultdict(int, dict((i, 1) for i in range(10)))
counts2 = defaultdict(int, dict((i, 1) for i in range(3, 8)))
counts3 = defaultdict(int, dict((i, 1) for i in range(4, 7)))
counts4 = defaultdict(int, dict((i, 1) for i in range(5, 6)))
all_counts = [counts1, counts2, counts3, counts4]
# Show that single or multiple partitions work
data1 = self.sc.parallelize(all_counts, 1)
data2 = self.sc.parallelize(all_counts, 2)
def comboOp(x, y):
for key, val in y.items():
x[key] += val
return x
fold1 = data1.fold(defaultdict(int), comboOp)
fold2 = data2.fold(defaultdict(int), comboOp)
ground_truth = defaultdict(int)
for counts in all_counts:
for key, val in counts.items():
ground_truth[key] += val
self.assertEqual(fold1, ground_truth)
self.assertEqual(fold2, ground_truth)
def test_fold_by_key_mutable_zero_value(self):
# Test for SPARK-9021; uses foldByKey to make a pair RDD that contains
# lists of all values for each key in the original RDD
tuples = [(i, range(i)) for i in range(10)]*2
# Show that single or multiple partitions work
data1 = self.sc.parallelize(tuples, 1)
data2 = self.sc.parallelize(tuples, 2)
def comboOp(x, y):
x.extend(y)
return x
values1 = data1.foldByKey([], comboOp).collect()
values2 = data2.foldByKey([], comboOp).collect()
# Sort lists to ensure clean comparison with ground_truth
values1.sort()
values2.sort()
# list(range(...)) for Python 3.x compatibility
ground_truth = [(i, list(range(i))*2) for i in range(10)]
self.assertEqual(values1, ground_truth)
self.assertEqual(values2, ground_truth)
def test_aggregate_by_key(self):
data = self.sc.parallelize([(1, 1), (1, 1), (3, 2), (5, 1), (5, 3)], 2)
def seqOp(x, y):
x.add(y)
return x
def combOp(x, y):
x |= y
return x
sets = dict(data.aggregateByKey(set(), seqOp, combOp).collect())
self.assertEqual(3, len(sets))
self.assertEqual(set([1]), sets[1])
self.assertEqual(set([2]), sets[3])
self.assertEqual(set([1, 3]), sets[5])
def test_itemgetter(self):
rdd = self.sc.parallelize([range(10)])
from operator import itemgetter
self.assertEqual([1], rdd.map(itemgetter(1)).collect())
self.assertEqual([(2, 3)], rdd.map(itemgetter(2, 3)).collect())
def test_namedtuple_in_rdd(self):
from collections import namedtuple
Person = namedtuple("Person", "id firstName lastName")
jon = Person(1, "Jon", "Doe")
jane = Person(2, "Jane", "Doe")
theDoes = self.sc.parallelize([jon, jane])
self.assertEqual([jon, jane], theDoes.collect())
def test_large_broadcast(self):
N = 10000
data = [[float(i) for i in range(300)] for i in range(N)]
bdata = self.sc.broadcast(data) # 27MB
m = self.sc.parallelize(range(1), 1).map(lambda x: len(bdata.value)).sum()
self.assertEqual(N, m)
def test_multiple_broadcasts(self):
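        # verify that several broadcasts round-trip intact, including after b2 is replaced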
N = 1 << 21
b1 = self.sc.broadcast(set(range(N))) # multiple blocks in JVM
r = list(range(1 << 15))
random.shuffle(r)
s = str(r).encode()
checksum = hashlib.md5(s).hexdigest()
b2 = self.sc.broadcast(s)
r = list(set(self.sc.parallelize(range(10), 10).map(
lambda x: (len(b1.value), hashlib.md5(b2.value).hexdigest())).collect()))
self.assertEqual(1, len(r))
size, csum = r[0]
self.assertEqual(N, size)
self.assertEqual(checksum, csum)
random.shuffle(r)
s = str(r).encode()
checksum = hashlib.md5(s).hexdigest()
b2 = self.sc.broadcast(s)
r = list(set(self.sc.parallelize(range(10), 10).map(
lambda x: (len(b1.value), hashlib.md5(b2.value).hexdigest())).collect()))
self.assertEqual(1, len(r))
size, csum = r[0]
self.assertEqual(N, size)
self.assertEqual(checksum, csum)
def test_large_closure(self):
N = 200000
data = [float(i) for i in xrange(N)]
rdd = self.sc.parallelize(range(1), 1).map(lambda x: len(data))
self.assertEqual(N, rdd.first())
# regression test for SPARK-6886
self.assertEqual(1, rdd.map(lambda x: (x, 1)).groupByKey().count())
def test_zip_with_different_serializers(self):
a = self.sc.parallelize(range(5))
b = self.sc.parallelize(range(100, 105))
self.assertEqual(a.zip(b).collect(), [(0, 100), (1, 101), (2, 102), (3, 103), (4, 104)])
a = a._reserialize(BatchedSerializer(PickleSerializer(), 2))
b = b._reserialize(MarshalSerializer())
self.assertEqual(a.zip(b).collect(), [(0, 100), (1, 101), (2, 102), (3, 103), (4, 104)])
# regression test for SPARK-4841
path = os.path.join(SPARK_HOME, "python/test_support/hello.txt")
t = self.sc.textFile(path)
cnt = t.count()
self.assertEqual(cnt, t.zip(t).count())
rdd = t.map(str)
self.assertEqual(cnt, t.zip(rdd).count())
        # regression test for a bug in _reserialize()
self.assertEqual(cnt, t.zip(rdd).count())
def test_zip_with_different_object_sizes(self):
        # regression test for SPARK-5973
a = self.sc.parallelize(xrange(10000)).map(lambda i: '*' * i)
b = self.sc.parallelize(xrange(10000, 20000)).map(lambda i: '*' * i)
self.assertEqual(10000, a.zip(b).count())
def test_zip_with_different_number_of_items(self):
a = self.sc.parallelize(range(5), 2)
# different number of partitions
b = self.sc.parallelize(range(100, 106), 3)
self.assertRaises(ValueError, lambda: a.zip(b))
with QuietTest(self.sc):
# different number of batched items in JVM
b = self.sc.parallelize(range(100, 104), 2)
self.assertRaises(Exception, lambda: a.zip(b).count())
# different number of items in one pair
b = self.sc.parallelize(range(100, 106), 2)
self.assertRaises(Exception, lambda: a.zip(b).count())
# same total number of items, but different distributions
a = self.sc.parallelize([2, 3], 2).flatMap(range)
b = self.sc.parallelize([3, 2], 2).flatMap(range)
self.assertEqual(a.count(), b.count())
self.assertRaises(Exception, lambda: a.zip(b).count())
def test_count_approx_distinct(self):
rdd = self.sc.parallelize(xrange(1000))
self.assertTrue(950 < rdd.countApproxDistinct(0.03) < 1050)
self.assertTrue(950 < rdd.map(float).countApproxDistinct(0.03) < 1050)
self.assertTrue(950 < rdd.map(str).countApproxDistinct(0.03) < 1050)
self.assertTrue(950 < rdd.map(lambda x: (x, -x)).countApproxDistinct(0.03) < 1050)
rdd = self.sc.parallelize([i % 20 for i in range(1000)], 7)
self.assertTrue(18 < rdd.countApproxDistinct() < 22)
self.assertTrue(18 < rdd.map(float).countApproxDistinct() < 22)
self.assertTrue(18 < rdd.map(str).countApproxDistinct() < 22)
self.assertTrue(18 < rdd.map(lambda x: (x, -x)).countApproxDistinct() < 22)
self.assertRaises(ValueError, lambda: rdd.countApproxDistinct(0.00000001))
def test_histogram(self):
# empty
rdd = self.sc.parallelize([])
self.assertEqual([0], rdd.histogram([0, 10])[1])
self.assertEqual([0, 0], rdd.histogram([0, 4, 10])[1])
self.assertRaises(ValueError, lambda: rdd.histogram(1))
# out of range
rdd = self.sc.parallelize([10.01, -0.01])
self.assertEqual([0], rdd.histogram([0, 10])[1])
self.assertEqual([0, 0], rdd.histogram((0, 4, 10))[1])
# in range with one bucket
rdd = self.sc.parallelize(range(1, 5))
self.assertEqual([4], rdd.histogram([0, 10])[1])
self.assertEqual([3, 1], rdd.histogram([0, 4, 10])[1])
# in range with one bucket exact match
self.assertEqual([4], rdd.histogram([1, 4])[1])
# out of range with two buckets
rdd = self.sc.parallelize([10.01, -0.01])
self.assertEqual([0, 0], rdd.histogram([0, 5, 10])[1])
# out of range with two uneven buckets
rdd = self.sc.parallelize([10.01, -0.01])
self.assertEqual([0, 0], rdd.histogram([0, 4, 10])[1])
# in range with two buckets
rdd = self.sc.parallelize([1, 2, 3, 5, 6])
self.assertEqual([3, 2], rdd.histogram([0, 5, 10])[1])
        # in range with two buckets and None
rdd = self.sc.parallelize([1, 2, 3, 5, 6, None, float('nan')])
self.assertEqual([3, 2], rdd.histogram([0, 5, 10])[1])
# in range with two uneven buckets
rdd = self.sc.parallelize([1, 2, 3, 5, 6])
self.assertEqual([3, 2], rdd.histogram([0, 5, 11])[1])
# mixed range with two uneven buckets
rdd = self.sc.parallelize([-0.01, 0.0, 1, 2, 3, 5, 6, 11.0, 11.01])
self.assertEqual([4, 3], rdd.histogram([0, 5, 11])[1])
# mixed range with four uneven buckets
rdd = self.sc.parallelize([-0.01, 0.0, 1, 2, 3, 5, 6, 11.01, 12.0, 199.0, 200.0, 200.1])
self.assertEqual([4, 2, 1, 3], rdd.histogram([0.0, 5.0, 11.0, 12.0, 200.0])[1])
# mixed range with uneven buckets and NaN
rdd = self.sc.parallelize([-0.01, 0.0, 1, 2, 3, 5, 6, 11.01, 12.0,
199.0, 200.0, 200.1, None, float('nan')])
self.assertEqual([4, 2, 1, 3], rdd.histogram([0.0, 5.0, 11.0, 12.0, 200.0])[1])
# out of range with infinite buckets
rdd = self.sc.parallelize([10.01, -0.01, float('nan'), float("inf")])
self.assertEqual([1, 2], rdd.histogram([float('-inf'), 0, float('inf')])[1])
# invalid buckets
self.assertRaises(ValueError, lambda: rdd.histogram([]))
self.assertRaises(ValueError, lambda: rdd.histogram([1]))
self.assertRaises(ValueError, lambda: rdd.histogram(0))
self.assertRaises(TypeError, lambda: rdd.histogram({}))
# without buckets
rdd = self.sc.parallelize(range(1, 5))
self.assertEqual(([1, 4], [4]), rdd.histogram(1))
# without buckets single element
rdd = self.sc.parallelize([1])
self.assertEqual(([1, 1], [1]), rdd.histogram(1))
# without bucket no range
rdd = self.sc.parallelize([1] * 4)
self.assertEqual(([1, 1], [4]), rdd.histogram(1))
# without buckets basic two
rdd = self.sc.parallelize(range(1, 5))
self.assertEqual(([1, 2.5, 4], [2, 2]), rdd.histogram(2))
# without buckets with more requested than elements
rdd = self.sc.parallelize([1, 2])
buckets = [1 + 0.2 * i for i in range(6)]
hist = [1, 0, 0, 0, 1]
self.assertEqual((buckets, hist), rdd.histogram(5))
# invalid RDDs
rdd = self.sc.parallelize([1, float('inf')])
self.assertRaises(ValueError, lambda: rdd.histogram(2))
rdd = self.sc.parallelize([float('nan')])
self.assertRaises(ValueError, lambda: rdd.histogram(2))
# string
rdd = self.sc.parallelize(["ab", "ac", "b", "bd", "ef"], 2)
self.assertEqual([2, 2], rdd.histogram(["a", "b", "c"])[1])
self.assertEqual((["ab", "ef"], [5]), rdd.histogram(1))
self.assertRaises(TypeError, lambda: rdd.histogram(2))
def test_repartitionAndSortWithinPartitions(self):
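        # partition by key parity, then verify each partition comes back sorted by key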
rdd = self.sc.parallelize([(0, 5), (3, 8), (2, 6), (0, 8), (3, 8), (1, 3)], 2)
repartitioned = rdd.repartitionAndSortWithinPartitions(2, lambda key: key % 2)
partitions = repartitioned.glom().collect()
self.assertEqual(partitions[0], [(0, 5), (0, 8), (2, 6)])
self.assertEqual(partitions[1], [(1, 3), (3, 8), (3, 8)])
def test_distinct(self):
rdd = self.sc.parallelize((1, 2, 3)*10, 10)
self.assertEqual(rdd.getNumPartitions(), 10)
self.assertEqual(rdd.distinct().count(), 3)
result = rdd.distinct(5)
self.assertEqual(result.getNumPartitions(), 5)
self.assertEqual(result.count(), 3)
def test_external_group_by_key(self):
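        # cap worker memory at 1m so that groupByKey is forced to spill externally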
self.sc._conf.set("spark.python.worker.memory", "1m")
N = 200001
kv = self.sc.parallelize(xrange(N)).map(lambda x: (x % 3, x))
gkv = kv.groupByKey().cache()
self.assertEqual(3, gkv.count())
filtered = gkv.filter(lambda kv: kv[0] == 1)
self.assertEqual(1, filtered.count())
self.assertEqual([(1, N // 3)], filtered.mapValues(len).collect())
self.assertEqual([(N // 3, N // 3)],
filtered.values().map(lambda x: (len(x), len(list(x)))).collect())
result = filtered.collect()[0][1]
self.assertEqual(N // 3, len(result))
self.assertTrue(isinstance(result.data, shuffle.ExternalListOfList))
def test_sort_on_empty_rdd(self):
self.assertEqual([], self.sc.parallelize(zip([], [])).sortByKey().collect())
def test_sample(self):
rdd = self.sc.parallelize(range(0, 100), 4)
wo = rdd.sample(False, 0.1, 2).collect()
wo_dup = rdd.sample(False, 0.1, 2).collect()
self.assertSetEqual(set(wo), set(wo_dup))
wr = rdd.sample(True, 0.2, 5).collect()
wr_dup = rdd.sample(True, 0.2, 5).collect()
self.assertSetEqual(set(wr), set(wr_dup))
wo_s10 = rdd.sample(False, 0.3, 10).collect()
wo_s20 = rdd.sample(False, 0.3, 20).collect()
self.assertNotEqual(set(wo_s10), set(wo_s20))
wr_s11 = rdd.sample(True, 0.4, 11).collect()
wr_s21 = rdd.sample(True, 0.4, 21).collect()
self.assertNotEqual(set(wr_s11), set(wr_s21))
def test_null_in_rdd(self):
jrdd = self.sc._jvm.PythonUtils.generateRDDWithNull(self.sc._jsc)
rdd = RDD(jrdd, self.sc, UTF8Deserializer())
self.assertEqual([u"a", None, u"b"], rdd.collect())
rdd = RDD(jrdd, self.sc, NoOpSerializer())
self.assertEqual([b"a", None, b"b"], rdd.collect())
def test_multiple_python_java_RDD_conversions(self):
# Regression test for SPARK-5361
data = [
(u'1', {u'director': u'David Lean'}),
(u'2', {u'director': u'Andrew Dominik'})
]
data_rdd = self.sc.parallelize(data)
data_java_rdd = data_rdd._to_java_object_rdd()
data_python_rdd = self.sc._jvm.SerDe.javaToPython(data_java_rdd)
converted_rdd = RDD(data_python_rdd, self.sc)
self.assertEqual(2, converted_rdd.count())
        # a second conversion between Python and Java RDDs used to throw exceptions
data_java_rdd = converted_rdd._to_java_object_rdd()
data_python_rdd = self.sc._jvm.SerDe.javaToPython(data_java_rdd)
converted_rdd = RDD(data_python_rdd, self.sc)
self.assertEqual(2, converted_rdd.count())
def test_narrow_dependency_in_join(self):
rdd = self.sc.parallelize(range(10)).map(lambda x: (x, x))
parted = rdd.partitionBy(2)
self.assertEqual(2, parted.union(parted).getNumPartitions())
self.assertEqual(rdd.getNumPartitions() + 2, parted.union(rdd).getNumPartitions())
self.assertEqual(rdd.getNumPartitions() + 2, rdd.union(parted).getNumPartitions())
tracker = self.sc.statusTracker()
self.sc.setJobGroup("test1", "test", True)
d = sorted(parted.join(parted).collect())
self.assertEqual(10, len(d))
self.assertEqual((0, (0, 0)), d[0])
jobId = tracker.getJobIdsForGroup("test1")[0]
self.assertEqual(2, len(tracker.getJobInfo(jobId).stageIds))
self.sc.setJobGroup("test2", "test", True)
d = sorted(parted.join(rdd).collect())
self.assertEqual(10, len(d))
self.assertEqual((0, (0, 0)), d[0])
jobId = tracker.getJobIdsForGroup("test2")[0]
self.assertEqual(3, len(tracker.getJobInfo(jobId).stageIds))
self.sc.setJobGroup("test3", "test", True)
d = sorted(parted.cogroup(parted).collect())
self.assertEqual(10, len(d))
self.assertEqual([[0], [0]], list(map(list, d[0][1])))
jobId = tracker.getJobIdsForGroup("test3")[0]
self.assertEqual(2, len(tracker.getJobInfo(jobId).stageIds))
self.sc.setJobGroup("test4", "test", True)
d = sorted(parted.cogroup(rdd).collect())
self.assertEqual(10, len(d))
self.assertEqual([[0], [0]], list(map(list, d[0][1])))
jobId = tracker.getJobIdsForGroup("test4")[0]
self.assertEqual(3, len(tracker.getJobInfo(jobId).stageIds))
# Regression test for SPARK-6294
def test_take_on_jrdd(self):
rdd = self.sc.parallelize(xrange(1 << 20)).map(lambda x: str(x))
rdd._jrdd.first()
def test_sortByKey_uses_all_partitions_not_only_first_and_last(self):
# Regression test for SPARK-5969
seq = [(i * 59 % 101, i) for i in range(101)] # unsorted sequence
rdd = self.sc.parallelize(seq)
for ascending in [True, False]:
sort = rdd.sortByKey(ascending=ascending, numPartitions=5)
self.assertEqual(sort.collect(), sorted(seq, reverse=not ascending))
sizes = sort.glom().map(len).collect()
for size in sizes:
self.assertGreater(size, 0)
class ProfilerTests(PySparkTestCase):
def setUp(self):
self._old_sys_path = list(sys.path)
class_name = self.__class__.__name__
conf = SparkConf().set("spark.python.profile", "true")
self.sc = SparkContext('local[4]', class_name, conf=conf)
def test_profiler(self):
self.do_computation()
profilers = self.sc.profiler_collector.profilers
self.assertEqual(1, len(profilers))
id, profiler, _ = profilers[0]
stats = profiler.stats()
self.assertTrue(stats is not None)
width, stat_list = stats.get_print_list([])
func_names = [func_name for fname, n, func_name in stat_list]
self.assertTrue("heavy_foo" in func_names)
old_stdout = sys.stdout
sys.stdout = io = StringIO()
self.sc.show_profiles()
self.assertTrue("heavy_foo" in io.getvalue())
sys.stdout = old_stdout
d = tempfile.gettempdir()
self.sc.dump_profiles(d)
self.assertTrue("rdd_%d.pstats" % id in os.listdir(d))
def test_custom_profiler(self):
class TestCustomProfiler(BasicProfiler):
def show(self, id):
self.result = "Custom formatting"
self.sc.profiler_collector.profiler_cls = TestCustomProfiler
self.do_computation()
profilers = self.sc.profiler_collector.profilers
self.assertEqual(1, len(profilers))
_, profiler, _ = profilers[0]
self.assertTrue(isinstance(profiler, TestCustomProfiler))
self.sc.show_profiles()
self.assertEqual("Custom formatting", profiler.result)
def do_computation(self):
def heavy_foo(x):
for i in range(1 << 18):
x = 1
rdd = self.sc.parallelize(range(100))
rdd.foreach(heavy_foo)
class InputFormatTests(ReusedPySparkTestCase):
@classmethod
def setUpClass(cls):
ReusedPySparkTestCase.setUpClass()
cls.tempdir = tempfile.NamedTemporaryFile(delete=False)
os.unlink(cls.tempdir.name)
cls.sc._jvm.WriteInputFormatTestDataGenerator.generateData(cls.tempdir.name, cls.sc._jsc)
@classmethod
def tearDownClass(cls):
ReusedPySparkTestCase.tearDownClass()
shutil.rmtree(cls.tempdir.name)
@unittest.skipIf(sys.version >= "3", "serialize array of byte")
def test_sequencefiles(self):
basepath = self.tempdir.name
ints = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfint/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text").collect())
ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
self.assertEqual(ints, ei)
doubles = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfdouble/",
"org.apache.hadoop.io.DoubleWritable",
"org.apache.hadoop.io.Text").collect())
ed = [(1.0, u'aa'), (1.0, u'aa'), (2.0, u'aa'), (2.0, u'bb'), (2.0, u'bb'), (3.0, u'cc')]
self.assertEqual(doubles, ed)
bytes = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfbytes/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.BytesWritable").collect())
ebs = [(1, bytearray('aa', 'utf-8')),
(1, bytearray('aa', 'utf-8')),
(2, bytearray('aa', 'utf-8')),
(2, bytearray('bb', 'utf-8')),
(2, bytearray('bb', 'utf-8')),
(3, bytearray('cc', 'utf-8'))]
self.assertEqual(bytes, ebs)
text = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sftext/",
"org.apache.hadoop.io.Text",
"org.apache.hadoop.io.Text").collect())
et = [(u'1', u'aa'),
(u'1', u'aa'),
(u'2', u'aa'),
(u'2', u'bb'),
(u'2', u'bb'),
(u'3', u'cc')]
self.assertEqual(text, et)
bools = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfbool/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.BooleanWritable").collect())
eb = [(1, False), (1, True), (2, False), (2, False), (2, True), (3, True)]
self.assertEqual(bools, eb)
nulls = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfnull/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.BooleanWritable").collect())
en = [(1, None), (1, None), (2, None), (2, None), (2, None), (3, None)]
self.assertEqual(nulls, en)
maps = self.sc.sequenceFile(basepath + "/sftestdata/sfmap/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable").collect()
em = [(1, {}),
(1, {3.0: u'bb'}),
(2, {1.0: u'aa'}),
(2, {1.0: u'cc'}),
(3, {2.0: u'dd'})]
for v in maps:
self.assertTrue(v in em)
# arrays get pickled to tuples by default
tuples = sorted(self.sc.sequenceFile(
basepath + "/sftestdata/sfarray/",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable").collect())
et = [(1, ()),
(2, (3.0, 4.0, 5.0)),
(3, (4.0, 5.0, 6.0))]
self.assertEqual(tuples, et)
# with custom converters, primitive arrays can stay as arrays
arrays = sorted(self.sc.sequenceFile(
basepath + "/sftestdata/sfarray/",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable",
valueConverter="org.apache.spark.api.python.WritableToDoubleArrayConverter").collect())
ea = [(1, array('d')),
(2, array('d', [3.0, 4.0, 5.0])),
(3, array('d', [4.0, 5.0, 6.0]))]
self.assertEqual(arrays, ea)
clazz = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfclass/",
"org.apache.hadoop.io.Text",
"org.apache.spark.api.python.TestWritable").collect())
cname = u'org.apache.spark.api.python.TestWritable'
ec = [(u'1', {u'__class__': cname, u'double': 1.0, u'int': 1, u'str': u'test1'}),
(u'2', {u'__class__': cname, u'double': 2.3, u'int': 2, u'str': u'test2'}),
(u'3', {u'__class__': cname, u'double': 3.1, u'int': 3, u'str': u'test3'}),
(u'4', {u'__class__': cname, u'double': 4.2, u'int': 4, u'str': u'test4'}),
(u'5', {u'__class__': cname, u'double': 5.5, u'int': 5, u'str': u'test56'})]
self.assertEqual(clazz, ec)
unbatched_clazz = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfclass/",
"org.apache.hadoop.io.Text",
"org.apache.spark.api.python.TestWritable",
).collect())
self.assertEqual(unbatched_clazz, ec)
def test_oldhadoop(self):
basepath = self.tempdir.name
ints = sorted(self.sc.hadoopFile(basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapred.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text").collect())
ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
self.assertEqual(ints, ei)
hellopath = os.path.join(SPARK_HOME, "python/test_support/hello.txt")
oldconf = {"mapred.input.dir": hellopath}
hello = self.sc.hadoopRDD("org.apache.hadoop.mapred.TextInputFormat",
"org.apache.hadoop.io.LongWritable",
"org.apache.hadoop.io.Text",
conf=oldconf).collect()
result = [(0, u'Hello World!')]
self.assertEqual(hello, result)
def test_newhadoop(self):
basepath = self.tempdir.name
ints = sorted(self.sc.newAPIHadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text").collect())
ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
self.assertEqual(ints, ei)
hellopath = os.path.join(SPARK_HOME, "python/test_support/hello.txt")
newconf = {"mapred.input.dir": hellopath}
hello = self.sc.newAPIHadoopRDD("org.apache.hadoop.mapreduce.lib.input.TextInputFormat",
"org.apache.hadoop.io.LongWritable",
"org.apache.hadoop.io.Text",
conf=newconf).collect()
result = [(0, u'Hello World!')]
self.assertEqual(hello, result)
def test_newolderror(self):
basepath = self.tempdir.name
self.assertRaises(Exception, lambda: self.sc.hadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text"))
self.assertRaises(Exception, lambda: self.sc.newAPIHadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapred.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text"))
def test_bad_inputs(self):
basepath = self.tempdir.name
self.assertRaises(Exception, lambda: self.sc.sequenceFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.io.NotValidWritable",
"org.apache.hadoop.io.Text"))
self.assertRaises(Exception, lambda: self.sc.hadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapred.NotValidInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text"))
self.assertRaises(Exception, lambda: self.sc.newAPIHadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapreduce.lib.input.NotValidInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text"))
def test_converters(self):
# use of custom converters
basepath = self.tempdir.name
maps = sorted(self.sc.sequenceFile(
basepath + "/sftestdata/sfmap/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable",
keyConverter="org.apache.spark.api.python.TestInputKeyConverter",
valueConverter="org.apache.spark.api.python.TestInputValueConverter").collect())
em = [(u'\x01', []),
(u'\x01', [3.0]),
(u'\x02', [1.0]),
(u'\x02', [1.0]),
(u'\x03', [2.0])]
self.assertEqual(maps, em)
def test_binary_files(self):
path = os.path.join(self.tempdir.name, "binaryfiles")
os.mkdir(path)
data = b"short binary data"
with open(os.path.join(path, "part-0000"), 'wb') as f:
f.write(data)
[(p, d)] = self.sc.binaryFiles(path).collect()
self.assertTrue(p.endswith("part-0000"))
self.assertEqual(d, data)
def test_binary_records(self):
path = os.path.join(self.tempdir.name, "binaryrecords")
os.mkdir(path)
with open(os.path.join(path, "part-0000"), 'w') as f:
for i in range(100):
f.write('%04d' % i)
result = self.sc.binaryRecords(path, 4).map(int).collect()
self.assertEqual(list(range(100)), result)
class OutputFormatTests(ReusedPySparkTestCase):
def setUp(self):
self.tempdir = tempfile.NamedTemporaryFile(delete=False)
os.unlink(self.tempdir.name)
def tearDown(self):
shutil.rmtree(self.tempdir.name, ignore_errors=True)
@unittest.skipIf(sys.version >= "3", "serialize array of byte")
def test_sequencefiles(self):
basepath = self.tempdir.name
ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
self.sc.parallelize(ei).saveAsSequenceFile(basepath + "/sfint/")
ints = sorted(self.sc.sequenceFile(basepath + "/sfint/").collect())
self.assertEqual(ints, ei)
ed = [(1.0, u'aa'), (1.0, u'aa'), (2.0, u'aa'), (2.0, u'bb'), (2.0, u'bb'), (3.0, u'cc')]
self.sc.parallelize(ed).saveAsSequenceFile(basepath + "/sfdouble/")
doubles = sorted(self.sc.sequenceFile(basepath + "/sfdouble/").collect())
self.assertEqual(doubles, ed)
ebs = [(1, bytearray(b'\x00\x07spam\x08')), (2, bytearray(b'\x00\x07spam\x08'))]
self.sc.parallelize(ebs).saveAsSequenceFile(basepath + "/sfbytes/")
bytes = sorted(self.sc.sequenceFile(basepath + "/sfbytes/").collect())
self.assertEqual(bytes, ebs)
et = [(u'1', u'aa'),
(u'2', u'bb'),
(u'3', u'cc')]
self.sc.parallelize(et).saveAsSequenceFile(basepath + "/sftext/")
text = sorted(self.sc.sequenceFile(basepath + "/sftext/").collect())
self.assertEqual(text, et)
eb = [(1, False), (1, True), (2, False), (2, False), (2, True), (3, True)]
self.sc.parallelize(eb).saveAsSequenceFile(basepath + "/sfbool/")
bools = sorted(self.sc.sequenceFile(basepath + "/sfbool/").collect())
self.assertEqual(bools, eb)
en = [(1, None), (1, None), (2, None), (2, None), (2, None), (3, None)]
self.sc.parallelize(en).saveAsSequenceFile(basepath + "/sfnull/")
nulls = sorted(self.sc.sequenceFile(basepath + "/sfnull/").collect())
self.assertEqual(nulls, en)
em = [(1, {}),
(1, {3.0: u'bb'}),
(2, {1.0: u'aa'}),
(2, {1.0: u'cc'}),
(3, {2.0: u'dd'})]
self.sc.parallelize(em).saveAsSequenceFile(basepath + "/sfmap/")
maps = self.sc.sequenceFile(basepath + "/sfmap/").collect()
for v in maps:
            self.assertIn(v, em)
def test_oldhadoop(self):
basepath = self.tempdir.name
dict_data = [(1, {}),
(1, {"row1": 1.0}),
(2, {"row2": 2.0})]
self.sc.parallelize(dict_data).saveAsHadoopFile(
basepath + "/oldhadoop/",
"org.apache.hadoop.mapred.SequenceFileOutputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable")
result = self.sc.hadoopFile(
basepath + "/oldhadoop/",
"org.apache.hadoop.mapred.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable").collect()
for v in result:
            self.assertIn(v, dict_data)
conf = {
"mapred.output.format.class": "org.apache.hadoop.mapred.SequenceFileOutputFormat",
"mapred.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapred.output.value.class": "org.apache.hadoop.io.MapWritable",
"mapred.output.dir": basepath + "/olddataset/"
}
self.sc.parallelize(dict_data).saveAsHadoopDataset(conf)
input_conf = {"mapred.input.dir": basepath + "/olddataset/"}
result = self.sc.hadoopRDD(
"org.apache.hadoop.mapred.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable",
conf=input_conf).collect()
for v in result:
            self.assertIn(v, dict_data)
def test_newhadoop(self):
basepath = self.tempdir.name
data = [(1, ""),
(1, "a"),
(2, "bcdf")]
self.sc.parallelize(data).saveAsNewAPIHadoopFile(
basepath + "/newhadoop/",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text")
result = sorted(self.sc.newAPIHadoopFile(
basepath + "/newhadoop/",
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text").collect())
self.assertEqual(result, data)
conf = {
"mapreduce.outputformat.class":
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"mapred.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapred.output.value.class": "org.apache.hadoop.io.Text",
"mapred.output.dir": basepath + "/newdataset/"
}
self.sc.parallelize(data).saveAsNewAPIHadoopDataset(conf)
input_conf = {"mapred.input.dir": basepath + "/newdataset/"}
new_dataset = sorted(self.sc.newAPIHadoopRDD(
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text",
conf=input_conf).collect())
self.assertEqual(new_dataset, data)
@unittest.skipIf(sys.version >= "3", "serialize of array")
def test_newhadoop_with_array(self):
basepath = self.tempdir.name
# use custom ArrayWritable types and converters to handle arrays
array_data = [(1, array('d')),
(1, array('d', [1.0, 2.0, 3.0])),
(2, array('d', [3.0, 4.0, 5.0]))]
self.sc.parallelize(array_data).saveAsNewAPIHadoopFile(
basepath + "/newhadoop/",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable",
valueConverter="org.apache.spark.api.python.DoubleArrayToWritableConverter")
result = sorted(self.sc.newAPIHadoopFile(
basepath + "/newhadoop/",
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable",
valueConverter="org.apache.spark.api.python.WritableToDoubleArrayConverter").collect())
self.assertEqual(result, array_data)
conf = {
"mapreduce.outputformat.class":
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"mapred.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapred.output.value.class": "org.apache.spark.api.python.DoubleArrayWritable",
"mapred.output.dir": basepath + "/newdataset/"
}
self.sc.parallelize(array_data).saveAsNewAPIHadoopDataset(
conf,
valueConverter="org.apache.spark.api.python.DoubleArrayToWritableConverter")
input_conf = {"mapred.input.dir": basepath + "/newdataset/"}
new_dataset = sorted(self.sc.newAPIHadoopRDD(
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable",
valueConverter="org.apache.spark.api.python.WritableToDoubleArrayConverter",
conf=input_conf).collect())
self.assertEqual(new_dataset, array_data)
def test_newolderror(self):
basepath = self.tempdir.name
rdd = self.sc.parallelize(range(1, 4)).map(lambda x: (x, "a" * x))
self.assertRaises(Exception, lambda: rdd.saveAsHadoopFile(
basepath + "/newolderror/saveAsHadoopFile/",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat"))
self.assertRaises(Exception, lambda: rdd.saveAsNewAPIHadoopFile(
basepath + "/newolderror/saveAsNewAPIHadoopFile/",
"org.apache.hadoop.mapred.SequenceFileOutputFormat"))
def test_bad_inputs(self):
basepath = self.tempdir.name
rdd = self.sc.parallelize(range(1, 4)).map(lambda x: (x, "a" * x))
self.assertRaises(Exception, lambda: rdd.saveAsHadoopFile(
basepath + "/badinputs/saveAsHadoopFile/",
"org.apache.hadoop.mapred.NotValidOutputFormat"))
self.assertRaises(Exception, lambda: rdd.saveAsNewAPIHadoopFile(
basepath + "/badinputs/saveAsNewAPIHadoopFile/",
"org.apache.hadoop.mapreduce.lib.output.NotValidOutputFormat"))
def test_converters(self):
# use of custom converters
basepath = self.tempdir.name
data = [(1, {3.0: u'bb'}),
(2, {1.0: u'aa'}),
(3, {2.0: u'dd'})]
self.sc.parallelize(data).saveAsNewAPIHadoopFile(
basepath + "/converters/",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
keyConverter="org.apache.spark.api.python.TestOutputKeyConverter",
valueConverter="org.apache.spark.api.python.TestOutputValueConverter")
converted = sorted(self.sc.sequenceFile(basepath + "/converters/").collect())
expected = [(u'1', 3.0),
(u'2', 1.0),
(u'3', 2.0)]
self.assertEqual(converted, expected)
def test_reserialization(self):
basepath = self.tempdir.name
x = range(1, 5)
y = range(1001, 1005)
data = list(zip(x, y))
rdd = self.sc.parallelize(x).zip(self.sc.parallelize(y))
rdd.saveAsSequenceFile(basepath + "/reserialize/sequence")
result1 = sorted(self.sc.sequenceFile(basepath + "/reserialize/sequence").collect())
self.assertEqual(result1, data)
rdd.saveAsHadoopFile(
basepath + "/reserialize/hadoop",
"org.apache.hadoop.mapred.SequenceFileOutputFormat")
result2 = sorted(self.sc.sequenceFile(basepath + "/reserialize/hadoop").collect())
self.assertEqual(result2, data)
rdd.saveAsNewAPIHadoopFile(
basepath + "/reserialize/newhadoop",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat")
result3 = sorted(self.sc.sequenceFile(basepath + "/reserialize/newhadoop").collect())
self.assertEqual(result3, data)
conf4 = {
"mapred.output.format.class": "org.apache.hadoop.mapred.SequenceFileOutputFormat",
"mapred.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapred.output.value.class": "org.apache.hadoop.io.IntWritable",
"mapred.output.dir": basepath + "/reserialize/dataset"}
rdd.saveAsHadoopDataset(conf4)
result4 = sorted(self.sc.sequenceFile(basepath + "/reserialize/dataset").collect())
self.assertEqual(result4, data)
conf5 = {"mapreduce.outputformat.class":
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"mapred.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapred.output.value.class": "org.apache.hadoop.io.IntWritable",
"mapred.output.dir": basepath + "/reserialize/newdataset"}
rdd.saveAsNewAPIHadoopDataset(conf5)
result5 = sorted(self.sc.sequenceFile(basepath + "/reserialize/newdataset").collect())
self.assertEqual(result5, data)
def test_malformed_RDD(self):
basepath = self.tempdir.name
# non-batch-serialized RDD[[(K, V)]] should be rejected
data = [[(1, "a")], [(2, "aa")], [(3, "aaa")]]
rdd = self.sc.parallelize(data, len(data))
self.assertRaises(Exception, lambda: rdd.saveAsSequenceFile(
basepath + "/malformed/sequence"))
class DaemonTests(unittest.TestCase):
def connect(self, port):
from socket import socket, AF_INET, SOCK_STREAM
sock = socket(AF_INET, SOCK_STREAM)
sock.connect(('127.0.0.1', port))
        # send a split index of -1 to shut down the worker
sock.send(b"\xFF\xFF\xFF\xFF")
sock.close()
return True
def do_termination_test(self, terminator):
from subprocess import Popen, PIPE
from errno import ECONNREFUSED
# start daemon
daemon_path = os.path.join(os.path.dirname(__file__), "daemon.py")
daemon = Popen([sys.executable, daemon_path], stdin=PIPE, stdout=PIPE)
# read the port number
port = read_int(daemon.stdout)
# daemon should accept connections
self.assertTrue(self.connect(port))
# request shutdown
terminator(daemon)
time.sleep(1)
# daemon should no longer accept connections
try:
self.connect(port)
except EnvironmentError as exception:
self.assertEqual(exception.errno, ECONNREFUSED)
else:
self.fail("Expected EnvironmentError to be raised")
def test_termination_stdin(self):
self.do_termination_test(lambda daemon: daemon.stdin.close())
def test_termination_sigterm(self):
from signal import SIGTERM
self.do_termination_test(lambda daemon: os.kill(daemon.pid, SIGTERM))
class WorkerTests(ReusedPySparkTestCase):
def test_cancel_task(self):
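        # the sleeping task records the daemon and worker pids so we can check that
        # cancelling the job kills the worker but leaves the daemon alive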
temp = tempfile.NamedTemporaryFile(delete=True)
temp.close()
path = temp.name
def sleep(x):
import os
import time
with open(path, 'w') as f:
f.write("%d %d" % (os.getppid(), os.getpid()))
time.sleep(100)
# start job in background thread
def run():
try:
self.sc.parallelize(range(1), 1).foreach(sleep)
except Exception:
pass
import threading
t = threading.Thread(target=run)
t.daemon = True
t.start()
daemon_pid, worker_pid = 0, 0
while True:
if os.path.exists(path):
with open(path) as f:
data = f.read().split(' ')
daemon_pid, worker_pid = map(int, data)
break
time.sleep(0.1)
# cancel jobs
self.sc.cancelAllJobs()
t.join()
for i in range(50):
try:
os.kill(worker_pid, 0)
time.sleep(0.1)
except OSError:
break # worker was killed
else:
self.fail("worker has not been killed after 5 seconds")
try:
os.kill(daemon_pid, 0)
except OSError:
self.fail("daemon had been killed")
# run a normal job
rdd = self.sc.parallelize(xrange(100), 1)
self.assertEqual(100, rdd.map(str).count())
def test_after_exception(self):
def raise_exception(_):
raise Exception()
rdd = self.sc.parallelize(xrange(100), 1)
with QuietTest(self.sc):
self.assertRaises(Exception, lambda: rdd.foreach(raise_exception))
self.assertEqual(100, rdd.map(str).count())
def test_after_jvm_exception(self):
tempFile = tempfile.NamedTemporaryFile(delete=False)
tempFile.write(b"Hello World!")
tempFile.close()
data = self.sc.textFile(tempFile.name, 1)
filtered_data = data.filter(lambda x: True)
self.assertEqual(1, filtered_data.count())
os.unlink(tempFile.name)
with QuietTest(self.sc):
self.assertRaises(Exception, lambda: filtered_data.count())
rdd = self.sc.parallelize(xrange(100), 1)
self.assertEqual(100, rdd.map(str).count())
def test_accumulator_when_reuse_worker(self):
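        # accumulator values must not leak between jobs when worker processes are reused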
from pyspark.accumulators import INT_ACCUMULATOR_PARAM
acc1 = self.sc.accumulator(0, INT_ACCUMULATOR_PARAM)
self.sc.parallelize(xrange(100), 20).foreach(lambda x: acc1.add(x))
self.assertEqual(sum(range(100)), acc1.value)
acc2 = self.sc.accumulator(0, INT_ACCUMULATOR_PARAM)
self.sc.parallelize(xrange(100), 20).foreach(lambda x: acc2.add(x))
self.assertEqual(sum(range(100)), acc2.value)
self.assertEqual(sum(range(100)), acc1.value)
def test_reuse_worker_after_take(self):
rdd = self.sc.parallelize(xrange(100000), 1)
self.assertEqual(0, rdd.first())
def count():
try:
rdd.count()
except Exception:
pass
t = threading.Thread(target=count)
t.daemon = True
t.start()
t.join(5)
self.assertTrue(not t.isAlive())
self.assertEqual(100000, rdd.count())
def test_with_different_versions_of_python(self):
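        # a mismatched driver/worker Python version should surface as a Py4JJavaError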
rdd = self.sc.parallelize(range(10))
rdd.count()
version = self.sc.pythonVer
self.sc.pythonVer = "2.0"
try:
with QuietTest(self.sc):
self.assertRaises(Py4JJavaError, lambda: rdd.count())
finally:
self.sc.pythonVer = version
class SparkSubmitTests(unittest.TestCase):
def setUp(self):
self.programDir = tempfile.mkdtemp()
self.sparkSubmit = os.path.join(os.environ.get("SPARK_HOME"), "bin", "spark-submit")
def tearDown(self):
shutil.rmtree(self.programDir)
def createTempFile(self, name, content, dir=None):
pattern = re.compile(r'^ *\|', re.MULTILINE)
content = re.sub(pattern, '', content.strip())
if dir is None:
path = os.path.join(self.programDir, name)
else:
os.makedirs(os.path.join(self.programDir, dir))
path = os.path.join(self.programDir, dir, name)
with open(path, "w") as f:
f.write(content)
return path
def createFileInZip(self, name, content, ext=".zip", dir=None, zip_name=None):
pattern = re.compile(r'^ *\|', re.MULTILINE)
content = re.sub(pattern, '', content.strip())
if dir is None:
path = os.path.join(self.programDir, name + ext)
else:
path = os.path.join(self.programDir, dir, zip_name + ext)
zip = zipfile.ZipFile(path, 'w')
zip.writestr(name, content)
zip.close()
return path
def create_spark_package(self, artifact_name):
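        # lay out a minimal local Maven repository (pom plus a jar containing the
        # module) that --packages/--repositories can resolve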
group_id, artifact_id, version = artifact_name.split(":")
self.createTempFile("%s-%s.pom" % (artifact_id, version), ("""
|<?xml version="1.0" encoding="UTF-8"?>
|<project xmlns="http://maven.apache.org/POM/4.0.0"
| xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
| xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
| http://maven.apache.org/xsd/maven-4.0.0.xsd">
| <modelVersion>4.0.0</modelVersion>
| <groupId>%s</groupId>
| <artifactId>%s</artifactId>
| <version>%s</version>
|</project>
""" % (group_id, artifact_id, version)).lstrip(),
os.path.join(group_id, artifact_id, version))
self.createFileInZip("%s.py" % artifact_id, """
|def myfunc(x):
| return x + 1
""", ".jar", os.path.join(group_id, artifact_id, version),
"%s-%s" % (artifact_id, version))
def test_single_script(self):
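        """Submit and test a single script file"""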
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|
|sc = SparkContext()
|print(sc.parallelize([1, 2, 3]).map(lambda x: x * 2).collect())
""")
proc = subprocess.Popen([self.sparkSubmit, script], stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 4, 6]", out.decode('utf-8'))
def test_script_with_local_functions(self):
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|
|def foo(x):
| return x * 3
|
|sc = SparkContext()
|print(sc.parallelize([1, 2, 3]).map(foo).collect())
""")
proc = subprocess.Popen([self.sparkSubmit, script], stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[3, 6, 9]", out.decode('utf-8'))
def test_module_dependency(self):
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|from mylib import myfunc
|
|sc = SparkContext()
|print(sc.parallelize([1, 2, 3]).map(myfunc).collect())
""")
zip = self.createFileInZip("mylib.py", """
|def myfunc(x):
| return x + 1
""")
proc = subprocess.Popen([self.sparkSubmit, "--py-files", zip, script],
stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 3, 4]", out.decode('utf-8'))
def test_module_dependency_on_cluster(self):
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|from mylib import myfunc
|
|sc = SparkContext()
|print(sc.parallelize([1, 2, 3]).map(myfunc).collect())
""")
zip = self.createFileInZip("mylib.py", """
|def myfunc(x):
| return x + 1
""")
proc = subprocess.Popen([self.sparkSubmit, "--py-files", zip, "--master",
"local-cluster[1,1,512]", script],
stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 3, 4]", out.decode('utf-8'))
def test_package_dependency(self):
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|from mylib import myfunc
|
|sc = SparkContext()
|print(sc.parallelize([1, 2, 3]).map(myfunc).collect())
""")
self.create_spark_package("a:mylib:0.1")
proc = subprocess.Popen([self.sparkSubmit, "--packages", "a:mylib:0.1", "--repositories",
"file:" + self.programDir, script], stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 3, 4]", out.decode('utf-8'))
def test_package_dependency_on_cluster(self):
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|from mylib import myfunc
|
|sc = SparkContext()
|print(sc.parallelize([1, 2, 3]).map(myfunc).collect())
""")
self.create_spark_package("a:mylib:0.1")
proc = subprocess.Popen([self.sparkSubmit, "--packages", "a:mylib:0.1", "--repositories",
"file:" + self.programDir, "--master",
"local-cluster[1,1,512]", script], stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 3, 4]", out.decode('utf-8'))
def test_single_script_on_cluster(self):
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|
|def foo(x):
| return x * 2
|
|sc = SparkContext()
|print(sc.parallelize([1, 2, 3]).map(foo).collect())
""")
# this will fail if you have different spark.executor.memory
# in conf/spark-defaults.conf
proc = subprocess.Popen(
[self.sparkSubmit, "--master", "local-cluster[1,1,512]", script],
stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 4, 6]", out.decode('utf-8'))
class ContextTests(unittest.TestCase):
def test_failed_sparkcontext_creation(self):
# Regression test for SPARK-1550
self.assertRaises(Exception, lambda: SparkContext("an-invalid-master-name"))
def test_stop(self):
sc = SparkContext()
self.assertNotEqual(SparkContext._active_spark_context, None)
sc.stop()
self.assertEqual(SparkContext._active_spark_context, None)
def test_with(self):
with SparkContext() as sc:
self.assertNotEqual(SparkContext._active_spark_context, None)
self.assertEqual(SparkContext._active_spark_context, None)
def test_with_exception(self):
try:
with SparkContext() as sc:
self.assertNotEqual(SparkContext._active_spark_context, None)
raise Exception()
except:
pass
self.assertEqual(SparkContext._active_spark_context, None)
def test_with_stop(self):
with SparkContext() as sc:
self.assertNotEqual(SparkContext._active_spark_context, None)
sc.stop()
self.assertEqual(SparkContext._active_spark_context, None)
def test_progress_api(self):
with SparkContext() as sc:
sc.setJobGroup('test_progress_api', '', True)
rdd = sc.parallelize(range(10)).map(lambda x: time.sleep(100))
def run():
try:
rdd.count()
except Exception:
pass
t = threading.Thread(target=run)
t.daemon = True
t.start()
# wait for scheduler to start
time.sleep(1)
tracker = sc.statusTracker()
jobIds = tracker.getJobIdsForGroup('test_progress_api')
self.assertEqual(1, len(jobIds))
job = tracker.getJobInfo(jobIds[0])
self.assertEqual(1, len(job.stageIds))
stage = tracker.getStageInfo(job.stageIds[0])
self.assertEqual(rdd.getNumPartitions(), stage.numTasks)
sc.cancelAllJobs()
t.join()
# wait for event listener to update the status
time.sleep(1)
job = tracker.getJobInfo(jobIds[0])
self.assertEqual('FAILED', job.status)
self.assertEqual([], tracker.getActiveJobsIds())
self.assertEqual([], tracker.getActiveStageIds())
sc.stop()
def test_startTime(self):
with SparkContext() as sc:
self.assertGreater(sc.startTime, 0)
@unittest.skipIf(not _have_scipy, "SciPy not installed")
class SciPyTests(PySparkTestCase):
def test_serialize(self):
from scipy.special import gammaln
x = range(1, 5)
expected = list(map(gammaln, x))
observed = self.sc.parallelize(x).map(gammaln).collect()
self.assertEqual(expected, observed)
@unittest.skipIf(not _have_numpy, "NumPy not installed")
class NumPyTests(PySparkTestCase):
def test_statcounter_array(self):
x = self.sc.parallelize([np.array([1.0, 1.0]), np.array([2.0, 2.0]), np.array([3.0, 3.0])])
s = x.stats()
self.assertSequenceEqual([2.0, 2.0], s.mean().tolist())
self.assertSequenceEqual([1.0, 1.0], s.min().tolist())
self.assertSequenceEqual([3.0, 3.0], s.max().tolist())
self.assertSequenceEqual([1.0, 1.0], s.sampleStdev().tolist())
if __name__ == "__main__":
if not _have_scipy:
print("NOTE: Skipping SciPy tests as it does not seem to be installed")
if not _have_numpy:
print("NOTE: Skipping NumPy tests as it does not seem to be installed")
unittest.main()
if not _have_scipy:
print("NOTE: SciPy tests were skipped as it does not seem to be installed")
if not _have_numpy:
print("NOTE: NumPy tests were skipped as it does not seem to be installed")
| true
| true
|
1c44913b32f366f87742a6a4c2126b81e0bf5d8f
| 1,632
|
py
|
Python
|
Module2/Day17/module2_day17_userInput.py
|
sydneybeal/100DaysPython
|
d1b004bd27a0644983f3af100172f394ee039f30
|
[
"MIT"
] | 2
|
2019-06-02T12:17:18.000Z
|
2019-07-12T16:55:55.000Z
|
Module2/Day17/module2_day17_userInput.py
|
sydneybeal/100DaysPython
|
d1b004bd27a0644983f3af100172f394ee039f30
|
[
"MIT"
] | null | null | null |
Module2/Day17/module2_day17_userInput.py
|
sydneybeal/100DaysPython
|
d1b004bd27a0644983f3af100172f394ee039f30
|
[
"MIT"
] | null | null | null |
"""
Author: <REPLACE>
Project: 100DaysPython
File: module2_day17_userInput.py
Creation Date: <REPLACE>
Description: <REPLACE>
"""
# print(input())
# print(input("How many questions will you get asked?"))
name = input("What is your name?")
print(name)
resp = input("Do you approach the bridge keeper? (y/n)")
if "y" in resp.lower():
print("Those who approach the Bridge of Death must answer me these questions three. There the other side he see.")
print(input("How do you respond?"))
name = input("What is you name?")
print(name)
quest = input("What is you quest?")
print(quest)
answer = input("What is the airspeed velocity of an unladen swallow?")
if "african" in answer.lower() or "european" in answer.lower():
print("I...I don't know that!")
print("The bridge keeper is hurtled into the pit and you are free to cross.")
elif "i don't know" in answer.lower():
print("{}, you are hurled into the pit and your quest to {} has come to an end.".format(name.capitalize(), quest))
elif "wait" in answer.lower():
print("{}, you are hurled into the pit and your quest to {} has come to an end.".format(name.capitalize(), quest))
else:
print("'{}' was a good enough answer. You cross the bridge and continue your quest to {}.".format(answer, quest))
elif "n" in resp.lower():
print("Just like Sir Robin, you soiled your armor you were so scared. You leave the Bridge of Death, defeated by your own cowardice.")
else:
print("For being unable to provide a Yes or No, you are hurled into the pit and your quest is over.")
| 45.333333
| 137
| 0.664216
|
name = input("What is your name?")
print(name)
resp = input("Do you approach the bridge keeper? (y/n)")
if "y" in resp.lower():
print("Those who approach the Bridge of Death must answer me these questions three. There the other side he see.")
print(input("How do you respond?"))
name = input("What is you name?")
print(name)
quest = input("What is you quest?")
print(quest)
answer = input("What is the airspeed velocity of an unladen swallow?")
if "african" in answer.lower() or "european" in answer.lower():
print("I...I don't know that!")
print("The bridge keeper is hurtled into the pit and you are free to cross.")
elif "i don't know" in answer.lower():
print("{}, you are hurled into the pit and your quest to {} has come to an end.".format(name.capitalize(), quest))
elif "wait" in answer.lower():
print("{}, you are hurled into the pit and your quest to {} has come to an end.".format(name.capitalize(), quest))
else:
print("'{}' was a good enough answer. You cross the bridge and continue your quest to {}.".format(answer, quest))
elif "n" in resp.lower():
print("Just like Sir Robin, you soiled your armor you were so scared. You leave the Bridge of Death, defeated by your own cowardice.")
else:
print("For being unable to provide a Yes or No, you are hurled into the pit and your quest is over.")
| true
| true
|
1c44914fc1c38f634cff5b91eb98ea2a6b442953
| 1,029
|
py
|
Python
|
day-13/problem.py
|
mkemp/aoc-2021
|
03573a0e865ff86324245896e26260b14650d2ba
|
[
"MIT"
] | 1
|
2021-12-04T15:18:56.000Z
|
2021-12-04T15:18:56.000Z
|
day-13/problem.py
|
mkemp/aoc-2021
|
03573a0e865ff86324245896e26260b14650d2ba
|
[
"MIT"
] | null | null | null |
day-13/problem.py
|
mkemp/aoc-2021
|
03573a0e865ff86324245896e26260b14650d2ba
|
[
"MIT"
] | null | null | null |
with open('input') as f:
coordinates, creases = f.read().strip().split('\n\n')
dots = [tuple(map(int, line.split(','))) for line in coordinates.split('\n')]
folds = [(line[11:12], int(line[13:])) for line in creases.split('\n')]
def do_fold(dots, x_or_y, at):
new_dots = set()
if x_or_y == 'x':
for x, y in dots:
if x > at:
new_dots.add((at - (x - at), y))
else:
new_dots.add((x, y))
else:
for x, y in dots:
if y > at:
new_dots.add((x, at - (y - at)))
else:
new_dots.add((x, y))
return new_dots
def print_dots(dots):
for y in range(max(y for x, y in dots) + 1):
text = ''
for x in range(max(x for x, y in dots) + 1):
text += '#' if (x, y) in dots else '.'
print(text)
# Part 1
d = set(dots)
print(len(do_fold(d, *folds[0])))
# 671
# Part 2
d = set(dots)
for fold in folds:
d = do_fold(d, *fold)
print_dots(d)
# PCPHARKL
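# A minimal sketch of the fold rule above (made-up coordinates): folding along
# x=5 reflects any dot with x > 5 to x' = 5 - (x - 5), so (7, 0) lands on
# (3, 0), i.e. do_fold({(7, 0), (3, 0)}, 'x', 5) == {(3, 0)}.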
| 22.866667
| 81
| 0.489796
|
with open('input') as f:
coordinates, creases = f.read().strip().split('\n\n')
dots = [tuple(map(int, line.split(','))) for line in coordinates.split('\n')]
folds = [(line[11:12], int(line[13:])) for line in creases.split('\n')]
def do_fold(dots, x_or_y, at):
new_dots = set()
if x_or_y == 'x':
for x, y in dots:
if x > at:
new_dots.add((at - (x - at), y))
else:
new_dots.add((x, y))
else:
for x, y in dots:
if y > at:
new_dots.add((x, at - (y - at)))
else:
new_dots.add((x, y))
return new_dots
def print_dots(dots):
for y in range(max(y for x, y in dots) + 1):
text = ''
for x in range(max(x for x, y in dots) + 1):
text += '#' if (x, y) in dots else '.'
print(text)
d = set(dots)
print(len(do_fold(d, *folds[0])))
d = set(dots)
for fold in folds:
d = do_fold(d, *fold)
print_dots(d)
| true
| true
|
1c44930d11306e39af7a6ffbd3814e73e7a26fb9
| 521
|
py
|
Python
|
Famcy/_elements_/span/span.py
|
nexuni/Famcy
|
80f8f18fe1614ab3c203ca3466b9506b494470bf
|
[
"Apache-2.0"
] | null | null | null |
Famcy/_elements_/span/span.py
|
nexuni/Famcy
|
80f8f18fe1614ab3c203ca3466b9506b494470bf
|
[
"Apache-2.0"
] | 12
|
2022-02-05T04:56:44.000Z
|
2022-03-30T09:59:26.000Z
|
Famcy/_elements_/span/span.py
|
nexuni/Famcy
|
80f8f18fe1614ab3c203ca3466b9506b494470bf
|
[
"Apache-2.0"
] | null | null | null |
import Famcy
import json
class span(Famcy.FamcyElement):
def __init__(self):
super(span, self).__init__()
def render_element(self):
html = ""
if self.innerHTML and self.innerHTML != "":
html += self.innerHTML
self.children = []
else:
for child in self.children:
html += child.render_inner()
child.parentElement = self
self.html = html
return "<span" + self.setAttrTag() + ">" + html + "</span>"
| 27.421053
| 67
| 0.539347
|
import Famcy
import json
class span(Famcy.FamcyElement):
def __init__(self):
super(span, self).__init__()
def render_element(self):
html = ""
if self.innerHTML and self.innerHTML != "":
html += self.innerHTML
self.children = []
else:
for child in self.children:
html += child.render_inner()
child.parentElement = self
self.html = html
return "<span" + self.setAttrTag() + ">" + html + "</span>"
| true
| true
|
1c4497e7978fbbf7db672a1f8b3185fdccee9f54
| 1,473
|
py
|
Python
|
nitro-python/nssrc/com/citrix/netscaler/nitro/resource/config/ns/__init__.py
|
culbertm/NSttyPython
|
ff9f6aedae3fb8495342cd0fc4247c819cf47397
|
[
"Apache-2.0"
] | null | null | null |
nitro-python/nssrc/com/citrix/netscaler/nitro/resource/config/ns/__init__.py
|
culbertm/NSttyPython
|
ff9f6aedae3fb8495342cd0fc4247c819cf47397
|
[
"Apache-2.0"
] | null | null | null |
nitro-python/nssrc/com/citrix/netscaler/nitro/resource/config/ns/__init__.py
|
culbertm/NSttyPython
|
ff9f6aedae3fb8495342cd0fc4247c819cf47397
|
[
"Apache-2.0"
] | null | null | null |
__all__ = ['nsacl', 'nsacl6', 'nsacls', 'nsacls6', 'nsappflowcollector', 'nsappflowparam', 'nsaptlicense', 'nsassignment', 'nscapacity', 'nscentralmanagementserver', 'nsconfig', 'nsconnectiontable', 'nsconsoleloginprompt', 'nsdhcpip', 'nsdhcpparams', 'nsdiameter', 'nsencryptionparams', 'nsevents', 'nsextension', 'nsextension_binding', 'nsextension_extensionfunction_binding', 'nsfeature', 'nshardware', 'nshostname', 'nshttpparam', 'nshttpprofile', 'nsip', 'nsip6', 'nslicense', 'nslicenseproxyserver', 'nslicenseserver', 'nslicenseserverpool', 'nslimitidentifier', 'nslimitidentifier_binding', 'nslimitidentifier_nslimitsessions_binding', 'nslimitselector', 'nslimitsessions', 'nsmode', 'nsparam', 'nspartition', 'nspartition_binding', 'nspartition_bridgegroup_binding', 'nspartition_vlan_binding', 'nspbr', 'nspbr6', 'nspbrs', 'nsratecontrol', 'nsrollbackcmd', 'nsrpcnode', 'nsrunningconfig', 'nssavedconfig', 'nsservicefunction', 'nsservicepath', 'nsservicepath_binding', 'nsservicepath_nsservicefunction_binding', 'nssimpleacl', 'nssimpleacl6', 'nssourceroutecachetable', 'nsspparams', 'nsstats', 'nssurgeq', 'nstcpbufparam', 'nstcpparam', 'nstcpprofile', 'nstimeout', 'nstimer', 'nstimer_autoscalepolicy_binding', 'nstimer_binding', 'nstrafficdomain', 'nstrafficdomain_binding', 'nstrafficdomain_bridgegroup_binding', 'nstrafficdomain_vlan_binding', 'nstrafficdomain_vxlan_binding', 'nsvariable', 'nsversion', 'nsweblogparam', 'nsxmlnamespace', 'reboot', 'shutdown']
| 1,473
| 1,473
| 0.783435
|
__all__ = ['nsacl', 'nsacl6', 'nsacls', 'nsacls6', 'nsappflowcollector', 'nsappflowparam', 'nsaptlicense', 'nsassignment', 'nscapacity', 'nscentralmanagementserver', 'nsconfig', 'nsconnectiontable', 'nsconsoleloginprompt', 'nsdhcpip', 'nsdhcpparams', 'nsdiameter', 'nsencryptionparams', 'nsevents', 'nsextension', 'nsextension_binding', 'nsextension_extensionfunction_binding', 'nsfeature', 'nshardware', 'nshostname', 'nshttpparam', 'nshttpprofile', 'nsip', 'nsip6', 'nslicense', 'nslicenseproxyserver', 'nslicenseserver', 'nslicenseserverpool', 'nslimitidentifier', 'nslimitidentifier_binding', 'nslimitidentifier_nslimitsessions_binding', 'nslimitselector', 'nslimitsessions', 'nsmode', 'nsparam', 'nspartition', 'nspartition_binding', 'nspartition_bridgegroup_binding', 'nspartition_vlan_binding', 'nspbr', 'nspbr6', 'nspbrs', 'nsratecontrol', 'nsrollbackcmd', 'nsrpcnode', 'nsrunningconfig', 'nssavedconfig', 'nsservicefunction', 'nsservicepath', 'nsservicepath_binding', 'nsservicepath_nsservicefunction_binding', 'nssimpleacl', 'nssimpleacl6', 'nssourceroutecachetable', 'nsspparams', 'nsstats', 'nssurgeq', 'nstcpbufparam', 'nstcpparam', 'nstcpprofile', 'nstimeout', 'nstimer', 'nstimer_autoscalepolicy_binding', 'nstimer_binding', 'nstrafficdomain', 'nstrafficdomain_binding', 'nstrafficdomain_bridgegroup_binding', 'nstrafficdomain_vlan_binding', 'nstrafficdomain_vxlan_binding', 'nsvariable', 'nsversion', 'nsweblogparam', 'nsxmlnamespace', 'reboot', 'shutdown']
| true
| true
|
1c4498665b8aae85e1fcc79ae9ad664e8e4f74fe
| 2,454
|
py
|
Python
|
script/map_design_layer.py
|
matteli/histemul
|
61f1ea8e1263b92fd2bead0c808f67940faad802
|
[
"BSD-2-Clause"
] | 1
|
2019-07-05T09:40:50.000Z
|
2019-07-05T09:40:50.000Z
|
script/map_design_layer.py
|
matteli/histemul
|
61f1ea8e1263b92fd2bead0c808f67940faad802
|
[
"BSD-2-Clause"
] | null | null | null |
script/map_design_layer.py
|
matteli/histemul
|
61f1ea8e1263b92fd2bead0c808f67940faad802
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/python3
'''
Copyright (c) 2012-2015, Matthieu Nué
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
'''
import shlex, subprocess
#from PIL import Image, ImageFilter
'''img = Image.open('worldid.png')
img = img.filter(ImageFilter.CONTOUR)
img.save('color.png')
img = img.convert('L')
mask = img.point(lambda i: 0 if (i == 0) else 255)
img.save('dd.png')
mask.save('worldedge.png')'''
print("1/5")
#p = subprocess.Popen(shlex.split("convert worldid.png -morphology Convolve Laplacian:0 -threshold 0.1 -morphology Edge Octagon:1 -negate worldedge.png"))
p = subprocess.Popen(shlex.split("convert worldid.png -morphology Edge Disk:1.0 -threshold 0.1 -negate worldedge.png"))
p.wait()
print("2/5")
p = subprocess.Popen(shlex.split("convert worldedge.png -blur 0x2 -shade 115x30 worldtemp1.png"))
p.wait()
print("3/5")
p = subprocess.Popen(shlex.split("convert worldedge.png -blur 0x20 -level 65%,100% worldtemp2.png"))
p.wait()
print("4/5")
p = subprocess.Popen(shlex.split("composite -compose multiply worldtemp1.png worldtemp2.png worldshading.png"))
p.wait()
print("5/5")
p = subprocess.Popen(shlex.split("convert worldid.png -morphology Edge Disk:3.0 -threshold 0.1 worldborder.png"))
p.wait()
print("Finish")
| 40.9
| 154
| 0.765689
|
import shlex, subprocess
print("1/5")
p = subprocess.Popen(shlex.split("convert worldid.png -morphology Edge Disk:1.0 -threshold 0.1 -negate worldedge.png"))
p.wait()
print("2/5")
p = subprocess.Popen(shlex.split("convert worldedge.png -blur 0x2 -shade 115x30 worldtemp1.png"))
p.wait()
print("3/5")
p = subprocess.Popen(shlex.split("convert worldedge.png -blur 0x20 -level 65%,100% worldtemp2.png"))
p.wait()
print("4/5")
p = subprocess.Popen(shlex.split("composite -compose multiply worldtemp1.png worldtemp2.png worldshading.png"))
p.wait()
print("5/5")
p = subprocess.Popen(shlex.split("convert worldid.png -morphology Edge Disk:3.0 -threshold 0.1 worldborder.png"))
p.wait()
print("Finish")
| true
| true
|
1c449b22c9baf8b37615b192cc1ac043d84ca6f0
| 4,960
|
py
|
Python
|
arviz/plots/energyplot.py
|
aseyboldt/arviz
|
1fb40ff442f5ba4b8d11ceeaef27e6c339eb1685
|
[
"Apache-2.0"
] | null | null | null |
arviz/plots/energyplot.py
|
aseyboldt/arviz
|
1fb40ff442f5ba4b8d11ceeaef27e6c339eb1685
|
[
"Apache-2.0"
] | null | null | null |
arviz/plots/energyplot.py
|
aseyboldt/arviz
|
1fb40ff442f5ba4b8d11ceeaef27e6c339eb1685
|
[
"Apache-2.0"
] | null | null | null |
"""Plot energy transition distribution in HMC inference."""
from itertools import cycle
from matplotlib.pyplot import rcParams
import numpy as np
from ..data import convert_to_dataset
from .plot_utils import _scale_fig_size, get_plotting_function
def plot_energy(
data,
kind="kde",
bfmi=True,
figsize=None,
legend=True,
fill_alpha=(1, 0.75),
fill_color=("C0", "C5"),
bw=4.5,
textsize=None,
fill_kwargs=None,
plot_kwargs=None,
ax=None,
backend=None,
backend_kwargs=None,
show=None,
):
"""Plot energy transition distribution and marginal energy distribution in HMC algorithms.
This may help to diagnose poor exploration by gradient-based algorithms like HMC or NUTS.
Parameters
----------
data : xarray dataset, or object that can be converted (must represent
`sample_stats` and have an `energy` variable)
kind : str
        Type of plot to display {"kde", "histogram"}
bfmi : bool
If True add to the plot the value of the estimated Bayesian fraction of missing information
figsize : tuple
Figure size. If None it will be defined automatically.
legend : bool
Flag for plotting legend (defaults to True)
fill_alpha : tuple of floats
Alpha blending value for the shaded area under the curve, between 0
(no shade) and 1 (opaque). Defaults to (1, .75)
fill_color : tuple of valid matplotlib color
Color for Marginal energy distribution and Energy transition distribution.
Defaults to ('C0', 'C5')
bw : float
Bandwidth scaling factor for the KDE. Should be larger than 0. The higher this number the
smoother the KDE will be. Defaults to 4.5 which is essentially the same as the Scott's rule
of thumb (the default rule used by SciPy). Only works if `kind='kde'`
textsize: float
Text size scaling factor for labels, titles and lines. If None it will be autoscaled based
on figsize.
fill_kwargs : dicts, optional
Additional keywords passed to `arviz.plot_kde` (to control the shade)
plot_kwargs : dicts, optional
Additional keywords passed to `arviz.plot_kde` or `plt.hist` (if type='hist')
ax: axes, optional
Matplotlib axes or bokeh figures.
backend: str, optional
Select plotting backend {"matplotlib","bokeh"}. Default "matplotlib".
backend_kwargs: bool, optional
These are kwargs specific to the backend being used. For additional documentation
check the plotting method of the backend.
show : bool, optional
Call backend show function.
Returns
-------
axes : matplotlib axes or bokeh figures
Examples
--------
Plot a default energy plot
.. plot::
:context: close-figs
>>> import arviz as az
>>> data = az.load_arviz_data('centered_eight')
>>> az.plot_energy(data)
Represent energy plot via histograms
.. plot::
:context: close-figs
>>> az.plot_energy(data, kind='hist')
"""
energy = convert_to_dataset(data, group="sample_stats").energy.values
if fill_kwargs is None:
fill_kwargs = {}
if plot_kwargs is None:
plot_kwargs = {}
figsize, _, _, xt_labelsize, linewidth, _ = _scale_fig_size(figsize, textsize, 1, 1)
_colors = [
prop for _, prop in zip(range(10), cycle(rcParams["axes.prop_cycle"].by_key()["color"]))
]
if (fill_color[0].startswith("C") and len(fill_color[0]) == 2) and (
fill_color[1].startswith("C") and len(fill_color[1]) == 2
):
fill_color = tuple([_colors[int(color[1:]) % 10] for color in fill_color])
elif fill_color[0].startswith("C") and len(fill_color[0]) == 2:
fill_color = tuple([_colors[int(fill_color[0][1:]) % 10]] + list(fill_color[1:]))
elif fill_color[1].startswith("C") and len(fill_color[1]) == 2:
fill_color = tuple(list(fill_color[1:]) + [_colors[int(fill_color[0][1:]) % 10]])
series = zip(
fill_alpha,
fill_color,
("Marginal Energy", "Energy transition"),
(energy - energy.mean(), np.diff(energy)),
)
plot_energy_kwargs = dict(
ax=ax,
series=series,
energy=energy,
kind=kind,
bfmi=bfmi,
figsize=figsize,
xt_labelsize=xt_labelsize,
linewidth=linewidth,
fill_kwargs=fill_kwargs,
plot_kwargs=plot_kwargs,
bw=bw,
legend=legend,
backend_kwargs=backend_kwargs,
show=show,
)
if backend == "bokeh":
plot_energy_kwargs.pop("xt_labelsize")
plot_energy_kwargs["line_width"] = plot_energy_kwargs.pop("linewidth")
if kind in {"hist", "histogram"}:
plot_energy_kwargs["legend"] = False
# TODO: Add backend kwargs
plot = get_plotting_function("plot_energy", "energyplot", backend)
ax = plot(**plot_energy_kwargs)
return ax
| 33.066667
| 99
| 0.644355
|
from itertools import cycle
from matplotlib.pyplot import rcParams
import numpy as np
from ..data import convert_to_dataset
from .plot_utils import _scale_fig_size, get_plotting_function
def plot_energy(
data,
kind="kde",
bfmi=True,
figsize=None,
legend=True,
fill_alpha=(1, 0.75),
fill_color=("C0", "C5"),
bw=4.5,
textsize=None,
fill_kwargs=None,
plot_kwargs=None,
ax=None,
backend=None,
backend_kwargs=None,
show=None,
):
energy = convert_to_dataset(data, group="sample_stats").energy.values
if fill_kwargs is None:
fill_kwargs = {}
if plot_kwargs is None:
plot_kwargs = {}
figsize, _, _, xt_labelsize, linewidth, _ = _scale_fig_size(figsize, textsize, 1, 1)
_colors = [
prop for _, prop in zip(range(10), cycle(rcParams["axes.prop_cycle"].by_key()["color"]))
]
if (fill_color[0].startswith("C") and len(fill_color[0]) == 2) and (
fill_color[1].startswith("C") and len(fill_color[1]) == 2
):
fill_color = tuple([_colors[int(color[1:]) % 10] for color in fill_color])
elif fill_color[0].startswith("C") and len(fill_color[0]) == 2:
fill_color = tuple([_colors[int(fill_color[0][1:]) % 10]] + list(fill_color[1:]))
elif fill_color[1].startswith("C") and len(fill_color[1]) == 2:
fill_color = tuple(list(fill_color[1:]) + [_colors[int(fill_color[0][1:]) % 10]])
series = zip(
fill_alpha,
fill_color,
("Marginal Energy", "Energy transition"),
(energy - energy.mean(), np.diff(energy)),
)
plot_energy_kwargs = dict(
ax=ax,
series=series,
energy=energy,
kind=kind,
bfmi=bfmi,
figsize=figsize,
xt_labelsize=xt_labelsize,
linewidth=linewidth,
fill_kwargs=fill_kwargs,
plot_kwargs=plot_kwargs,
bw=bw,
legend=legend,
backend_kwargs=backend_kwargs,
show=show,
)
if backend == "bokeh":
plot_energy_kwargs.pop("xt_labelsize")
plot_energy_kwargs["line_width"] = plot_energy_kwargs.pop("linewidth")
if kind in {"hist", "histogram"}:
plot_energy_kwargs["legend"] = False
plot = get_plotting_function("plot_energy", "energyplot", backend)
ax = plot(**plot_energy_kwargs)
return ax
| true
| true
|
1c449b7027e666e7274fd83f44ad36f1462257cb
| 7,573
|
py
|
Python
|
addons/house_location/models/house_location.py
|
nathanbangwa243/house-location
|
fa38203b2c92dd97f253fc3b4354af228f1b0338
|
[
"MIT"
] | 1
|
2021-11-17T18:49:44.000Z
|
2021-11-17T18:49:44.000Z
|
addons/house_location/models/house_location.py
|
nathanbangwa243/house-location
|
fa38203b2c92dd97f253fc3b4354af228f1b0338
|
[
"MIT"
] | null | null | null |
addons/house_location/models/house_location.py
|
nathanbangwa243/house-location
|
fa38203b2c92dd97f253fc3b4354af228f1b0338
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# odoo imports
from odoo import models
from odoo import fields
from odoo import api
from odoo import exceptions
# others imports
import datetime
class HouseLocation(models.Model):
_name = 'house.location'
_description = 'House Location'
name = fields.Char(string="Title", required=True, help="",)
description = fields.Text(string="Description", help="",)
postcode = fields.Char(string="Postcode", help="",)
# the default availability date is in 3 months
date_availability = fields.Date(string="Available From", default=fields.Date.today(
) + datetime.timedelta(days=90), help="",)
expected_price = fields.Float(
string="Expected Price", required=True, help="",)
selling_price = fields.Float(
string="Selling Price", readonly=True, help="",)
bedrooms = fields.Integer(string="Bedrooms", default=2, help="",)
living_area = fields.Integer(
string="Living Area (sqm)", default=0, help="",)
facades = fields.Integer(string="Facades", help="",)
garage = fields.Boolean(string="Garage", help="",)
garden = fields.Boolean(string="Garden", help="",)
garden_area = fields.Integer(
string="Garden Area (sqm)", default=0, help="",)
garden_orientation = fields.Selection(
string="Garden Orientation",
selection=[
("north", "North"),
("south", "South"),
("east", "East"),
("west", "West"),
],
help="",
)
active = fields.Boolean(string="Active", default=True, help="",)
state = fields.Selection(
string="Status",
selection=[
("new", "New"),
("offer received", "Offer Received"),
("offer accepted", "Offer Accepted"),
("sold", "Sold"),
("canceled", "Canceled"),
],
default='new',
readonly=True,
)
# computed fields
total_area = fields.Integer(
string="Total Area", compute="_compute_total_area", help="")
best_price = fields.Float(
string="Best Offer", compute="_compute_best_offer", default=0, readonly=True, help="")
# Model Link
    # property type
property_type_id = fields.Many2one(
"house.location.type", string="Property Type",)
# property tags
tag_ids = fields.Many2many("house.location.tag", string="Property Tag",)
# users and partners
salesperson = fields.Many2one(
"res.users", string="Salesman", default=lambda self: self.env.user, copy=False, help="")
buyer = fields.Many2one("res.partner", string="Buyer", help="",)
# offers
offer_ids = fields.One2many(
"house.location.offer", "property_id", string="Offers")
    # SQL constraints (warning: currently not working)
_sql_constraints = [
# ("check_positive_expected_price", "expected_price >= 0", "expected_price must be positive"),
]
# Python constraints
@api.constrains('expected_price')
def _check_expected_price(self):
"""check expected_price constraint
Raises:
exceptions.ValidationError: negative value
"""
for record in self:
if (record.expected_price < 0):
raise exceptions.ValidationError(
"Expected Price must be positive")
@api.constrains('selling_price')
def _check_selling_price(self):
"""check selling_price constraint
Raises:
exceptions.ValidationError: negative value
"""
for record in self:
if (record.selling_price < 0):
raise exceptions.ValidationError(
"Selling Price must be positive")
@api.constrains('bedrooms')
def _check_bedrooms(self):
"""check bedrooms constraint
Raises:
exceptions.ValidationError: negative value
"""
for record in self:
if (record.bedrooms < 1):
raise exceptions.ValidationError(
"Bedrooms must be positive and greater than 1")
@api.constrains('living_area')
def _check_living_area(self):
"""check living_area constraint
Raises:
exceptions.ValidationError: negative value
"""
for record in self:
if (record.living_area < 0):
raise exceptions.ValidationError(
"Living Area must be positive")
@api.constrains('facades')
def _check_facades(self):
"""check facades constraint
Raises:
exceptions.ValidationError: negative value
"""
for record in self:
if (record.facades < 0):
raise exceptions.ValidationError("facades must be positive")
@api.constrains('garden_area')
def _check_garden_area(self):
"""check garden_area constraint
Raises:
exceptions.ValidationError: negative value
"""
for record in self:
if (record.garden_area < 0):
raise exceptions.ValidationError(
"Garden Area must be positive")
# crud actions
    def unlink(self):
        """Called when the user attempts to delete a property.
        Raises:
            exceptions.UserError: only 'new' and 'canceled' properties can be deleted
        Returns:
            the result of the parent unlink
        """
        for record in self:
            if record.state not in ("new", "canceled"):
                raise exceptions.UserError(
                    "only 'new' and 'canceled' property can be deleted")
        return super(HouseLocation, self).unlink()
# computed function
@api.depends('living_area', 'garden_area')
def _compute_total_area(self):
"""when living_area or garden_area changes, update total_area
"""
for record in self:
record.total_area = int(record.living_area) + \
int(record.garden_area)
    @api.depends('offer_ids')
    def _compute_best_offer(self):
        """when a new offer is added, update the best offer
        """
        for record in self:
            # best_price is the highest offer price, or 0 when there is no offer yet
            record.best_price = max(record.offer_ids.mapped('price'), default=0)
# onchanges functions
@api.onchange("garden")
def _onchange_garden(self):
"""Onchange garden, set garden_area to 10 and garden_orientation to 'north'
"""
if self.garden:
self.garden_area = 10
self.garden_orientation = 'north'
else:
self.garden_area = 0
self.garden_orientation = None
# buttons actions
def sold_property(self):
"""Sold property
Raises:
exceptions.UserError: canceled property can't be sold
"""
for record in self:
if record.state == 'canceled':
raise exceptions.UserError("Canceled property can't be sold")
else:
record.state = 'sold'
def cancel_property(self):
"""Cancel property
Raises:
exceptions.UserError: sold property can't be canceled
"""
for record in self:
if record.state == 'sold':
raise exceptions.UserError("Sold property can't be canceled")
else:
record.state = 'canceled'
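    # A minimal sketch of the computed-field behaviour above (hypothetical
    # values): with two offers priced 100.0 and 150.0, _compute_best_offer
    # assigns best_price = max([100.0, 150.0], default=0) == 150.0, and 0
    # when the property has no offer yet.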
| 28.469925
| 105
| 0.589727
|
from odoo import models
from odoo import fields
from odoo import api
from odoo import exceptions
import datetime
class HouseLocation(models.Model):
_name = 'house.location'
_description = 'House Location'
name = fields.Char(string="Title", required=True, help="",)
description = fields.Text(string="Description", help="",)
postcode = fields.Char(string="Postcode", help="",)
date_availability = fields.Date(string="Available From", default=fields.Date.today(
) + datetime.timedelta(days=90), help="",)
expected_price = fields.Float(
string="Expected Price", required=True, help="",)
selling_price = fields.Float(
string="Selling Price", readonly=True, help="",)
bedrooms = fields.Integer(string="Bedrooms", default=2, help="",)
living_area = fields.Integer(
string="Living Area (sqm)", default=0, help="",)
facades = fields.Integer(string="Facades", help="",)
garage = fields.Boolean(string="Garage", help="",)
garden = fields.Boolean(string="Garden", help="",)
garden_area = fields.Integer(
string="Garden Area (sqm)", default=0, help="",)
garden_orientation = fields.Selection(
string="Garden Orientation",
selection=[
("north", "North"),
("south", "South"),
("east", "East"),
("west", "West"),
],
help="",
)
active = fields.Boolean(string="Active", default=True, help="",)
state = fields.Selection(
string="Status",
selection=[
("new", "New"),
("offer received", "Offer Received"),
("offer accepted", "Offer Accepted"),
("sold", "Sold"),
("canceled", "Canceled"),
],
default='new',
readonly=True,
)
total_area = fields.Integer(
string="Total Area", compute="_compute_total_area", help="")
best_price = fields.Float(
string="Best Offer", compute="_compute_best_offer", default=0, readonly=True, help="")
property_type_id = fields.Many2one(
"house.location.type", string="Property Type",)
tag_ids = fields.Many2many("house.location.tag", string="Property Tag",)
salesperson = fields.Many2one(
"res.users", string="Salesman", default=lambda self: self.env.user, copy=False, help="")
buyer = fields.Many2one("res.partner", string="Buyer", help="",)
offer_ids = fields.One2many(
"house.location.offer", "property_id", string="Offers")
_sql_constraints = [
]
@api.constrains('expected_price')
def _check_expected_price(self):
for record in self:
if (record.expected_price < 0):
raise exceptions.ValidationError(
"Expected Price must be positive")
@api.constrains('selling_price')
def _check_selling_price(self):
for record in self:
if (record.selling_price < 0):
raise exceptions.ValidationError(
"Selling Price must be positive")
@api.constrains('bedrooms')
def _check_bedrooms(self):
for record in self:
if (record.bedrooms < 1):
raise exceptions.ValidationError(
"Bedrooms must be positive and greater than 1")
@api.constrains('living_area')
def _check_living_area(self):
for record in self:
if (record.living_area < 0):
raise exceptions.ValidationError(
"Living Area must be positive")
@api.constrains('facades')
def _check_facades(self):
for record in self:
if (record.facades < 0):
raise exceptions.ValidationError("facades must be positive")
@api.constrains('garden_area')
def _check_garden_area(self):
for record in self:
if (record.garden_area < 0):
raise exceptions.ValidationError(
"Garden Area must be positive")
    def unlink(self):
        for record in self:
            if record.state not in ("new", "canceled"):
                raise exceptions.UserError(
                    "only 'new' and 'canceled' property can be deleted")
        return super(HouseLocation, self).unlink()
@api.depends('living_area', 'garden_area')
def _compute_total_area(self):
for record in self:
record.total_area = int(record.living_area) + \
int(record.garden_area)
    @api.depends('offer_ids')
    def _compute_best_offer(self):
        for record in self:
            record.best_price = max(record.offer_ids.mapped('price'), default=0)
@api.onchange("garden")
def _onchange_garden(self):
if self.garden:
self.garden_area = 10
self.garden_orientation = 'north'
else:
self.garden_area = 0
self.garden_orientation = None
def sold_property(self):
for record in self:
if record.state == 'canceled':
raise exceptions.UserError("Canceled property can't be sold")
else:
record.state = 'sold'
def cancel_property(self):
for record in self:
if record.state == 'sold':
raise exceptions.UserError("Sold property can't be canceled")
else:
record.state = 'canceled'
| true
| true
|
1c449c41e70e7280767ce5dd935f25659bbe4897
| 2,825
|
py
|
Python
|
salt/modules/scsi.py
|
Noah-Huppert/salt
|
998c382f5f2c3b4cbf7d96aa6913ada6993909b3
|
[
"Apache-2.0"
] | 19
|
2016-01-29T14:37:52.000Z
|
2022-03-30T18:08:01.000Z
|
salt/modules/scsi.py
|
Noah-Huppert/salt
|
998c382f5f2c3b4cbf7d96aa6913ada6993909b3
|
[
"Apache-2.0"
] | 223
|
2016-03-02T16:39:41.000Z
|
2022-03-03T12:26:35.000Z
|
salt/modules/scsi.py
|
Noah-Huppert/salt
|
998c382f5f2c3b4cbf7d96aa6913ada6993909b3
|
[
"Apache-2.0"
] | 64
|
2016-02-04T19:45:26.000Z
|
2021-12-15T02:02:31.000Z
|
# -*- coding: utf-8 -*-
"""
SCSI administration module
"""
from __future__ import absolute_import, print_function, unicode_literals
import logging
import os.path
import salt.utils.path
log = logging.getLogger(__name__)
__func_alias__ = {"ls_": "ls"}
def ls_(get_size=True):
"""
List SCSI devices, with details
CLI Examples:
.. code-block:: bash
salt '*' scsi.ls
salt '*' scsi.ls get_size=False
get_size : True
Get the size information for scsi devices. This option
should be set to False for older OS distributions (RHEL6 and older)
due to lack of support for the '-s' option in lsscsi.
.. versionadded:: 2015.5.10
"""
if not salt.utils.path.which("lsscsi"):
__context__["retcode"] = 1
return "scsi.ls not available - lsscsi command not found"
if get_size:
cmd = "lsscsi -dLsv"
else:
cmd = "lsscsi -dLv"
ret = {}
res = __salt__["cmd.run_all"](cmd)
rc = res.get("retcode", -1)
if rc != 0:
__context__["retcode"] = rc
error = res.get("stderr", "").split("\n")[0]
if error == "lsscsi: invalid option -- 's'":
return "{0} - try get_size=False".format(error)
return res.get("stderr", "").split("\n")[0]
data = res.get("stdout", "")
for line in data.splitlines():
if line.startswith("["):
size = None
major = None
minor = None
comps = line.strip().split()
key = comps[0]
if get_size:
size = comps.pop()
majmin = comps.pop()
if majmin.startswith("["):
major, minor = majmin.replace("[", "").replace("]", "").split(":")
device = comps.pop()
model = " ".join(comps[3:])
ret[key] = {
"lun": key.replace("[", "").replace("]", ""),
"size": size,
"major": major,
"minor": minor,
"device": device,
"model": model,
}
elif line.startswith(" "):
if line.strip().startswith("dir"):
comps = line.strip().split()
ret[key]["dir"] = [comps[1], comps[2].replace("[", "").replace("]", "")]
else:
comps = line.strip().split("=")
ret[key][comps[0]] = comps[1]
return ret
def rescan_all(host):
"""
    Rescan all SCSI devices on the given host adapter
CLI Example:
.. code-block:: bash
salt '*' scsi.rescan_all 0
"""
if os.path.isdir("/sys/class/scsi_host/host{0}".format(host)):
cmd = 'echo "- - -" > /sys/class/scsi_host/host{0}/scan'.format(host)
else:
return "Host {0} does not exist".format(host)
return __salt__["cmd.run"](cmd).splitlines()
| 27.427184
| 88
| 0.513274
|
from __future__ import absolute_import, print_function, unicode_literals
import logging
import os.path
import salt.utils.path
log = logging.getLogger(__name__)
__func_alias__ = {"ls_": "ls"}
def ls_(get_size=True):
if not salt.utils.path.which("lsscsi"):
__context__["retcode"] = 1
return "scsi.ls not available - lsscsi command not found"
if get_size:
cmd = "lsscsi -dLsv"
else:
cmd = "lsscsi -dLv"
ret = {}
res = __salt__["cmd.run_all"](cmd)
rc = res.get("retcode", -1)
if rc != 0:
__context__["retcode"] = rc
error = res.get("stderr", "").split("\n")[0]
if error == "lsscsi: invalid option -- 's'":
return "{0} - try get_size=False".format(error)
return res.get("stderr", "").split("\n")[0]
data = res.get("stdout", "")
for line in data.splitlines():
if line.startswith("["):
size = None
major = None
minor = None
comps = line.strip().split()
key = comps[0]
if get_size:
size = comps.pop()
majmin = comps.pop()
if majmin.startswith("["):
major, minor = majmin.replace("[", "").replace("]", "").split(":")
device = comps.pop()
model = " ".join(comps[3:])
ret[key] = {
"lun": key.replace("[", "").replace("]", ""),
"size": size,
"major": major,
"minor": minor,
"device": device,
"model": model,
}
elif line.startswith(" "):
if line.strip().startswith("dir"):
comps = line.strip().split()
ret[key]["dir"] = [comps[1], comps[2].replace("[", "").replace("]", "")]
else:
comps = line.strip().split("=")
ret[key][comps[0]] = comps[1]
return ret
def rescan_all(host):
if os.path.isdir("/sys/class/scsi_host/host{0}".format(host)):
cmd = 'echo "- - -" > /sys/class/scsi_host/host{0}/scan'.format(host)
else:
return "Host {0} does not exist".format(host)
return __salt__["cmd.run"](cmd).splitlines()
| true
| true
|
1c449cfee90b6a41e6c2ac62cbebd76da108bf37
| 241
|
py
|
Python
|
approxhaynet/runsinglecellLFP.py
|
ModelDBRepository/237469
|
15f71106b4f99577ee503178aaedbf2781ec61f6
|
[
"CC-BY-4.0"
] | null | null | null |
approxhaynet/runsinglecellLFP.py
|
ModelDBRepository/237469
|
15f71106b4f99577ee503178aaedbf2781ec61f6
|
[
"CC-BY-4.0"
] | null | null | null |
approxhaynet/runsinglecellLFP.py
|
ModelDBRepository/237469
|
15f71106b4f99577ee503178aaedbf2781ec61f6
|
[
"CC-BY-4.0"
] | null | null | null |
import simseedburst_func_withLFP
data = simseedburst_func_withLFP.simseedburst_func(Nmc=1, tstop=11000,mutID=0,rdSeed=1,Econ=0.00039,Icon=0.0006,nseg=5,rateCoeff=1.0,gNoiseCoeff=1.07,gSynCoeff=1.07,Ncells2save=1,sparsedt=1.0,Nsyns2save=1)
| 48.2
| 205
| 0.821577
|
import simseedburst_func_withLFP
data = simseedburst_func_withLFP.simseedburst_func(Nmc=1, tstop=11000,mutID=0,rdSeed=1,Econ=0.00039,Icon=0.0006,nseg=5,rateCoeff=1.0,gNoiseCoeff=1.07,gSynCoeff=1.07,Ncells2save=1,sparsedt=1.0,Nsyns2save=1)
| true
| true
|
1c449dd14a675bb11a5d0b4e04f1b1f2dd18faf5
| 1,083
|
py
|
Python
|
poptimizer/data/adapters/gateways/tests/test_cbr.py
|
poliyev/poptimizer
|
71935c4365b0572e65b6d3172f925701dda283db
|
[
"Unlicense"
] | 94
|
2018-12-04T13:14:16.000Z
|
2022-03-31T17:53:11.000Z
|
poptimizer/data/adapters/gateways/tests/test_cbr.py
|
poliyev/poptimizer
|
71935c4365b0572e65b6d3172f925701dda283db
|
[
"Unlicense"
] | 55
|
2019-11-25T21:18:50.000Z
|
2022-02-16T07:06:50.000Z
|
poptimizer/data/adapters/gateways/tests/test_cbr.py
|
poliyev/poptimizer
|
71935c4365b0572e65b6d3172f925701dda283db
|
[
"Unlicense"
] | 25
|
2019-05-14T19:04:09.000Z
|
2022-03-21T05:22:28.000Z
|
"""Тесты загрузки данных о максимальных ставках депозитов с сайта ЦБР."""
from datetime import datetime
import pandas as pd
import pytest
from poptimizer.data.adapters.gateways import cbr
from poptimizer.data.adapters.html import parser
from poptimizer.shared import col
def test_date_parser():
"""Проверка обработки разных декад в датах."""
assert cbr.date_parser("III.05.2021") == datetime(2021, 5, 21)
assert cbr.date_parser("II.04.2021") == datetime(2021, 4, 11)
assert cbr.date_parser("I.03.2021") == datetime(2021, 3, 1)
assert cbr.date_parser("IV.03.2021") is None
DF = pd.DataFrame(
[[4.1], [3.9]],
index=["2020-01-20", "2014-11-25"],
columns=[col.RF],
)
DF_REZ = pd.DataFrame(
[[0.039], [0.041]],
index=["2014-11-25", "2020-01-20"],
columns=[col.RF],
)
@pytest.mark.asyncio
async def test_loader(mocker):
"""Сортировка полученных данных и перевод в проценты."""
mocker.patch.object(parser, "get_df_from_url", return_value=DF)
loader = cbr.RFGateway()
pd.testing.assert_frame_equal(await loader(), DF_REZ)
| 27.769231
| 73
| 0.689751
|
from datetime import datetime
import pandas as pd
import pytest
from poptimizer.data.adapters.gateways import cbr
from poptimizer.data.adapters.html import parser
from poptimizer.shared import col
def test_date_parser():
assert cbr.date_parser("III.05.2021") == datetime(2021, 5, 21)
assert cbr.date_parser("II.04.2021") == datetime(2021, 4, 11)
assert cbr.date_parser("I.03.2021") == datetime(2021, 3, 1)
assert cbr.date_parser("IV.03.2021") is None
DF = pd.DataFrame(
[[4.1], [3.9]],
index=["2020-01-20", "2014-11-25"],
columns=[col.RF],
)
DF_REZ = pd.DataFrame(
[[0.039], [0.041]],
index=["2014-11-25", "2020-01-20"],
columns=[col.RF],
)
@pytest.mark.asyncio
async def test_loader(mocker):
mocker.patch.object(parser, "get_df_from_url", return_value=DF)
loader = cbr.RFGateway()
pd.testing.assert_frame_equal(await loader(), DF_REZ)
| true
| true
|
1c449e1964a8cbe044f07acdc9c0bc3aa308f42c
| 2,810
|
py
|
Python
|
setup.py
|
NarekA/olmos
|
740c1ec6351d96bcea4969ab87afdfb4686efbaf
|
[
"MIT"
] | null | null | null |
setup.py
|
NarekA/olmos
|
740c1ec6351d96bcea4969ab87afdfb4686efbaf
|
[
"MIT"
] | null | null | null |
setup.py
|
NarekA/olmos
|
740c1ec6351d96bcea4969ab87afdfb4686efbaf
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
.. currentmodule:: setup.py
.. moduleauthor:: NarekA <my_email>
This file is used to create the package we'll publish to PyPI.
"""
import importlib.util
import os
from pathlib import Path
from setuptools import setup, find_packages, Command
from codecs import open # Use a consistent encoding.
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the relevant file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
# Get the base version from the library. (We'll find it in the `version.py`
# file in the src directory, but we'll bypass actually loading up the library.)
vspec = importlib.util.spec_from_file_location(
"version",
str(Path(__file__).resolve().parent / 'olmos' / "version.py")
)
vmod = importlib.util.module_from_spec(vspec)
vspec.loader.exec_module(vmod)
version = getattr(vmod, '__version__')
# If the environment has a build number set...
if os.getenv('buildnum') is not None:
# ...append it to the version.
version = "{version}.{buildnum}".format(
version=version,
buildnum=os.getenv('buildnum')
)
setup(
name='olmos',
description="A tool to help you Stand Up and Deliver",
long_description=long_description,
packages=find_packages(
exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
version=version,
install_requires=[
# Include your dependencies here.
# Here are a couple of examples...
# 'numpy>=1.13.3,<2',
# 'measurement>=1.8.0,<2'
'click>=7.0,<8'
],
entry_points="""
[console_scripts]
olmos=olmos.cli:cli
""",
=0.0.1"">
    python_requires=">=3.7",
license='MIT',
author='NarekA',
author_email='my_email',
# Use the URL to the github repo.
url='https://github.com/NarekA/olmos',
download_url=(
f'https://github.com/NarekA/'
f'olmos/archive/{version}.tar.gz'
),
keywords=[
# Add package keywords here.
],
# See https://PyPI.python.org/PyPI?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for.
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries',
# Pick your license as you wish (should match "license" above).
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 3.7',
],
include_package_data=True
)
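# Typical workflow for a layout like this (illustrative commands, not taken
# from the project's docs): ``pip install -e .`` installs the package in
# editable mode, after which the console_scripts entry point above exposes
# the CLI as ``olmos``.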
| 29.893617
| 79
| 0.649466
|
import importlib.util
import os
from pathlib import Path
from setuptools import setup, find_packages, Command
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
# file in the src directory, but we'll bypass actually loading up the library.)
vspec = importlib.util.spec_from_file_location(
"version",
str(Path(__file__).resolve().parent / 'olmos' / "version.py")
)
vmod = importlib.util.module_from_spec(vspec)
vspec.loader.exec_module(vmod)
version = getattr(vmod, '__version__')
if os.getenv('buildnum') is not None:
version = "{version}.{buildnum}".format(
version=version,
buildnum=os.getenv('buildnum')
)
setup(
name='olmos',
description="A tool to help you Stand Up and Deliver",
long_description=long_description,
packages=find_packages(
exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
version=version,
install_requires=[
'click>=7.0,<8'
],
entry_points="""
[console_scripts]
olmos=olmos.cli:cli
""",
=0.0.1"">
    python_requires=">=3.7",
license='MIT',
author='NarekA',
author_email='my_email',
url='https://github.com/NarekA/olmos',
download_url=(
f'https://github.com/NarekA/'
f'olmos/archive/{version}.tar.gz'
),
keywords=[
],
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.7',
],
include_package_data=True
)
| true
| true
|
1c449e4c0f9b6812e2ad6eb27f9fcdb6e97545d0
| 9,541
|
py
|
Python
|
Gabarito FlappyBird.py
|
eduardomoraespy/flappy_bird_with_neural-network
|
d394b9a5dea6eef684ed7a0179cfe8272c1e3bdc
|
[
"MIT"
] | null | null | null |
Gabarito FlappyBird.py
|
eduardomoraespy/flappy_bird_with_neural-network
|
d394b9a5dea6eef684ed7a0179cfe8272c1e3bdc
|
[
"MIT"
] | null | null | null |
Gabarito FlappyBird.py
|
eduardomoraespy/flappy_bird_with_neural-network
|
d394b9a5dea6eef684ed7a0179cfe8272c1e3bdc
|
[
"MIT"
] | null | null | null |
import pygame
import os
import random
import neat
ai_jogando = True
geracao = 0
TELA_LARGURA = 500
TELA_ALTURA = 700
IMAGEM_CANO = pygame.transform.scale2x(pygame.image.load(os.path.join('imgs', 'pipe.png')))
IMAGEM_CHAO = pygame.transform.scale2x(pygame.image.load(os.path.join('imgs', 'base.png')))
IMAGEM_BACKGROUND = pygame.transform.scale2x(pygame.image.load(os.path.join('imgs', 'bg.png')))
IMAGENS_PASSARO = [
pygame.transform.scale2x(pygame.image.load(os.path.join('imgs', 'bird1.png'))),
pygame.transform.scale2x(pygame.image.load(os.path.join('imgs', 'bird2.png'))),
pygame.transform.scale2x(pygame.image.load(os.path.join('imgs', 'bird3.png'))),
]
pygame.font.init()
FONTE_PONTOS = pygame.font.SysFont('arial', 50)
pygame.mixer.init()
pygame.mixer.music.load('flappy_bird_music.mp3')
pygame.mixer.music.play()
pygame.event.wait()
class Passaro:
IMGS = IMAGENS_PASSARO
    # rotation animation settings
ROTACAO_MAXIMA = 25
VELOCIDADE_ROTACAO = 20
TEMPO_ANIMACAO = 5
def __init__(self, x, y):
self.x = x
self.y = y
self.angulo = 0
self.velocidade = 0
self.altura = self.y
self.tempo = 0
self.contagem_imagem = 0
self.imagem = self.IMGS[0]
def pular(self):
self.velocidade = -10.5
self.tempo = 0
self.altura = self.y
def mover(self):
        # compute the displacement
self.tempo += 1
deslocamento = 1.5 * (self.tempo**2) + self.velocidade * self.tempo
        # clamp the displacement
if deslocamento > 16:
deslocamento = 16
elif deslocamento < 0:
deslocamento -= 2
self.y += deslocamento
        # the bird's tilt angle
if deslocamento < 0 or self.y < (self.altura + 50):
if self.angulo < self.ROTACAO_MAXIMA:
self.angulo = self.ROTACAO_MAXIMA
else:
if self.angulo > -90:
self.angulo -= self.VELOCIDADE_ROTACAO
def desenhar(self, tela):
        # pick which bird sprite to use
self.contagem_imagem += 1
if self.contagem_imagem < self.TEMPO_ANIMACAO:
self.imagem = self.IMGS[0]
elif self.contagem_imagem < self.TEMPO_ANIMACAO*2:
self.imagem = self.IMGS[1]
elif self.contagem_imagem < self.TEMPO_ANIMACAO*3:
self.imagem = self.IMGS[2]
elif self.contagem_imagem < self.TEMPO_ANIMACAO*4:
self.imagem = self.IMGS[1]
elif self.contagem_imagem >= self.TEMPO_ANIMACAO*4 + 1:
self.imagem = self.IMGS[0]
self.contagem_imagem = 0
        # if the bird is falling, do not flap the wings
if self.angulo <= -80:
self.imagem = self.IMGS[1]
self.contagem_imagem = self.TEMPO_ANIMACAO*2
        # draw the rotated image
imagem_rotacionada = pygame.transform.rotate(self.imagem, self.angulo)
pos_centro_imagem = self.imagem.get_rect(topleft=(self.x, self.y)).center
retangulo = imagem_rotacionada.get_rect(center=pos_centro_imagem)
tela.blit(imagem_rotacionada, retangulo.topleft)
def get_mask(self):
return pygame.mask.from_surface(self.imagem)
class Cano:
DISTANCIA = 200
VELOCIDADE = 5
def __init__(self, x):
self.x = x
self.altura = 0
self.pos_topo = 0
self.pos_base = 0
self.CANO_TOPO = pygame.transform.flip(IMAGEM_CANO, False, True)
self.CANO_BASE = IMAGEM_CANO
self.passou = False
self.definir_altura()
def definir_altura(self):
self.altura = random.randrange(50, 450)
self.pos_topo = self.altura - self.CANO_TOPO.get_height()
self.pos_base = self.altura + self.DISTANCIA
def mover(self):
self.x -= self.VELOCIDADE
def desenhar(self, tela):
tela.blit(self.CANO_TOPO, (self.x, self.pos_topo))
tela.blit(self.CANO_BASE, (self.x, self.pos_base))
def colidir(self, passaro):
passaro_mask = passaro.get_mask()
topo_mask = pygame.mask.from_surface(self.CANO_TOPO)
base_mask = pygame.mask.from_surface(self.CANO_BASE)
distancia_topo = (self.x - passaro.x, self.pos_topo - round(passaro.y))
distancia_base = (self.x - passaro.x, self.pos_base - round(passaro.y))
topo_ponto = passaro_mask.overlap(topo_mask, distancia_topo)
base_ponto = passaro_mask.overlap(base_mask, distancia_base)
if base_ponto or topo_ponto:
return True
else:
return False
class Chao:
VELOCIDADE = 5
LARGURA = IMAGEM_CHAO.get_width()
IMAGEM = IMAGEM_CHAO
def __init__(self, y):
self.y = y
self.x1 = 0
self.x2 = self.LARGURA
def mover(self):
self.x1 -= self.VELOCIDADE
self.x2 -= self.VELOCIDADE
if self.x1 + self.LARGURA < 0:
self.x1 = self.x2 + self.LARGURA
if self.x2 + self.LARGURA < 0:
self.x2 = self.x1 + self.LARGURA
def desenhar(self, tela):
tela.blit(self.IMAGEM, (self.x1, self.y))
tela.blit(self.IMAGEM, (self.x2, self.y))
def desenhar_tela(tela, passaros, canos, chao, pontos):
tela.blit(IMAGEM_BACKGROUND, (0, 0))
for passaro in passaros:
passaro.desenhar(tela)
for cano in canos:
cano.desenhar(tela)
texto = FONTE_PONTOS.render(f"Pontuação: {pontos}", 1, (255, 255, 255))
tela.blit(texto, (TELA_LARGURA - 10 - texto.get_width(), 10))
if ai_jogando:
texto = FONTE_PONTOS.render(f"Geração: {geracao}", 1, (255, 255, 255))
tela.blit(texto, (10, 10))
chao.desenhar(tela)
pygame.display.update()
def main(genomas, config): # fitness function
global geracao
geracao += 1
if ai_jogando:
redes = []
lista_genomas = []
passaros = []
for _, genoma in genomas:
rede = neat.nn.FeedForwardNetwork.create(genoma, config)
redes.append(rede)
genoma.fitness = 0
lista_genomas.append(genoma)
passaros.append(Passaro(230, 350))
else:
passaros = [Passaro(230, 350)]
chao = Chao(730)
canos = [Cano(700)]
tela = pygame.display.set_mode((TELA_LARGURA, TELA_ALTURA))
pontos = 0
relogio = pygame.time.Clock()
rodando = True
while rodando:
relogio.tick(30)
        # handle user interaction
for evento in pygame.event.get():
if evento.type == pygame.QUIT:
rodando = False
pygame.quit()
quit()
if not ai_jogando:
if evento.type == pygame.KEYDOWN:
if evento.key == pygame.K_SPACE:
for passaro in passaros:
passaro.pular()
indice_cano = 0
if len(passaros) > 0:
if len(canos) > 1 and passaros[0].x > (canos[0].x + canos[0].CANO_TOPO.get_width()):
indice_cano = 1
else:
rodando = False
break
        # move everything
for i, passaro in enumerate(passaros):
passaro.mover()
            # slightly increase the bird's fitness
lista_genomas[i].fitness += 0.1
output = redes[i].activate((passaro.y,
abs(passaro.y - canos[indice_cano].altura),
abs(passaro.y - canos[indice_cano].pos_base)))
# -1 e 1 -> se o output for > 0.5 então o passaro pula
if output[0] > 0.5:
passaro.pular()
chao.mover()
adicionar_cano = False
remover_canos = []
        for cano in canos:
            # iterate backwards so that pop() does not skip the next bird
            for i in range(len(passaros) - 1, -1, -1):
                passaro = passaros[i]
                if cano.colidir(passaro):
                    passaros.pop(i)
                    if ai_jogando:
                        lista_genomas[i].fitness -= 1
                        lista_genomas.pop(i)
                        redes.pop(i)
                elif not cano.passou and passaro.x > cano.x:
                    cano.passou = True
                    adicionar_cano = True
cano.mover()
if cano.x + cano.CANO_TOPO.get_width() < 0:
remover_canos.append(cano)
        if adicionar_cano:
            pontos += 1
            canos.append(Cano(600))
            if ai_jogando:  # genomes only exist in AI mode
                for genoma in lista_genomas:
                    genoma.fitness += 5
for cano in remover_canos:
canos.remove(cano)
        # again iterate backwards, because birds may be removed mid-loop
        for i in range(len(passaros) - 1, -1, -1):
            passaro = passaros[i]
            if (passaro.y + passaro.imagem.get_height()) > chao.y or passaro.y < 0:
                passaros.pop(i)
                if ai_jogando:
                    lista_genomas.pop(i)
                    redes.pop(i)
desenhar_tela(tela, passaros, canos, chao, pontos)
def rodar(caminho_config):
config = neat.config.Config(neat.DefaultGenome,
neat.DefaultReproduction,
neat.DefaultSpeciesSet,
neat.DefaultStagnation,
caminho_config)
populacao = neat.Population(config)
populacao.add_reporter(neat.StdOutReporter(True))
populacao.add_reporter(neat.StatisticsReporter())
if ai_jogando:
populacao.run(main, 50)
else:
main(None, None)
if __name__ == '__main__':
caminho = os.path.dirname(__file__)
caminho_config = os.path.join(caminho, 'config.txt')
rodar(caminho_config)
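
# A rough sketch (not the original file) of the sections 'config.txt' is
# expected to define for neat-python. The network reads 3 inputs (the bird's y
# and its distances to the pipe gap) and emits 1 output, so num_inputs and
# num_outputs must match; a real config also needs the full set of
# [DefaultGenome] bias/weight/mutation parameters omitted here.
#
#   [NEAT]
#   fitness_criterion     = max
#   fitness_threshold     = 1000
#   pop_size              = 50
#   reset_on_extinction   = False
#
#   [DefaultGenome]
#   num_inputs            = 3
#   num_hidden            = 0
#   num_outputs           = 1
#   activation_default    = tanh
#   ...
#
#   [DefaultSpeciesSet]
#   compatibility_threshold = 3.0
#
#   [DefaultStagnation]
#   species_fitness_func  = max
#   max_stagnation        = 20
#
#   [DefaultReproduction]
#   elitism               = 2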
from __future__ import absolute_import
import logging
import tempfile
import os.path
from pip.compat import samefile
from pip.exceptions import BadCommand
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip._vendor.six.moves.urllib import request as urllib_request
from pip.utils import display_path, rmtree
from pip.vcs import vcs, VersionControl
urlsplit = urllib_parse.urlsplit
urlunsplit = urllib_parse.urlunsplit
logger = logging.getLogger(__name__)
class Git(VersionControl):
name = 'git'
dirname = '.git'
repo_name = 'clone'
schemes = (
'git', 'git+http', 'git+https', 'git+ssh', 'git+git', 'git+file',
)
def __init__(self, url=None, *args, **kwargs):
# Works around an apparent Git bug
# (see http://article.gmane.org/gmane.comp.version-control.git/146500)
if url:
scheme, netloc, path, query, fragment = urlsplit(url)
if scheme.endswith('file'):
initial_slashes = path[:-len(path.lstrip('/'))]
newpath = (
initial_slashes +
urllib_request.url2pathname(path)
.replace('\\', '/').lstrip('/')
)
url = urlunsplit((scheme, netloc, newpath, query, fragment))
after_plus = scheme.find('+') + 1
url = scheme[:after_plus] + urlunsplit(
(scheme[after_plus:], netloc, newpath, query, fragment),
)
super(Git, self).__init__(url, *args, **kwargs)
def export(self, location):
"""Export the Git repository at the url to the destination location"""
temp_dir = tempfile.mkdtemp('-export', 'pip-')
self.unpack(temp_dir)
try:
if not location.endswith('/'):
location = location + '/'
self.run_command(
['checkout-index', '-a', '-f', '--prefix', location],
show_stdout=False, cwd=temp_dir)
finally:
rmtree(temp_dir)
def check_rev_options(self, rev, dest, rev_options):
"""Check the revision options before checkout to compensate that tags
and branches may need origin/ as a prefix.
Returns the SHA1 of the branch or tag if found.
"""
revisions = self.get_short_refs(dest)
origin_rev = 'origin/%s' % rev
if origin_rev in revisions:
# remote branch
return [revisions[origin_rev]]
elif rev in revisions:
# a local tag or branch name
return [revisions[rev]]
else:
logger.warning(
"Could not find a tag or branch '%s', assuming commit.", rev,
)
return rev_options
def check_version(self, dest, rev_options):
"""
Compare the current sha to the ref. ref may be a branch or tag name,
but current rev will always point to a sha. This means that a branch
or tag will never compare as True. So this ultimately only matches
against exact shas.
"""
return self.get_revision(dest).startswith(rev_options[0])
def switch(self, dest, url, rev_options):
self.run_command(['config', 'remote.origin.url', url], cwd=dest)
self.run_command(['checkout', '-q'] + rev_options, cwd=dest)
self.update_submodules(dest)
def update(self, dest, rev_options):
# First fetch changes from the default remote
self.run_command(['fetch', '-q'], cwd=dest)
        # Then reset to the wanted revision (maybe even origin/master)
if rev_options:
rev_options = self.check_rev_options(
rev_options[0], dest, rev_options,
)
self.run_command(['reset', '--hard', '-q'] + rev_options, cwd=dest)
        # update submodules
self.update_submodules(dest)
def obtain(self, dest):
url, rev = self.get_url_rev()
if rev:
rev_options = [rev]
rev_display = ' (to %s)' % rev
else:
rev_options = ['origin/master']
rev_display = ''
if self.check_destination(dest, url, rev_options, rev_display):
logger.info(
'Cloning %s%s to %s', url, rev_display, display_path(dest),
)
self.run_command(['clone', '-q', url, dest])
if rev:
rev_options = self.check_rev_options(rev, dest, rev_options)
            # Only do a checkout if the requested revision differs from HEAD
if not self.check_version(dest, rev_options):
self.run_command(
['checkout', '-q'] + rev_options,
cwd=dest,
)
        # the repo may contain submodules
self.update_submodules(dest)
def get_url(self, location):
url = self.run_command(
['config', 'remote.origin.url'],
show_stdout=False, cwd=location)
return url.strip()
def get_revision(self, location):
current_rev = self.run_command(
['rev-parse', 'HEAD'], show_stdout=False, cwd=location)
return current_rev.strip()
def get_full_refs(self, location):
"""Yields tuples of (commit, ref) for branches and tags"""
output = self.run_command(['show-ref'],
show_stdout=False, cwd=location)
for line in output.strip().splitlines():
commit, ref = line.split(' ', 1)
yield commit.strip(), ref.strip()
def is_ref_remote(self, ref):
return ref.startswith('refs/remotes/')
def is_ref_branch(self, ref):
return ref.startswith('refs/heads/')
def is_ref_tag(self, ref):
return ref.startswith('refs/tags/')
def is_ref_commit(self, ref):
"""A ref is a commit sha if it is not anything else"""
return not any((
self.is_ref_remote(ref),
self.is_ref_branch(ref),
self.is_ref_tag(ref),
))
# Should deprecate `get_refs` since it's ambiguous
def get_refs(self, location):
return self.get_short_refs(location)
def get_short_refs(self, location):
"""Return map of named refs (branches or tags) to commit hashes."""
rv = {}
for commit, ref in self.get_full_refs(location):
ref_name = None
if self.is_ref_remote(ref):
ref_name = ref[len('refs/remotes/'):]
elif self.is_ref_branch(ref):
ref_name = ref[len('refs/heads/'):]
elif self.is_ref_tag(ref):
ref_name = ref[len('refs/tags/'):]
if ref_name is not None:
rv[ref_name] = commit
return rv
def _get_subdirectory(self, location):
"""Return the relative path of setup.py to the git repo root."""
# find the repo root
git_dir = self.run_command(['rev-parse', '--git-dir'],
show_stdout=False, cwd=location).strip()
if not os.path.isabs(git_dir):
git_dir = os.path.join(location, git_dir)
root_dir = os.path.join(git_dir, '..')
# find setup.py
orig_location = location
while not os.path.exists(os.path.join(location, 'setup.py')):
last_location = location
location = os.path.dirname(location)
if location == last_location:
# We've traversed up to the root of the filesystem without
# finding setup.py
logger.warning(
"Could not find setup.py for directory %s (tried all "
"parent directories)",
orig_location,
)
return None
# relative path of setup.py to repo root
if samefile(root_dir, location):
return None
return os.path.relpath(location, root_dir)
def get_src_requirement(self, dist, location):
repo = self.get_url(location)
if not repo.lower().startswith('git:'):
repo = 'git+' + repo
egg_project_name = dist.egg_name().split('-', 1)[0]
if not repo:
return None
current_rev = self.get_revision(location)
req = '%s@%s#egg=%s' % (repo, current_rev, egg_project_name)
subdirectory = self._get_subdirectory(location)
if subdirectory:
req += '&subdirectory=' + subdirectory
return req
def get_url_rev(self):
"""
Prefixes stub URLs like 'user@hostname:user/repo.git' with 'ssh://'.
        That's required because although they use SSH they sometimes don't
        work with an ssh:// scheme (e.g. GitHub). But we need a scheme for
        parsing. Hence we remove it again afterwards and return it as a stub.
"""
if '://' not in self.url:
assert 'file:' not in self.url
self.url = self.url.replace('git+', 'git+ssh://')
url, rev = super(Git, self).get_url_rev()
url = url.replace('ssh://', '')
else:
url, rev = super(Git, self).get_url_rev()
return url, rev
def update_submodules(self, location):
if not os.path.exists(os.path.join(location, '.gitmodules')):
return
self.run_command(
['submodule', 'update', '--init', '--recursive', '-q'],
cwd=location,
)
@classmethod
def controls_location(cls, location):
if super(Git, cls).controls_location(location):
return True
try:
r = cls().run_command(['rev-parse'],
cwd=location,
show_stdout=False,
on_returncode='ignore')
return not r
except BadCommand:
logger.debug("could not determine if %s is under git control "
"because git is not available", location)
return False
vcs.register(Git)
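
# A minimal usage sketch (not part of pip itself): the registration above lets
# callers look the backend up by name, and get_url_rev() splits a pip-style
# 'git+' URL into a plain clone URL and a revision. The URL below is
# hypothetical.
#
#     backend = vcs.get_backend('git')
#     repo = backend('git+https://example.com/some/repo.git@v1.0')
#     url, rev = repo.get_url_rev()
#     # url == 'https://example.com/some/repo.git', rev == 'v1.0'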
from unittest import TestCase
import simplejson as json
from mock import Mock
from pyqrllib.pyqrllib import bin2hstr
from qrl.core.misc import logger
from qrl.core.BlockHeader import BlockHeader
from qrl.core.Transaction import Transaction, TransferTransaction, CoinBase, TokenTransaction, \
TransferTokenTransaction, MessageTransaction
from qrl.crypto.misc import sha256
from qrl.generated import qrl_pb2
from tests.misc.helper import get_alice_xmss, get_bob_xmss
logger.initialize_default()
test_json_Simple = """{
"fee": "1",
"publicKey": "AQMAOOpjdQafgnLMGmYBs8dsIVGUVWA9NwA2uXx3mto1ZYVOOYO9VkKYxJri5/puKNS5VNjNWTmPEiWwjWFEhUruDg==",
"transfer": {
"addrsTo": [
"AQMAHWXX5ZrtXvvq5kJG4PMYTXxCQRQh6zhbow8sHABahevEQZz9"
],
"amounts": [
"100"
]
}
}"""
test_json_CoinBase = """{
"masterAddr": "AQMACCOCpS+LqcLTOtgHws3VvQhsLC/mPG6hO2MNEoCJTDo54cOA",
"nonce": "2",
"transactionHash": "x/Ph4JLnD0mpQ6Fi3osRCJm2CrHa/QxyYl+6b8GtzQE=",
"coinbase": {
"addrTo": "AQMAodonTmjIiwzPRI4LGRb6eJsB6y7U6a1WXOJkyTkHgqnGGsAv",
"amount": "90"
}
}"""
test_json_Token = """{
"fee": "1",
"publicKey": "AQMAOOpjdQafgnLMGmYBs8dsIVGUVWA9NwA2uXx3mto1ZYVOOYO9VkKYxJri5/puKNS5VNjNWTmPEiWwjWFEhUruDg==",
"token": {
"symbol": "UVJM",
"name": "UXVhbnR1bSBSZXNpc3RhbnQgTGVkZ2Vy",
"owner": "AQMXRj3NWBtnm0dU9GxkJRJUiaKCaJTjxCpZDvtoBkUM5r9ScWw=",
"decimals": "4",
"initialBalances": [
{
"address": "AQMAodonTmjIiwzPRI4LGRb6eJsB6y7U6a1WXOJkyTkHgqnGGsAv",
"amount": "400000000"
},
{
"address": "AQMAHWXX5ZrtXvvq5kJG4PMYTXxCQRQh6zhbow8sHABahevEQZz9",
"amount": "200000000"
}
]
}
}"""
test_json_TransferToken = """{
"fee": "1",
"publicKey": "AQMAOOpjdQafgnLMGmYBs8dsIVGUVWA9NwA2uXx3mto1ZYVOOYO9VkKYxJri5/puKNS5VNjNWTmPEiWwjWFEhUruDg==",
"transferToken": {
"tokenTxhash": "MDAwMDAwMDAwMDAwMDAw",
"addrsTo": [
"AQMAHWXX5ZrtXvvq5kJG4PMYTXxCQRQh6zhbow8sHABahevEQZz9"
],
"amounts": [
"200000"
]
}
}"""
test_json_MessageTransaction = """{
"fee": "1",
"publicKey": "AQMAOOpjdQafgnLMGmYBs8dsIVGUVWA9NwA2uXx3mto1ZYVOOYO9VkKYxJri5/puKNS5VNjNWTmPEiWwjWFEhUruDg==",
"message": {
"messageHash": "VGVzdCBNZXNzYWdl"
}
}"""
test_signature_Simple = "0000000a899e73cfbf8c57027f5a0f853b9906701ee378ad169d34ce45153f13" \
"3c3f3f6cd87250b2ad225ced6b8c902a5fa1ecfacaa6744f6f42323ee586d873" \
"f066388ab9f17ad396aed963678edeab3e6e35c082ecd7bb8ef568f2da92fb2a" \
"7336a7d58fe826663094cba678fe71cd3221bff405f9110acfa4ea58b6a908d4" \
"e3bcb5e555571dc4bd2201c87488c1c68162ebf51b59576a4c0cfdf034f2ab32" \
"9dc157345e34a68e794728d2b388cc0ea10d5918bf7954b4876deb03693ec44a" \
"6f2fd2ef34d07dc3cc49dfaec2612d9ae94881e3bbba86991e935d2b585be306" \
"7f9b91b5d20dce21742ce4cc0b20eb575c86b428a9f2a9ed1905d67250a8b0ba" \
"6b2c70ddc2b832c21ffdf526bfcba0596e4a9dd36dc95d42c27dc235cd482464" \
"ef27c8616a6e2195ccca792bb6511b2c2b45888878d05a27ea129627da356c0d" \
"e849840a2c40dabca5bdd462bc16f6b85a163b8727cf4a806814770fed8f91cc" \
"7106097585df2ff582aaa58cac02d7a6b8c13c655619f04bd95e7819e9353f9f" \
"c727e023ef1b89b72c3ea2c6c89edf4917071690984a1b4644ec523975fe2cbf" \
"113246184d6f5a5b0fc6e925605ef930009415ccb292db9983ecfcaca62601c1" \
"860418dcb73ca1958f83febb53cccd3b1b3767ac9d18a9446817ffbfccc7ebc7" \
"dd33e1876475e5d9d325092d1572b9211df2a1547ea08d49c2d3b6441bb89c38" \
"db466135b4c2ebe50885861560ce20bebb96ccbb166ddfeaad1507089eb385a6" \
"c40d7f94e04eb00d916ecb8fdc14908feb3db6f026e13d5bf7c60ff887ad5ccc" \
"9cd295327da6a2c091f82a98cb7ab34c2e49541714ee9f55b3963d7ee8f1f870" \
"73fbc5c75e1f86e78b149ac7fe764b326e18404203863641ba9fad2b1a038c7e" \
"c1abb0b3ec07183e749b4a57f8eacac0ee2192a9474f63adb35c91abb449fe50" \
"de1858342d3c9f0eac99611115c52e002470e43f95a1fa7b666263eebdc3433e" \
"68be9534c2552eaa461c93911102075ce3e10e1c4bad91b2bfb961e31ab8138a" \
"863f0e429a10fef69e6d318f8bd6cc7523b905c1e3ec097d668fbf478b30cb3d" \
"2cae1ba4ff33b481e05c9049756621e7627cc8860153fe641691993b28f5232b" \
"b150a5f9b0dd0af07834f9dcdf4aa77fc689fedb190164b356aeabcc3e531f66" \
"d30481369a93b10e1fdd4d426f23602184ca8fea5be3353e2d0902cc48df1246" \
"4d432e7dacc3e3556bf4aa2969caa0e7888ed3cef8410421ebef187a99361caf" \
"eda041df53fe1a494534e4c9117c48b3916843dc5252b5bdb56bb5d9effe5819" \
"d22174e5c62e47c124e5bd10102d9894e605827e5914d3c10818162ac5fa4030" \
"b0dbc56d08ada3884320309601580f85f94b7a1d180e33f0ede9a7d1b3a46259" \
"8de014c6953de1640f706c060e3b92e259e829752e42e70dea78e04e1ac9c5a8" \
"74a77d1d3cc7760c92aea2a133509efd579e8c9ad994c02e7a1411031698702c" \
"247726141291ec7ba50d58f70cdaaf9c4a9ca6590778139c8c66465e2d032f61" \
"55946f7e03433add23065e774c3887743cdb8b38807c292436a973d6aa235d0c" \
"ff3a872dd3c864ddb869a2a247a13a4e7e9792e25371d3af6957e8ce9806b27b" \
"cc92369a4b6bcd4b84eafcae09ad1278a349321c8084ce24c9d389540e6893aa" \
"09347b00fdf56441b82074295c2cb0fe0b7afc3bd58f217eb0aee5d6fc265ba8" \
"c459cc9588473307c743c3803fb5103544c3bcec638aa0e608211d6155d401d3" \
"ee3c2f836b118bf3471cb2082231e30f698dde4af3e07a2d33ae4973a42ac28e" \
"3c5646b2844835358269548156509c6efa6c0604f1852cae15495597f270df31" \
"4d73171ff7b308bcac078464754c6d18f27032ea85e407b45fe83354067c5bda" \
"2f82dc9854e90a07a11844836a0b67a80d2375c4f6289e724a5cf8da34a3248d" \
"1d00eaa3ae0f2389a80b51ba813fcaa638a8eece14a85ee64ff8d735e7a1188f" \
"ccf5dcedd17acbb8d46d149087cb1334780e37a0a43b604e1c45356ac3067e13" \
"b8ec1eefe24d775848e5a891a6f23ff84dd70bb81585c00ae7d390a0d80fde58" \
"fc1bfb47c12e749506846ad7d6cd0f4bcc35d49ae7e89c80d4594eb66d8a0db6" \
"de4ff4c0df82deb74554a65fe021d1b8996aca9ba97be8e6050a4302a7872686" \
"1942a663363893d5367ffdbd85ee86ade84ab492682e3a6bd67e27b534a0356b" \
"53db39e13c3a825296e2330246d27b3b66cf6db99c87403d372b1f2e9ece2d49" \
"cad96146b43ddefa92f6187c63804f9da5d1266caa5eb0c5dbf4cf627adab1fa" \
"2a47f0acc89e4923e42201e5180e6829c75c64aad26d3be19727665e019b15d4" \
"5c03cc418aa1f64ca8ce7d857176784778a48233114a8b87b36b3b4cb93ccc13" \
"5ac93060d0309a9fe346ec7db8dc525d9d2a67b1454ed6f3b388d4163b4d13e5" \
"02b95e7c69cf1b6b382d0bb58fad46463165a723cd5b65d4e70058fd1c9c6363" \
"ffa5d170e2c0f22b823a5156eef56553761092b4d581c13e1e47c46d69ed4b91" \
"3af1e271f68cfcd43d8c0681590b2abac50ed3bc33fb67f5b71daab8886ca2c6" \
"9e073a5edd821ccc615f45603202c40bad1e338fbf45b1c92d4d04f4e45c28f5" \
"3188a807cd62e6d38098a56f2d4c72b1a681a08f17178b447a880dd883ce49d1" \
"8f36bb7e1341128c9de9f7304cce08ace4f1444cce7e750fe3dd85cc0f6dfa32" \
"55a66d083807b3459b4b4130b261eaac342b298d884a1696d5253535d96923ee" \
"00359ef5c9941cb00c9e1a5fd53004439a1cf353e14fe4912f07e2822ae81ae6" \
"cd5d8b65b875033859025d4d3c9265614b085daae72b160f471c77f6af443ac9" \
"61175698b77c5c6109f099685533419fc2a788c476ef4bd7773145b8e8128fbe" \
"f3a95dfb9879157f1c9722c2d00b28452c606037156bcaf68cbbc8f6d4fd371a" \
"1acc0ed96f7e524fc9e31d5400049be470af36b375943d6fd33d4edd6fc64514" \
"1d3b735cadfb36679a959451e66714b041e9879566d50d8f13e5a4eb519b53b2" \
"9e0cc58aa36f19e4bf59a90736d83d4c371d29ae6601201e3329f71922802e6c" \
"f728113de9bc647fcd24bc50d3d0ab41b9997cc3371db8c742bde679e67ed775" \
"e14296218d9e075ae892eb5bb3e8e41568ab594809f2bc173a38649123a86dc6" \
"a9f58e48ef5c2c90feccc6a6b1f3f90bcbf233bd0347d4c95b1818c93fe7f250" \
"5252d9176958b64cc5a7a6c2b99b6adebc3a66e3c07d2343ec0072fc32645100" \
"95b34ebe7f09870e34e155ef3c2c542bfff412c7d6b6f6fc90b0a95a635eed0f" \
"a50a126a5d24b78c915c210dbf5e92633f83f282d0b9e4e0a47f49f3d3249828" \
"98675eed"
test_signature_Token = "0000000a899e73cfbf8c57027f5a0f853b9906701ee378ad169d34ce45153f13" \
"3c3f3f6cc186a29f9a39b38d7dd73a51f5dcdc759f9349e9a33ec47aa9e1171e" \
"fa0426b30d5074b25cdf27cf21938f1a6a208ec739f47c982613218b48c5b02b" \
"fd4052a923f916073913b9f3095c0ed7f300d833ad0ccdcc3f2469e228b83466" \
"b86dd76067016d5c5c3552b3314bacd53ab5442129540ebf6047fab4e3f2ea68" \
"dd6ed7935e34a68e794728d2b388cc0ea10d5918bf7954b4876deb03693ec44a" \
"6f2fd2efe307bf24f3fb550eb0a3a2acd0907e30abcc07acf3fc7136c455cb61" \
"180a2d232d1c1eac26d137e6b395b3deec26b1e66b543cbcb261eab981f5ac7b" \
"e9d210faff17cb324ad2d29718ae21688dd307487697acd422726d457e588f0d" \
"2ec15b3b68cfdcaee7fb24f0fd02c92e2df365cc9d89a87fae0f0768d3f11739" \
"693593dee7de346a1ddce5bb2102deb2bd1fb12a93c619bb1418544217f0f71c" \
"cb65890585df2ff582aaa58cac02d7a6b8c13c655619f04bd95e7819e9353f9f" \
"c727e023d231815e32918814ec344a7a2b65e1fbf85ab53f80ca4024c6b77ede" \
"4636b3bb666c03d96b2fd8e938fd1a46189d0a8ac5054cdde594a18fc4c2cf1c" \
"fc7f43308178b254d9b431a9c0cfd7a78b1dad393c0ccdd3dc5ecc9b24912226" \
"2b3f008c76688612390460fd4832fdbcad01b0481229bcb44bd2cb62e01305fb" \
"0ff09930e88dcd6a5d0a4ac5d968a79c838acd7769dd0bbc21a24695c05954da" \
"f2b049c5973866bd41ffc6a9b7fb3aff011b9b64df7dc361740623bce15f0336" \
"09a2a21ce0dbc9a8b8d80f690d200a46b433dc775df902647fdac9e9621595f1" \
"b0c86f06717bacc9f72c8f17701fe131cccf4f1dfe67129bf3880fa51f471fbf" \
"2e1197faec07183e749b4a57f8eacac0ee2192a9474f63adb35c91abb449fe50" \
"de185834e26d227bd00c966da00d13f8f195c13fa6ef2e9cd0e930811d0f2315" \
"8bd91243a5e66a0f5736e11dd2c9f5454348c88a43c2bebbf6c24daa230e7b4e" \
"6db3c0257d12dd2ab248887b4c1973e7431921b2f61abf16ed22a4b2e0189ce6" \
"fe304c3186be408a2a881ded2e8a3534ae03b07927083b8087891d54c12c54c4" \
"b4cc9dbddace3305df0a154c9536bb2a67a2513ba99e10cbda1cc2e3e3f0fd81" \
"e3660edfb2f461c972ab0220e4946c3b6abf5f8784fe77eff0de8b8df5a595f3" \
"d6416e652c2249e9b68f4021fedc23192c5161ec112f4e03abba776e20efdbde" \
"e8a582c8cd04b01bcb24ef635cb8e6cbb1538c5940e6840abe606549117095b6" \
"e5a504cdb29b0ba4b7347aed5f192925c4cc14e9a2e977e07223c3845551adf9" \
"6fd9bf3ab3659366f67eada28522fbd23f2e46a7d31124f1c31ec8586000c7fa" \
"5449d46b0f09d7aefc685866ad53dc58a2aa4388c7e6b1963556e682b869e731" \
"7b01ca320a23c75e301a4df09ecacc0c892cc1e43780f96a256d0639dcc374b5" \
"a13725dece716e5383077fcdd8db6c679e0d56b8efcd296894be8551d0b6eda9" \
"14e565bd17c88cb3bfa47d014d278dc95672e1f8e1764d3587aad7b37a2cc4d9" \
"634d2bc44415502bdef02de04d3c1c8ab4ec95a507c1932117971f81f081567f" \
"50349bfcd5b57bbfccc1867f9593657147ef23423db76fbfb53bc7a9e4833615" \
"c58f30984004f76d259d75d4a15a151942cc41f92cf8d2d8b14c3f538aad3e72" \
"5993a9009d72c4e561591279bf5373aa44ae6b71aee6dc0c29c2604af7f09758" \
"ff7616a3d1f9dcb486e132c1f7818acc9c6f4ebfe60ebad5ca63d86cecdc139f" \
"b2fc665be289caf43af02c034ed6a10a2469e2a40785d9bcee81cc714e70f264" \
"36f2883ef7b308bcac078464754c6d18f27032ea85e407b45fe83354067c5bda" \
"2f82dc986cfaf471dd726a1be1c104b61f543ae5814d19a37ae336aed7c2ae46" \
"1a669f3181d8179419f7540f59acb338834bb36d5bed4c718145e8bae74680d7" \
"37027f1a8fc35a50ab7821fee7b84b09cc1e640b7c3606f73c71b413b464e23c" \
"df81bbc9875d8ed5ae25bac46962b0476fddd3366de52efbc5887325ab8a9bb0" \
"c7d745eabdc6941bea89c40be63a8aa2db07c30a9492145cec0fe005fa222c88" \
"84161a1041021639106d925c47308fd9e98e9909396dd1086ab8e1df8b2a5db9" \
"eaf96cb989f52e1593e0405688a4e2d6a803a2ba03dc3b13ec0e3f21f219a8ad" \
"1cbaf53d3f23e98bd4afe0fd91e62063930ce481f955d33df163f048b8e3f551" \
"91af094302406e4c6eed081186753cfd6274876748dac65fdda4ef154a31e83a" \
"382203c06890ab5896a05d9cfd117254ea34117b0ae6eeff576edb35b6e30acb" \
"5c1cfb45c1911bfdafb8c97e2fcd841f9c0b34241f4a1669776cc48727df459e" \
"30e65ed5dd82b723fe833d3e1c6085873af9c4d015e7781e0fe8bad6cf2e57dc" \
"f41e7f3b584f15880a0ca48fbc856f06852853d334f1f328be8dd9b52e2f9f3a" \
"d2e187ef585749d1fda06ba02b55818cbcaffff8bce89ae3aea73c4b6f27dd38" \
"8da09dfec2c813f9d274262e340b0dd63c629fe17e75e003b1bda516e3e8edee" \
"501dab1374322ea62f9088d3c10b1524a46c4c4df24ac386e74f1c415145ffd8" \
"e2150500b93fde8d1e1987a61a6cfae9a0736bb1aecda99eb047bc42659a213c" \
"995f5ed17853186c49f20e62f8c45e7d60b9cc9a941ef45652badcd8361989b9" \
"ecb848c46f6d7c9fe80f6534f5af2ced1631ae0c593854430d84e3fee5ab914c" \
"ccff1bfc5b00aaf7d0ff707587a5eaf6e547d95b0d263557431ed94b0bcfda9a" \
"c2b2f98675bc52e7a30da7842a179eadc2ffec7181d69799e3c991e75781caba" \
"2ad088d2638eecc58c2c9ea2cefbd80d883e278414bb85e8a46b9cf793d4ea26" \
"2609f148bfe3cde1d2dbb16eaa47a28e0b4b1ed9de45237f79c50de0b6d1ed0a" \
"981b86382111ea986fd011fe4b0d9c728c5ef5d30eb1e175aed1b8881c7fc396" \
"9da0ba073f8949d0b087f605eaf69837b22abab62eb39f0a4bb7c965a4d77887" \
"3c39a093fdf77f0fe1cacc5920b52cb63013fdc852c4243b48ad55b026098ac9" \
"65f771b5e9bc647fcd24bc50d3d0ab41b9997cc3371db8c742bde679e67ed775" \
"e14296218d9e075ae892eb5bb3e8e41568ab594809f2bc173a38649123a86dc6" \
"a9f58e48ef5c2c90feccc6a6b1f3f90bcbf233bd0347d4c95b1818c93fe7f250" \
"5252d9176958b64cc5a7a6c2b99b6adebc3a66e3c07d2343ec0072fc32645100" \
"95b34ebe7f09870e34e155ef3c2c542bfff412c7d6b6f6fc90b0a95a635eed0f" \
"a50a126a5d24b78c915c210dbf5e92633f83f282d0b9e4e0a47f49f3d3249828" \
"98675eed"
test_signature_TransferToken = "0000000a899e73cfbf8c57027f5a0f853b9906701ee378ad169d34ce45153f13" \
"3c3f3f6c704cb79db016ed3aa75b5acf697b4cefa20dd6c66c2598dafd67e995" \
"69441d489b82316035ff53ac3bd7e53007da659e531d77d62dcf3cdbfab00c52" \
"70164f347c028e5c975acd2471b5f5837b90ffcd10852d885ed570f351c89477" \
"b759b9fe70f7693ab3294841fe224c9f4de5c6865e2417a9ada2aa9581f05545" \
"a45fe6a2099e09752c2091c16a6edf0b7e24faed1e0e2b26a72d05e9081b6adb" \
"d0975fdec34e78542457007ed25b6caecf01382ccf252e4509ffd2d3e4302a45" \
"f955490760cf6125fc92a210a7983705358d7b8c7818a767e878e97e5db3c293" \
"54a684abff17cb324ad2d29718ae21688dd307487697acd422726d457e588f0d" \
"2ec15b3ba822d31d5b8d84e899d28ffef2fb678607b1574a8e6324db4028076b" \
"e959815e749d076158d2e674a41707a02ddef3226f4525bc77842388c060a128" \
"ff46ff3c8cd790e10ccce767ee7248738376c254ce6527d864b64e8902344f3f" \
"f5aed9f404198d1956137f1c76408e2c9c71a8141a5ce2f10a836b5187e92d33" \
"615af8e457a0b26c1a0b7b2463e047a2314c6414d87581a01880dd75e081d1df" \
"8be57ab7c54fee608eef62c87e73e95cc2f90ec57f6e767c6ccb0d59dad60ce4" \
"450d777e76688612390460fd4832fdbcad01b0481229bcb44bd2cb62e01305fb" \
"0ff0993095a1f552389c5968ff7ba4b5d33ce12c6c0dc350beeeb891ac82f54f" \
"d54078a5dd4e2dcd1f6d909a01ed91c754770456dbdbf9f3e3d83a1c1ad2bc02" \
"4166461b443e6e310939abf1e6048c95373e985eb8b9967f163764f9f1464aa7" \
"cc18d54474843d2c12e2e15a44730ae8397b13fbef9a0635b81be07e01d424ef" \
"c5d3fb4f7105bc9fe284eb7cdb41f5016a585deb2ba9b0fd41d21f32029a60bf" \
"40c322beb0b93afcb2bed0660e7f455387409f09112df4dd3aed9272f44a4e25" \
"a8cf06763f9dcb62ff95a08b1e66f419921b1ed60ab5d7f412b234f5b889d2fe" \
"0c1ba52dfb56b38056bf3e9843df382e7438d28fbcc7e71d0996e3cf21af7000" \
"9f66ad7e42a1498b62c5624d757a648ab82cd1958c67195128f3f7a978954575" \
"d8c1057478041b1c563dc3de516f5051e56af464923d77adbd57fcc2dd286b4f" \
"75da96c93b787cba99d930f5df9a124a57cf2a7621fb97029299f0ca470f7f26" \
"f512aa2676b366508cb666bceb6188b0a6e19bbf6215b84af50a4db498e1d669" \
"9a6ea35fcd04b01bcb24ef635cb8e6cbb1538c5940e6840abe606549117095b6" \
"e5a504cdf69f7101567af0bb381c033efef8a9137ae70fb3c05616cfc7bdf517" \
"701cac31b3659366f67eada28522fbd23f2e46a7d31124f1c31ec8586000c7fa" \
"5449d46b547f331d3af8842d333d24b0e4e80a2b49c284e83161035740dbd228" \
"4ba98bdce32ed2751649cd9cdcbdb5281fd21d4a9daaf61d18d048d617e49b72" \
"c9d616b5832bb402e1e2ab18a9744d6d610e9ffc671c61c5b4494d08cce4f679" \
"aec9ee97783cbfaa194d6fcc92b566a3ac41ec6c3fdbb26228ea014975fa584e" \
"55c57f5a5aa178e17c7b9f221caef1d30b0569ed6db502e549964b3a2b949f08" \
"4463b52fb34ceadb1cf5f7fa930abfd68b0d1083265450a1d02f3ab2cc2f6fcf" \
"39a516d6054c041aabf33456061324766bb9820260fe17a4fe76d0879cefb1b3" \
"d5f6de7072781e487f13f1359eb22eb024430c00e51b1205630de20c591000c7" \
"6e7116200c7a84744d291a259b6a44d739261311f570bd2721336adfb5189a8a" \
"103a928e0bee0b423807b4fbd9d142cb505850741303e1db058d6ce9770c6470" \
"7f4d412f94649cab8285e8b1b7c96120388c3063e4324e2b6ff1c2e96dabbb6b" \
"43944cbec3883c2d22d252386b8ee9580a6757859381db6afed914563b4bb603" \
"c7200562d6610809d1382ee89dfbab62cc5c42a6de5dbadf37f5cb7ff342fb82" \
"6f81372c1144ca83876418f4b843df9bbecee327e790740876155415070d2e66" \
"3346ae9388170fd144ab595fef9f61aac6445286b3efa86145454a12533ef4ad" \
"9a836e447f7601c9dacfd15d8fac01064ab3ae4027523d1d80bff15ebcdac0b1" \
"e52b3851158c8d63275bd4d7c395fd50bc6f80779165fafdd4636537d061b316" \
"55d7f14e47929ef65a1424b5fb07d094a8a98a09db01db22bc6c5cf1e51d9075" \
"0bc82245877bccc48a2af1852cba39ed81abb0e1309aaaf9efe8dc610be71f15" \
"b770079050a73e1448d89f79f3e2d6d20568bb9570b601484c9c61df1a688fe0" \
"ec19dfe6c8fd46671246a6d186b86c3ce6d12db0d45e5c074f72a5926e7512e8" \
"e327424c9af0524f8bc3a154f53a54fb046cd9cf0df3a45d129adb641ba66b25" \
"d9d99cfbefef37c44399e2ee83b869859b9fd68174086cd2701912926fc79200" \
"3290bf802762e2f5efe38d602eadf9b4fbb84198440f939e88ad3ef4897da608" \
"a2c5e1817c8eb3f20f17b6e40e815f6bd2196ef03e2217961c9690b023d0234c" \
"3646731ffc69448892efa59b217934ee5ea7ee687260d8e1c08e1ec9d04102e6" \
"3bbcaeb74d268b9530d4e1240f4817a91dd236189f9392d7417f7ca59e5150e1" \
"c7b87ddacd62e6d38098a56f2d4c72b1a681a08f17178b447a880dd883ce49d1" \
"8f36bb7e3fa0689a438b30baa6c3159d660bb093380ba04de04a6b1b5ba4d68f" \
"dddd4b7fe5ce9245590022441f7bfa9ee1b9e52a231ebe2ae08d931ba869d64d" \
"b2e77e4e0a1eda5b7b024c65717edc73de75fa950722ab106b3d07806b4a4463" \
"cb6fb136af55aba3a46e730cd7bc8fe61f211a05e20a082bb9ff6c1e1dd97d26" \
"39e048bee9044f4fb0de6a33d6f0d3b2efede3876777016692a095ef4eca1e1e" \
"891eb1139879157f1c9722c2d00b28452c606037156bcaf68cbbc8f6d4fd371a" \
"1acc0ed92111ea986fd011fe4b0d9c728c5ef5d30eb1e175aed1b8881c7fc396" \
"9da0ba073f8949d0b087f605eaf69837b22abab62eb39f0a4bb7c965a4d77887" \
"3c39a093057468628bd3b8839fff9ec7daea107c075f485895dd0bcc793b5bc3" \
"047f4883e9bc647fcd24bc50d3d0ab41b9997cc3371db8c742bde679e67ed775" \
"e14296218d9e075ae892eb5bb3e8e41568ab594809f2bc173a38649123a86dc6" \
"a9f58e48ef5c2c90feccc6a6b1f3f90bcbf233bd0347d4c95b1818c93fe7f250" \
"5252d9176958b64cc5a7a6c2b99b6adebc3a66e3c07d2343ec0072fc32645100" \
"95b34ebe7f09870e34e155ef3c2c542bfff412c7d6b6f6fc90b0a95a635eed0f" \
"a50a126a5d24b78c915c210dbf5e92633f83f282d0b9e4e0a47f49f3d3249828" \
"98675eed"
test_signature_MessageTransaction = "0000000a899e73cfbf8c57027f5a0f853b9906701ee378ad169d34ce45153f13" \
"3c3f3f6c870ead6e875c470543e9e91ab30e6119251698511ddd403c98167317" \
"78adc7495d8f176dc9d0d5b9e08bf6314933cd1fc630ee0e0deddf477c22220d" \
"61b3a78be588ce819a27523ef24066365a4156a1f809d7a47e047f99888a4998" \
"43fbbab755571dc4bd2201c87488c1c68162ebf51b59576a4c0cfdf034f2ab32" \
"9dc15734ae64db7bfde2193accc7c7d2befc43db6aabd428dc25d79d8e10f837" \
"cc1c47bfcda52aaeabc57d1f3e7e741144616fff91ffd7c511f62e2980e32480" \
"9d8afa822d1c1eac26d137e6b395b3deec26b1e66b543cbcb261eab981f5ac7b" \
"e9d210fa04db6e0b5a72fee9ec7b3d561ebad1957ddd05b357e0bc4bd5b383c2" \
"f7c76bd023081d4cf81a48f903b3218df7284e06d2b65b2083a472ca7888ffbc" \
"c9ebaf6fc725a36fb4e60a6a2cf3d7ebc0a9c28e13669b8cd11c6e51abc66167" \
"35ec866a0061c1db1c98a647fc68b783de0d226a47cc0305feea9d5611301611" \
"b133ef6a3f44e75e5ec8b4f579e8b7795d61885ad5cc9f0d217eb494607ef7b2" \
"d058fe304b59740ee4218418de0e1ca08a998017c7a2e3325331da38354b58a2" \
"35b6734f6d6f52a2499a97a278ace169e897f98260c4342c9595d33897b838fe" \
"4a320f42f407cdafe217809d5829b6ada1156691ee5421d23a1df25e0fda84fa" \
"98fabd432e354a339e4e4526da6a79fe77c30ba86ef988c57832dded50efd9b7" \
"3788c077fa8e049cb717eb8e241381048a14ef790a218d3ae02ca8aac3d33832" \
"27cd2ae97da6a2c091f82a98cb7ab34c2e49541714ee9f55b3963d7ee8f1f870" \
"73fbc5c72c4a070a10873e47477fd5dc35362de92cb72f9becf1e67a9130473f" \
"91ad4e12ae89ec1c00c2805c0615cfccc2dfbb47cfd6340478ce75955e93b16e" \
"a0ee11b72d3c9f0eac99611115c52e002470e43f95a1fa7b666263eebdc3433e" \
"68be9534c2e46ab5d70bc9647f4f39bdae35ad32bcebaf7548b5de91eca8126d" \
"04f350e97f8aaca80dc5a1fa74681f4afd38568b123ef00b90da1c5853009e95" \
"d81ece892977c1f43367acc0895af37a68753aee0a1f06548a2f46f448b92915" \
"5511821bcd66230cc66762902dd7c2c103738c6f4d3c263509b817bda60cf058" \
"e93a814101f3129f705ec5c39cda0a7dce4616631b044089645245caecec689f" \
"e59e3efb3e1a9028d2732b0058616706d79cb9169044072922c8cbad8cc56b5b" \
"2a14dfd653fe1a494534e4c9117c48b3916843dc5252b5bdb56bb5d9effe5819" \
"d22174e563effe6799809f08a464c9aa564992b412baa6578dc2f1f91697672f" \
"7c9015ccc4f291c54c0e2e8b100579663f61fbe5adc5e2c4e295244bd58e6e75" \
"b6df3284547f331d3af8842d333d24b0e4e80a2b49c284e83161035740dbd228" \
"4ba98bdc97498ad0084139243cc3b553a38dde0855c5d487d8c4ecbaebf75186" \
"e40deeee8a3d7a27b8cf486226d24253577d374cffa7a2a8172464249e0cc00d" \
"a1f83a0b45df768c4cbe929cabe866e19c6fb7fbbf6c3e9cd6c72e38cef02aef" \
"10aec1d53f92c5df896df6010091c6d6fdd30621c9ac62225e38abfe6757830b" \
"d2019a58653524a6be818e1b7b55021a579dacacfdde0854959da4e844f89381" \
"a36aaa61f5dc742c7539f8cfd3097e0c717046dd0bd3a181fe0f04efe03ed445" \
"f35b75629d72c4e561591279bf5373aa44ae6b71aee6dc0c29c2604af7f09758" \
"ff7616a30c7a84744d291a259b6a44d739261311f570bd2721336adfb5189a8a" \
"103a928e7edf9560143cb7f5a48feb24a6269452f0f0e1873c2ecf33c4812bdf" \
"89ab96f53505c6d74f4f68517e012cc08b22df1c152edb71b441690b6b668639" \
"ef22c9efb6f9db8b1a82d44b515f8ac08242cb09a994991403b4a593d5e4954d" \
"50a36a57828701592686b846062f9672acecebed48b0e845e4aa4d0bb477a280" \
"ac5af201b9b1e4ee22b4602b1360c30888a898ac43fcec032b803972a9e3dd11" \
"bb120e66f74054f47d4c8ff621b8057bb6a3e4b58c6557f1bd9294cdcf65a8eb" \
"a149ab71a2784ba462a8e0856890d9bce6d2dab26ac4e8dbf6f09fba7e06516e" \
"04dfc4fa080fcdb9d01851cbd8b3986edb072ceb3e86cf18d01b52bffc2ba338" \
"a8e4f8a2b8f3a1062ab65f2f70441d216d61e0eea7ba860ec539376ae0899f51" \
"e784efa6bfc87d4c75c0b736acc124f9d0246b5ab407914add2388a1e5c4840e" \
"a812da2e50a73e1448d89f79f3e2d6d20568bb9570b601484c9c61df1a688fe0" \
"ec19dfe66890ab5896a05d9cfd117254ea34117b0ae6eeff576edb35b6e30acb" \
"5c1cfb45846ca40249e396ad31b467eef233c0556dd0edf50ff06e2d9bdf622e" \
"0a69b4da1be4bc00f55f559b4fc6c88d929268fc17d4ecf2764439f5c876e592" \
"e911e3304c865738003144d26d6bf41afecba096b8e9994a4d39bcf6d2aad4ff" \
"5d928027c07922d4cf4b3a04acfb552c502f17efc652606009fcc255ed89af79" \
"d3545f1f56e316ccfb89d26dc9012461bb838f90e179e6e08ece20d97ee60a1c" \
"576fba51c174746b3155dd65f4b5b240621f186092f30bd3ae5cf98c15747a47" \
"67c87a53cbafb7a0f79cf25765fd2c32e54481111b0ea5f625031788bcfcafa9" \
"e6645409415a1c56c217c26149fd945c6b6f702637333950a3e441656daf460a" \
"782ded93a118a09b25fde1e26d467d1e5f7c81511c4aee2e61f5c0bc263ec5ac" \
"0f9c4b1dd7d7cc5d477b36534b92ac64f148cda22642434e4f72100729b4582f" \
"8877ae4370fd0c5e4d131487e738a50ed0ff9e96b8d35110503d06b7960c0d01" \
"a955d51063c1eb1088e8abd247c2aa2602c7e4f3c770cff184cef51c0b2a79aa" \
"ceb82c7ab641bc5e699b5bb6862c5ddf3630c1d48260147be335595b1d96dcc5" \
"bf3387112111ea986fd011fe4b0d9c728c5ef5d30eb1e175aed1b8881c7fc396" \
"9da0ba072b95661816920f809e6dd85a25918405e531860ce3f905fe41cc0552" \
"1885294e4c7471870ca5410593692f9cbd06d82cd86dc9cef94339cee4bd1548" \
"f91ecc21e9bc647fcd24bc50d3d0ab41b9997cc3371db8c742bde679e67ed775" \
"e14296218d9e075ae892eb5bb3e8e41568ab594809f2bc173a38649123a86dc6" \
"a9f58e48ef5c2c90feccc6a6b1f3f90bcbf233bd0347d4c95b1818c93fe7f250" \
"5252d9176958b64cc5a7a6c2b99b6adebc3a66e3c07d2343ec0072fc32645100" \
"95b34ebe7f09870e34e155ef3c2c542bfff412c7d6b6f6fc90b0a95a635eed0f" \
"a50a126a5d24b78c915c210dbf5e92633f83f282d0b9e4e0a47f49f3d3249828" \
"98675eed"
# TODO: Do the same for Lattice and Duplicate
# TODO: Write test to check after signing (before is there)
# TODO: Fix problems with verifications (positive and negative checks)
# TODO: Check corner cases, parameter boundaries
wrap_message_expected1 = bytearray(b'\xff\x00\x0000000027\x00{"data": 12345, "type": "TESTKEY_1234"}\x00\x00\xff')
wrap_message_expected1b = bytearray(b'\xff\x00\x0000000027\x00{"type": "TESTKEY_1234", "data": 12345}\x00\x00\xff')
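# Framing implied by the bytes above: the payload is bracketed by
# b'\xff\x00\x00' ... b'\x00\x00\xff', with an 8-character hex length field
# ('00000027' == 0x27 == 39, the length of the JSON payload) followed by a NUL
# separator. The two variants only differ in the JSON dict key order.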
class TestTransaction(TestCase):
def __init__(self, *args, **kwargs):
super(TestTransaction, self).__init__(*args, **kwargs)
def test_calc_allowed_decimals(self):
decimal = Transaction.calc_allowed_decimals(10000000000000000000)
self.assertEqual(decimal, 0)
decimal = Transaction.calc_allowed_decimals(1)
self.assertEqual(decimal, 19)
decimal = Transaction.calc_allowed_decimals(2)
self.assertEqual(decimal, 18)
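        # Pattern implied by these cases: a 19-digit supply leaves no room for
        # decimal places, while a 1-digit supply leaves 18-19 of them -- i.e.
        # supply * 10**decimals is kept within roughly 10**19.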
class TestSimpleTransaction(TestCase):
def __init__(self, *args, **kwargs):
super(TestSimpleTransaction, self).__init__(*args, **kwargs)
self.alice = get_alice_xmss()
self.bob = get_bob_xmss()
self.alice.set_ots_index(10)
self.maxDiff = None
def test_create(self):
# Alice sending coins to Bob
tx = TransferTransaction.create(addrs_to=[self.bob.address],
amounts=[100],
fee=1,
xmss_pk=self.alice.pk)
self.assertTrue(tx)
def test_create_negative_amount(self):
with self.assertRaises(ValueError):
TransferTransaction.create(addrs_to=[self.bob.address],
amounts=[-100],
fee=1,
xmss_pk=self.alice.pk)
def test_create_negative_fee(self):
with self.assertRaises(ValueError):
TransferTransaction.create(addrs_to=[self.bob.address],
amounts=[-100],
fee=-1,
xmss_pk=self.alice.pk)
def test_to_json(self):
tx = TransferTransaction.create(addrs_to=[self.bob.address],
amounts=[100],
fee=1,
xmss_pk=self.alice.pk)
txjson = tx.to_json()
self.assertEqual(json.loads(test_json_Simple), json.loads(txjson))
def test_from_json(self):
tx = Transaction.from_json(test_json_Simple)
tx.sign(self.alice)
self.assertIsInstance(tx, TransferTransaction)
# Test that common Transaction components were copied over.
self.assertEqual(0, tx.nonce)
self.assertEqual('010300a1da274e68c88b0ccf448e0b1916fa789b01eb2ed4e9ad565ce264c9390782a9c61ac02f',
bin2hstr(tx.addr_from))
self.assertEqual('01030038ea6375069f8272cc1a6601b3c76c21519455603d370036b97c779ada356'
'5854e3983bd564298c49ae2e7fa6e28d4b954d8cd59398f1225b08d6144854aee0e',
bin2hstr(tx.PK))
self.assertEqual('554f546305d4aed6ec71c759942b721b904ab9d65eeac3c954c08c652181c4e8', bin2hstr(tx.txhash))
self.assertEqual(10, tx.ots_key)
self.assertEqual(test_signature_Simple, bin2hstr(tx.signature))
# Test that specific content was copied over.
self.assertEqual('0103001d65d7e59aed5efbeae64246e0f3184d7c42411421eb385ba30f2c1c005a85ebc4419cfd',
bin2hstr(tx.addrs_to[0]))
self.assertEqual(100, tx.total_amount)
self.assertEqual(1, tx.fee)
def test_validate_tx(self):
        # If we change amount, fee, addr_from, or addr_to (and possibly the XMSS key), the txhash should change.
tx = TransferTransaction.create(addrs_to=[self.bob.address],
amounts=[100],
fee=1,
xmss_pk=self.alice.pk)
# We must sign the tx before validation will work.
tx.sign(self.alice)
# We have not touched the tx: validation should pass.
self.assertTrue(tx.validate_or_raise())
def test_state_validate_tx(self):
# Test balance not enough
# Test negative tx amounts
pass
class TestCoinBase(TestCase):
def __init__(self, *args, **kwargs):
super(TestCoinBase, self).__init__(*args, **kwargs)
self.alice = get_alice_xmss()
self.alice.set_ots_index(11)
self.mock_blockheader = Mock(spec=BlockHeader)
self.mock_blockheader.stake_selector = self.alice.address
self.mock_blockheader.block_reward = 50
self.mock_blockheader.fee_reward = 40
self.mock_blockheader.prev_blockheaderhash = sha256(b'prev_headerhash')
self.mock_blockheader.block_number = 1
self.mock_blockheader.headerhash = sha256(b'headerhash')
self.maxDiff = None
def test_create(self):
amount = self.mock_blockheader.block_reward + self.mock_blockheader.fee_reward
tx = CoinBase.create(amount, self.alice.address, self.mock_blockheader.block_number)
self.assertIsInstance(tx, CoinBase)
def test_to_json(self):
amount = self.mock_blockheader.block_reward + self.mock_blockheader.fee_reward
tx = CoinBase.create(amount, self.alice.address, self.mock_blockheader.block_number)
txjson = tx.to_json()
self.assertEqual(json.loads(test_json_CoinBase), json.loads(txjson))
def test_from_txdict(self):
amount = self.mock_blockheader.block_reward + self.mock_blockheader.fee_reward
tx = CoinBase.create(amount, self.alice.address, self.mock_blockheader.block_number)
self.assertIsInstance(tx, CoinBase)
# Test that common Transaction components were copied over.
self.assertEqual(self.mock_blockheader.block_number + 1, tx.nonce)
self.assertEqual('010300a1da274e68c88b0ccf448e0b1916fa789b01eb2ed4e9ad565ce264c9390782a9c61ac02f',
bin2hstr(tx.addr_to))
self.assertEqual('c7f3e1e092e70f49a943a162de8b110899b60ab1dafd0c72625fba6fc1adcd01', bin2hstr(tx.txhash))
self.assertEqual(tx.amount, 90)
class TestTokenTransaction(TestCase):
def __init__(self, *args, **kwargs):
super(TestTokenTransaction, self).__init__(*args, **kwargs)
self.alice = get_alice_xmss()
self.bob = get_bob_xmss()
self.alice.set_ots_index(10)
self.maxDiff = None
def test_create(self):
# Alice creates Token
initial_balances = list()
initial_balances.append(qrl_pb2.AddressAmount(address=self.alice.address,
amount=400000000))
initial_balances.append(qrl_pb2.AddressAmount(address=self.bob.address,
amount=200000000))
tx = TokenTransaction.create(symbol=b'QRL',
name=b'Quantum Resistant Ledger',
owner=b'\x01\x03\x17F=\xcdX\x1bg\x9bGT\xf4ld%\x12T\x89\xa2\x82h\x94\xe3\xc4*Y\x0e\xfbh\x06E\x0c\xe6\xbfRql',
decimals=4,
initial_balances=initial_balances,
fee=1,
xmss_pk=self.alice.pk)
self.assertTrue(tx)
def test_create_negative_fee(self):
with self.assertRaises(ValueError):
TokenTransaction.create(symbol=b'QRL',
name=b'Quantum Resistant Ledger',
owner=b'\x01\x03\x17F=\xcdX\x1bg\x9bGT\xf4ld%\x12T\x89\xa2\x82h\x94\xe3\xc4*Y\x0e\xfbh\x06E\x0c\xe6\xbfRql',
decimals=4,
initial_balances=[],
fee=-1,
xmss_pk=self.alice.pk)
def test_to_json(self):
initial_balances = list()
initial_balances.append(qrl_pb2.AddressAmount(address=self.alice.address,
amount=400000000))
initial_balances.append(qrl_pb2.AddressAmount(address=self.bob.address,
amount=200000000))
tx = TokenTransaction.create(symbol=b'QRL',
name=b'Quantum Resistant Ledger',
owner=b'\x01\x03\x17F=\xcdX\x1bg\x9bGT\xf4ld%\x12T\x89\xa2\x82h\x94\xe3\xc4*Y\x0e\xfbh\x06E\x0c\xe6\xbfRql',
decimals=4,
initial_balances=initial_balances,
fee=1,
xmss_pk=self.alice.pk)
txjson = tx.to_json()
self.assertEqual(json.loads(test_json_Token), json.loads(txjson))
def test_from_json(self):
tx = Transaction.from_json(test_json_Token)
tx.sign(self.alice)
self.assertIsInstance(tx, TokenTransaction)
# Test that common Transaction components were copied over.
self.assertEqual('010300a1da274e68c88b0ccf448e0b1916fa789b01eb2ed4e9ad565ce264c9390782a9c61ac02f',
bin2hstr(tx.addr_from))
self.assertEqual('01030038ea6375069f8272cc1a6601b3c76c21519455603d370036b97c779ada356'
'5854e3983bd564298c49ae2e7fa6e28d4b954d8cd59398f1225b08d6144854aee0e',
bin2hstr(tx.PK))
self.assertEqual(b'QRL', tx.symbol)
self.assertEqual(b'Quantum Resistant Ledger', tx.name)
self.assertEqual('010317463dcd581b679b4754f46c6425125489a2826894e3c42a590efb6806450ce6bf52716c',
bin2hstr(tx.owner))
self.assertEqual('ff84da605e9c9cd04d68503be7922110b4cc147837f8687ad18aa54b7bc5632d', bin2hstr(tx.txhash))
self.assertEqual(10, tx.ots_key)
self.assertEqual(test_signature_Token, bin2hstr(tx.signature))
total_supply = 0
for initial_balance in tx.initial_balances:
total_supply += initial_balance.amount
self.assertEqual(600000000, total_supply)
self.assertEqual(1, tx.fee)
def test_validate_tx(self):
initial_balances = list()
initial_balances.append(qrl_pb2.AddressAmount(address=self.alice.address,
amount=400000000))
initial_balances.append(qrl_pb2.AddressAmount(address=self.bob.address,
amount=200000000))
tx = TokenTransaction.create(symbol=b'QRL',
name=b'Quantum Resistant Ledger',
owner=b'\x01\x03\x17F=\xcdX\x1bg\x9bGT\xf4ld%\x12T\x89\xa2\x82h\x94\xe3\xc4*Y\x0e\xfbh\x06E\x0c\xe6\xbfRql',
decimals=4,
initial_balances=initial_balances,
fee=1,
xmss_pk=self.alice.pk)
# We must sign the tx before validation will work.
tx.sign(self.alice)
# We have not touched the tx: validation should pass.
self.assertTrue(tx.validate_or_raise())
def test_validate_tx2(self):
initial_balances = list()
initial_balances.append(qrl_pb2.AddressAmount(address=self.alice.address,
amount=10000000000000000000))
initial_balances.append(qrl_pb2.AddressAmount(address=self.bob.address,
amount=10000000000000000000))
tx = TokenTransaction.create(symbol=b'QRL',
name=b'Quantum Resistant Ledger',
owner=b'\x01\x03\x17F=\xcdX\x1bg\x9bGT\xf4ld%\x12T\x89\xa2\x82h\x94\xe3\xc4*Y\x0e\xfbh\x06E\x0c\xe6\xbfRql',
decimals=4,
initial_balances=initial_balances,
fee=1,
xmss_pk=self.alice.pk)
# We must sign the tx before validation will work.
tx.sign(self.alice)
        # Validation should fail: with this total supply, the requested number of decimals exceeds the allowed maximum
with self.assertRaises(ValueError):
self.assertFalse(tx.validate_or_raise())
def test_validate_tx3(self):
initial_balances = list()
initial_balances.append(qrl_pb2.AddressAmount(address=self.alice.address,
amount=1000))
initial_balances.append(qrl_pb2.AddressAmount(address=self.bob.address,
amount=1000))
tx = TokenTransaction.create(symbol=b'QRL',
name=b'Quantum Resistant Ledger',
owner=b'\x01\x03\x17F=\xcdX\x1bg\x9bGT\xf4ld%\x12T\x89\xa2\x82h\x94\xe3\xc4*Y\x0e\xfbh\x06E\x0c\xe6\xbfRql',
decimals=15,
initial_balances=initial_balances,
fee=1,
xmss_pk=self.alice.pk)
# We must sign the tx before validation will work.
tx.sign(self.alice)
# We have not touched the tx: validation should pass.
self.assertTrue(tx.validate_or_raise())
def test_state_validate_tx(self):
# Test balance not enough
# Test negative tx amounts
pass
class TestTransferTokenTransaction(TestCase):
def __init__(self, *args, **kwargs):
super(TestTransferTokenTransaction, self).__init__(*args, **kwargs)
self.alice = get_alice_xmss()
self.bob = get_bob_xmss()
self.alice.set_ots_index(10)
self.maxDiff = None
def test_create(self):
tx = TransferTokenTransaction.create(token_txhash=b'000000000000000',
addrs_to=[self.bob.address],
amounts=[200000],
fee=1,
xmss_pk=self.alice.pk)
self.assertTrue(tx)
def test_to_json(self):
tx = TransferTokenTransaction.create(token_txhash=b'000000000000000',
addrs_to=[self.bob.address],
amounts=[200000],
fee=1,
xmss_pk=self.alice.pk)
txjson = tx.to_json()
self.assertEqual(json.loads(test_json_TransferToken), json.loads(txjson))
def test_from_json(self):
tx = Transaction.from_json(test_json_TransferToken)
tx.sign(self.alice)
self.assertIsInstance(tx, TransferTokenTransaction)
# Test that common Transaction components were copied over.
self.assertEqual('010300a1da274e68c88b0ccf448e0b1916fa789b01eb2ed4e9ad565ce264c9390782a9c61ac02f',
bin2hstr(tx.addr_from))
self.assertEqual('01030038ea6375069f8272cc1a6601b3c76c21519455603d370036b97c779ada356'
'5854e3983bd564298c49ae2e7fa6e28d4b954d8cd59398f1225b08d6144854aee0e',
bin2hstr(tx.PK))
self.assertEqual(b'000000000000000', tx.token_txhash)
self.assertEqual(200000, tx.total_amount)
self.assertEqual('390b159b34cffd29d4271a19679ff227df2ccd471078f177a7b58ca5f5d999f0', bin2hstr(tx.txhash))
self.assertEqual(10, tx.ots_key)
self.assertEqual(test_signature_TransferToken, bin2hstr(tx.signature))
self.assertEqual(1, tx.fee)
def test_validate_tx(self):
tx = TransferTokenTransaction.create(token_txhash=b'000000000000000',
addrs_to=[self.bob.address],
amounts=[200000],
fee=1,
xmss_pk=self.alice.pk)
# We must sign the tx before validation will work.
tx.sign(self.alice)
# We have not touched the tx: validation should pass.
self.assertTrue(tx.validate_or_raise())
def test_state_validate_tx(self):
        # TODO: test insufficient balance
        # TODO: test negative transaction amounts
pass
class TestMessageTransaction(TestCase):
def __init__(self, *args, **kwargs):
super(TestMessageTransaction, self).__init__(*args, **kwargs)
self.alice = get_alice_xmss()
self.bob = get_bob_xmss()
self.alice.set_ots_index(10)
self.maxDiff = None
def test_create(self):
tx = MessageTransaction.create(message_hash=b'Test Message',
fee=1,
xmss_pk=self.alice.pk)
self.assertTrue(tx)
def test_to_json(self):
tx = MessageTransaction.create(message_hash=b'Test Message',
fee=1,
xmss_pk=self.alice.pk)
txjson = tx.to_json()
self.assertEqual(json.loads(test_json_MessageTransaction), json.loads(txjson))
def test_from_json(self):
tx = Transaction.from_json(test_json_MessageTransaction)
tx.sign(self.alice)
self.assertIsInstance(tx, MessageTransaction)
# Test that common Transaction components were copied over.
self.assertEqual('010300a1da274e68c88b0ccf448e0b1916fa789b01eb2ed4e9ad565ce264c9390782a9c61ac02f',
bin2hstr(tx.addr_from))
self.assertEqual('01030038ea6375069f8272cc1a6601b3c76c21519455603d370036b97c779ada356'
'5854e3983bd564298c49ae2e7fa6e28d4b954d8cd59398f1225b08d6144854aee0e',
bin2hstr(tx.PK))
self.assertEqual(b'Test Message', tx.message_hash)
self.assertEqual('cbe7c40a86e82b8b6ac4e7df812f882183bd85d60f335cd83483d6831e61f4ec', bin2hstr(tx.txhash))
self.assertEqual(10, tx.ots_key)
self.assertEqual(test_signature_MessageTransaction, bin2hstr(tx.signature))
self.assertEqual(1, tx.fee)
def test_validate_tx(self):
tx = MessageTransaction.create(message_hash=b'Test Message',
fee=1,
xmss_pk=self.alice.pk)
# We must sign the tx before validation will work.
tx.sign(self.alice)
# We have not touched the tx: validation should pass.
self.assertTrue(tx.validate_or_raise())
def test_validate_tx2(self):
tx = MessageTransaction.create(message_hash=b'T' * 81,
fee=1,
xmss_pk=self.alice.pk)
# We must sign the tx before validation will work.
tx.sign(self.alice)
        # Validation should fail because the message exceeds the 80-byte limit.
with self.assertRaises(ValueError):
self.assertFalse(tx.validate_or_raise())
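# A sketch of the size rule test_validate_tx2 exercises; the exact limit
# constant presumably lives in the project's dev config:
#     if len(message_hash) > 80:
#         raise ValueError('message too long')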
| 61.779927
| 145
| 0.630627
|
| true
| true
|
1c44a1470ed6157280477e07c867ba2749afc4e3
| 8,947
|
py
|
Python
|
aggregate-reconstruction/reconstruct_aggregates.py
|
joncdavid/pymol-extras
|
89a6a85e442892bd3c3c4e69d738673798d02efb
|
[
"MIT"
] | null | null | null |
aggregate-reconstruction/reconstruct_aggregates.py
|
joncdavid/pymol-extras
|
89a6a85e442892bd3c3c4e69d738673798d02efb
|
[
"MIT"
] | null | null | null |
aggregate-reconstruction/reconstruct_aggregates.py
|
joncdavid/pymol-extras
|
89a6a85e442892bd3c3c4e69d738673798d02efb
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python2
## #!/usr/bin/env python3
# filename: reconstruct_aggregates.py
# author: Jon David
# date: Wednesday, March 21, 2018
# description:
#   Reads a vizmo path file and renders a subset of those
# configurations in PyMOL.
#--------------------------------------------------------------------
# notes:
#   3.21.18: replaced all double spaces with single spaces in p0.mb1n1c.path.noHeader
#--------------------------------------------------------------------
from pymol import cmd
class PathData(object):
def __init__(self, fname, numModels):
self.stepDict = {}
self.readFile(fname, numModels)
def readFile(self, fname, numModels):
"""Reads and parses and input vizmo path file to populate
a dictionary of type (StepID, [ModelConfiguration]),
where ModelConfiguration is of type (ModelID,x,y,z,a,b,g).
Note: fname is assumed to have no header lines."""
stepID = 0
with open(fname, 'r') as f:
for line in f:
modelConfigList = self.parse(line, numModels)
self.stepDict[stepID] = modelConfigList
stepID = stepID + 1
return
def parse(self, line, numModels):
"""Parses a line of the form AB, where A is of type [(x,y,z)],
and B is of type [(a,g,b)].
Return is of type [(x,y,z,a,b,g)]."""
line = line.strip()
itemList = line.split(' ') ## line.split(sep=' ') for python3
positionList = [] ## has type: [(x,y,z)]
receptorID = 0
for i in range(0, numModels):
xID = 3*receptorID ## x_index, where to find this x in itemList
yID = xID + 1
zID = xID + 2
position = (itemList[xID], itemList[yID], itemList[zID])
positionList.append(position)
receptorID = receptorID + 1
rotationList = [] ## has type: [(a,b,g)]
baseOffset = numModels*3
allergenID = 0
for j in range(0, numModels):
aID = baseOffset + 3*allergenID
bID = aID + 1
gID = aID + 2
rotation = (itemList[aID], itemList[bID], itemList[gID])
rotationList.append(rotation)
allergenID = allergenID + 1
modelConfigList = []
for k in range(0, numModels):
modelConfig = positionList[k] + rotationList[k] #one 6-tuple
modelConfigList.append(modelConfig)
return modelConfigList
def getConfiguration(self, stepID, modelID):
"""Gets the configuration of model modelID in step stepID."""
return self.stepDict[stepID][modelID]
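## Worked example (a sketch with made-up values) of the flat line layout
## parse() expects: for numModels=2 the first six fields are the two
## (x,y,z) positions and the last six are the two (a,b,g) rotations.
def _demo_parse_layout():
    pd = PathData.__new__(PathData)  ## bypass __init__ so no file is read
    configs = pd.parse("1 2 3 4 5 6 0.1 0.2 0.3 0.4 0.5 0.6", 2)
    assert configs[0] == ('1', '2', '3', '0.1', '0.2', '0.3')
    assert configs[1] == ('4', '5', '6', '0.4', '0.5', '0.6')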
class ConfigurationRenderer(object):
def __init__(self):
## initialize PyMOL here
return
def render(self, modelConfiguration, pdb_fname, modelName, color_str="green"):
"""Renders a pdb with given configuration in PyMOL.
modelConfiguration is of type (x,y,z,a,b,g);
model_fname is of type string and represents the model PDB file."""
x = 10*float(modelConfiguration[0]) ## scale "up" by a factor of 10
y = 10*float(modelConfiguration[1]) ## because modelConfiguration's x,y,z have units of nm
z = 10*float(modelConfiguration[2]) ## but PyMOL uses coordinates of Angstroms
translationVector = "[{},{},{}]".format(x,y,z)
        xDegOfRot = 360*float(modelConfiguration[3]) ## alpha is given as a fraction of a full 360-degree turn
yDegOfRot = 360*float(modelConfiguration[4])
zDegOfRot = 360*float(modelConfiguration[5])
#cmd.load(pdb_fname, "original_{}".format(modelName)) ## for debug, something to compare against
cmd.load(pdb_fname, modelName)
cmd.rotate('x', xDegOfRot, modelName, 0, 0) ## rotate about x-axis
cmd.rotate('y', yDegOfRot, modelName, 0, 0) ## rotate about y-axis
cmd.rotate('z', zDegOfRot, modelName, 0, 0) ## rotate about z-axis
## note: apply rotations before translation, because it rotates about the origin
cmd.translate(translationVector, modelName, 0, 0) ## all states, and _not_ camera coordinates
cmd.color(color_str, modelName)
def final_render(self):
cmd.hide()
cmd.show('cartoon')
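## Worked example of render()'s unit handling, using Model1's Step999
## configuration listed further below:
##   x = -43.3077 nm        -> translate by -433.077 Angstroms (10x scale)
##   beta = 0.122539 turns  -> rotate 0.122539 * 360 ~= 44.11 degrees
##                             about the y-axis, applied before translating.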
##---- test functions ----
def test_PathData():
fname = "input/test_input"
numModels = 2
data = PathData(fname, numModels)
print("(Step0, Model0) is: {}".format( data.getConfiguration(0,0) ))
print("(Step0, Model1) is: {}".format( data.getConfiguration(0,1) ))
print("(Step1, Model0) is: {}".format( data.getConfiguration(1,0) ))
print("(Step1, Model1) is: {}".format( data.getConfiguration(1,1) ))
def test_outputAllConfigsInLastStep():
"""I used this function to display all configurations. This helps me
find all molecules in a particular quadrant."""
fname = "input/p0.mb1n1c.path.noHeader"
numModels = 20
data = PathData(fname, numModels)
for modelID in range(0, numModels):
print("(Step999,Model{}) is: {}".format(modelID, data.getConfiguration(999, modelID)))
    ## my filtered modelIDs in quadrant 3 (-x values, +z values):
# (Step999,Model1) is: ('-43.3077', '-4.59132', '27.605', '0', '0.122539', '0')
# (Step999,Model2) is: ('-56.2992', '-4.59132', '13.7883', '0', '0.392702', '0')
# (Step999,Model4) is: ('-58.7896', '-4.59132', '27.6803', '0', '0.682062', '0')
# (Step999,Model6) is: ('-41.8501', '-4.59132', '41.8614', '0', '0.964607', '0')
# (Step999,Model13) is: ('-45.4026', '0.01561', '33.9709', '0', '0.372954', '0')
# (Step999,Model14) is: ('-51.3592', '0.01561', '24.7466', '0', '0.130871', '0')
# (Step999,Model16) is: ('-50.752', '0.01561', '16.224', '0', '0.528833', '0')
# (Step999,Model18) is: ('-57.1006', '0.01561', '22.1472', '0', '0.775501', '0')
def test_ConfigurationRenderer():
fname = "input/p0.mb1n1c.path.noHeader"
numModels = 20
data = PathData(fname, numModels)
## note: models 0-9 are IgE receptors
## and models 10-19 are mutant allergens
receptor_pdb_fname = "./input_pdbs/Rec.pdb"
allergen_pdb_fname = "./input_pdbs/mutant-MB1N1C-singleFile.yAligned.pdb"
model_name = "model1"
modelConfiguration = data.getConfiguration(999,1)
configRenderer = ConfigurationRenderer()
configRenderer.render(modelConfiguration, receptor_pdb_fname, model_name)
def test_ConfigurationRenderer_manyModels():
fname = "input/p0.mb1n1c.path.noHeader"
numModels = 20
data = PathData(fname, numModels)
## note: models 0-9 are IgE receptors
## and models 10-19 are mutant allergens
receptor_pdb_fname = "./input_pdbs/Rec.pdb"
allergen_pdb_fname = "./input_pdbs/mutant-MB1N1C-singleFile.yAligned.pdb"
modelConfig_1 = data.getConfiguration(999,1)
modelConfig_2 = data.getConfiguration(999,2)
modelConfig_4 = data.getConfiguration(999,4)
modelConfig_6 = data.getConfiguration(999,6)
modelConfig_13 = data.getConfiguration(999,13)
modelConfig_14 = data.getConfiguration(999,14)
modelConfig_16 = data.getConfiguration(999,16)
modelConfig_18 = data.getConfiguration(999,18)
receptor_color_str = 'marine'
allergen_color_str = 'green'
configRenderer = ConfigurationRenderer()
configRenderer.render(modelConfig_1, receptor_pdb_fname, "model1-rec", receptor_color_str)
configRenderer.render(modelConfig_2, receptor_pdb_fname, "model2-rec", receptor_color_str)
configRenderer.render(modelConfig_4, receptor_pdb_fname, "model4-rec", receptor_color_str)
configRenderer.render(modelConfig_6, receptor_pdb_fname, "model6-rec", receptor_color_str)
configRenderer.render(modelConfig_13, allergen_pdb_fname, "model13-alg", allergen_color_str)
configRenderer.render(modelConfig_14, allergen_pdb_fname, "model14-alg", allergen_color_str)
configRenderer.render(modelConfig_16, allergen_pdb_fname, "model16-alg", allergen_color_str)
configRenderer.render(modelConfig_18, allergen_pdb_fname, "model18-alg", allergen_color_str)
configRenderer.final_render() ## hide all, then show as cartoons
# (Step999,Model1) is: ('-43.3077', '-4.59132', '27.605', '0', '0.122539', '0')
# (Step999,Model2) is: ('-56.2992', '-4.59132', '13.7883', '0', '0.392702', '0')
# (Step999,Model4) is: ('-58.7896', '-4.59132', '27.6803', '0', '0.682062', '0')
# (Step999,Model6) is: ('-41.8501', '-4.59132', '41.8614', '0', '0.964607', '0')
# (Step999,Model13) is: ('-45.4026', '0.01561', '33.9709', '0', '0.372954', '0')
# (Step999,Model14) is: ('-51.3592', '0.01561', '24.7466', '0', '0.130871', '0')
# (Step999,Model16) is: ('-50.752', '0.01561', '16.224', '0', '0.528833', '0')
# (Step999,Model18) is: ('-57.1006', '0.01561', '22.1472', '0', '0.775501', '0')
##---- main ----
#test_PathData()
##test_outputAllConfigsInLastStep()
#test_ConfigurationRenderer()
test_ConfigurationRenderer_manyModels()
| 44.735
| 105
| 0.627473
|
| true
| true
|
1c44a236c91b224f4e6cf8cf92d6dda93dbb02b4
| 341
|
py
|
Python
|
lib/pool.py
|
Lufedi/reaper
|
bdf56b499e5b704c27b9f6c053d798c2a10fa4cf
|
[
"Apache-2.0"
] | 106
|
2015-07-21T16:18:26.000Z
|
2022-03-31T06:45:34.000Z
|
lib/pool.py
|
Kowndinya2000/enhanced_repo_reaper
|
744f794ba53bde5667b3b0f99b07273d0e32a495
|
[
"Apache-2.0"
] | 21
|
2015-07-11T03:48:28.000Z
|
2022-01-18T12:57:30.000Z
|
lib/pool.py
|
Kowndinya2000/enhanced_repo_reaper
|
744f794ba53bde5667b3b0f99b07273d0e32a495
|
[
"Apache-2.0"
] | 26
|
2015-07-22T22:38:21.000Z
|
2022-03-14T10:11:56.000Z
|
import multiprocessing
import multiprocessing.pool
class NonDaemonicProcess(multiprocessing.Process):
    # Always report daemon=False (and swallow writes) so that workers built
    # from this class are allowed to spawn child processes.
    def _get_daemon(self):
        return False
    def _set_daemon(self, value):
        pass
    daemon = property(_get_daemon, _set_daemon)
class NonDaemonicProcessPool(multiprocessing.pool.Pool):
    # Pool whose workers use NonDaemonicProcess and may therefore create
    # sub-processes of their own.
    Process = NonDaemonicProcess
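# Usage sketch (hypothetical names): non-daemonic workers may spawn their
# own child processes, which stock daemonic Pool workers may not. Note the
# daemon property override above targets Python 2-era Pool internals;
# recent CPython versions need a different hook (e.g. overriding the pool's
# process factory), so treat this as illustrative only.
def _square(x):
    return x * x
def _nested_sum(n):
    # Inside a stock Pool this line would raise
    # "daemonic processes are not allowed to have children".
    child = multiprocessing.Pool(2)
    try:
        return sum(child.map(_square, range(n)))
    finally:
        child.close()
        child.join()
if __name__ == '__main__':
    pool = NonDaemonicProcessPool(2)
    print(pool.map(_nested_sum, [3, 4, 5]))  # [5, 14, 30]
    pool.close()
    pool.join()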
| 20.058824
| 56
| 0.756598
|
| true
| true
|
1c44a23a3372712f8db5d46c920eaa64d7901173
| 621
|
py
|
Python
|
download_skipthought.py
|
prakashpandey9/Text2Image-PyTorch
|
1cafacdc284590c30c635e7e519a5acaabd4463c
|
[
"MIT"
] | 28
|
2018-11-25T18:40:33.000Z
|
2021-07-30T03:17:29.000Z
|
download_skipthought.py
|
prakashpandey9/Text2Image-PyTorch
|
1cafacdc284590c30c635e7e519a5acaabd4463c
|
[
"MIT"
] | 1
|
2019-07-22T15:28:33.000Z
|
2019-07-22T15:28:33.000Z
|
download_skipthought.py
|
prakashpandey9/Text2Image-PyTorch
|
1cafacdc284590c30c635e7e519a5acaabd4463c
|
[
"MIT"
] | 4
|
2020-04-18T08:48:33.000Z
|
2021-04-15T10:00:36.000Z
|
import os
print ('Downloading Skip-Thought Model ...........')
os.system('wget http://www.cs.toronto.edu/~rkiros/models/dictionary.txt')
os.system('wget http://www.cs.toronto.edu/~rkiros/models/utable.npy')
os.system('wget http://www.cs.toronto.edu/~rkiros/models/btable.npy')
os.system('wget http://www.cs.toronto.edu/~rkiros/models/uni_skip.npz')
os.system('wget http://www.cs.toronto.edu/~rkiros/models/uni_skip.npz.pkl')
os.system('wget http://www.cs.toronto.edu/~rkiros/models/bi_skip.npz')
os.system('wget http://www.cs.toronto.edu/~rkiros/models/bi_skip.npz.pkl')
print ('Download Completed ............')
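# Equivalent loop-based sketch (commented out so the downloads above do not
# run twice; assumes the same mirror is still reachable, and fails loudly on
# a non-zero wget exit instead of continuing silently):
# import subprocess
# BASE = 'http://www.cs.toronto.edu/~rkiros/models/'
# for name in ['dictionary.txt', 'utable.npy', 'btable.npy', 'uni_skip.npz',
#              'uni_skip.npz.pkl', 'bi_skip.npz', 'bi_skip.npz.pkl']:
#     subprocess.check_call(['wget', BASE + name])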
| 51.75
| 76
| 0.723027
|
| true
| true
|
1c44a2f703726e0b57034ec9391f3a3d4cc34e07
| 5,464
|
py
|
Python
|
meiduo03/meiduo03/settings/dev.py
|
physili/django_test
|
09aa61f36e5d32f98af11057ea206dde8d082ac7
|
[
"MIT"
] | 1
|
2020-04-25T04:50:30.000Z
|
2020-04-25T04:50:30.000Z
|
meiduo03/meiduo03/settings/dev.py
|
physili/django_test
|
09aa61f36e5d32f98af11057ea206dde8d082ac7
|
[
"MIT"
] | null | null | null |
meiduo03/meiduo03/settings/dev.py
|
physili/django_test
|
09aa61f36e5d32f98af11057ea206dde8d082ac7
|
[
"MIT"
] | null | null | null |
"""
Django settings for meiduo03 project.
Generated by 'django-admin startproject' using Django 2.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import sys
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, os.path.join(BASE_DIR,'apps'))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 's5(_7x3_(ls=$_sts_6g*$arw1l7wj!yha2hz)t_$$^ua!!n!+'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1','localhost','www.meiduo.site',]
AUTH_USER_MODEL = 'users.User'
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'corsheaders',
'users',
]
MIDDLEWARE = [
    'corsheaders.middleware.CorsMiddleware',  # django-cors-headers: place before CommonMiddleware
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'meiduo03.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR,'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'meiduo03.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'HOST': '127.0.0.1',
'PORT': 3306,
'USER': 'root',
'PASSWORD': 'mysql',
'NAME': 'meiduo_mall3',
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
CACHES = {
"default": { # 默认存储信息: 存到 0 号库
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": "redis://127.0.0.1:6379/0",
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
}
},
"session": { # session 信息: 存到 1 号库
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": "redis://127.0.0.1:6379/1",
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
}
},
}
SESSION_ENGINE = "django.contrib.sessions.backends.cache"
SESSION_CACHE_ALIAS = "session"
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,  # whether to disable already-existing loggers
    'formatters': {  # display formats for log messages
        'verbose': {
            'format': '%(levelname)s %(asctime)s %(module)s %(lineno)d %(message)s'
        },
        'simple': {
            'format': '%(levelname)s %(module)s %(lineno)d %(message)s'
        },
    },
    'filters': {  # filters applied to log records
        'require_debug_true': {  # only emit logs while Django is in debug mode
            '()': 'django.utils.log.RequireDebugTrue',
        },
    },
    'handlers': {  # log handling methods
        'console': {  # write logs to the terminal
            'level': 'INFO',
            'filters': ['require_debug_true'],
            'class': 'logging.StreamHandler',
            'formatter': 'simple'
        },
        'file': {  # write logs to a file
            'level': 'INFO',
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': os.path.join(BASE_DIR, 'logs/meiduo.log'),  # location of the log file
            'maxBytes': 300 * 1024 * 1024,
            'backupCount': 10,
            'formatter': 'verbose'
        },
    },
    'loggers': {  # loggers
        'django': {  # defines a logger named "django"
            'handlers': ['console', 'file'],  # write to the terminal and the file at the same time
            'propagate': True,  # whether to keep propagating log records upward
            'level': 'INFO',  # the minimum log level this logger accepts
        },
    }
}
CORS_ORIGIN_WHITELIST = (
'http://127.0.0.1:8080',
'http://localhost:8080',
'http://www.meiduo.site:8080',
)
CORS_ALLOW_CREDENTIALS = True  # allow cookies to be sent with cross-origin requests
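# --- Editor's note: a minimal sketch, not part of the original settings file.
# It shows how application code (not this settings module, where LOGGING has
# not been applied yet) obtains the "django" logger configured above.
def _example_get_logger():
    import logging
    # INFO and above reach both the console and the rotating file handler.
    return logging.getLogger('django')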
| 27.32
| 91
| 0.625
|
import os
import sys
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, os.path.join(BASE_DIR,'apps'))
SECRET_KEY = 's5(_7x3_(ls=$_sts_6g*$arw1l7wj!yha2hz)t_$$^ua!!n!+'
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1','localhost','www.meiduo.site',]
AUTH_USER_MODEL = 'users.User'
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'corsheaders',
'users',
]
MIDDLEWARE = [
    'corsheaders.middleware.CorsMiddleware',
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'meiduo03.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR,'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'meiduo03.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'HOST': '127.0.0.1',
'PORT': 3306,
'USER': 'root',
'PASSWORD': 'mysql',
'NAME': 'meiduo_mall3',
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
CACHES = {
"default": { # 默认存储信息: 存到 0 号库
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": "redis://127.0.0.1:6379/0",
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
}
},
"session": { # session 信息: 存到 1 号库
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": "redis://127.0.0.1:6379/1",
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
}
},
}
SESSION_ENGINE = "django.contrib.sessions.backends.cache"
SESSION_CACHE_ALIAS = "session"
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,  # whether to disable already-existing loggers
    'formatters': {  # display formats for log messages
        'verbose': {
            'format': '%(levelname)s %(asctime)s %(module)s %(lineno)d %(message)s'
        },
        'simple': {
            'format': '%(levelname)s %(module)s %(lineno)d %(message)s'
        },
    },
    'filters': {  # filters applied to log records
        'require_debug_true': {  # only emit logs while Django is in debug mode
            '()': 'django.utils.log.RequireDebugTrue',
        },
    },
    'handlers': {  # log handling methods
        'console': {  # write logs to the terminal
            'level': 'INFO',
            'filters': ['require_debug_true'],
            'class': 'logging.StreamHandler',
            'formatter': 'simple'
        },
        'file': {  # write logs to a file
            'level': 'INFO',
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': os.path.join(BASE_DIR, 'logs/meiduo.log'),  # location of the log file
            'maxBytes': 300 * 1024 * 1024,
            'backupCount': 10,
            'formatter': 'verbose'
        },
    },
    'loggers': {  # loggers
        'django': {  # defines a logger named "django"
            'handlers': ['console', 'file'],  # write to the terminal and the file at the same time
            'propagate': True,  # whether to keep propagating log records upward
            'level': 'INFO',  # the minimum log level this logger accepts
        },
    }
}
CORS_ORIGIN_WHITELIST = (
'http://127.0.0.1:8080',
'http://localhost:8080',
'http://www.meiduo.site:8080',
)
CORS_ALLOW_CREDENTIALS = True  # allow cookies to be sent with cross-origin requests
| true
| true
|
1c44a300424e00e2d85f0ffb0cabd4407222539d
| 1,290
|
py
|
Python
|
setup.py
|
tfiers/piprelease
|
37bbea7788bb55408a10156e9d113a7532f20e29
|
[
"MIT"
] | null | null | null |
setup.py
|
tfiers/piprelease
|
37bbea7788bb55408a10156e9d113a7532f20e29
|
[
"MIT"
] | null | null | null |
setup.py
|
tfiers/piprelease
|
37bbea7788bb55408a10156e9d113a7532f20e29
|
[
"MIT"
] | null | null | null |
from setuptools import find_packages, setup
GITHUB_URL = "https://github.com/tfiers/puprelease"
with open("ReadMe.md", mode="r", encoding="utf-8") as f:
readme = f.read()
setup(
name="puprelease",
description="Publishing a new version of your Python package has never been easier",
author="Tomas Fiers",
author_email="tomas.fiers@gmail.com",
long_description=readme,
long_description_content_type="text/markdown",
url=GITHUB_URL,
project_urls={"Source Code": GITHUB_URL},
classifiers=[
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Operating System :: OS Independent",
],
entry_points={"console_scripts": ["pup=puprelease.pup:cli"]},
packages=find_packages("src"),
package_dir={"": "src"}, # (`""` is the "root" package).
install_requires=[
"click >= 7.1", # Major versions go fast and are not very breaking. Hence no `~`.
"requests ~= 2.0",
"twine",
"wheel",
"setuptools_scm",
"colorama; platform_system == 'Windows'",
],
# Get package version from git tags
setup_requires=["setuptools_scm"],
use_scm_version={
"version_scheme": "post-release",
"local_scheme": "dirty-tag",
},
)
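# --- Editor's note: a sketch, not part of the original file, showing how the
# git-tag-derived version configured via use_scm_version above can be queried
# directly; setuptools_scm.get_version accepts the same scheme arguments.
def _example_scm_version():
    from setuptools_scm import get_version
    # Must run inside the git checkout so the tag lookup can succeed.
    return get_version(version_scheme="post-release", local_scheme="dirty-tag")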
| 31.463415
| 90
| 0.627907
|
from setuptools import find_packages, setup
GITHUB_URL = "https://github.com/tfiers/puprelease"
with open("ReadMe.md", mode="r", encoding="utf-8") as f:
readme = f.read()
setup(
name="puprelease",
description="Publishing a new version of your Python package has never been easier",
author="Tomas Fiers",
author_email="tomas.fiers@gmail.com",
long_description=readme,
long_description_content_type="text/markdown",
url=GITHUB_URL,
project_urls={"Source Code": GITHUB_URL},
classifiers=[
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Operating System :: OS Independent",
],
entry_points={"console_scripts": ["pup=puprelease.pup:cli"]},
packages=find_packages("src"),
package_dir={"": "src"},
install_requires=[
"click >= 7.1",
"requests ~= 2.0",
"twine",
"wheel",
"setuptools_scm",
"colorama; platform_system == 'Windows'",
],
setup_requires=["setuptools_scm"],
use_scm_version={
"version_scheme": "post-release",
"local_scheme": "dirty-tag",
},
)
| true
| true
|
1c44a48fdd16495155236d2c05304496a7bd5de7
| 840
|
py
|
Python
|
Oauth/app/routers/templates.py
|
837477/Oauth
|
8d01a84d71563d9d510950cdb77ae67de0da2a40
|
[
"MIT"
] | 2
|
2022-01-09T09:26:50.000Z
|
2022-01-16T15:56:10.000Z
|
Oauth/app/routers/templates.py
|
837477/Oauth
|
8d01a84d71563d9d510950cdb77ae67de0da2a40
|
[
"MIT"
] | null | null | null |
Oauth/app/routers/templates.py
|
837477/Oauth
|
8d01a84d71563d9d510950cdb77ae67de0da2a40
|
[
"MIT"
] | 1
|
2022-03-02T05:30:13.000Z
|
2022-03-02T05:30:13.000Z
|
from fastapi import APIRouter, Request
from fastapi.templating import Jinja2Templates
from config import config
from controller.google import GoogleOauth
from controller.kakao import KakaoOauth
from controller.naver import NaverOauth
from controller.facebook import FacebookOauth
router = APIRouter()
templates = Jinja2Templates(directory="app/assets")
@router.get("/")
async def index(request: Request):
"""
    Deliver the Oauth login URLs used as the login pages of the user-authentication flow.
"""
google = GoogleOauth(config)
kakao = KakaoOauth(config)
naver = NaverOauth(config)
facebook = FacebookOauth(config)
context = {
'request': request,
'google': google.url(),
'kakao': kakao.url(),
'facebook': facebook.url(),
'naver': naver.url()
}
return templates.TemplateResponse("index.html", context)
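# --- Editor's note: a minimal mounting sketch, not part of the original file.
# It assumes the surrounding project wires this router into its application
# object; the FastAPI import is the only extra dependency.
def _example_app():
    from fastapi import FastAPI
    app = FastAPI()
    app.include_router(router)
    return app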
| 27.096774
| 60
| 0.70119
|
from fastapi import APIRouter, Request
from fastapi.templating import Jinja2Templates
from config import config
from controller.google import GoogleOauth
from controller.kakao import KakaoOauth
from controller.naver import NaverOauth
from controller.facebook import FacebookOauth
router = APIRouter()
templates = Jinja2Templates(directory="app/assets")
@router.get("/")
async def index(request: Request):
google = GoogleOauth(config)
kakao = KakaoOauth(config)
naver = NaverOauth(config)
facebook = FacebookOauth(config)
context = {
'request': request,
'google': google.url(),
'kakao': kakao.url(),
'facebook': facebook.url(),
'naver': naver.url()
}
return templates.TemplateResponse("index.html", context)
| true
| true
|
1c44a6cdf5d3d9445c99399658fd28df0e8e2d8b
| 96
|
py
|
Python
|
python3/help/set1.py
|
jtraver/dev
|
c7cd2181594510a8fa27e7325566ed2d79371624
|
[
"MIT"
] | null | null | null |
python3/help/set1.py
|
jtraver/dev
|
c7cd2181594510a8fa27e7325566ed2d79371624
|
[
"MIT"
] | null | null | null |
python3/help/set1.py
|
jtraver/dev
|
c7cd2181594510a8fa27e7325566ed2d79371624
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import apihelper
set1 = set()
apihelper.info(set1)
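# --- Editor's note: apihelper is not included in this dump; the sketch below
# is an assumption about what its info() helper resembles (the classic
# introspection example: list an object's callables with their doc strings).
def _info_sketch(obj, spacing=12, collapse=True):
    methods = [m for m in dir(obj) if callable(getattr(obj, m))]
    fmt = (lambda s: ' '.join(s.split())) if collapse else (lambda s: s)
    for name in methods:
        doc = getattr(obj, name).__doc__ or ''
        print(name.ljust(spacing), fmt(str(doc)))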
| 9.6
| 22
| 0.697917
|
import apihelper
set1 = set()
apihelper.info(set1)
| true
| true
|
1c44a7936b88cb93c79a780c2dc47a1095d6da76
| 1,356
|
py
|
Python
|
PyEngine3D/Render/RenderOptions.py
|
ubuntunux/PyEngine3D
|
e5542b5b185e8b9b56fc4669a6f22eb06c386c4f
|
[
"BSD-2-Clause"
] | 121
|
2017-06-07T19:42:30.000Z
|
2022-03-31T04:42:29.000Z
|
PyEngine3D/Render/RenderOptions.py
|
MatthewPChapdelaine/PyEngine3D
|
e5542b5b185e8b9b56fc4669a6f22eb06c386c4f
|
[
"BSD-2-Clause"
] | 16
|
2015-12-21T16:57:55.000Z
|
2017-03-06T15:22:37.000Z
|
PyEngine3D/Render/RenderOptions.py
|
ubuntunux/GuineaPig
|
f32852ecbfa3ebdbba00afc466719fc78e37361c
|
[
"BSD-2-Clause"
] | 16
|
2018-01-15T03:12:13.000Z
|
2022-03-31T04:42:41.000Z
|
from enum import Enum
from PyEngine3D.Common import logger
from PyEngine3D.Utilities import *
class BlendMode(Enum):
BLEND = 0
ADDITIVE = 1
MULTIPLY = 2
SUBTRACT = 3
class RenderOption:
RENDER_LIGHT_PROBE = False
RENDER_ONLY_ATMOSPHERE = False
RENDER_FONT = True
RENDER_STATIC_ACTOR = True
RENDER_SKELETON_ACTOR = True
RENDER_ATMOSPHERE = True
RENDER_OCEAN = True
RENDER_EFFECT = True
RENDER_COLLISION = True
RENDER_DEBUG_LINE = True
RENDER_GIZMO = True
RENDER_OBJECT_ID = True
class RenderingType(AutoEnum):
DEFERRED_RENDERING = ()
FORWARD_RENDERING = ()
LIGHT_PRE_PASS = ()
COUNT = ()
class RenderGroup(AutoEnum):
STATIC_ACTOR = ()
SKELETON_ACTOR = ()
COUNT = ()
class RenderMode(AutoEnum):
GBUFFER = ()
FORWARD_SHADING = ()
SHADOW = ()
OBJECT_ID = ()
SELECTED_OBJECT = ()
GIZMO = ()
COUNT = ()
class RenderOptionManager(Singleton):
def __init__(self):
logger.info("Create " + GetClassName(self))
self.rendering_type = RenderingType.DEFERRED_RENDERING
self.core_manager = None
def initialize(self, core_manager):
self.core_manager = core_manager
def set_rendering_type(self, rendering_type):
self.rendering_type = RenderingType.convert_index_to_enum(rendering_type)
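# --- Editor's note, not part of the original file: AutoEnum comes from
# PyEngine3D.Utilities and is not shown in this dump. A rough standard-library
# equivalent of its "MEMBER = ()" auto-numbering idiom, as an assumption:
class AutoEnumSketch(Enum):
    def __new__(cls):
        # Each member's value is its zero-based declaration order.
        obj = object.__new__(cls)
        obj._value_ = len(cls.__members__)
        return obj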
| 21.1875
| 81
| 0.679941
|
from enum import Enum
from PyEngine3D.Common import logger
from PyEngine3D.Utilities import *
class BlendMode(Enum):
BLEND = 0
ADDITIVE = 1
MULTIPLY = 2
SUBTRACT = 3
class RenderOption:
RENDER_LIGHT_PROBE = False
RENDER_ONLY_ATMOSPHERE = False
RENDER_FONT = True
RENDER_STATIC_ACTOR = True
RENDER_SKELETON_ACTOR = True
RENDER_ATMOSPHERE = True
RENDER_OCEAN = True
RENDER_EFFECT = True
RENDER_COLLISION = True
RENDER_DEBUG_LINE = True
RENDER_GIZMO = True
RENDER_OBJECT_ID = True
class RenderingType(AutoEnum):
DEFERRED_RENDERING = ()
FORWARD_RENDERING = ()
LIGHT_PRE_PASS = ()
COUNT = ()
class RenderGroup(AutoEnum):
STATIC_ACTOR = ()
SKELETON_ACTOR = ()
COUNT = ()
class RenderMode(AutoEnum):
GBUFFER = ()
FORWARD_SHADING = ()
SHADOW = ()
OBJECT_ID = ()
SELECTED_OBJECT = ()
GIZMO = ()
COUNT = ()
class RenderOptionManager(Singleton):
def __init__(self):
logger.info("Create " + GetClassName(self))
self.rendering_type = RenderingType.DEFERRED_RENDERING
self.core_manager = None
def initialize(self, core_manager):
self.core_manager = core_manager
def set_rendering_type(self, rendering_type):
self.rendering_type = RenderingType.convert_index_to_enum(rendering_type)
| true
| true
|
1c44a7add9bb2ee186aadb91834062e0c8e8fe58
| 18,039
|
py
|
Python
|
r2r_src/speaker.py
|
MarSaKi/NvEM
|
a636245c96c07f3b507b69f2a9837a4ff127f4aa
|
[
"MIT"
] | 16
|
2021-07-16T02:00:33.000Z
|
2022-03-28T03:57:11.000Z
|
r2r_src/speaker.py
|
MarSaKi/NvEM
|
a636245c96c07f3b507b69f2a9837a4ff127f4aa
|
[
"MIT"
] | null | null | null |
r2r_src/speaker.py
|
MarSaKi/NvEM
|
a636245c96c07f3b507b69f2a9837a4ff127f4aa
|
[
"MIT"
] | 1
|
2022-01-18T09:16:46.000Z
|
2022-01-18T09:16:46.000Z
|
import torch
import numpy as np
from param import args
import os
import utils
import model
import torch.nn.functional as F
import time
class Speaker():
env_actions = {
'left': (0,-1, 0), # left
'right': (0, 1, 0), # right
'up': (0, 0, 1), # up
'down': (0, 0,-1), # down
'forward': (1, 0, 0), # forward
'<end>': (0, 0, 0), # <end>
'<start>': (0, 0, 0), # <start>
'<ignore>': (0, 0, 0) # <ignore>
}
def __init__(self, env, listener, tok):
self.env = env
self.feature_size = self.env.feature_size
self.tok = tok
self.tok.finalize()
self.listener = listener
# Model
print("VOCAB_SIZE", self.tok.vocab_size())
self.encoder = model.SpeakerEncoder(self.feature_size+args.angle_feat_size, args.rnn_dim, args.dropout, bidirectional=args.bidir).cuda()
self.decoder = model.SpeakerDecoder(self.tok.vocab_size(), args.wemb, self.tok.word_to_index['<PAD>'],
args.rnn_dim, args.dropout).cuda()
self.encoder_optimizer = args.optimizer(self.encoder.parameters(), lr=args.lr)
self.decoder_optimizer = args.optimizer(self.decoder.parameters(), lr=args.lr)
# Evaluation
self.softmax_loss = torch.nn.CrossEntropyLoss(ignore_index=self.tok.word_to_index['<PAD>'])
# Will be used in beam search
self.nonreduced_softmax_loss = torch.nn.CrossEntropyLoss(
ignore_index=self.tok.word_to_index['<PAD>'],
            reduction='none'  # replaces the deprecated size_average/reduce flags
)
def train(self, iters):
for i in range(iters):
self.env.reset()
self.encoder_optimizer.zero_grad()
self.decoder_optimizer.zero_grad()
# t0 = time.time()
loss = self.teacher_forcing(train=True)
# t1 = time.time()
# print('iter: {:0>3d}, time: {:.4f}'.format(i, t1 - t0))
loss.backward()
            torch.nn.utils.clip_grad_norm_(self.encoder.parameters(), 40.)  # clip_grad_norm is deprecated
            torch.nn.utils.clip_grad_norm_(self.decoder.parameters(), 40.)
self.encoder_optimizer.step()
self.decoder_optimizer.step()
def get_insts(self, wrapper=(lambda x: x)):
# Get the caption for all the data
self.env.reset_epoch(shuffle=True)
path2inst = {}
total = self.env.size()
for _ in wrapper(range(total // self.env.batch_size + 1)): # Guarantee that all the data are processed
obs = self.env.reset()
insts = self.infer_batch() # Get the insts of the result
path_ids = [ob['path_id'] for ob in obs] # Gather the path ids
for path_id, inst in zip(path_ids, insts):
if path_id not in path2inst:
path2inst[path_id] = self.tok.shrink(inst) # Shrink the words
return path2inst
def valid(self, *aargs, **kwargs):
"""
:param iters:
:return: path2inst: path_id --> inst (the number from <bos> to <eos>)
loss: The XE loss
word_accu: per word accuracy
sent_accu: per sent accuracy
"""
path2inst = self.get_insts(*aargs, **kwargs)
# Calculate the teacher-forcing metrics
self.env.reset_epoch(shuffle=True)
N = 1 if args.fast_train else 3 # Set the iter to 1 if the fast_train (o.w. the problem occurs)
metrics = np.zeros(3)
for i in range(N):
self.env.reset()
metrics += np.array(self.teacher_forcing(train=False))
metrics /= N
return (path2inst, *metrics)
def make_equiv_action(self, a_t, perm_obs, perm_idx=None, traj=None):
if perm_idx is None:
perm_idx = range(len(perm_obs))
        actions = [[] for _ in range(self.env.batch_size)]  # batch * action_len; independent lists
max_len = 0 # for padding stop action
for i, idx in enumerate(perm_idx):
action = a_t[i]
if action != -1: # -1 is the <stop> action
select_candidate = perm_obs[i]['candidate'][action]
src_point = perm_obs[i]['viewIndex']
trg_point = select_candidate['pointId']
src_level = (src_point) // 12 # The point idx started from 0
trg_level = (trg_point) // 12
src_heading = (src_point) % 12
trg_heading = (trg_point) % 12
# adjust elevation
if trg_level > src_level:
actions[idx] = actions[idx] + [self.env_actions['up']] * int(trg_level - src_level)
elif trg_level < src_level:
actions[idx] = actions[idx] + [self.env_actions['down']] * int(src_level - trg_level)
# adjust heading
if trg_heading > src_heading:
dif = trg_heading - src_heading
if dif >= 6: # turn left
actions[idx] = actions[idx] + [self.env_actions['left']] * int(12 - dif)
else: # turn right
actions[idx] = actions[idx] + [self.env_actions['right']] * int(dif)
elif trg_heading < src_heading:
dif = src_heading - trg_heading
if dif >=6: # turn right
actions[idx] = actions[idx] + [self.env_actions['right']] * int(12 - dif)
else: # turn left
actions[idx] = actions[idx] + [self.env_actions['left']] * int(dif)
actions[idx] = actions[idx] + [(select_candidate['idx'], 0, 0)]
max_len = max(max_len, len(actions[idx]))
for idx in perm_idx:
if len(actions[idx]) < max_len:
actions[idx] = actions[idx] + [self.env_actions['<end>']] * (max_len - len(actions[idx]))
actions = np.array(actions, dtype = 'float32')
for i in range(max_len):
cur_actions = actions[:,i]
cur_actions = list(cur_actions)
cur_actions = [tuple(a) for a in cur_actions]
self.env.env.makeActions(cur_actions)
if traj is not None:
state = self.env.env.sim.getState()
for j, idx in enumerate(perm_idx):
if cur_actions[idx] != self.env_actions['<end>']:
traj[j]['path'].append((state[idx].location.viewpointId, state[idx].heading, state[idx].elevation))
def _teacher_action(self, obs, ended, tracker=None):
"""
Extract teacher actions into variable.
:param obs: The observation.
:param ended: Whether the action seq is ended
:return:
"""
a = np.zeros(len(obs), dtype=np.int64)
for i, ob in enumerate(obs):
if ended[i]: # Just ignore this index
a[i] = args.ignoreid
else:
for k, candidate in enumerate(ob['candidate']):
if candidate['viewpointId'] == ob['teacher']: # Next view point
a[i] = k
break
else: # Stop here
assert ob['teacher'] == ob['viewpoint'] # The teacher action should be "STAY HERE"
a[i] = len(ob['candidate'])
return torch.from_numpy(a).cuda()
def _candidate_variable(self, obs, actions):
candidate_feat = np.zeros((len(obs), self.feature_size + args.angle_feat_size), dtype=np.float32)
for i, (ob, act) in enumerate(zip(obs, actions)):
if act == -1: # Ignore or Stop --> Just use zero vector as the feature
pass
else:
c = ob['candidate'][act]
candidate_feat[i, :] = np.concatenate([c['visual_feat'],c['angle_feat']], -1)
return torch.from_numpy(candidate_feat).cuda()
def from_shortest_path(self, viewpoints=None, get_first_feat=False):
"""
:param viewpoints: [[], [], ....(batch_size)]. Only for dropout viewpoint
:param get_first_feat: whether output the first feat
:return:
"""
obs = self.env._get_obs()
        ended = np.array([False] * len(obs))     # Indices match permutation of the model, not env
length = np.zeros(len(obs), np.int64)
img_feats = []
can_feats = []
first_feat = np.zeros((len(obs), self.feature_size+args.angle_feat_size), np.float32)
for i, ob in enumerate(obs):
first_feat[i, -args.angle_feat_size:] = utils.angle_feature(ob['heading'], ob['elevation'])
first_feat = torch.from_numpy(first_feat).cuda()
while not ended.all():
if viewpoints is not None:
for i, ob in enumerate(obs):
viewpoints[i].append(ob['viewpoint'])
img_feats.append(self.listener._feature_variable(obs))
teacher_action = self._teacher_action(obs, ended)
teacher_action = teacher_action.cpu().numpy()
for i, act in enumerate(teacher_action):
if act < 0 or act == len(obs[i]['candidate']): # Ignore or Stop
teacher_action[i] = -1 # Stop Action
can_feats.append(self._candidate_variable(obs, teacher_action))
self.make_equiv_action(teacher_action, obs)
length += (1 - ended)
ended[:] = np.logical_or(ended, (teacher_action == -1))
obs = self.env._get_obs()
img_feats = torch.stack(img_feats, 1).contiguous() # batch_size, max_len, 36, 2176
can_feats = torch.stack(can_feats, 1).contiguous() # batch_size, max_len, 2176
if get_first_feat:
return (img_feats, can_feats, first_feat), length
else:
return (img_feats, can_feats), length
def gt_words(self, obs):
"""
See "utils.Tokenizer.encode_sentence(...)" for "instr_encoding" details
"""
seq_tensor = np.array([ob['instr_encoding'] for ob in obs])
return torch.from_numpy(seq_tensor).cuda()
def teacher_forcing(self, train=True, features=None, insts=None, for_listener=False):
if train:
self.encoder.train()
self.decoder.train()
else:
self.encoder.eval()
self.decoder.eval()
# Get Image Input & Encode
if features is not None:
# It is used in calulating the speaker score in beam-search
assert insts is not None
(img_feats, can_feats), lengths = features
ctx = self.encoder(can_feats, img_feats, lengths)
batch_size = len(lengths)
else:
obs = self.env._get_obs()
batch_size = len(obs)
(img_feats, can_feats), lengths = self.from_shortest_path() # Image Feature (from the shortest path)
ctx = self.encoder(can_feats, img_feats, lengths)
h_t = torch.zeros(1, batch_size, args.rnn_dim).cuda()
c_t = torch.zeros(1, batch_size, args.rnn_dim).cuda()
ctx_mask = utils.length2mask(lengths)
# Get Language Input
if insts is None:
insts = self.gt_words(obs) # Language Feature
# Decode
logits, _, _ = self.decoder(insts, ctx, ctx_mask, h_t, c_t)
# Because the softmax_loss only allow dim-1 to be logit,
# So permute the output (batch_size, length, logit) --> (batch_size, logit, length)
logits = logits.permute(0, 2, 1).contiguous()
loss = self.softmax_loss(
input = logits[:, :, :-1], # -1 for aligning
target = insts[:, 1:] # "1:" to ignore the word <BOS>
)
if for_listener:
return self.nonreduced_softmax_loss(
input = logits[:, :, :-1], # -1 for aligning
target = insts[:, 1:] # "1:" to ignore the word <BOS>
)
if train:
return loss
else:
# Evaluation
_, predict = logits.max(dim=1) # BATCH, LENGTH
gt_mask = (insts != self.tok.word_to_index['<PAD>'])
correct = (predict[:, :-1] == insts[:, 1:]) * gt_mask[:, 1:] # Not pad and equal to gt
correct, gt_mask = correct.type(torch.LongTensor), gt_mask.type(torch.LongTensor)
word_accu = correct.sum().item() / gt_mask[:, 1:].sum().item() # Exclude <BOS>
sent_accu = (correct.sum(dim=1) == gt_mask[:, 1:].sum(dim=1)).sum().item() / batch_size # Exclude <BOS>
return loss.item(), word_accu, sent_accu
def infer_batch(self, sampling=False, train=False, featdropmask=None):
"""
:param sampling: if not, use argmax. else use softmax_multinomial
:param train: Whether in the train mode
:return: if sampling: return insts(np, [batch, max_len]),
log_probs(torch, requires_grad, [batch,max_len])
hiddens(torch, requires_grad, [batch, max_len, dim})
And if train: the log_probs and hiddens are detached
if not sampling: returns insts(np, [batch, max_len])
"""
if train:
self.encoder.train()
self.decoder.train()
else:
self.encoder.eval()
self.decoder.eval()
# Image Input for the Encoder
obs = self.env._get_obs()
batch_size = len(obs)
viewpoints_list = [list() for _ in range(batch_size)]
# Get feature
(img_feats, can_feats), lengths = self.from_shortest_path(viewpoints=viewpoints_list) # Image Feature (from the shortest path)
# This code block is only used for the featdrop.
if featdropmask is not None:
img_feats[..., :-args.angle_feat_size] *= featdropmask
can_feats[..., :-args.angle_feat_size] *= featdropmask
# Encoder
ctx = self.encoder(can_feats, img_feats, lengths,
already_dropfeat=(featdropmask is not None))
ctx_mask = utils.length2mask(lengths)
# Decoder
words = []
log_probs = []
hidden_states = []
entropies = []
h_t = torch.zeros(1, batch_size, args.rnn_dim).cuda()
c_t = torch.zeros(1, batch_size, args.rnn_dim).cuda()
        ended = np.zeros(len(obs), bool)  # np.bool was removed in NumPy >= 1.24
word = np.ones(len(obs), np.int64) * self.tok.word_to_index['<BOS>'] # First word is <BOS>
word = torch.from_numpy(word).view(-1, 1).cuda()
for i in range(args.maxDecode):
# Decode Step
logits, h_t, c_t = self.decoder(word, ctx, ctx_mask, h_t, c_t) # Decode, logits: (b, 1, vocab_size)
# Select the word
logits = logits.squeeze() # logits: (b, vocab_size)
logits[:, self.tok.word_to_index['<UNK>']] = -float("inf") # No <UNK> in infer
if sampling:
probs = F.softmax(logits, -1)
m = torch.distributions.Categorical(probs)
word = m.sample()
log_prob = m.log_prob(word)
if train:
log_probs.append(log_prob)
hidden_states.append(h_t.squeeze())
entropies.append(m.entropy())
else:
log_probs.append(log_prob.detach())
hidden_states.append(h_t.squeeze().detach())
entropies.append(m.entropy().detach())
else:
values, word = logits.max(1)
# Append the word
cpu_word = word.cpu().numpy()
cpu_word[ended] = self.tok.word_to_index['<PAD>']
words.append(cpu_word)
# Prepare the shape for next step
word = word.view(-1, 1)
# End?
ended = np.logical_or(ended, cpu_word == self.tok.word_to_index['<EOS>'])
if ended.all():
break
if train and sampling:
return np.stack(words, 1), torch.stack(log_probs, 1), torch.stack(hidden_states, 1), torch.stack(entropies, 1)
else:
return np.stack(words, 1) # [(b), (b), (b), ...] --> [b, l]
def save(self, epoch, path):
''' Snapshot models '''
the_dir, _ = os.path.split(path)
os.makedirs(the_dir, exist_ok=True)
states = {}
def create_state(name, model, optimizer):
states[name] = {
'epoch': epoch + 1,
'state_dict': model.state_dict(),
'optimizer': optimizer.state_dict(),
}
all_tuple = [("encoder", self.encoder, self.encoder_optimizer),
("decoder", self.decoder, self.decoder_optimizer)]
for param in all_tuple:
create_state(*param)
torch.save(states, path)
def load(self, path):
''' Loads parameters (but not training state) '''
print("Load the speaker's state dict from %s" % path)
states = torch.load(path)
def recover_state(name, model, optimizer):
# print(name)
# print(list(model.state_dict().keys()))
# for key in list(model.state_dict().keys()):
# print(key, model.state_dict()[key].size())
state = model.state_dict()
state.update(states[name]['state_dict'])
model.load_state_dict(state)
if args.loadOptim:
optimizer.load_state_dict(states[name]['optimizer'])
all_tuple = [("encoder", self.encoder, self.encoder_optimizer),
("decoder", self.decoder, self.decoder_optimizer)]
for param in all_tuple:
recover_state(*param)
return states['encoder']['epoch'] - 1
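# --- Editor's note, not part of the original file: utils.length2mask is used
# throughout Speaker but is not included in this dump. A common implementation
# sketch, assuming its contract is "True marks positions past each length":
def _length2mask_sketch(lengths, size=None):
    size = int(max(lengths)) if size is None else size
    positions = torch.arange(size).unsqueeze(0)   # (1, size)
    lens = torch.as_tensor(lengths).unsqueeze(1)  # (batch, 1)
    # Move the result to the model's device (.cuda()) if needed, as the
    # surrounding code keeps its tensors on the GPU.
    return positions >= lens                      # (batch, size) bool mask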
| 43.890511
| 144
| 0.543489
|
import torch
import numpy as np
from param import args
import os
import utils
import model
import torch.nn.functional as F
import time
class Speaker():
env_actions = {
'left': (0,-1, 0),
'right': (0, 1, 0),
'up': (0, 0, 1),
'down': (0, 0,-1),
'forward': (1, 0, 0),
'<end>': (0, 0, 0),
'<start>': (0, 0, 0),
'<ignore>': (0, 0, 0)
}
def __init__(self, env, listener, tok):
self.env = env
self.feature_size = self.env.feature_size
self.tok = tok
self.tok.finalize()
self.listener = listener
print("VOCAB_SIZE", self.tok.vocab_size())
self.encoder = model.SpeakerEncoder(self.feature_size+args.angle_feat_size, args.rnn_dim, args.dropout, bidirectional=args.bidir).cuda()
self.decoder = model.SpeakerDecoder(self.tok.vocab_size(), args.wemb, self.tok.word_to_index['<PAD>'],
args.rnn_dim, args.dropout).cuda()
self.encoder_optimizer = args.optimizer(self.encoder.parameters(), lr=args.lr)
self.decoder_optimizer = args.optimizer(self.decoder.parameters(), lr=args.lr)
self.softmax_loss = torch.nn.CrossEntropyLoss(ignore_index=self.tok.word_to_index['<PAD>'])
self.nonreduced_softmax_loss = torch.nn.CrossEntropyLoss(
ignore_index=self.tok.word_to_index['<PAD>'],
            reduction='none'
)
def train(self, iters):
for i in range(iters):
self.env.reset()
self.encoder_optimizer.zero_grad()
self.decoder_optimizer.zero_grad()
loss = self.teacher_forcing(train=True)
loss.backward()
            torch.nn.utils.clip_grad_norm_(self.encoder.parameters(), 40.)
            torch.nn.utils.clip_grad_norm_(self.decoder.parameters(), 40.)
self.encoder_optimizer.step()
self.decoder_optimizer.step()
def get_insts(self, wrapper=(lambda x: x)):
self.env.reset_epoch(shuffle=True)
path2inst = {}
total = self.env.size()
for _ in wrapper(range(total // self.env.batch_size + 1)):
obs = self.env.reset()
insts = self.infer_batch()
path_ids = [ob['path_id'] for ob in obs]
for path_id, inst in zip(path_ids, insts):
if path_id not in path2inst:
path2inst[path_id] = self.tok.shrink(inst)
return path2inst
def valid(self, *aargs, **kwargs):
path2inst = self.get_insts(*aargs, **kwargs)
self.env.reset_epoch(shuffle=True)
N = 1 if args.fast_train else 3
metrics = np.zeros(3)
for i in range(N):
self.env.reset()
metrics += np.array(self.teacher_forcing(train=False))
metrics /= N
return (path2inst, *metrics)
def make_equiv_action(self, a_t, perm_obs, perm_idx=None, traj=None):
if perm_idx is None:
perm_idx = range(len(perm_obs))
        actions = [[] for _ in range(self.env.batch_size)]
max_len = 0
for i, idx in enumerate(perm_idx):
action = a_t[i]
if action != -1:
select_candidate = perm_obs[i]['candidate'][action]
src_point = perm_obs[i]['viewIndex']
trg_point = select_candidate['pointId']
src_level = (src_point) // 12
trg_level = (trg_point) // 12
src_heading = (src_point) % 12
trg_heading = (trg_point) % 12
if trg_level > src_level:
actions[idx] = actions[idx] + [self.env_actions['up']] * int(trg_level - src_level)
elif trg_level < src_level:
actions[idx] = actions[idx] + [self.env_actions['down']] * int(src_level - trg_level)
if trg_heading > src_heading:
dif = trg_heading - src_heading
if dif >= 6:
actions[idx] = actions[idx] + [self.env_actions['left']] * int(12 - dif)
else:
actions[idx] = actions[idx] + [self.env_actions['right']] * int(dif)
elif trg_heading < src_heading:
dif = src_heading - trg_heading
if dif >=6:
actions[idx] = actions[idx] + [self.env_actions['right']] * int(12 - dif)
else:
actions[idx] = actions[idx] + [self.env_actions['left']] * int(dif)
actions[idx] = actions[idx] + [(select_candidate['idx'], 0, 0)]
max_len = max(max_len, len(actions[idx]))
for idx in perm_idx:
if len(actions[idx]) < max_len:
actions[idx] = actions[idx] + [self.env_actions['<end>']] * (max_len - len(actions[idx]))
actions = np.array(actions, dtype = 'float32')
for i in range(max_len):
cur_actions = actions[:,i]
cur_actions = list(cur_actions)
cur_actions = [tuple(a) for a in cur_actions]
self.env.env.makeActions(cur_actions)
if traj is not None:
state = self.env.env.sim.getState()
for j, idx in enumerate(perm_idx):
if cur_actions[idx] != self.env_actions['<end>']:
traj[j]['path'].append((state[idx].location.viewpointId, state[idx].heading, state[idx].elevation))
def _teacher_action(self, obs, ended, tracker=None):
a = np.zeros(len(obs), dtype=np.int64)
for i, ob in enumerate(obs):
if ended[i]:
a[i] = args.ignoreid
else:
for k, candidate in enumerate(ob['candidate']):
if candidate['viewpointId'] == ob['teacher']:
a[i] = k
break
else:
assert ob['teacher'] == ob['viewpoint']
a[i] = len(ob['candidate'])
return torch.from_numpy(a).cuda()
def _candidate_variable(self, obs, actions):
candidate_feat = np.zeros((len(obs), self.feature_size + args.angle_feat_size), dtype=np.float32)
for i, (ob, act) in enumerate(zip(obs, actions)):
if act == -1:
pass
else:
c = ob['candidate'][act]
candidate_feat[i, :] = np.concatenate([c['visual_feat'],c['angle_feat']], -1)
return torch.from_numpy(candidate_feat).cuda()
def from_shortest_path(self, viewpoints=None, get_first_feat=False):
obs = self.env._get_obs()
ended = np.array([False] * len(obs))
length = np.zeros(len(obs), np.int64)
img_feats = []
can_feats = []
first_feat = np.zeros((len(obs), self.feature_size+args.angle_feat_size), np.float32)
for i, ob in enumerate(obs):
first_feat[i, -args.angle_feat_size:] = utils.angle_feature(ob['heading'], ob['elevation'])
first_feat = torch.from_numpy(first_feat).cuda()
while not ended.all():
if viewpoints is not None:
for i, ob in enumerate(obs):
viewpoints[i].append(ob['viewpoint'])
img_feats.append(self.listener._feature_variable(obs))
teacher_action = self._teacher_action(obs, ended)
teacher_action = teacher_action.cpu().numpy()
for i, act in enumerate(teacher_action):
if act < 0 or act == len(obs[i]['candidate']):
teacher_action[i] = -1
can_feats.append(self._candidate_variable(obs, teacher_action))
self.make_equiv_action(teacher_action, obs)
length += (1 - ended)
ended[:] = np.logical_or(ended, (teacher_action == -1))
obs = self.env._get_obs()
img_feats = torch.stack(img_feats, 1).contiguous()
can_feats = torch.stack(can_feats, 1).contiguous()
if get_first_feat:
return (img_feats, can_feats, first_feat), length
else:
return (img_feats, can_feats), length
def gt_words(self, obs):
seq_tensor = np.array([ob['instr_encoding'] for ob in obs])
return torch.from_numpy(seq_tensor).cuda()
def teacher_forcing(self, train=True, features=None, insts=None, for_listener=False):
if train:
self.encoder.train()
self.decoder.train()
else:
self.encoder.eval()
self.decoder.eval()
if features is not None:
assert insts is not None
(img_feats, can_feats), lengths = features
ctx = self.encoder(can_feats, img_feats, lengths)
batch_size = len(lengths)
else:
obs = self.env._get_obs()
batch_size = len(obs)
(img_feats, can_feats), lengths = self.from_shortest_path()
ctx = self.encoder(can_feats, img_feats, lengths)
h_t = torch.zeros(1, batch_size, args.rnn_dim).cuda()
c_t = torch.zeros(1, batch_size, args.rnn_dim).cuda()
ctx_mask = utils.length2mask(lengths)
if insts is None:
insts = self.gt_words(obs)
logits, _, _ = self.decoder(insts, ctx, ctx_mask, h_t, c_t)
logits = logits.permute(0, 2, 1).contiguous()
loss = self.softmax_loss(
input = logits[:, :, :-1],
target = insts[:, 1:]
)
if for_listener:
return self.nonreduced_softmax_loss(
input = logits[:, :, :-1],
target = insts[:, 1:]
)
if train:
return loss
else:
_, predict = logits.max(dim=1)
gt_mask = (insts != self.tok.word_to_index['<PAD>'])
correct = (predict[:, :-1] == insts[:, 1:]) * gt_mask[:, 1:]
correct, gt_mask = correct.type(torch.LongTensor), gt_mask.type(torch.LongTensor)
word_accu = correct.sum().item() / gt_mask[:, 1:].sum().item()
sent_accu = (correct.sum(dim=1) == gt_mask[:, 1:].sum(dim=1)).sum().item() / batch_size
return loss.item(), word_accu, sent_accu
def infer_batch(self, sampling=False, train=False, featdropmask=None):
if train:
self.encoder.train()
self.decoder.train()
else:
self.encoder.eval()
self.decoder.eval()
obs = self.env._get_obs()
batch_size = len(obs)
viewpoints_list = [list() for _ in range(batch_size)]
(img_feats, can_feats), lengths = self.from_shortest_path(viewpoints=viewpoints_list)
if featdropmask is not None:
img_feats[..., :-args.angle_feat_size] *= featdropmask
can_feats[..., :-args.angle_feat_size] *= featdropmask
ctx = self.encoder(can_feats, img_feats, lengths,
already_dropfeat=(featdropmask is not None))
ctx_mask = utils.length2mask(lengths)
words = []
log_probs = []
hidden_states = []
entropies = []
h_t = torch.zeros(1, batch_size, args.rnn_dim).cuda()
c_t = torch.zeros(1, batch_size, args.rnn_dim).cuda()
        ended = np.zeros(len(obs), bool)
word = np.ones(len(obs), np.int64) * self.tok.word_to_index['<BOS>']
word = torch.from_numpy(word).view(-1, 1).cuda()
for i in range(args.maxDecode):
logits, h_t, c_t = self.decoder(word, ctx, ctx_mask, h_t, c_t)
logits = logits.squeeze()
logits[:, self.tok.word_to_index['<UNK>']] = -float("inf")
if sampling:
probs = F.softmax(logits, -1)
m = torch.distributions.Categorical(probs)
word = m.sample()
log_prob = m.log_prob(word)
if train:
log_probs.append(log_prob)
hidden_states.append(h_t.squeeze())
entropies.append(m.entropy())
else:
log_probs.append(log_prob.detach())
hidden_states.append(h_t.squeeze().detach())
entropies.append(m.entropy().detach())
else:
values, word = logits.max(1)
cpu_word = word.cpu().numpy()
cpu_word[ended] = self.tok.word_to_index['<PAD>']
words.append(cpu_word)
word = word.view(-1, 1)
ended = np.logical_or(ended, cpu_word == self.tok.word_to_index['<EOS>'])
if ended.all():
break
if train and sampling:
return np.stack(words, 1), torch.stack(log_probs, 1), torch.stack(hidden_states, 1), torch.stack(entropies, 1)
else:
return np.stack(words, 1)
def save(self, epoch, path):
the_dir, _ = os.path.split(path)
os.makedirs(the_dir, exist_ok=True)
states = {}
def create_state(name, model, optimizer):
states[name] = {
'epoch': epoch + 1,
'state_dict': model.state_dict(),
'optimizer': optimizer.state_dict(),
}
all_tuple = [("encoder", self.encoder, self.encoder_optimizer),
("decoder", self.decoder, self.decoder_optimizer)]
for param in all_tuple:
create_state(*param)
torch.save(states, path)
def load(self, path):
print("Load the speaker's state dict from %s" % path)
states = torch.load(path)
def recover_state(name, model, optimizer):
# print(name)
# print(list(model.state_dict().keys()))
# for key in list(model.state_dict().keys()):
# print(key, model.state_dict()[key].size())
state = model.state_dict()
state.update(states[name]['state_dict'])
model.load_state_dict(state)
if args.loadOptim:
optimizer.load_state_dict(states[name]['optimizer'])
all_tuple = [("encoder", self.encoder, self.encoder_optimizer),
("decoder", self.decoder, self.decoder_optimizer)]
for param in all_tuple:
recover_state(*param)
return states['encoder']['epoch'] - 1
| true
| true
|
1c44a87a151e7c36edf8003106e7947c0ba32f65
| 48,907
|
py
|
Python
|
promgen/views.py
|
XILEF-Labs/promgen
|
f93b395df6e17a387edb9c4fcb431b10ce0a80cc
|
[
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
promgen/views.py
|
XILEF-Labs/promgen
|
f93b395df6e17a387edb9c4fcb431b10ce0a80cc
|
[
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
promgen/views.py
|
XILEF-Labs/promgen
|
f93b395df6e17a387edb9c4fcb431b10ce0a80cc
|
[
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2017 LINE Corporation
# These sources are released under the terms of the MIT license: see LICENSE
import collections
import concurrent.futures
import datetime
import json
import logging
import platform
import time
from itertools import chain
import prometheus_client
import requests
from prometheus_client.core import CounterMetricFamily, GaugeMetricFamily
from django.contrib import messages
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.contenttypes.models import ContentType
from django.db.models import Count, Q
from django.db.utils import IntegrityError
from django.http import HttpResponse, HttpResponseRedirect, JsonResponse
from django.shortcuts import get_object_or_404, redirect, render
from django.template.loader import render_to_string
from django.urls import reverse
from django.utils.translation import gettext as _  # ugettext was removed in Django 4.0
from django.views.generic import DetailView, ListView, UpdateView, View
from django.views.generic.base import RedirectView, TemplateView
from django.views.generic.detail import SingleObjectMixin
from django.views.generic.edit import CreateView, DeleteView, FormView
import promgen.templatetags.promgen as macro
from promgen import (
celery,
discovery,
forms,
mixins,
models,
plugins,
prometheus,
signals,
tasks,
util,
version,
)
from promgen.shortcuts import resolve_domain
logger = logging.getLogger(__name__)
class ShardList(LoginRequiredMixin, ListView):
queryset = models.Shard.objects.prefetch_related(
"project_set__service",
"project_set__service__owner",
"project_set__service__notifiers",
"project_set__service__notifiers__owner",
"project_set__service__rule_set",
"project_set",
"project_set__owner",
"project_set__farm",
"project_set__exporter_set",
"project_set__notifiers",
"project_set__notifiers__owner",
"prometheus_set",
)
class ShardDetail(LoginRequiredMixin, DetailView):
queryset = models.Shard.objects.prefetch_related(
"project_set__service",
"project_set__service__owner",
"project_set__service__notifiers",
"project_set__service__notifiers__owner",
"project_set__service__notifiers__filter_set",
"project_set__service__rule_set",
"project_set",
"project_set__owner",
"project_set__farm",
"project_set__exporter_set",
"project_set__notifiers",
"project_set__notifiers__owner",
"project_set__notifiers__filter_set",
)
class ServiceList(LoginRequiredMixin, ListView):
paginate_by = 20
queryset = models.Service.objects.prefetch_related(
"rule_set",
"rule_set__parent",
"project_set",
"project_set__owner",
"project_set__shard",
"project_set__notifiers",
"project_set__notifiers__owner",
"project_set__notifiers__filter_set",
"project_set__farm",
"project_set__exporter_set",
"owner",
"notifiers",
"notifiers__owner",
"notifiers__filter_set",
)
class HomeList(LoginRequiredMixin, ListView):
template_name = 'promgen/home.html'
def get_queryset(self):
# TODO: Support showing subscribed projects as well
# Get the list of senders that a user is currently subscribed to
senders = models.Sender.objects.filter(
value=self.request.user.username,
sender='promgen.notification.user',
content_type=ContentType.objects.get_for_model(models.Service),
).values_list('object_id')
# and return just our list of services
return models.Service.objects.filter(pk__in=senders).prefetch_related(
'notifiers',
'notifiers__owner',
'owner',
'rule_set',
'rule_set__parent',
'project_set',
'project_set__farm',
'project_set__shard',
'project_set__exporter_set',
'project_set__notifiers',
'project_set__owner',
'project_set__notifiers__owner',
)
class HostList(LoginRequiredMixin, ListView):
queryset = models.Host.objects\
.prefetch_related(
'farm',
'farm__project_set',
'farm__project_set__service',
)
def get_context_data(self, **kwargs):
context = super(HostList, self).get_context_data(**kwargs)
context['host_groups'] = collections.defaultdict(list)
for host in context['object_list']:
context['host_groups'][host.name].append(host)
context['host_groups'] = dict(context['host_groups'])
return context
class HostDetail(LoginRequiredMixin, View):
def get(self, request, slug):
context = {}
context['slug'] = self.kwargs['slug']
context['host_list'] = models.Host.objects\
.filter(name__icontains=self.kwargs['slug'])\
.prefetch_related('farm')
if not context['host_list']:
return render(request, 'promgen/host_404.html', context, status=404)
context['farm_list'] = models.Farm.objects.filter(
id__in=context['host_list'].values_list('farm_id', flat=True)
)
context['project_list'] = models.Project.objects.filter(
id__in=context['farm_list'].values_list('project__id', flat=True)
).prefetch_related('notifiers', 'rule_set')
context['exporter_list'] = models.Exporter.objects.filter(
project_id__in=context['project_list'].values_list('id', flat=True)
).prefetch_related('project', 'project__service')
context['service_list'] = models.Service.objects.filter(
id__in=context['project_list'].values_list('service__id', flat=True)
).prefetch_related('notifiers', 'rule_set')
context['rule_list'] = models.Rule.objects.filter(
Q(id__in=context['project_list'].values_list('rule_set__id')) |
Q(id__in=context['service_list'].values_list('rule_set__id')) |
Q(id__in=models.Site.objects.get_current().rule_set.values_list('id'))
).select_related('content_type').prefetch_related('content_object')
context['notifier_list'] = models.Sender.objects.filter(
Q(id__in=context['project_list'].values_list('notifiers__id')) |
Q(id__in=context['service_list'].values_list('notifiers__id'))
).select_related('content_type').prefetch_related('content_object')
return render(request, 'promgen/host_detail.html', context)
class AuditList(LoginRequiredMixin, ListView):
model = models.Audit
FILTERS = {
'project': models.Project,
'service': models.Service,
'rule': models.Rule,
}
def get_queryset(self):
queryset = self.model.objects\
.order_by('-created')\
.prefetch_related(
'content_object', 'user'
)
for key in self.FILTERS:
if key in self.request.GET:
obj = self.FILTERS[key].objects.get(pk=self.request.GET[key])
# Get any log entries for the object itself
qset = Q(
object_id=obj.id,
content_type_id=ContentType.objects.get_for_model(obj).id,
)
if key in ['project', 'service']:
# Look for any registered notifiers
qset |= Q(
content_type_id=ContentType.objects.get_for_model(models.Sender).id,
object_id__in=obj.notifiers.values_list('id', flat=True)
)
# Look for any registered rules
qset |= Q(
content_type_id=ContentType.objects.get_for_model(models.Rule).id,
object_id__in=obj.rule_set.values_list('id', flat=True)
)
if key == 'project':
# Only projects may have exporters
qset |= Q(
content_type_id=ContentType.objects.get_for_model(models.Exporter).id,
object_id__in=obj.exporter_set.values_list('id', flat=True)
)
# Only projects may have URLs
qset |= Q(
content_type_id=ContentType.objects.get_for_model(models.URL).id,
object_id__in=obj.url_set.values_list('id', flat=True)
)
queryset = queryset.filter(qset)
if 'user' in self.request.GET:
queryset = queryset.filter(
user_id=self.request.GET['user']
)
return queryset
paginate_by = 50
class ServiceDetail(LoginRequiredMixin, DetailView):
queryset = models.Service.objects\
.prefetch_related(
'rule_set',
'notifiers',
'notifiers__filter_set',
'notifiers__owner',
'project_set',
'project_set__shard',
'project_set__farm',
'project_set__exporter_set',
'project_set__notifiers',
'project_set__notifiers__owner'
)
class ServiceDelete(LoginRequiredMixin, DeleteView):
model = models.Service
def get_success_url(self):
return reverse('service-list')
class ProjectDelete(LoginRequiredMixin, DeleteView):
model = models.Project
def get_success_url(self):
return reverse('service-detail', args=[self.object.service_id])
class NotifierUpdate(LoginRequiredMixin, UpdateView):
model = models.Sender
form_class = forms.NotifierUpdate
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
obj = self.get_object()
# For populating breadcrumb
context[obj.content_type.model] = obj.content_object
return context
def post(self, request, pk):
if 'filter.pk' in request.POST:
f = models.Filter.objects.get(pk=request.POST['filter.pk'])
f.delete()
messages.success(request, 'Removed filter {f.name} {f.value}'.format(f=f))
if 'filter.name' in request.POST:
obj = self.get_object()
f, created = obj.filter_set.get_or_create(name=request.POST['filter.name'], value=request.POST['filter.value'])
if created:
messages.success(request, 'Created filter {f.name} {f.value}'.format(f=f))
else:
messages.warning(request, 'Updated filter {f.name} {f.value}'.format(f=f))
if 'next' in request.POST:
return redirect(request.POST['next'])
return self.get(self, request, pk)
class NotifierDelete(LoginRequiredMixin, DeleteView):
model = models.Sender
def get_success_url(self):
if 'next' in self.request.POST:
return self.request.POST['next']
if hasattr(self.object.content_object, 'get_absolute_url'):
return self.object.content_object.get_absolute_url()
return reverse("profile")
class NotifierTest(LoginRequiredMixin, View):
def post(self, request, pk):
sender = get_object_or_404(models.Sender, id=pk)
try:
sender.test()
except Exception:
messages.warning(request, 'Error sending test message with ' + sender.sender)
else:
messages.info(request, 'Sent test message with ' + sender.sender)
if 'next' in request.POST:
return redirect(request.POST['next'])
if hasattr(sender.content_object, 'get_absolute_url'):
return redirect(sender.content_object)
return redirect("profile")
class ExporterDelete(LoginRequiredMixin, DeleteView):
model = models.Exporter
def get_success_url(self):
return reverse('project-detail', args=[self.object.project_id])
class ExporterToggle(LoginRequiredMixin, View):
def post(self, request, pk):
exporter = get_object_or_404(models.Exporter, id=pk)
exporter.enabled = not exporter.enabled
exporter.save()
signals.trigger_write_config.send(request)
return JsonResponse({'redirect': exporter.project.get_absolute_url()})
class NotifierToggle(LoginRequiredMixin, View):
def post(self, request, pk):
sender = get_object_or_404(models.Sender, id=pk)
sender.enabled = not sender.enabled
sender.save()
# Redirect to current page
return JsonResponse({'redirect': ""})
class RuleDelete(mixins.PromgenPermissionMixin, DeleteView):
model = models.Rule
def get_permission_denied_message(self):
return 'Unable to delete rule %s. User lacks permission' % self.object
def get_permission_required(self):
# In the case of rules, we want to make sure the user has permission
# to delete the rule itself, but also permission to change the linked object
self.object = self.get_object()
obj = self.object._meta
tgt = self.object.content_object._meta
yield '{}.delete_{}'.format(obj.app_label, obj.model_name)
yield '{}.change_{}'.format(tgt.app_label, tgt.model_name)
def get_success_url(self):
return self.object.content_object.get_absolute_url()
class RuleToggle(mixins.PromgenPermissionMixin, SingleObjectMixin, View):
model = models.Rule
def get_permission_denied_message(self):
return 'Unable to toggle rule %s. User lacks permission' % self.object
def get_permission_required(self):
# In the case of rules, we want to make sure the user has permission
# to delete the rule itself, but also permission to change the linked object
self.object = self.get_object()
obj = self.object._meta
tgt = self.object.content_object._meta
yield '{}.change_{}'.format(obj.app_label, obj.model_name)
yield '{}.change_{}'.format(tgt.app_label, tgt.model_name)
def post(self, request, pk):
self.object.enabled = not self.object.enabled
self.object.save()
return JsonResponse({'redirect': self.object.content_object.get_absolute_url()})
class HostDelete(LoginRequiredMixin, DeleteView):
model = models.Host
def get_success_url(self):
        # If the farm is linked to at least one project, redirect to the first
        # project's page; otherwise redirect to our farm page
if self.object.farm.project_set.count():
return self.object.farm.project_set.first().get_absolute_url()
return self.object.farm.get_absolute_url()
class ProjectDetail(LoginRequiredMixin, DetailView):
queryset = models.Project.objects.prefetch_related(
'rule_set',
'rule_set__parent',
'notifiers',
'notifiers__owner',
'shard',
'service',
'service__rule_set',
'service__rule_set__parent',
)
def get_context_data(self, **kwargs):
context = super(ProjectDetail, self).get_context_data(**kwargs)
context['sources'] = models.Farm.driver_set()
context['url_form'] = forms.URLForm()
return context
class FarmList(LoginRequiredMixin, ListView):
paginate_by = 50
queryset = models.Farm.objects\
.prefetch_related(
'project_set',
'host_set',
)
class FarmDetail(LoginRequiredMixin, DetailView):
model = models.Farm
class FarmUpdate(LoginRequiredMixin, UpdateView):
model = models.Farm
button_label = _('Update Farm')
template_name = 'promgen/farm_form.html'
form_class = forms.FarmForm
def get_context_data(self, **kwargs):
context = super(FarmUpdate, self).get_context_data(**kwargs)
context['project'] = self.object.project_set.first()
context['service'] = context['project'].service
return context
def form_valid(self, form):
farm, created = models.Farm.objects.update_or_create(
id=self.kwargs['pk'],
defaults=form.clean(),
)
return HttpResponseRedirect(reverse('project-detail', args=[farm.project_set.first().id]))
class FarmDelete(LoginRequiredMixin, RedirectView):
pattern_name = 'farm-detail'
def post(self, request, pk):
farm = get_object_or_404(models.Farm, id=pk)
farm.delete()
return HttpResponseRedirect(
request.POST.get('next', reverse('service-list'))
)
class UnlinkFarm(LoginRequiredMixin, View):
def post(self, request, pk):
project = get_object_or_404(models.Project, id=pk)
oldfarm, project.farm = project.farm, None
project.save()
signals.trigger_write_config.send(request)
if oldfarm.project_set.count() == 0 and oldfarm.editable is False:
logger.debug('Cleaning up old farm %s', oldfarm)
oldfarm.delete()
return HttpResponseRedirect(reverse('project-detail', args=[project.id]))
class RulesList(LoginRequiredMixin, ListView, mixins.ServiceMixin):
template_name = "promgen/rule_list.html"
queryset = models.Rule.objects.prefetch_related("content_type", "content_object")
def get_context_data(self, **kwargs):
context = super(RulesList, self).get_context_data(**kwargs)
site_rules = models.Rule.objects.filter(
content_type__model="site", content_type__app_label="promgen"
).prefetch_related(
"content_object",
"rulelabel_set",
"ruleannotation_set",
)
service_rules = models.Rule.objects.filter(
content_type__model="service", content_type__app_label="promgen"
).prefetch_related(
"content_object",
"content_object",
"rulelabel_set",
"ruleannotation_set",
"parent",
)
project_rules = models.Rule.objects.filter(
content_type__model="project", content_type__app_label="promgen"
).prefetch_related(
"content_object",
"content_object__service",
"content_object__service",
"rulelabel_set",
"ruleannotation_set",
"parent",
)
context["rule_list"] = chain(site_rules, service_rules, project_rules)
return context
class RulesCopy(LoginRequiredMixin, View):
def post(self, request, pk):
original = get_object_or_404(models.Rule, id=pk)
form = forms.RuleCopyForm(request.POST)
if form.is_valid():
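            # Clone the rule onto the target chosen in the form and jump
            # straight to editing the new copy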
rule = original.copy_to(**form.clean())
return HttpResponseRedirect(reverse('rule-edit', args=[rule.id]))
else:
return HttpResponseRedirect(reverse('service-detail', args=[pk]))
class FarmRefresh(LoginRequiredMixin, RedirectView):
pattern_name = 'farm-detail'
def post(self, request, pk):
farm = get_object_or_404(models.Farm, id=pk)
# If any hosts are added or removed, then we want to
# trigger a config refresh
if any(farm.refresh()):
signals.trigger_write_config.send(request)
messages.info(request, 'Refreshed hosts')
if 'next' in request.POST:
return HttpResponseRedirect(request.POST['next'])
# If we don't have an explicit redirect, we can redirect to the farm
# itself
return redirect(farm)
class FarmConvert(LoginRequiredMixin, RedirectView):
pattern_name = 'farm-detail'
def post(self, request, pk):
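        # Convert a farm imported from a discovery plugin into a locally
        # managed one by resetting its source. A local farm with the same
        # name may already exist, which surfaces as an IntegrityError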
farm = get_object_or_404(models.Farm, id=pk)
farm.source = discovery.FARM_DEFAULT
try:
farm.save()
except IntegrityError:
return render(request, 'promgen/farm_duplicate.html', {
'pk': farm.pk,
'next': request.POST.get('next', reverse('farm-detail', args=[farm.pk])),
'farm_list': models.Farm.objects.filter(name=farm.name)
})
return HttpResponseRedirect(
request.POST.get('next', reverse('farm-detail', args=[farm.pk]))
)
class FarmLink(LoginRequiredMixin, View):
def get(self, request, pk, source):
context = {
'source': source,
'project': get_object_or_404(models.Project, id=pk),
'farm_list': sorted(models.Farm.fetch(source=source)),
}
return render(request, 'promgen/link_farm.html', context)
def post(self, request, pk, source):
project = get_object_or_404(models.Project, id=pk)
farm, created = models.Farm.objects.get_or_create(
name=request.POST['farm'],
source=source,
)
if created:
logger.info('Importing %s from %s', farm.name, source)
farm.refresh()
messages.info(request, 'Refreshed hosts')
project.farm = farm
project.save()
return HttpResponseRedirect(reverse('project-detail', args=[project.id]))
class ExporterRegister(LoginRequiredMixin, FormView, mixins.ProjectMixin):
model = models.Exporter
template_name = 'promgen/exporter_form.html'
form_class = forms.ExporterForm
def form_valid(self, form):
project = get_object_or_404(models.Project, id=self.kwargs['pk'])
exporter, _ = models.Exporter.objects.get_or_create(project=project, **form.clean())
return HttpResponseRedirect(reverse('project-detail', args=[project.id]))
class ExporterScrape(LoginRequiredMixin, View):
# TODO: Move to /rest/project/<slug>/scrape
def post(self, request, pk):
# Lookup our farm for testing
farm = get_object_or_404(models.Project, pk=pk).farm
# So we have a mutable dictionary
data = request.POST.dict()
# The default __metrics_path__ for Prometheus is /metrics so we need to
# manually add it here in the case it's not set for our test
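        # (setdefault returns the existing value, so an empty path posted
        # from the form still falls through to the default)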
if not data.setdefault("path", "/metrics"):
data["path"] = "/metrics"
def query():
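            # Fan out one GET per host in the farm and yield (url, status)
            # pairs as the requests complete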
futures = []
with concurrent.futures.ThreadPoolExecutor(max_workers=20) as executor:
for host in farm.host_set.all():
futures.append(
executor.submit(
util.get,
"{scheme}://{host}:{port}{path}".format(
host=host.name, **data
),
)
)
for future in concurrent.futures.as_completed(futures):
try:
result = future.result()
result.raise_for_status()
yield result.url, result.status_code
except requests.ConnectionError as e:
logger.warning("Error connecting to server")
yield e.request.url, "Error connecting to server"
except requests.RequestException as e:
logger.warning("Error with response")
yield e.request.url, str(e)
except Exception:
logger.exception("Unknown Exception")
yield "Unknown URL", "Unknown error"
try:
return JsonResponse(dict(query()))
except Exception as e:
return JsonResponse({"error": "Error with query %s" % e})
class URLRegister(LoginRequiredMixin, FormView, mixins.ProjectMixin):
model = models.URL
template_name = 'promgen/url_form.html'
form_class = forms.URLForm
def form_valid(self, form):
project = get_object_or_404(models.Project, id=self.kwargs['pk'])
url, _ = models.URL.objects.get_or_create(project=project, **form.clean())
return HttpResponseRedirect(reverse('project-detail', args=[project.id]))
class URLDelete(LoginRequiredMixin, DeleteView):
model = models.URL
def get_success_url(self):
return reverse('project-detail', args=[self.object.project_id])
class URLList(LoginRequiredMixin, ListView):
queryset = models.URL.objects\
.prefetch_related(
'project',
'project__service',
'project__shard',
'probe',
)
class ProjectRegister(LoginRequiredMixin, CreateView):
button_label = _("Project Register")
model = models.Project
fields = ["name", "description", "owner", "shard"]
def get_initial(self):
initial = {"owner": self.request.user}
if "shard" in self.request.GET:
initial["shard"] = get_object_or_404(
models.Shard, pk=self.request.GET["shard"]
)
return initial
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["service"] = get_object_or_404(models.Service, id=self.kwargs["pk"])
context["shard_list"] = models.Shard.objects.all()
return context
def form_valid(self, form):
form.instance.service_id = self.kwargs["pk"]
return super().form_valid(form)
class ProjectUpdate(LoginRequiredMixin, UpdateView):
model = models.Project
button_label = _("Project Update")
template_name = "promgen/project_form.html"
fields = ["name", "description", "owner", "service", "shard"]
def get_context_data(self, **kwargs):
context = super(ProjectUpdate, self).get_context_data(**kwargs)
context["service"] = self.object.service
context["shard_list"] = models.Shard.objects.all()
return context
class ServiceUpdate(LoginRequiredMixin, UpdateView):
button_label = _('Update Service')
form_class = forms.ServiceUpdate
model = models.Service
class RuleDetail(LoginRequiredMixin, DetailView):
queryset = models.Rule.objects.prefetch_related(
"content_object",
"content_type",
"ruleannotation_set",
"rulelabel_set",
'overrides',
'overrides__ruleannotation_set',
'overrides__rulelabel_set',
"overrides__content_object",
"overrides__content_type",
)
class RuleUpdate(mixins.PromgenPermissionMixin, UpdateView):
def get_permission_denied_message(self):
return "Unable to edit rule %s. User lacks permission" % self.object
def get_permission_required(self):
# In the case of rules, we want to make sure the user has permission
# to change the rule itself, but also permission to change the linked object
self.object = self.get_object()
obj = self.object._meta
tgt = self.object.content_object._meta
yield "{}.change_{}".format(obj.app_label, obj.model_name)
yield "{}.change_{}".format(tgt.app_label, tgt.model_name)
queryset = models.Rule.objects.prefetch_related(
"content_object", "overrides", "overrides__content_object"
)
template_name = "promgen/rule_update.html"
form_class = forms.AlertRuleForm
def get_context_data(self, **kwargs):
context = super(RuleUpdate, self).get_context_data(**kwargs)
context.setdefault("formset_labels", forms.LabelFormset(instance=self.object))
context.setdefault("formset_annotations", forms.AnnotationFormset(instance=self.object))
context["macro"] = macro.EXCLUSION_MACRO
context["rules"] = [self.object.parent] if self.object.parent else [self.object]
return context
def form_invalid(self, **kwargs):
"""If the form is invalid, render the invalid form."""
return self.render_to_response(self.get_context_data(**kwargs))
def post(self, request, *args, **kwargs):
self.object = self.get_object()
# Save a copy of our forms into a context var that we can use
# to re-render our form properly in case of errors
context = {}
context["form"] = form = self.get_form()
context["formset_labels"] = form_labels = forms.LabelFormset(
request.POST, request.FILES, instance=self.object
)
context["formset_annotations"] = form_annotations = forms.AnnotationFormset(
request.POST, request.FILES, instance=self.object
)
# Check validity of our labels and annotations in Django before we try to render
if not all([form_labels.is_valid(), form_annotations.is_valid()]):
return self.form_invalid(**context)
# Populate our cached_properties so we can render a test
# populate only rows with a 'value' so that we skip fields we're deleting
# see Django docs on cached_property and promgen.forms.RuleForm.clean()
form.instance.labels = {
l["name"]: l["value"] for l in form_labels.cleaned_data if "value" in l
}
form.instance.annotations = {
a["name"]: a["value"] for a in form_annotations.cleaned_data if "value" in a
}
# With our labels+annotations manually cached we can test
if not form.is_valid():
return self.form_invalid(**context)
# Save our labels
for instance in form_labels.save():
messages.info(request, "Added {} to {}".format(instance.name, self.object))
# Save our annotations
for instance in form_annotations.save():
messages.info(request, "Added {} to {}".format(instance.name, self.object))
return self.form_valid(form)
class AlertRuleRegister(mixins.PromgenPermissionMixin, mixins.RuleFormMixin, FormView):
model = models.Rule
template_name = "promgen/rule_register.html"
form_class = forms.AlertRuleForm
form_import_class = forms.ImportRuleForm
def get_permission_required(self):
# In the case of rules, we want to make sure the user has permission
# to add the rule itself, but also permission to change the linked object
yield "promgen.add_rule"
yield "promgen.change_" + self.kwargs["content_type"]
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
# Set a dummy rule, so that our header/breadcrumbs render correctly
context["rule"] = models.Rule()
context["rule"].pk = 0
context["rule"].set_object(
self.kwargs["content_type"], self.kwargs["object_id"]
)
context["macro"] = macro.EXCLUSION_MACRO
return context
def form_valid(self, form):
form.instance.save()
form.instance.add_label(
form.instance.content_type.model, form.instance.content_object.name
)
return HttpResponseRedirect(form.instance.get_absolute_url())
def form_import(self, form, content_object):
data = form.clean()
counters = prometheus.import_rules_v2(data["rules"], content_object)
messages.info(self.request, "Imported %s" % counters)
return HttpResponseRedirect(content_object.get_absolute_url())
class ServiceRegister(LoginRequiredMixin, CreateView):
button_label = _("Register Service")
model = models.Service
fields = ["name", "description", "owner"]
def get_initial(self):
return {"owner": self.request.user}
class FarmRegister(LoginRequiredMixin, FormView, mixins.ProjectMixin):
model = models.Farm
button_label = _('Register Farm')
template_name = 'promgen/farm_form.html'
form_class = forms.FarmForm
def form_valid(self, form):
project = get_object_or_404(models.Project, id=self.kwargs['pk'])
farm, _ = models.Farm.objects.get_or_create(source=discovery.FARM_DEFAULT, **form.clean())
project.farm = farm
project.save()
return HttpResponseRedirect(project.get_absolute_url())
class ProjectNotifierRegister(LoginRequiredMixin, FormView, mixins.ProjectMixin):
model = models.Sender
template_name = 'promgen/notifier_form.html'
form_class = forms.SenderForm
def form_valid(self, form):
project = get_object_or_404(models.Project, id=self.kwargs['pk'])
sender, created = models.Sender.objects.get_or_create(obj=project, owner=self.request.user, **form.clean())
signals.check_user_subscription(models.Sender, sender, created, self.request)
return HttpResponseRedirect(project.get_absolute_url())
class ServiceNotifierRegister(LoginRequiredMixin, FormView, mixins.ServiceMixin):
model = models.Sender
template_name = 'promgen/notifier_form.html'
form_class = forms.SenderForm
def form_valid(self, form):
service = get_object_or_404(models.Service, id=self.kwargs['pk'])
sender, created = models.Sender.objects.get_or_create(obj=service, owner=self.request.user, **form.clean())
signals.check_user_subscription(models.Sender, sender, created, self.request)
return HttpResponseRedirect(service.get_absolute_url())
class SiteDetail(LoginRequiredMixin, TemplateView):
template_name = "promgen/site_detail.html"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["rule_list"] = models.Rule.objects.filter(
content_type__model="site", content_type__app_label="promgen"
).prefetch_related("content_object", "rulelabel_set", "ruleannotation_set")
return context
class Profile(LoginRequiredMixin, FormView):
form_class = forms.SenderForm
model = models.Sender
template_name = "promgen/profile.html"
def get_context_data(self, **kwargs):
context = super(Profile, self).get_context_data(**kwargs)
        context['discovery_plugins'] = list(plugins.discovery())
        context['notifier_plugins'] = list(plugins.notifications())
context['notifiers'] = {'notifiers': models.Sender.objects.filter(obj=self.request.user)}
context['subscriptions'] = models.Sender.objects.filter(
sender='promgen.notification.user', value=self.request.user.username)
return context
def form_valid(self, form):
sender, _ = models.Sender.objects.get_or_create(obj=self.request.user, owner=self.request.user, **form.clean())
return redirect('profile')
class HostRegister(LoginRequiredMixin, FormView):
model = models.Host
template_name = "promgen/host_form.html"
form_class = forms.HostForm
def get_context_data(self, **kwargs):
context = super(HostRegister, self).get_context_data(**kwargs)
context["farm"] = get_object_or_404(models.Farm, pk=self.kwargs["pk"])
context["project"] = context["farm"].project_set.first()
return context
def form_valid(self, form):
farm = get_object_or_404(models.Farm, id=self.kwargs["pk"])
for hostname in form.cleaned_data["hosts"]:
host, created = models.Host.objects.get_or_create(
name=hostname, farm_id=farm.id
)
if created:
logger.debug("Added %s to %s", host.name, farm.name)
if farm.project_set.count() == 0:
return redirect("farm-detail", pk=farm.id)
return redirect("project-detail", pk=farm.project_set.first().id)
class ApiConfig(View):
def get(self, request):
return HttpResponse(prometheus.render_config(), content_type='application/json')
def post(self, request, *args, **kwargs):
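        # Import a Promgen config export posted as JSON; any parse or import
        # error is reported back as a 400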
try:
body = json.loads(request.body.decode('utf-8'))
prometheus.import_config(body, **kwargs)
except Exception as e:
return HttpResponse(e, status=400)
return HttpResponse('Success', status=202)
class ApiQueue(View):
def post(self, request):
signals.trigger_write_config.send(request)
signals.trigger_write_rules.send(request)
signals.trigger_write_urls.send(request)
return HttpResponse('OK', status=202)
class Commit(LoginRequiredMixin, View):
def post(self, request):
signals.trigger_write_config.send(request)
return HttpResponseRedirect(request.POST.get('next', '/'))
class _ExportRules(View):
def format(self, rules=None, name='promgen'):
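        # Render the given rules (all rules when rules is None) and return
        # them as a YAML file download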
content = prometheus.render_rules(rules)
response = HttpResponse(content)
response['Content-Type'] = 'application/x-yaml'
response['Content-Disposition'] = 'attachment; filename=%s.rule.yml' % name
return response
class RulesConfig(_ExportRules):
def get(self, request):
return self.format()
class RuleExport(_ExportRules):
def get(self, request, content_type, object_id):
ct = ContentType.objects.get(app_label="promgen", model=content_type).get_object_for_this_type(pk=object_id)
rules = models.Rule.objects.filter(obj=ct)
return self.format(rules)
class URLConfig(View):
def get(self, request):
return HttpResponse(prometheus.render_urls(), content_type='application/json')
def post(self, request):
tasks.write_urls()
return HttpResponse('OK', status=202)
class Alert(View):
def post(self, request, *args, **kwargs):
# Normally it would be more 'correct' to check our 'alert_blacklist' here and avoid
# writing to the database, but to keep the alert ingestion queue as simple as possible
# we will go ahead and write all alerts to the database and then filter out (delete)
# when we run tasks.process_alert
alert = models.Alert.objects.create(body=request.body.decode("utf-8"))
tasks.process_alert.delay(alert.pk)
return HttpResponse("OK", status=202)
class AlertList(LoginRequiredMixin, ListView):
paginate_by = 20
queryset = models.Alert.objects.order_by("-created")
def get_queryset(self):
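        # A plain ?search= matches the common routing labels (Service,
        # Project, Job); any other query parameter, e.g. ?severity=major,
        # is treated as an exact label name/value filter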
search = self.request.GET.get('search')
if search:
return self.queryset.filter(
Q(alertlabel__name="Service", alertlabel__value__icontains=search)
| Q(alertlabel__name="Project", alertlabel__value__icontains=search)
| Q(alertlabel__name="Job", alertlabel__value__icontains=search)
)
qs = self.queryset
for key, value in self.request.GET.items():
if key in ["page", "search"]:
continue
qs = qs.filter(alertlabel__name=key, alertlabel__value=value)
return qs
class AlertDetail(LoginRequiredMixin, DetailView):
model = models.Alert
class Metrics(View):
def __init__(self):
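        # Use a private registry so the standard platform/process collectors
        # and this view's custom collect() below are exposed together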
self.registry = prometheus_client.CollectorRegistry(auto_describe=True)
prometheus_client.GCCollector(registry=self.registry)
prometheus_client.PlatformCollector(registry=self.registry)
prometheus_client.ProcessCollector(registry=self.registry)
self.registry.register(self)
def get(self, request, *args, **kwargs):
return HttpResponse(
prometheus_client.generate_latest(self.registry),
content_type=prometheus_client.CONTENT_TYPE_LATEST,
)
def collect(self):
# https://github.com/prometheus/client_python#custom-collectors
v = GaugeMetricFamily(
"promgen_build_info", "Promgen Information", labels=["version", "python"]
)
v.add_metric([version.__version__, platform.python_version()], 1)
yield v
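        # Alert rows use auto-increment ids, so the latest id doubles as a
        # monotonically increasing count of processed/failed alerts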
try:
yield CounterMetricFamily(
"promgen_alerts_processed",
"Alerts",
models.Alert.objects.latest("id").id,
)
except models.Alert.DoesNotExist:
pass
try:
yield CounterMetricFamily(
"promgen_alerts_failed",
"Failed Alerts",
models.AlertError.objects.latest("id").id,
)
except models.AlertError.DoesNotExist:
pass
yield GaugeMetricFamily(
"promgen_shards", "Registered Shards", models.Shard.objects.count()
)
yield GaugeMetricFamily(
"promgen_exporters", "Registered Exporters", models.Exporter.objects.count()
)
yield GaugeMetricFamily(
"promgen_services", "Registered Services", models.Service.objects.count()
)
yield GaugeMetricFamily(
"promgen_projects", "Registered Projects", models.Project.objects.count()
)
yield GaugeMetricFamily(
"promgen_rules", "Registered Rules", models.Rule.objects.count()
)
yield GaugeMetricFamily(
"promgen_urls", "Registered URLs", models.URL.objects.count()
)
# TODO Properly de-duplicate after refactoring
yield GaugeMetricFamily(
"promgen_hosts",
"Registered Hosts",
len(models.Host.objects.values("name").annotate(Count("name"))),
)
notifier = GaugeMetricFamily(
"promgen_notifiers", "Registered Notifiers", labels=["type", "sender"]
)
for entry in models.Sender.objects.values(
"content_type__model", "sender"
).annotate(Count("sender"), count=Count("content_type")):
notifier.add_metric(
[entry["content_type__model"], entry["sender"]], entry["count"]
)
yield notifier
class Search(LoginRequiredMixin, View):
def get(self, request):
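        # Map each template context key to the model to search, the fields to
        # match against, optional prefetches, and the query-string keys (a
        # plain search or a var-* parameter) that trigger it.
        # e.g. ?search=web searches every category below, while
        # ?var-project=web only searches projects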
MAPPING = {
'farm_list': {
'field': ('name__icontains',),
'model': models.Farm,
'prefetch': ('project_set', 'host_set'),
'query': ('search', 'var-farm'),
},
'host_list': {
'field': ('name__icontains',),
'model': models.Host,
'query': ('search', 'var-instance'),
},
'project_list': {
'field': ('name__icontains',),
'model': models.Project,
'prefetch': ('service', 'notifiers', 'exporter_set', 'notifiers__owner'),
'query': ('search', 'var-project'),
},
'rule_list': {
'field': ('name__icontains', 'clause__icontains'),
'model': models.Rule,
'prefetch': ('content_object', 'ruleannotation_set', 'rulelabel_set'),
'query': ('search', ),
},
'service_list': {
'field': ('name__icontains',),
'model': models.Service,
'prefetch': ('project_set', 'rule_set', 'notifiers', 'notifiers__owner'),
'query': ('search', 'var-service'),
}
}
context = {}
for target, obj in MAPPING.items():
# If our potential search keys are not in our query string
# then we can bail out quickly
query = set(obj['query']).intersection(request.GET.keys())
if not query:
logger.info('query for %s: <skipping>', target)
continue
logger.info('query for %s: %s', target, query)
qs = obj['model'].objects
if 'prefetch' in obj:
qs = qs.prefetch_related(*obj['prefetch'])
# Build our OR query by combining Q lookups
filters = None
for var in query:
for field in obj['field']:
if filters:
filters |= Q(**{field: request.GET[var]})
else:
filters = Q(**{field: request.GET[var]})
logger.info('filtering %s by %s', target, filters)
qs = qs.filter(filters)
context[target] = qs
return render(request, 'promgen/search.html', context)
class RuleImport(mixins.PromgenPermissionMixin, FormView):
form_class = forms.ImportRuleForm
template_name = 'promgen/rule_import.html'
# Since rule imports can change a lot of site wide stuff we
# require site edit permission here
permission_required = ('promgen.change_site', 'promgen.change_rule')
    permission_denied_message = 'User lacks permission to import'
def form_valid(self, form):
data = form.clean()
if data.get('file_field'):
rules = data['file_field'].read().decode('utf8')
elif data.get('rules'):
rules = data.get('rules')
else:
messages.warning(self.request, 'Missing rules')
return self.form_invalid(form)
try:
counters = prometheus.import_rules_v2(rules)
messages.info(self.request, 'Imported %s' % counters)
return redirect('rule-import')
        except Exception:
messages.error(self.request, 'Error importing rules')
return self.form_invalid(form)
class Import(mixins.PromgenPermissionMixin, FormView):
template_name = 'promgen/import_form.html'
form_class = forms.ImportConfigForm
# Since imports can change a lot of site wide stuff we
# require site edit permission here
permission_required = (
'promgen.change_site', 'promgen.change_rule', 'promgen.change_exporter'
)
permission_denied_message = 'User lacks permission to import'
def form_valid(self, form):
data = form.clean()
if data.get('file_field'):
messages.info(self.request, 'Importing config from file')
config = data['file_field'].read().decode('utf8')
elif data.get('url'):
messages.info(self.request, 'Importing config from url')
response = util.get(data['url'])
response.raise_for_status()
config = response.text
elif data.get('config'):
messages.info(self.request, 'Importing config')
config = data['config']
else:
messages.warning(self.request, 'Missing config')
return self.form_invalid(form)
kwargs = {}
# This also lets us catch passing an empty string to signal using
# the shard value from the post request
if data.get('shard'):
kwargs['replace_shard'] = data.get('shard')
imported, skipped = prometheus.import_config(json.loads(config), **kwargs)
if imported:
counters = {key: len(imported[key]) for key in imported}
messages.info(self.request, 'Imported %s' % counters)
if skipped:
counters = {key: len(skipped[key]) for key in skipped}
messages.info(self.request, 'Skipped %s' % counters)
# If we only have a single object in a category, automatically
# redirect to that category to make things easier to understand
if len(imported['Project']) == 1:
return HttpResponseRedirect(imported['Project'][0].get_absolute_url())
if len(imported['Service']) == 1:
return HttpResponseRedirect(imported['Service'][0].get_absolute_url())
if len(imported['Shard']) == 1:
return HttpResponseRedirect(imported['Shard'][0].get_absolute_url())
return redirect('service-list')
class RuleTest(LoginRequiredMixin, View):
def post(self, request, pk):
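        # pk 0 is the placeholder used by the rule register form, so build a
        # transient rule from the posted content_type/object_id instead of
        # looking one up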
if pk == 0:
rule = models.Rule()
rule.set_object(request.POST['content_type'], request.POST['object_id'])
else:
rule = get_object_or_404(models.Rule, id=pk)
query = macro.rulemacro(rule, request.POST['query'])
# Since our rules affect all servers we use Promgen's proxy-query to test our rule
# against all the servers at once
url = resolve_domain('proxy-query')
logger.debug('Querying %s with %s', url, query)
start = time.time()
result = util.get(url, {'query': query}).json()
duration = datetime.timedelta(seconds=(time.time() - start))
context = {'status': result['status'], 'duration': duration, 'query': query}
context['data'] = result.get('data', {})
context['errors'] = {}
metrics = context['data'].get('result', [])
if metrics:
context['collapse'] = len(metrics) > 5
for row in metrics:
if 'service' not in row['metric'] and \
'project' not in row['metric']:
                    context['errors']['routing'] = 'Some metrics are missing service and project labels, so Promgen will be unable to route messages'
context['status'] = 'warning'
else:
context['status'] = 'info'
            context['errors']['no_results'] = 'No results. You may need to remove conditional checks (> < ==) to verify the query matches any data'
# Place this at the bottom to have a query error show up as danger
if result['status'] != 'success':
context['status'] = 'danger'
context['errors']['Query'] = result['error']
return JsonResponse({request.POST['target']: render_to_string('promgen/ajax_clause_check.html', context)})
| 36.79985
| 147
| 0.631566
|
import collections
import concurrent.futures
import datetime
import json
import logging
import platform
import time
from itertools import chain
import prometheus_client
import requests
from prometheus_client.core import CounterMetricFamily, GaugeMetricFamily
from django.contrib import messages
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.contenttypes.models import ContentType
from django.db.models import Count, Q
from django.db.utils import IntegrityError
from django.http import HttpResponse, HttpResponseRedirect, JsonResponse
from django.shortcuts import get_object_or_404, redirect, render
from django.template.loader import render_to_string
from django.urls import reverse
from django.utils.translation import ugettext as _
from django.views.generic import DetailView, ListView, UpdateView, View
from django.views.generic.base import RedirectView, TemplateView
from django.views.generic.detail import SingleObjectMixin
from django.views.generic.edit import CreateView, DeleteView, FormView
import promgen.templatetags.promgen as macro
from promgen import (
celery,
discovery,
forms,
mixins,
models,
plugins,
prometheus,
signals,
tasks,
util,
version,
)
from promgen.shortcuts import resolve_domain
logger = logging.getLogger(__name__)
class ShardList(LoginRequiredMixin, ListView):
queryset = models.Shard.objects.prefetch_related(
"project_set__service",
"project_set__service__owner",
"project_set__service__notifiers",
"project_set__service__notifiers__owner",
"project_set__service__rule_set",
"project_set",
"project_set__owner",
"project_set__farm",
"project_set__exporter_set",
"project_set__notifiers",
"project_set__notifiers__owner",
"prometheus_set",
)
class ShardDetail(LoginRequiredMixin, DetailView):
queryset = models.Shard.objects.prefetch_related(
"project_set__service",
"project_set__service__owner",
"project_set__service__notifiers",
"project_set__service__notifiers__owner",
"project_set__service__notifiers__filter_set",
"project_set__service__rule_set",
"project_set",
"project_set__owner",
"project_set__farm",
"project_set__exporter_set",
"project_set__notifiers",
"project_set__notifiers__owner",
"project_set__notifiers__filter_set",
)
class ServiceList(LoginRequiredMixin, ListView):
paginate_by = 20
queryset = models.Service.objects.prefetch_related(
"rule_set",
"rule_set__parent",
"project_set",
"project_set__owner",
"project_set__shard",
"project_set__notifiers",
"project_set__notifiers__owner",
"project_set__notifiers__filter_set",
"project_set__farm",
"project_set__exporter_set",
"owner",
"notifiers",
"notifiers__owner",
"notifiers__filter_set",
)
class HomeList(LoginRequiredMixin, ListView):
template_name = 'promgen/home.html'
def get_queryset(self):
senders = models.Sender.objects.filter(
value=self.request.user.username,
sender='promgen.notification.user',
content_type=ContentType.objects.get_for_model(models.Service),
).values_list('object_id')
return models.Service.objects.filter(pk__in=senders).prefetch_related(
'notifiers',
'notifiers__owner',
'owner',
'rule_set',
'rule_set__parent',
'project_set',
'project_set__farm',
'project_set__shard',
'project_set__exporter_set',
'project_set__notifiers',
'project_set__owner',
'project_set__notifiers__owner',
)
class HostList(LoginRequiredMixin, ListView):
queryset = models.Host.objects\
.prefetch_related(
'farm',
'farm__project_set',
'farm__project_set__service',
)
def get_context_data(self, **kwargs):
context = super(HostList, self).get_context_data(**kwargs)
context['host_groups'] = collections.defaultdict(list)
for host in context['object_list']:
context['host_groups'][host.name].append(host)
context['host_groups'] = dict(context['host_groups'])
return context
class HostDetail(LoginRequiredMixin, View):
def get(self, request, slug):
context = {}
context['slug'] = self.kwargs['slug']
context['host_list'] = models.Host.objects\
.filter(name__icontains=self.kwargs['slug'])\
.prefetch_related('farm')
if not context['host_list']:
return render(request, 'promgen/host_404.html', context, status=404)
context['farm_list'] = models.Farm.objects.filter(
id__in=context['host_list'].values_list('farm_id', flat=True)
)
context['project_list'] = models.Project.objects.filter(
id__in=context['farm_list'].values_list('project__id', flat=True)
).prefetch_related('notifiers', 'rule_set')
context['exporter_list'] = models.Exporter.objects.filter(
project_id__in=context['project_list'].values_list('id', flat=True)
).prefetch_related('project', 'project__service')
context['service_list'] = models.Service.objects.filter(
id__in=context['project_list'].values_list('service__id', flat=True)
).prefetch_related('notifiers', 'rule_set')
context['rule_list'] = models.Rule.objects.filter(
Q(id__in=context['project_list'].values_list('rule_set__id')) |
Q(id__in=context['service_list'].values_list('rule_set__id')) |
Q(id__in=models.Site.objects.get_current().rule_set.values_list('id'))
).select_related('content_type').prefetch_related('content_object')
context['notifier_list'] = models.Sender.objects.filter(
Q(id__in=context['project_list'].values_list('notifiers__id')) |
Q(id__in=context['service_list'].values_list('notifiers__id'))
).select_related('content_type').prefetch_related('content_object')
return render(request, 'promgen/host_detail.html', context)
class AuditList(LoginRequiredMixin, ListView):
model = models.Audit
FILTERS = {
'project': models.Project,
'service': models.Service,
'rule': models.Rule,
}
def get_queryset(self):
queryset = self.model.objects\
.order_by('-created')\
.prefetch_related(
'content_object', 'user'
)
for key in self.FILTERS:
if key in self.request.GET:
obj = self.FILTERS[key].objects.get(pk=self.request.GET[key])
qset = Q(
object_id=obj.id,
content_type_id=ContentType.objects.get_for_model(obj).id,
)
if key in ['project', 'service']:
qset |= Q(
content_type_id=ContentType.objects.get_for_model(models.Sender).id,
object_id__in=obj.notifiers.values_list('id', flat=True)
)
qset |= Q(
content_type_id=ContentType.objects.get_for_model(models.Rule).id,
object_id__in=obj.rule_set.values_list('id', flat=True)
)
if key == 'project':
qset |= Q(
content_type_id=ContentType.objects.get_for_model(models.Exporter).id,
object_id__in=obj.exporter_set.values_list('id', flat=True)
)
qset |= Q(
content_type_id=ContentType.objects.get_for_model(models.URL).id,
object_id__in=obj.url_set.values_list('id', flat=True)
)
queryset = queryset.filter(qset)
if 'user' in self.request.GET:
queryset = queryset.filter(
user_id=self.request.GET['user']
)
return queryset
paginate_by = 50
class ServiceDetail(LoginRequiredMixin, DetailView):
queryset = models.Service.objects\
.prefetch_related(
'rule_set',
'notifiers',
'notifiers__filter_set',
'notifiers__owner',
'project_set',
'project_set__shard',
'project_set__farm',
'project_set__exporter_set',
'project_set__notifiers',
'project_set__notifiers__owner'
)
class ServiceDelete(LoginRequiredMixin, DeleteView):
model = models.Service
def get_success_url(self):
return reverse('service-list')
class ProjectDelete(LoginRequiredMixin, DeleteView):
model = models.Project
def get_success_url(self):
return reverse('service-detail', args=[self.object.service_id])
class NotifierUpdate(LoginRequiredMixin, UpdateView):
model = models.Sender
form_class = forms.NotifierUpdate
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
obj = self.get_object()
context[obj.content_type.model] = obj.content_object
return context
def post(self, request, pk):
if 'filter.pk' in request.POST:
f = models.Filter.objects.get(pk=request.POST['filter.pk'])
f.delete()
messages.success(request, 'Removed filter {f.name} {f.value}'.format(f=f))
if 'filter.name' in request.POST:
obj = self.get_object()
f, created = obj.filter_set.get_or_create(name=request.POST['filter.name'], value=request.POST['filter.value'])
if created:
messages.success(request, 'Created filter {f.name} {f.value}'.format(f=f))
else:
messages.warning(request, 'Updated filter {f.name} {f.value}'.format(f=f))
if 'next' in request.POST:
return redirect(request.POST['next'])
return self.get(self, request, pk)
class NotifierDelete(LoginRequiredMixin, DeleteView):
model = models.Sender
def get_success_url(self):
if 'next' in self.request.POST:
return self.request.POST['next']
if hasattr(self.object.content_object, 'get_absolute_url'):
return self.object.content_object.get_absolute_url()
return reverse("profile")
class NotifierTest(LoginRequiredMixin, View):
def post(self, request, pk):
sender = get_object_or_404(models.Sender, id=pk)
try:
sender.test()
except Exception:
messages.warning(request, 'Error sending test message with ' + sender.sender)
else:
messages.info(request, 'Sent test message with ' + sender.sender)
if 'next' in request.POST:
return redirect(request.POST['next'])
if hasattr(sender.content_object, 'get_absolute_url'):
return redirect(sender.content_object)
return redirect("profile")
class ExporterDelete(LoginRequiredMixin, DeleteView):
model = models.Exporter
def get_success_url(self):
return reverse('project-detail', args=[self.object.project_id])
class ExporterToggle(LoginRequiredMixin, View):
def post(self, request, pk):
exporter = get_object_or_404(models.Exporter, id=pk)
exporter.enabled = not exporter.enabled
exporter.save()
signals.trigger_write_config.send(request)
return JsonResponse({'redirect': exporter.project.get_absolute_url()})
class NotifierToggle(LoginRequiredMixin, View):
def post(self, request, pk):
sender = get_object_or_404(models.Sender, id=pk)
sender.enabled = not sender.enabled
sender.save()
return JsonResponse({'redirect': ""})
class RuleDelete(mixins.PromgenPermissionMixin, DeleteView):
model = models.Rule
def get_permission_denied_message(self):
return 'Unable to delete rule %s. User lacks permission' % self.object
def get_permission_required(self):
self.object = self.get_object()
obj = self.object._meta
tgt = self.object.content_object._meta
yield '{}.delete_{}'.format(obj.app_label, obj.model_name)
yield '{}.change_{}'.format(tgt.app_label, tgt.model_name)
def get_success_url(self):
return self.object.content_object.get_absolute_url()
class RuleToggle(mixins.PromgenPermissionMixin, SingleObjectMixin, View):
model = models.Rule
def get_permission_denied_message(self):
return 'Unable to toggle rule %s. User lacks permission' % self.object
def get_permission_required(self):
self.object = self.get_object()
obj = self.object._meta
tgt = self.object.content_object._meta
yield '{}.change_{}'.format(obj.app_label, obj.model_name)
yield '{}.change_{}'.format(tgt.app_label, tgt.model_name)
def post(self, request, pk):
self.object.enabled = not self.object.enabled
self.object.save()
return JsonResponse({'redirect': self.object.content_object.get_absolute_url()})
class HostDelete(LoginRequiredMixin, DeleteView):
model = models.Host
def get_success_url(self):
# otherwise we redirect to our farm page
if self.object.farm.project_set.count():
return self.object.farm.project_set.first().get_absolute_url()
return self.object.farm.get_absolute_url()
class ProjectDetail(LoginRequiredMixin, DetailView):
queryset = models.Project.objects.prefetch_related(
'rule_set',
'rule_set__parent',
'notifiers',
'notifiers__owner',
'shard',
'service',
'service__rule_set',
'service__rule_set__parent',
)
def get_context_data(self, **kwargs):
context = super(ProjectDetail, self).get_context_data(**kwargs)
context['sources'] = models.Farm.driver_set()
context['url_form'] = forms.URLForm()
return context
class FarmList(LoginRequiredMixin, ListView):
paginate_by = 50
queryset = models.Farm.objects\
.prefetch_related(
'project_set',
'host_set',
)
class FarmDetail(LoginRequiredMixin, DetailView):
model = models.Farm
class FarmUpdate(LoginRequiredMixin, UpdateView):
model = models.Farm
button_label = _('Update Farm')
template_name = 'promgen/farm_form.html'
form_class = forms.FarmForm
def get_context_data(self, **kwargs):
context = super(FarmUpdate, self).get_context_data(**kwargs)
context['project'] = self.object.project_set.first()
context['service'] = context['project'].service
return context
def form_valid(self, form):
farm, created = models.Farm.objects.update_or_create(
id=self.kwargs['pk'],
defaults=form.clean(),
)
return HttpResponseRedirect(reverse('project-detail', args=[farm.project_set.first().id]))
class FarmDelete(LoginRequiredMixin, RedirectView):
pattern_name = 'farm-detail'
def post(self, request, pk):
farm = get_object_or_404(models.Farm, id=pk)
farm.delete()
return HttpResponseRedirect(
request.POST.get('next', reverse('service-list'))
)
class UnlinkFarm(LoginRequiredMixin, View):
def post(self, request, pk):
project = get_object_or_404(models.Project, id=pk)
oldfarm, project.farm = project.farm, None
project.save()
signals.trigger_write_config.send(request)
if oldfarm.project_set.count() == 0 and oldfarm.editable is False:
logger.debug('Cleaning up old farm %s', oldfarm)
oldfarm.delete()
return HttpResponseRedirect(reverse('project-detail', args=[project.id]))
class RulesList(LoginRequiredMixin, ListView, mixins.ServiceMixin):
template_name = "promgen/rule_list.html"
queryset = models.Rule.objects.prefetch_related("content_type", "content_object")
def get_context_data(self, **kwargs):
context = super(RulesList, self).get_context_data(**kwargs)
site_rules = models.Rule.objects.filter(
content_type__model="site", content_type__app_label="promgen"
).prefetch_related(
"content_object",
"rulelabel_set",
"ruleannotation_set",
)
service_rules = models.Rule.objects.filter(
content_type__model="service", content_type__app_label="promgen"
).prefetch_related(
"content_object",
"content_object",
"rulelabel_set",
"ruleannotation_set",
"parent",
)
project_rules = models.Rule.objects.filter(
content_type__model="project", content_type__app_label="promgen"
).prefetch_related(
"content_object",
"content_object__service",
"content_object__service",
"rulelabel_set",
"ruleannotation_set",
"parent",
)
context["rule_list"] = chain(site_rules, service_rules, project_rules)
return context
class RulesCopy(LoginRequiredMixin, View):
def post(self, request, pk):
original = get_object_or_404(models.Rule, id=pk)
form = forms.RuleCopyForm(request.POST)
if form.is_valid():
rule = original.copy_to(**form.clean())
return HttpResponseRedirect(reverse('rule-edit', args=[rule.id]))
else:
return HttpResponseRedirect(reverse('service-detail', args=[pk]))
class FarmRefresh(LoginRequiredMixin, RedirectView):
pattern_name = 'farm-detail'
def post(self, request, pk):
farm = get_object_or_404(models.Farm, id=pk)
# If any hosts are added or removed, then we want to
# trigger a config refresh
if any(farm.refresh()):
signals.trigger_write_config.send(request)
messages.info(request, 'Refreshed hosts')
if 'next' in request.POST:
return HttpResponseRedirect(request.POST['next'])
# If we don't have an explicit redirect, we can redirect to the farm
return redirect(farm)
class FarmConvert(LoginRequiredMixin, RedirectView):
pattern_name = 'farm-detail'
def post(self, request, pk):
farm = get_object_or_404(models.Farm, id=pk)
farm.source = discovery.FARM_DEFAULT
try:
farm.save()
except IntegrityError:
return render(request, 'promgen/farm_duplicate.html', {
'pk': farm.pk,
'next': request.POST.get('next', reverse('farm-detail', args=[farm.pk])),
'farm_list': models.Farm.objects.filter(name=farm.name)
})
return HttpResponseRedirect(
request.POST.get('next', reverse('farm-detail', args=[farm.pk]))
)
class FarmLink(LoginRequiredMixin, View):
def get(self, request, pk, source):
context = {
'source': source,
'project': get_object_or_404(models.Project, id=pk),
'farm_list': sorted(models.Farm.fetch(source=source)),
}
return render(request, 'promgen/link_farm.html', context)
def post(self, request, pk, source):
project = get_object_or_404(models.Project, id=pk)
farm, created = models.Farm.objects.get_or_create(
name=request.POST['farm'],
source=source,
)
if created:
logger.info('Importing %s from %s', farm.name, source)
farm.refresh()
messages.info(request, 'Refreshed hosts')
project.farm = farm
project.save()
return HttpResponseRedirect(reverse('project-detail', args=[project.id]))
class ExporterRegister(LoginRequiredMixin, FormView, mixins.ProjectMixin):
model = models.Exporter
template_name = 'promgen/exporter_form.html'
form_class = forms.ExporterForm
def form_valid(self, form):
project = get_object_or_404(models.Project, id=self.kwargs['pk'])
exporter, _ = models.Exporter.objects.get_or_create(project=project, **form.clean())
return HttpResponseRedirect(reverse('project-detail', args=[project.id]))
class ExporterScrape(LoginRequiredMixin, View):
def post(self, request, pk):
farm = get_object_or_404(models.Project, pk=pk).farm
data = request.POST.dict()
if not data.setdefault("path", "/metrics"):
data["path"] = "/metrics"
def query():
futures = []
with concurrent.futures.ThreadPoolExecutor(max_workers=20) as executor:
for host in farm.host_set.all():
futures.append(
executor.submit(
util.get,
"{scheme}://{host}:{port}{path}".format(
host=host.name, **data
),
)
)
for future in concurrent.futures.as_completed(futures):
try:
result = future.result()
result.raise_for_status()
yield result.url, result.status_code
except requests.ConnectionError as e:
logger.warning("Error connecting to server")
yield e.request.url, "Error connecting to server"
except requests.RequestException as e:
logger.warning("Error with response")
yield e.request.url, str(e)
except Exception:
logger.exception("Unknown Exception")
yield "Unknown URL", "Unknown error"
try:
return JsonResponse(dict(query()))
except Exception as e:
return JsonResponse({"error": "Error with query %s" % e})
class URLRegister(LoginRequiredMixin, FormView, mixins.ProjectMixin):
model = models.URL
template_name = 'promgen/url_form.html'
form_class = forms.URLForm
def form_valid(self, form):
project = get_object_or_404(models.Project, id=self.kwargs['pk'])
url, _ = models.URL.objects.get_or_create(project=project, **form.clean())
return HttpResponseRedirect(reverse('project-detail', args=[project.id]))
class URLDelete(LoginRequiredMixin, DeleteView):
model = models.URL
def get_success_url(self):
return reverse('project-detail', args=[self.object.project_id])
class URLList(LoginRequiredMixin, ListView):
queryset = models.URL.objects\
.prefetch_related(
'project',
'project__service',
'project__shard',
'probe',
)
class ProjectRegister(LoginRequiredMixin, CreateView):
button_label = _("Project Register")
model = models.Project
fields = ["name", "description", "owner", "shard"]
def get_initial(self):
initial = {"owner": self.request.user}
if "shard" in self.request.GET:
initial["shard"] = get_object_or_404(
models.Shard, pk=self.request.GET["shard"]
)
return initial
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["service"] = get_object_or_404(models.Service, id=self.kwargs["pk"])
context["shard_list"] = models.Shard.objects.all()
return context
def form_valid(self, form):
form.instance.service_id = self.kwargs["pk"]
return super().form_valid(form)
class ProjectUpdate(LoginRequiredMixin, UpdateView):
model = models.Project
button_label = _("Project Update")
template_name = "promgen/project_form.html"
fields = ["name", "description", "owner", "service", "shard"]
def get_context_data(self, **kwargs):
context = super(ProjectUpdate, self).get_context_data(**kwargs)
context["service"] = self.object.service
context["shard_list"] = models.Shard.objects.all()
return context
class ServiceUpdate(LoginRequiredMixin, UpdateView):
button_label = _('Update Service')
form_class = forms.ServiceUpdate
model = models.Service
class RuleDetail(LoginRequiredMixin, DetailView):
queryset = models.Rule.objects.prefetch_related(
"content_object",
"content_type",
"ruleannotation_set",
"rulelabel_set",
'overrides',
'overrides__ruleannotation_set',
'overrides__rulelabel_set',
"overrides__content_object",
"overrides__content_type",
)
class RuleUpdate(mixins.PromgenPermissionMixin, UpdateView):
def get_permission_denied_message(self):
return "Unable to edit rule %s. User lacks permission" % self.object
def get_permission_required(self):
# In the case of rules, we want to make sure the user has permission
# to change the rule itself, but also permission to change the linked object
self.object = self.get_object()
obj = self.object._meta
tgt = self.object.content_object._meta
yield "{}.change_{}".format(obj.app_label, obj.model_name)
yield "{}.change_{}".format(tgt.app_label, tgt.model_name)
queryset = models.Rule.objects.prefetch_related(
"content_object", "overrides", "overrides__content_object"
)
template_name = "promgen/rule_update.html"
form_class = forms.AlertRuleForm
def get_context_data(self, **kwargs):
context = super(RuleUpdate, self).get_context_data(**kwargs)
context.setdefault("formset_labels", forms.LabelFormset(instance=self.object))
context.setdefault("formset_annotations", forms.AnnotationFormset(instance=self.object))
context["macro"] = macro.EXCLUSION_MACRO
context["rules"] = [self.object.parent] if self.object.parent else [self.object]
return context
def form_invalid(self, **kwargs):
return self.render_to_response(self.get_context_data(**kwargs))
def post(self, request, *args, **kwargs):
self.object = self.get_object()
# Save a copy of our forms into a context var that we can use
# to re-render our form properly in case of errors
context = {}
context["form"] = form = self.get_form()
context["formset_labels"] = form_labels = forms.LabelFormset(
request.POST, request.FILES, instance=self.object
)
context["formset_annotations"] = form_annotations = forms.AnnotationFormset(
request.POST, request.FILES, instance=self.object
)
# Check validity of our labels and annotations in Django before we try to render
if not all([form_labels.is_valid(), form_annotations.is_valid()]):
return self.form_invalid(**context)
# Populate our cached_properties so we can render a test
# populate only rows with a 'value' so that we skip fields we're deleting
form.instance.labels = {
l["name"]: l["value"] for l in form_labels.cleaned_data if "value" in l
}
form.instance.annotations = {
a["name"]: a["value"] for a in form_annotations.cleaned_data if "value" in a
}
if not form.is_valid():
return self.form_invalid(**context)
for instance in form_labels.save():
messages.info(request, "Added {} to {}".format(instance.name, self.object))
for instance in form_annotations.save():
messages.info(request, "Added {} to {}".format(instance.name, self.object))
return self.form_valid(form)
class AlertRuleRegister(mixins.PromgenPermissionMixin, mixins.RuleFormMixin, FormView):
model = models.Rule
template_name = "promgen/rule_register.html"
form_class = forms.AlertRuleForm
form_import_class = forms.ImportRuleForm
def get_permission_required(self):
yield "promgen.add_rule"
yield "promgen.change_" + self.kwargs["content_type"]
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["rule"] = models.Rule()
context["rule"].pk = 0
context["rule"].set_object(
self.kwargs["content_type"], self.kwargs["object_id"]
)
context["macro"] = macro.EXCLUSION_MACRO
return context
def form_valid(self, form):
form.instance.save()
form.instance.add_label(
form.instance.content_type.model, form.instance.content_object.name
)
return HttpResponseRedirect(form.instance.get_absolute_url())
def form_import(self, form, content_object):
data = form.clean()
counters = prometheus.import_rules_v2(data["rules"], content_object)
messages.info(self.request, "Imported %s" % counters)
return HttpResponseRedirect(content_object.get_absolute_url())
class ServiceRegister(LoginRequiredMixin, CreateView):
button_label = _("Register Service")
model = models.Service
fields = ["name", "description", "owner"]
def get_initial(self):
return {"owner": self.request.user}
class FarmRegister(LoginRequiredMixin, FormView, mixins.ProjectMixin):
model = models.Farm
button_label = _('Register Farm')
template_name = 'promgen/farm_form.html'
form_class = forms.FarmForm
def form_valid(self, form):
project = get_object_or_404(models.Project, id=self.kwargs['pk'])
farm, _ = models.Farm.objects.get_or_create(source=discovery.FARM_DEFAULT, **form.clean())
project.farm = farm
project.save()
return HttpResponseRedirect(project.get_absolute_url())
class ProjectNotifierRegister(LoginRequiredMixin, FormView, mixins.ProjectMixin):
model = models.Sender
template_name = 'promgen/notifier_form.html'
form_class = forms.SenderForm
def form_valid(self, form):
project = get_object_or_404(models.Project, id=self.kwargs['pk'])
sender, created = models.Sender.objects.get_or_create(obj=project, owner=self.request.user, **form.clean())
signals.check_user_subscription(models.Sender, sender, created, self.request)
return HttpResponseRedirect(project.get_absolute_url())
class ServiceNotifierRegister(LoginRequiredMixin, FormView, mixins.ServiceMixin):
model = models.Sender
template_name = 'promgen/notifier_form.html'
form_class = forms.SenderForm
def form_valid(self, form):
service = get_object_or_404(models.Service, id=self.kwargs['pk'])
sender, created = models.Sender.objects.get_or_create(obj=service, owner=self.request.user, **form.clean())
signals.check_user_subscription(models.Sender, sender, created, self.request)
return HttpResponseRedirect(service.get_absolute_url())
class SiteDetail(LoginRequiredMixin, TemplateView):
template_name = "promgen/site_detail.html"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["rule_list"] = models.Rule.objects.filter(
content_type__model="site", content_type__app_label="promgen"
).prefetch_related("content_object", "rulelabel_set", "ruleannotation_set")
return context
class Profile(LoginRequiredMixin, FormView):
form_class = forms.SenderForm
model = models.Sender
template_name = "promgen/profile.html"
def get_context_data(self, **kwargs):
context = super(Profile, self).get_context_data(**kwargs)
context['discovery_plugins'] = [entry for entry in plugins.discovery()]
context['notifier_plugins'] = [entry for entry in plugins.notifications()]
context['notifiers'] = {'notifiers': models.Sender.objects.filter(obj=self.request.user)}
context['subscriptions'] = models.Sender.objects.filter(
sender='promgen.notification.user', value=self.request.user.username)
return context
def form_valid(self, form):
sender, _ = models.Sender.objects.get_or_create(obj=self.request.user, owner=self.request.user, **form.clean())
return redirect('profile')
class HostRegister(LoginRequiredMixin, FormView):
model = models.Host
template_name = "promgen/host_form.html"
form_class = forms.HostForm
def get_context_data(self, **kwargs):
context = super(HostRegister, self).get_context_data(**kwargs)
context["farm"] = get_object_or_404(models.Farm, pk=self.kwargs["pk"])
context["project"] = context["farm"].project_set.first()
return context
def form_valid(self, form):
farm = get_object_or_404(models.Farm, id=self.kwargs["pk"])
for hostname in form.cleaned_data["hosts"]:
host, created = models.Host.objects.get_or_create(
name=hostname, farm_id=farm.id
)
if created:
logger.debug("Added %s to %s", host.name, farm.name)
if farm.project_set.count() == 0:
return redirect("farm-detail", pk=farm.id)
return redirect("project-detail", pk=farm.project_set.first().id)
class ApiConfig(View):
def get(self, request):
return HttpResponse(prometheus.render_config(), content_type='application/json')
def post(self, request, *args, **kwargs):
try:
body = json.loads(request.body.decode('utf-8'))
prometheus.import_config(body, **kwargs)
except Exception as e:
return HttpResponse(e, status=400)
return HttpResponse('Success', status=202)
class ApiQueue(View):
def post(self, request):
signals.trigger_write_config.send(request)
signals.trigger_write_rules.send(request)
signals.trigger_write_urls.send(request)
return HttpResponse('OK', status=202)
class Commit(LoginRequiredMixin, View):
def post(self, request):
signals.trigger_write_config.send(request)
return HttpResponseRedirect(request.POST.get('next', '/'))
class _ExportRules(View):
def format(self, rules=None, name='promgen'):
content = prometheus.render_rules(rules)
response = HttpResponse(content)
response['Content-Type'] = 'application/x-yaml'
response['Content-Disposition'] = 'attachment; filename=%s.rule.yml' % name
return response
class RulesConfig(_ExportRules):
def get(self, request):
return self.format()
class RuleExport(_ExportRules):
def get(self, request, content_type, object_id):
ct = ContentType.objects.get(app_label="promgen", model=content_type).get_object_for_this_type(pk=object_id)
rules = models.Rule.objects.filter(obj=ct)
return self.format(rules)
class URLConfig(View):
def get(self, request):
return HttpResponse(prometheus.render_urls(), content_type='application/json')
def post(self, request):
tasks.write_urls()
return HttpResponse('OK', status=202)
class Alert(View):
def post(self, request, *args, **kwargs):
alert = models.Alert.objects.create(body=request.body.decode("utf-8"))
tasks.process_alert.delay(alert.pk)
return HttpResponse("OK", status=202)
class AlertList(LoginRequiredMixin, ListView):
paginate_by = 20
queryset = models.Alert.objects.order_by("-created")
def get_queryset(self):
search = self.request.GET.get('search')
if search:
return self.queryset.filter(
Q(alertlabel__name="Service", alertlabel__value__icontains=search)
| Q(alertlabel__name="Project", alertlabel__value__icontains=search)
| Q(alertlabel__name="Job", alertlabel__value__icontains=search)
)
qs = self.queryset
for key, value in self.request.GET.items():
if key in ["page", "search"]:
continue
qs = qs.filter(alertlabel__name=key, alertlabel__value=value)
return qs
class AlertDetail(LoginRequiredMixin, DetailView):
model = models.Alert
class Metrics(View):
def __init__(self):
self.registry = prometheus_client.CollectorRegistry(auto_describe=True)
prometheus_client.GCCollector(registry=self.registry)
prometheus_client.PlatformCollector(registry=self.registry)
prometheus_client.ProcessCollector(registry=self.registry)
self.registry.register(self)
def get(self, request, *args, **kwargs):
return HttpResponse(
prometheus_client.generate_latest(self.registry),
content_type=prometheus_client.CONTENT_TYPE_LATEST,
)
def collect(self):
MetricFamily(
"promgen_build_info", "Promgen Information", labels=["version", "python"]
)
v.add_metric([version.__version__, platform.python_version()], 1)
yield v
try:
yield CounterMetricFamily(
"promgen_alerts_processed",
"Alerts",
models.Alert.objects.latest("id").id,
)
except models.Alert.DoesNotExist:
pass
try:
yield CounterMetricFamily(
"promgen_alerts_failed",
"Failed Alerts",
models.AlertError.objects.latest("id").id,
)
except models.AlertError.DoesNotExist:
pass
yield GaugeMetricFamily(
"promgen_shards", "Registered Shards", models.Shard.objects.count()
)
yield GaugeMetricFamily(
"promgen_exporters", "Registered Exporters", models.Exporter.objects.count()
)
yield GaugeMetricFamily(
"promgen_services", "Registered Services", models.Service.objects.count()
)
yield GaugeMetricFamily(
"promgen_projects", "Registered Projects", models.Project.objects.count()
)
yield GaugeMetricFamily(
"promgen_rules", "Registered Rules", models.Rule.objects.count()
)
yield GaugeMetricFamily(
"promgen_urls", "Registered URLs", models.URL.objects.count()
)
yield GaugeMetricFamily(
"promgen_hosts",
"Registered Hosts",
len(models.Host.objects.values("name").annotate(Count("name"))),
)
notifier = GaugeMetricFamily(
"promgen_notifiers", "Registered Notifiers", labels=["type", "sender"]
)
for entry in models.Sender.objects.values(
"content_type__model", "sender"
).annotate(Count("sender"), count=Count("content_type")):
notifier.add_metric(
[entry["content_type__model"], entry["sender"]], entry["count"]
)
yield notifier
class Search(LoginRequiredMixin, View):
def get(self, request):
MAPPING = {
'farm_list': {
'field': ('name__icontains',),
'model': models.Farm,
'prefetch': ('project_set', 'host_set'),
'query': ('search', 'var-farm'),
},
'host_list': {
'field': ('name__icontains',),
'model': models.Host,
'query': ('search', 'var-instance'),
},
'project_list': {
'field': ('name__icontains',),
'model': models.Project,
'prefetch': ('service', 'notifiers', 'exporter_set', 'notifiers__owner'),
'query': ('search', 'var-project'),
},
'rule_list': {
'field': ('name__icontains', 'clause__icontains'),
'model': models.Rule,
'prefetch': ('content_object', 'ruleannotation_set', 'rulelabel_set'),
'query': ('search', ),
},
'service_list': {
'field': ('name__icontains',),
'model': models.Service,
'prefetch': ('project_set', 'rule_set', 'notifiers', 'notifiers__owner'),
'query': ('search', 'var-service'),
}
}
context = {}
for target, obj in MAPPING.items():
query = set(obj['query']).intersection(request.GET.keys())
if not query:
logger.info('query for %s: <skipping>', target)
continue
logger.info('query for %s: %s', target, query)
qs = obj['model'].objects
if 'prefetch' in obj:
qs = qs.prefetch_related(*obj['prefetch'])
filters = None
for var in query:
for field in obj['field']:
if filters:
filters |= Q(**{field: request.GET[var]})
else:
filters = Q(**{field: request.GET[var]})
logger.info('filtering %s by %s', target, filters)
qs = qs.filter(filters)
context[target] = qs
return render(request, 'promgen/search.html', context)
class RuleImport(mixins.PromgenPermissionMixin, FormView):
form_class = forms.ImportRuleForm
template_name = 'promgen/rule_import.html'
permission_required = ('promgen.change_site', 'promgen.change_rule')
    permission_denied_message = 'User lacks permission to import'
def form_valid(self, form):
data = form.clean()
if data.get('file_field'):
rules = data['file_field'].read().decode('utf8')
elif data.get('rules'):
rules = data.get('rules')
else:
messages.warning(self.request, 'Missing rules')
return self.form_invalid(form)
try:
counters = prometheus.import_rules_v2(rules)
messages.info(self.request, 'Imported %s' % counters)
return redirect('rule-import')
        except Exception:
messages.error(self.request, 'Error importing rules')
return self.form_invalid(form)
class Import(mixins.PromgenPermissionMixin, FormView):
template_name = 'promgen/import_form.html'
form_class = forms.ImportConfigForm
permission_required = (
'promgen.change_site', 'promgen.change_rule', 'promgen.change_exporter'
)
permission_denied_message = 'User lacks permission to import'
def form_valid(self, form):
data = form.clean()
if data.get('file_field'):
messages.info(self.request, 'Importing config from file')
config = data['file_field'].read().decode('utf8')
elif data.get('url'):
messages.info(self.request, 'Importing config from url')
response = util.get(data['url'])
response.raise_for_status()
config = response.text
elif data.get('config'):
messages.info(self.request, 'Importing config')
config = data['config']
else:
messages.warning(self.request, 'Missing config')
return self.form_invalid(form)
kwargs = {}
if data.get('shard'):
kwargs['replace_shard'] = data.get('shard')
imported, skipped = prometheus.import_config(json.loads(config), **kwargs)
if imported:
counters = {key: len(imported[key]) for key in imported}
messages.info(self.request, 'Imported %s' % counters)
if skipped:
counters = {key: len(skipped[key]) for key in skipped}
messages.info(self.request, 'Skipped %s' % counters)
if len(imported['Project']) == 1:
return HttpResponseRedirect(imported['Project'][0].get_absolute_url())
if len(imported['Service']) == 1:
return HttpResponseRedirect(imported['Service'][0].get_absolute_url())
if len(imported['Shard']) == 1:
return HttpResponseRedirect(imported['Shard'][0].get_absolute_url())
return redirect('service-list')
class RuleTest(LoginRequiredMixin, View):
def post(self, request, pk):
if pk == 0:
rule = models.Rule()
rule.set_object(request.POST['content_type'], request.POST['object_id'])
else:
rule = get_object_or_404(models.Rule, id=pk)
query = macro.rulemacro(rule, request.POST['query'])
        # Check the rendered query against all the Prometheus servers at once
        # by going through Promgen's query proxy.
url = resolve_domain('proxy-query')
logger.debug('Querying %s with %s', url, query)
start = time.time()
result = util.get(url, {'query': query}).json()
duration = datetime.timedelta(seconds=(time.time() - start))
context = {'status': result['status'], 'duration': duration, 'query': query}
context['data'] = result.get('data', {})
context['errors'] = {}
metrics = context['data'].get('result', [])
if metrics:
context['collapse'] = len(metrics) > 5
for row in metrics:
if 'service' not in row['metric'] and \
'project' not in row['metric']:
                    context['errors']['routing'] = 'Some metrics are missing service and project labels, so Promgen will be unable to route messages'
context['status'] = 'warning'
else:
context['status'] = 'info'
context['errors']['no_results'] = 'No results. You may need to remove conditional checks (> < ==) to verify'
# Place this at the bottom to have a query error show up as danger
if result['status'] != 'success':
context['status'] = 'danger'
context['errors']['Query'] = result['error']
return JsonResponse({request.POST['target']: render_to_string('promgen/ajax_clause_check.html', context)})
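# --- Editorial sketch, not part of the original module. It restates the
# prometheus_client custom-collector pattern used by the Metrics view above:
# an object exposing a collect() generator is registered on a registry, and
# generate_latest() drives collect() on every scrape. The metric name below
# is hypothetical, and the block is guarded so it only runs when this file
# is executed directly.
if __name__ == "__main__":
    class _DemoCollector:
        def collect(self):
            yield GaugeMetricFamily("demo_gauge", "A constant demo gauge.", value=42)
    _demo_registry = prometheus_client.CollectorRegistry(auto_describe=True)
    _demo_registry.register(_DemoCollector())
    print(prometheus_client.generate_latest(_demo_registry).decode())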
| true
| true
|
1c44a983770db16ec1a6f33dad6de70d9d9f5c16
| 5,443
|
py
|
Python
|
build/update_version.py
|
ruanyijian/QRCodeScanner
|
de3df01dec09a662d035dd43091dd024b322daf0
|
[
"MIT"
] | 1
|
2019-03-07T14:07:59.000Z
|
2019-03-07T14:07:59.000Z
|
build/update_version.py
|
ruanyijian/QRCodeScanner
|
de3df01dec09a662d035dd43091dd024b322daf0
|
[
"MIT"
] | null | null | null |
build/update_version.py
|
ruanyijian/QRCodeScanner
|
de3df01dec09a662d035dd43091dd024b322daf0
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import xml.etree.cElementTree as etree
import datetime
TARGET_SRC_ROOT = "../src"
TARGET_PROJECT_NAMES = ["organic"]
TARGET_PLIST_FILES = ["Info.plist"]
DEBUG_ENABLE = False
def get_value_node_by_key(dict_node, key):
key_node = None
value_node = None
for key_value_node in dict_node:
text = key_value_node.text
if text == key:
key_node = key_value_node
continue
if key_node is not None:
value_node = key_value_node
break
return value_node
def get_version_string(root):
dict_node = root.find("dict")
if not dict_node:
return ""
version_string_node = get_value_node_by_key(dict_node, "CFBundleVersion")
if version_string_node is None:
return ""
return version_string_node.text
def get_version_code(root):
dict_node = root.find("dict")
if not dict_node:
return ""
version_value_node = get_value_node_by_key(dict_node, "CFBundleShortVersionString")
if version_value_node is None:
return ""
return version_value_node.text
def replace_version_code(file_path, old_version_code, new_version_code):
file_content = None
with open(file_path, "rb") as f:
file_content = f.read()
if not isinstance(file_content, str):
print("file_content is not instance of str, actually: %s" % type(file_content))
return False
if DEBUG_ENABLE:
print("old_version_code: " + old_version_code)
print("new_version_code: " + new_version_code)
file_content = file_content.replace(old_version_code, new_version_code)
with open(file_path, "wb") as f:
f.write(file_content)
return True
# Generate a new version string from the old one.
# The project requires the format YYMMDD + build, e.g. 16010102
# means the second build on 2016-01-01.
def generate_new_version_string(old_version_string):
if not isinstance(old_version_string, str):
print("old_version_string is not instance of str, actually: %s" % type(old_version_string))
return None
if len(old_version_string) != 8:
print("old_version_string length is expect like YYMMDDbb, actually: %s" % type(old_version_string))
return None
date_code = old_version_string[0:6]
build_code = old_version_string[6:]
int_build_code = int(build_code)
today = datetime.date.today()
new_date_code = today.strftime("%Y%m%d")
new_date_code = new_date_code[2:]
if new_date_code == date_code:
int_build_code += 1
else:
int_build_code = 1
new_build_code = str(int_build_code).zfill(2)
new_version_code = new_date_code + new_build_code
return new_version_code
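# Editorial worked example, not part of the original script (illustrative
# dates): "16010102" encodes the 2nd build on 2016-01-01. Re-running on
# 2016-01-01 yields "16010103" (the build is bumped); running on 2016-01-02
# yields "16010201" (the date advances and the build resets to 01).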
# Generate a hotfix version string from the old one.
# Hotfix releases (a.b.c with c > 0) must keep the date field unchanged
# and may only bump the build field.
def generate_fix_version_string(old_version_string):
if not isinstance(old_version_string, str):
return None
if len(old_version_string) != 8:
return None
date_code = old_version_string[0:6]
build_code = old_version_string[6:]
int_build_code = int(build_code)
int_build_code += 1
new_build_code = str(int_build_code).zfill(2)
new_version_code = date_code + new_build_code
return new_version_code
def generate_version_string(version_code, version_string):
result_version_string = version_string
version_codes = version_code.split('.')
if len(version_codes) != 3:
print("invalid version_codes, should be x.y.z, actually: %s" % version_codes)
return result_version_string
fix_code_string = version_codes[2]
fix_code_int = int(fix_code_string)
if fix_code_int == 0:
result_version_string = generate_new_version_string(version_string)
else:
result_version_string = generate_fix_version_string(version_string)
return result_version_string
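# Editorial note, not part of the original script (illustrative values): a
# version_code of "1.2.0" routes through generate_new_version_string, while
# a hotfix code such as "1.2.1" routes through generate_fix_version_string
# so the YYMMDD date field stays frozen.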
def update_version(plist_file_path):
if not isinstance(plist_file_path, str):
return False
xml_tree = etree.ElementTree(file=plist_file_path)
if not xml_tree:
return False
root = xml_tree.getroot()
version_code = get_version_code(root)
version_string = get_version_string(root)
if not isinstance(version_code, str):
return False
new_version_string = generate_version_string(version_code, version_string)
result = replace_version_code(plist_file_path, version_string, new_version_string)
return result
def file_log(message):
current_dir = os.path.realpath(os.path.dirname(__file__))
path = os.path.join(current_dir, "log.txt")
with open(path, "a+") as f:
f.write(message)
f.write("\r\n")
# def commit_files(plist_files):
# commit_command = """svn ci -m "* auto update version script" """
# for plist_file in plist_files:
# commit_command += '"' + plist_file + '" '
# os.system(commit_command)
# file_log("exec command " + commit_command)
def main():
current_dir = os.path.realpath(os.path.dirname(__file__))
plist_files = []
for project_name in TARGET_PROJECT_NAMES:
path = os.path.join(TARGET_SRC_ROOT, project_name)
path = os.path.join(current_dir, path)
for plist_file in TARGET_PLIST_FILES:
plist_path = os.path.join(path, plist_file)
plist_path = os.path.abspath(plist_path)
update_version(plist_path)
plist_files.append(plist_path)
# commit_files(plist_files)
if __name__ == '__main__':
main()
| 28.497382
| 107
| 0.693184
|
import os
import xml.etree.cElementTree as etree
import datetime
TARGET_SRC_ROOT = "../src"
TARGET_PROJECT_NAMES = ["organic"]
TARGET_PLIST_FILES = ["Info.plist"]
DEBUG_ENABLE = False
def get_value_node_by_key(dict_node, key):
key_node = None
value_node = None
for key_value_node in dict_node:
text = key_value_node.text
if text == key:
key_node = key_value_node
continue
if key_node is not None:
value_node = key_value_node
break
return value_node
def get_version_string(root):
dict_node = root.find("dict")
if not dict_node:
return ""
version_string_node = get_value_node_by_key(dict_node, "CFBundleVersion")
if version_string_node is None:
return ""
return version_string_node.text
def get_version_code(root):
dict_node = root.find("dict")
if not dict_node:
return ""
version_value_node = get_value_node_by_key(dict_node, "CFBundleShortVersionString")
if version_value_node is None:
return ""
return version_value_node.text
def replace_version_code(file_path, old_version_code, new_version_code):
file_content = None
with open(file_path, "rb") as f:
file_content = f.read()
if not isinstance(file_content, str):
print("file_content is not instance of str, actually: %s" % type(file_content))
return False
if DEBUG_ENABLE:
print("old_version_code: " + old_version_code)
print("new_version_code: " + new_version_code)
file_content = file_content.replace(old_version_code, new_version_code)
with open(file_path, "wb") as f:
f.write(file_content)
return True
def generate_new_version_string(old_version_string):
if not isinstance(old_version_string, str):
print("old_version_string is not instance of str, actually: %s" % type(old_version_string))
return None
if len(old_version_string) != 8:
print("old_version_string length is expect like YYMMDDbb, actually: %s" % type(old_version_string))
return None
date_code = old_version_string[0:6]
build_code = old_version_string[6:]
int_build_code = int(build_code)
today = datetime.date.today()
new_date_code = today.strftime("%Y%m%d")
new_date_code = new_date_code[2:]
if new_date_code == date_code:
int_build_code += 1
else:
int_build_code = 1
new_build_code = str(int_build_code).zfill(2)
new_version_code = new_date_code + new_build_code
return new_version_code
def generate_fix_version_string(old_version_string):
if not isinstance(old_version_string, str):
return None
if len(old_version_string) != 8:
return None
date_code = old_version_string[0:6]
build_code = old_version_string[6:]
int_build_code = int(build_code)
int_build_code += 1
new_build_code = str(int_build_code).zfill(2)
new_version_code = date_code + new_build_code
return new_version_code
def generate_version_string(version_code, version_string):
result_version_string = version_string
version_codes = version_code.split('.')
if len(version_codes) != 3:
print("invalid version_codes, should be x.y.z, actually: %s" % version_codes)
return result_version_string
fix_code_string = version_codes[2]
fix_code_int = int(fix_code_string)
if fix_code_int == 0:
result_version_string = generate_new_version_string(version_string)
else:
result_version_string = generate_fix_version_string(version_string)
return result_version_string
def update_version(plist_file_path):
if not isinstance(plist_file_path, str):
return False
xml_tree = etree.ElementTree(file=plist_file_path)
if not xml_tree:
return False
root = xml_tree.getroot()
version_code = get_version_code(root)
version_string = get_version_string(root)
if not isinstance(version_code, str):
return False
new_version_string = generate_version_string(version_code, version_string)
result = replace_version_code(plist_file_path, version_string, new_version_string)
return result
def file_log(message):
current_dir = os.path.realpath(os.path.dirname(__file__))
path = os.path.join(current_dir, "log.txt")
with open(path, "a+") as f:
f.write(message)
f.write("\r\n")
def main():
current_dir = os.path.realpath(os.path.dirname(__file__))
plist_files = []
for project_name in TARGET_PROJECT_NAMES:
path = os.path.join(TARGET_SRC_ROOT, project_name)
path = os.path.join(current_dir, path)
for plist_file in TARGET_PLIST_FILES:
plist_path = os.path.join(path, plist_file)
plist_path = os.path.abspath(plist_path)
update_version(plist_path)
plist_files.append(plist_path)
if __name__ == '__main__':
main()
| true
| true
|
1c44aa3d721e63405060a14d3289a34802de3b56
| 336
|
py
|
Python
|
Python/widest-vertical-area-between-two-points-containing-no-points.py
|
sm2774us/leetcode_interview_prep_2021
|
33b41bea66c266b733372d9a8b9d2965cd88bf8c
|
[
"Fair"
] | null | null | null |
Python/widest-vertical-area-between-two-points-containing-no-points.py
|
sm2774us/leetcode_interview_prep_2021
|
33b41bea66c266b733372d9a8b9d2965cd88bf8c
|
[
"Fair"
] | null | null | null |
Python/widest-vertical-area-between-two-points-containing-no-points.py
|
sm2774us/leetcode_interview_prep_2021
|
33b41bea66c266b733372d9a8b9d2965cd88bf8c
|
[
"Fair"
] | null | null | null |
# Time: O(nlogn)
# Space: O(n)
class Solution(object):
def maxWidthOfVerticalArea(self, points):
"""
:type points: List[List[int]]
:rtype: int
"""
sorted_x = sorted({x for x, y in points})
        return max([b - a for a, b in zip(sorted_x, sorted_x[1:])] + [0])
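# Editorial worked example, not part of the original file: for points
# [[8, 7], [9, 9], [7, 4], [9, 7]] the sorted unique x's are [7, 8, 9],
# the consecutive gaps are [1, 1], and the widest vertical area is 1.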
| 22.4
| 81
| 0.568452
|
class Solution(object):
def maxWidthOfVerticalArea(self, points):
sorted_x = sorted({x for x, y in points})
        return max([b - a for a, b in zip(sorted_x, sorted_x[1:])] + [0])
| true
| true
|
1c44aa96dbe8157f19e95301eb7b709329e47002
| 380
|
py
|
Python
|
engine/src/hopeit/testing/__init__.py
|
pcanto-hopeit/hopeit.engine
|
c17b0438e56940a4d1b2f071cca90ae8b6f70629
|
[
"Apache-2.0"
] | 15
|
2020-07-09T17:41:14.000Z
|
2021-10-04T20:13:08.000Z
|
engine/src/hopeit/testing/__init__.py
|
pcanto-hopeit/hopeit.engine
|
c17b0438e56940a4d1b2f071cca90ae8b6f70629
|
[
"Apache-2.0"
] | 48
|
2020-07-10T15:16:17.000Z
|
2022-03-03T19:46:46.000Z
|
engine/src/hopeit/testing/__init__.py
|
pcanto-hopeit/hopeit.engine
|
c17b0438e56940a4d1b2f071cca90ae8b6f70629
|
[
"Apache-2.0"
] | 3
|
2020-07-08T20:12:58.000Z
|
2021-01-10T15:57:21.000Z
|
"""
hopeit.engine testing module
Provides utilities to write unit and integration tests for App Events:
* **apps**: load config and execute app events for testing behaviour. Allows execution
of events without starting a server.
* **encryption**: provides data encryption for tests. Useful to test data apps.
"""
__all__ = ['apps',
'encryption']
| 31.666667
| 90
| 0.705263
|
__all__ = ['apps',
'encryption']
| true
| true
|
1c44ab3987f668f224f7607d88fa170256a0b39a
| 661
|
py
|
Python
|
6 Web Page with Flask/basic page/script1.py
|
mself9/pythonteachingcode
|
45da22291ef38fa4cc971bc196e9bba968b9fe9e
|
[
"MIT"
] | null | null | null |
6 Web Page with Flask/basic page/script1.py
|
mself9/pythonteachingcode
|
45da22291ef38fa4cc971bc196e9bba968b9fe9e
|
[
"MIT"
] | null | null | null |
6 Web Page with Flask/basic page/script1.py
|
mself9/pythonteachingcode
|
45da22291ef38fa4cc971bc196e9bba968b9fe9e
|
[
"MIT"
] | null | null | null |
from flask import Flask, render_template, request
app = Flask(__name__)
@app.route('/greet', methods=['POST'])
def greet():
    inputName = request.form['myName']
    ip = request.remote_addr
    # write data to a file or to a DB here if needed
    inputName = inputName.upper() + " hi! Visiting from " + str(ip)
    return render_template("home.html", myName=inputName)
@app.route('/')
def home():
    return render_template("home.html", myName="")
@app.route('/about/')
def about():
    return render_template("about.html")
@app.route('/madelyn/')
def madelyn():
    return render_template('self.html')
if __name__ == "__main__":
    app.run(debug=True)
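# Editorial note, not part of the original script: with the dev server
# running (default http://127.0.0.1:5000), the POST form handled by greet()
# can be exercised from a shell, e.g.:
#     curl -X POST -d "myName=ada" http://127.0.0.1:5000/greet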
| 26.44
| 67
| 0.652042
|
from flask import Flask, render_template, request
app=Flask(__name__)
@app.route('/greet', methods=['POST'])
def greet():
inputName = request.form['myName']
ip = request.remote_addr
inputName = inputName.upper()+" hi! Visiting from " + str(ip)
return render_template("home.html",myName=inputName)
@app.route('/')
def home():
return render_template("home.html",myName="")
@app.route('/about/')
def about():
return render_template("about.html")
@app.route('/madelyn/')
def madelyn():
return render_template('self.html')
if __name__=="__main__":
app.run(debug=True)
| true
| true
|
1c44adf778926c615954fa37210f146b492819bd
| 573
|
py
|
Python
|
active_directory2/log.py
|
tjguk/active_directory2
|
0338ea9ea168fd37869689c108fe08f716408c95
|
[
"MIT"
] | 2
|
2016-05-30T14:15:42.000Z
|
2021-05-15T03:26:22.000Z
|
active_directory2/log.py
|
tjguk/active_directory2
|
0338ea9ea168fd37869689c108fe08f716408c95
|
[
"MIT"
] | null | null | null |
active_directory2/log.py
|
tjguk/active_directory2
|
0338ea9ea168fd37869689c108fe08f716408c95
|
[
"MIT"
] | null | null | null |
import os, sys
import logging
formatter = logging.Formatter ("[%(levelname)s] %(module)s.%(funcName)s: %(message)s")
logger = logging.getLogger ("active_directory2")
logger.setLevel (logging.DEBUG)
stderr_handler = logging.StreamHandler (sys.stderr)
stderr_handler.setLevel (logging.WARN)
stderr_handler.setFormatter (formatter)
logger.addHandler (stderr_handler)
debug_handler = logging.FileHandler ("active_directory2.debug.log", mode="w")
debug_handler.setLevel (logging.DEBUG)
debug_handler.setFormatter (formatter)
logger.addHandler (debug_handler)
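# Editorial note, not part of the original module: with the two handlers
# above, logger.debug(...) is written only to active_directory2.debug.log,
# while logger.warning(...) and above are also echoed to stderr, e.g.:
#     from active_directory2.log import logger
#     logger.warning("shown on stderr and in the debug log")
#     logger.debug("debug log file only")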
| 33.705882
| 87
| 0.77836
|
import os, sys
import logging
formatter = logging.Formatter ("[%(levelname)s] %(module)s.%(funcName)s: %(message)s")
logger = logging.getLogger ("active_directory2")
logger.setLevel (logging.DEBUG)
stderr_handler = logging.StreamHandler (sys.stderr)
stderr_handler.setLevel (logging.WARN)
stderr_handler.setFormatter (formatter)
logger.addHandler (stderr_handler)
debug_handler = logging.FileHandler ("active_directory2.debug.log", mode="w")
debug_handler.setLevel (logging.DEBUG)
debug_handler.setFormatter (formatter)
logger.addHandler (debug_handler)
| true
| true
|
1c44ae6d5a623af6a8eb72eb8c395b5f3d6c50e0
| 146
|
py
|
Python
|
travis_test/__init__.py
|
Fixdq/Pagination
|
3497bc72f1a010c382c7ca686d50dc95566f0a96
|
[
"MIT"
] | 1
|
2019-02-19T06:04:50.000Z
|
2019-02-19T06:04:50.000Z
|
travis_test/__init__.py
|
Fixdq/pagination
|
3497bc72f1a010c382c7ca686d50dc95566f0a96
|
[
"MIT"
] | null | null | null |
travis_test/__init__.py
|
Fixdq/pagination
|
3497bc72f1a010c382c7ca686d50dc95566f0a96
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time    : 18-7-26 12:01 AM
# @Author : fixdq
# @File : __init__.py.py
# @Software: PyCharm
| 16.222222
| 28
| 0.568493
| true
| true
|
|
1c44aeadb7721c811d2848f1f8dd459a92307775
| 294
|
py
|
Python
|
rss_scrapper/tasks/dummy.py
|
abrioy/rss_scrapper
|
b4778ae922ca7e2ef3a7720dc1c69eafffccf0af
|
[
"MIT"
] | null | null | null |
rss_scrapper/tasks/dummy.py
|
abrioy/rss_scrapper
|
b4778ae922ca7e2ef3a7720dc1c69eafffccf0af
|
[
"MIT"
] | null | null | null |
rss_scrapper/tasks/dummy.py
|
abrioy/rss_scrapper
|
b4778ae922ca7e2ef3a7720dc1c69eafffccf0af
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import logging
from rss_scrapper.tasks.task import Task
logger = logging.getLogger(__name__)
class DummyTask(Task):
name = "dummy"
def init(self):
pass
def init_conf(self, conf):
pass
def do_execute(self, data):
yield data
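# Editorial note, not part of the original module: do_execute is a
# generator, so a Task subclass can fan out by yielding several items per
# input; this dummy simply passes its input through unchanged.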
| 14.7
| 40
| 0.62585
|
import logging
from rss_scrapper.tasks.task import Task
logger = logging.getLogger(__name__)
class DummyTask(Task):
name = "dummy"
def init(self):
pass
def init_conf(self, conf):
pass
def do_execute(self, data):
yield data
| true
| true
|
1c44aec2f19d1cb0ee064b23305e2fcaf8df3269
| 10,034
|
py
|
Python
|
test/data/test_modules.py
|
abhinavarora/text
|
69f67f3a775f3d3c6f85cfaa4ac3819500b90696
|
[
"BSD-3-Clause"
] | 1
|
2022-01-03T17:30:57.000Z
|
2022-01-03T17:30:57.000Z
|
test/data/test_modules.py
|
abhinavarora/text
|
69f67f3a775f3d3c6f85cfaa4ac3819500b90696
|
[
"BSD-3-Clause"
] | null | null | null |
test/data/test_modules.py
|
abhinavarora/text
|
69f67f3a775f3d3c6f85cfaa4ac3819500b90696
|
[
"BSD-3-Clause"
] | null | null | null |
import torch
from torch.nn import Linear
from torch.nn.functional import multi_head_attention_forward as mha_forward
from torchtext.nn import InProjContainer, MultiheadAttentionContainer, ScaledDotProduct
from ..common.torchtext_test_case import TorchtextTestCase
class TestModels(TorchtextTestCase):
def test_multiheadattention(self):
embed_dim, nhead, tgt_len, src_len, bsz = 10, 5, 6, 10, 64
# Build torchtext MultiheadAttention module
in_proj = InProjContainer(
Linear(embed_dim, embed_dim, bias=False),
Linear(embed_dim, embed_dim, bias=False),
Linear(embed_dim, embed_dim, bias=False),
)
MHA = MultiheadAttentionContainer(nhead, in_proj, ScaledDotProduct(), Linear(embed_dim, embed_dim, bias=False))
query = torch.rand((tgt_len, bsz, embed_dim))
key = value = torch.rand((src_len, bsz, embed_dim))
attn_mask_2D = torch.randint(0, 2, (tgt_len, src_len)).to(torch.bool)
bias_k = bias_v = torch.rand((1, 1, embed_dim))
mha_output, attn_weights = MHA(
query,
key,
value,
attn_mask=torch.stack([attn_mask_2D] * (bsz * nhead)),
bias_k=bias_k.repeat(1, bsz, 1).reshape(1, bsz * nhead, -1),
bias_v=bias_v.repeat(1, bsz, 1).reshape(1, bsz * nhead, -1),
)
# Use torch.nn.functional.multi_head_attention_forward
torch_attn_mask = torch.zeros((tgt_len, src_len)).masked_fill_(attn_mask_2D, float("-inf"))
in_proj_weight = torch.cat(
[
MHA.in_proj_container.query_proj.weight,
MHA.in_proj_container.key_proj.weight,
MHA.in_proj_container.value_proj.weight,
]
)
torch_mha_output, torch_mha_weights = mha_forward(
query,
key,
value,
embed_dim,
nhead,
in_proj_weight,
None,
bias_k,
bias_v,
False,
0.0,
MHA.out_proj.weight,
None,
attn_mask=torch_attn_mask,
)
self.assertEqual(mha_output, torch_mha_output)
        # With bias_k and bias_v appended, the effective src_len grows by 1
attn_weights = attn_weights.view(bsz, nhead, tgt_len, src_len + 1).sum(dim=1) / nhead
self.assertEqual(attn_weights, torch_mha_weights)
def test_mha_batch_first(self):
embed_dim, nhead, tgt_len, src_len, bsz = 10, 5, 6, 10, 64
# Build torchtext MultiheadAttention module
in_proj = InProjContainer(
Linear(embed_dim, embed_dim, bias=False),
Linear(embed_dim, embed_dim, bias=False),
Linear(embed_dim, embed_dim, bias=False),
)
MHA_batch_1st = MultiheadAttentionContainer(
nhead, in_proj, ScaledDotProduct(), Linear(embed_dim, embed_dim, bias=False), batch_first=True
)
query = torch.rand((tgt_len, bsz, embed_dim))
key = value = torch.rand((src_len, bsz, embed_dim))
attn_mask_2D = torch.randint(0, 2, (tgt_len, src_len)).to(torch.bool)
bias_k = bias_v = torch.rand((1, 1, embed_dim))
mha_output_1st, attn_weights_1st = MHA_batch_1st(
query.transpose(0, 1),
key.transpose(0, 1),
value.transpose(0, 1),
attn_mask=torch.stack([attn_mask_2D] * (bsz * nhead)),
bias_k=bias_k.repeat(1, bsz, 1).reshape(1, bsz * nhead, -1),
bias_v=bias_v.repeat(1, bsz, 1).reshape(1, bsz * nhead, -1),
)
# Use torch.nn.functional.multi_head_attention_forward
torch_attn_mask = torch.zeros((tgt_len, src_len)).masked_fill_(attn_mask_2D, float("-inf"))
in_proj_weight = torch.cat(
[
MHA_batch_1st.in_proj_container.query_proj.weight,
MHA_batch_1st.in_proj_container.key_proj.weight,
MHA_batch_1st.in_proj_container.value_proj.weight,
]
)
torch_mha_output, torch_mha_weights = mha_forward(
query,
key,
value,
embed_dim,
nhead,
in_proj_weight,
None,
bias_k,
bias_v,
False,
0.0,
MHA_batch_1st.out_proj.weight,
None,
attn_mask=torch_attn_mask,
)
self.assertEqual(mha_output_1st.transpose(0, 1), torch_mha_output)
        # With bias_k and bias_v appended, the effective src_len grows by 1
attn_weights_1st = attn_weights_1st.view(bsz, nhead, tgt_len, src_len + 1).sum(dim=1) / nhead
self.assertEqual(attn_weights_1st, torch_mha_weights)
def test_broadcast_scaled_dot_product(self):
embed_dim, nhead, tgt_len, src_len, bsz = 10, 5, 6, 10, 64
SDP = ScaledDotProduct()
query = torch.rand((tgt_len, 1, embed_dim))
key = value = torch.rand((src_len, 1, embed_dim))
attn_mask_2D = torch.randint(0, 2, (tgt_len, src_len)).to(torch.bool)
sdp_attn_output_full, sdp_attn_weights_full = SDP(
query.expand(tgt_len, bsz * nhead, embed_dim),
key.expand(src_len, bsz * nhead, embed_dim),
value.expand(src_len, bsz * nhead, embed_dim),
attn_mask=attn_mask_2D.expand(bsz * nhead, tgt_len, src_len),
)
# query has a batch size of 1 while key/value have a batch size of bsz * nhead
sdp_attn_output, sdp_attn_weights = SDP(
query,
key.expand(src_len, bsz * nhead, embed_dim),
value.expand(src_len, bsz * nhead, embed_dim),
attn_mask=attn_mask_2D.expand(bsz * nhead, tgt_len, src_len),
)
self.assertEqual(sdp_attn_output, sdp_attn_output_full)
self.assertEqual(sdp_attn_weights, sdp_attn_weights_full)
# key/value have a batch size of 1 while query has a batch size of bsz * nhead
sdp_attn_output, sdp_attn_weights = SDP(
query.expand(tgt_len, bsz * nhead, embed_dim),
key,
value,
attn_mask=attn_mask_2D.expand(bsz * nhead, tgt_len, src_len),
)
self.assertEqual(sdp_attn_output, sdp_attn_output_full)
self.assertEqual(sdp_attn_weights, sdp_attn_weights_full)
# key/value have a size of (3, 3, src_len, bsz * nhead, embed_dim)
# while query has a size of (tgt_len, 1, embed_dim)
sdp_attn_output, sdp_attn_weights = SDP(
query.expand(tgt_len, 1, embed_dim),
key.expand(3, 3, src_len, bsz * nhead, embed_dim),
value.expand(3, 3, src_len, bsz * nhead, embed_dim),
attn_mask=attn_mask_2D.expand(bsz * nhead, tgt_len, src_len),
)
assert list(sdp_attn_output.size()) == [3, 3, tgt_len, bsz * nhead, embed_dim]
assert list(sdp_attn_weights.size()) == [3, 3, bsz * nhead, tgt_len, embed_dim]
self.assertEqual(sdp_attn_output[2][2], sdp_attn_output_full)
self.assertEqual(sdp_attn_weights[2][2], sdp_attn_weights_full)
        # query's dim -2 matches neither key/value's dim -2 nor 1
with self.assertRaises(RuntimeError):
SDP(
query.expand(tgt_len, 2, embed_dim),
key.expand(3, 3, src_len, bsz * nhead, embed_dim),
value.expand(3, 3, src_len, bsz * nhead, embed_dim),
attn_mask=attn_mask_2D.expand(bsz * nhead, tgt_len, src_len),
)
# key/value have a size of (src_len, 1, embed_dim)
# while query has a size of (1, 2, 3, tgt_len, bsz * nhead, embed_dim)
sdp_attn_output, sdp_attn_weights = SDP(
query.expand(1, 2, 3, tgt_len, bsz * nhead, embed_dim),
key.expand(src_len, 1, embed_dim),
value.expand(src_len, 1, embed_dim),
attn_mask=attn_mask_2D.expand(bsz * nhead, tgt_len, src_len),
)
assert list(sdp_attn_output.size()) == [1, 2, 3, tgt_len, bsz * nhead, embed_dim]
assert list(sdp_attn_weights.size()) == [1, 2, 3, bsz * nhead, tgt_len, embed_dim]
self.assertEqual(sdp_attn_output[0][1][2], sdp_attn_output_full)
self.assertEqual(sdp_attn_weights[0][1][2], sdp_attn_weights_full)
# key dim -2 is not equal to value dim -2
with self.assertRaisesRegex(AssertionError, "Shape of key, value must match"):
SDP(
query.expand(1, 2, 3, tgt_len, bsz * nhead, embed_dim),
key.expand(src_len, 2, embed_dim),
value.expand(src_len, 1, embed_dim),
attn_mask=attn_mask_2D.expand(bsz * nhead, tgt_len, src_len),
)
        # key/value's dim -2 matches neither query's dim -2 nor 1
with self.assertRaises(RuntimeError):
SDP(
query.expand(1, 2, 3, tgt_len, bsz * nhead, embed_dim),
key.expand(src_len, 2, embed_dim),
value.expand(src_len, 2, embed_dim),
attn_mask=attn_mask_2D.expand(bsz * nhead, tgt_len, src_len),
)
        # attn_mask with a size of (1, tgt_len, src_len) broadcasts across
        # the batch; a plain 2D tensor is not supported for attn_mask
sdp_attn_output, sdp_attn_weights = SDP(
query.expand(tgt_len, bsz * nhead, embed_dim),
key.expand(src_len, bsz * nhead, embed_dim),
value.expand(src_len, bsz * nhead, embed_dim),
attn_mask=attn_mask_2D.expand(1, tgt_len, src_len),
)
self.assertEqual(sdp_attn_output, sdp_attn_output_full)
self.assertEqual(sdp_attn_weights, sdp_attn_weights_full)
        # attn_mask's dim -3 matches neither the batch size nor 1
with self.assertRaisesRegex(RuntimeError, "The size of the attn_mask is not correct."):
SDP(
query.expand(tgt_len, bsz * nhead, embed_dim),
key.expand(src_len, bsz * nhead, embed_dim),
value.expand(src_len, bsz * nhead, embed_dim),
attn_mask=attn_mask_2D.expand(2, tgt_len, src_len),
)
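# Editorial note, not part of the original test file: the assertions above
# exercise ScaledDotProduct's broadcasting rules. A size-1 batch dimension
# (dim -2) of query or of key/value broadcasts against bsz * nhead, extra
# leading dimensions (e.g. (3, 3, ...)) broadcast with the usual torch
# semantics, and mismatched non-1 sizes raise the errors checked above.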
| 44.794643
| 119
| 0.607435
|
import torch
from torch.nn import Linear
from torch.nn.functional import multi_head_attention_forward as mha_forward
from torchtext.nn import InProjContainer, MultiheadAttentionContainer, ScaledDotProduct
from ..common.torchtext_test_case import TorchtextTestCase
class TestModels(TorchtextTestCase):
def test_multiheadattention(self):
embed_dim, nhead, tgt_len, src_len, bsz = 10, 5, 6, 10, 64
in_proj = InProjContainer(
Linear(embed_dim, embed_dim, bias=False),
Linear(embed_dim, embed_dim, bias=False),
Linear(embed_dim, embed_dim, bias=False),
)
MHA = MultiheadAttentionContainer(nhead, in_proj, ScaledDotProduct(), Linear(embed_dim, embed_dim, bias=False))
query = torch.rand((tgt_len, bsz, embed_dim))
key = value = torch.rand((src_len, bsz, embed_dim))
attn_mask_2D = torch.randint(0, 2, (tgt_len, src_len)).to(torch.bool)
bias_k = bias_v = torch.rand((1, 1, embed_dim))
mha_output, attn_weights = MHA(
query,
key,
value,
attn_mask=torch.stack([attn_mask_2D] * (bsz * nhead)),
bias_k=bias_k.repeat(1, bsz, 1).reshape(1, bsz * nhead, -1),
bias_v=bias_v.repeat(1, bsz, 1).reshape(1, bsz * nhead, -1),
)
torch_attn_mask = torch.zeros((tgt_len, src_len)).masked_fill_(attn_mask_2D, float("-inf"))
in_proj_weight = torch.cat(
[
MHA.in_proj_container.query_proj.weight,
MHA.in_proj_container.key_proj.weight,
MHA.in_proj_container.value_proj.weight,
]
)
torch_mha_output, torch_mha_weights = mha_forward(
query,
key,
value,
embed_dim,
nhead,
in_proj_weight,
None,
bias_k,
bias_v,
False,
0.0,
MHA.out_proj.weight,
None,
attn_mask=torch_attn_mask,
)
self.assertEqual(mha_output, torch_mha_output)
attn_weights = attn_weights.view(bsz, nhead, tgt_len, src_len + 1).sum(dim=1) / nhead
self.assertEqual(attn_weights, torch_mha_weights)
def test_mha_batch_first(self):
embed_dim, nhead, tgt_len, src_len, bsz = 10, 5, 6, 10, 64
in_proj = InProjContainer(
Linear(embed_dim, embed_dim, bias=False),
Linear(embed_dim, embed_dim, bias=False),
Linear(embed_dim, embed_dim, bias=False),
)
MHA_batch_1st = MultiheadAttentionContainer(
nhead, in_proj, ScaledDotProduct(), Linear(embed_dim, embed_dim, bias=False), batch_first=True
)
query = torch.rand((tgt_len, bsz, embed_dim))
key = value = torch.rand((src_len, bsz, embed_dim))
attn_mask_2D = torch.randint(0, 2, (tgt_len, src_len)).to(torch.bool)
bias_k = bias_v = torch.rand((1, 1, embed_dim))
mha_output_1st, attn_weights_1st = MHA_batch_1st(
query.transpose(0, 1),
key.transpose(0, 1),
value.transpose(0, 1),
attn_mask=torch.stack([attn_mask_2D] * (bsz * nhead)),
bias_k=bias_k.repeat(1, bsz, 1).reshape(1, bsz * nhead, -1),
bias_v=bias_v.repeat(1, bsz, 1).reshape(1, bsz * nhead, -1),
)
torch_attn_mask = torch.zeros((tgt_len, src_len)).masked_fill_(attn_mask_2D, float("-inf"))
in_proj_weight = torch.cat(
[
MHA_batch_1st.in_proj_container.query_proj.weight,
MHA_batch_1st.in_proj_container.key_proj.weight,
MHA_batch_1st.in_proj_container.value_proj.weight,
]
)
torch_mha_output, torch_mha_weights = mha_forward(
query,
key,
value,
embed_dim,
nhead,
in_proj_weight,
None,
bias_k,
bias_v,
False,
0.0,
MHA_batch_1st.out_proj.weight,
None,
attn_mask=torch_attn_mask,
)
self.assertEqual(mha_output_1st.transpose(0, 1), torch_mha_output)
attn_weights_1st = attn_weights_1st.view(bsz, nhead, tgt_len, src_len + 1).sum(dim=1) / nhead
self.assertEqual(attn_weights_1st, torch_mha_weights)
def test_broadcast_scaled_dot_product(self):
embed_dim, nhead, tgt_len, src_len, bsz = 10, 5, 6, 10, 64
SDP = ScaledDotProduct()
query = torch.rand((tgt_len, 1, embed_dim))
key = value = torch.rand((src_len, 1, embed_dim))
attn_mask_2D = torch.randint(0, 2, (tgt_len, src_len)).to(torch.bool)
sdp_attn_output_full, sdp_attn_weights_full = SDP(
query.expand(tgt_len, bsz * nhead, embed_dim),
key.expand(src_len, bsz * nhead, embed_dim),
value.expand(src_len, bsz * nhead, embed_dim),
attn_mask=attn_mask_2D.expand(bsz * nhead, tgt_len, src_len),
)
sdp_attn_output, sdp_attn_weights = SDP(
query,
key.expand(src_len, bsz * nhead, embed_dim),
value.expand(src_len, bsz * nhead, embed_dim),
attn_mask=attn_mask_2D.expand(bsz * nhead, tgt_len, src_len),
)
self.assertEqual(sdp_attn_output, sdp_attn_output_full)
self.assertEqual(sdp_attn_weights, sdp_attn_weights_full)
sdp_attn_output, sdp_attn_weights = SDP(
query.expand(tgt_len, bsz * nhead, embed_dim),
key,
value,
attn_mask=attn_mask_2D.expand(bsz * nhead, tgt_len, src_len),
)
self.assertEqual(sdp_attn_output, sdp_attn_output_full)
self.assertEqual(sdp_attn_weights, sdp_attn_weights_full)
sdp_attn_output, sdp_attn_weights = SDP(
query.expand(tgt_len, 1, embed_dim),
key.expand(3, 3, src_len, bsz * nhead, embed_dim),
value.expand(3, 3, src_len, bsz * nhead, embed_dim),
attn_mask=attn_mask_2D.expand(bsz * nhead, tgt_len, src_len),
)
assert list(sdp_attn_output.size()) == [3, 3, tgt_len, bsz * nhead, embed_dim]
assert list(sdp_attn_weights.size()) == [3, 3, bsz * nhead, tgt_len, embed_dim]
self.assertEqual(sdp_attn_output[2][2], sdp_attn_output_full)
self.assertEqual(sdp_attn_weights[2][2], sdp_attn_weights_full)
with self.assertRaises(RuntimeError):
SDP(
query.expand(tgt_len, 2, embed_dim),
key.expand(3, 3, src_len, bsz * nhead, embed_dim),
value.expand(3, 3, src_len, bsz * nhead, embed_dim),
attn_mask=attn_mask_2D.expand(bsz * nhead, tgt_len, src_len),
)
# key/value have a size of (src_len, 1, embed_dim)
# while query has a size of (1, 2, 3, tgt_len, bsz * nhead, embed_dim)
sdp_attn_output, sdp_attn_weights = SDP(
query.expand(1, 2, 3, tgt_len, bsz * nhead, embed_dim),
key.expand(src_len, 1, embed_dim),
value.expand(src_len, 1, embed_dim),
attn_mask=attn_mask_2D.expand(bsz * nhead, tgt_len, src_len),
)
assert list(sdp_attn_output.size()) == [1, 2, 3, tgt_len, bsz * nhead, embed_dim]
assert list(sdp_attn_weights.size()) == [1, 2, 3, bsz * nhead, tgt_len, embed_dim]
self.assertEqual(sdp_attn_output[0][1][2], sdp_attn_output_full)
self.assertEqual(sdp_attn_weights[0][1][2], sdp_attn_weights_full)
# key dim -2 is not equal to value dim -2
with self.assertRaisesRegex(AssertionError, "Shape of key, value must match"):
SDP(
query.expand(1, 2, 3, tgt_len, bsz * nhead, embed_dim),
key.expand(src_len, 2, embed_dim),
value.expand(src_len, 1, embed_dim),
attn_mask=attn_mask_2D.expand(bsz * nhead, tgt_len, src_len),
)
        # key/value's dim -2 matches neither query's dim -2 nor 1
with self.assertRaises(RuntimeError):
SDP(
query.expand(1, 2, 3, tgt_len, bsz * nhead, embed_dim),
key.expand(src_len, 2, embed_dim),
value.expand(src_len, 2, embed_dim),
attn_mask=attn_mask_2D.expand(bsz * nhead, tgt_len, src_len),
)
sdp_attn_output, sdp_attn_weights = SDP(
query.expand(tgt_len, bsz * nhead, embed_dim),
key.expand(src_len, bsz * nhead, embed_dim),
value.expand(src_len, bsz * nhead, embed_dim),
attn_mask=attn_mask_2D.expand(1, tgt_len, src_len),
)
self.assertEqual(sdp_attn_output, sdp_attn_output_full)
self.assertEqual(sdp_attn_weights, sdp_attn_weights_full)
with self.assertRaisesRegex(RuntimeError, "The size of the attn_mask is not correct."):
SDP(
query.expand(tgt_len, bsz * nhead, embed_dim),
key.expand(src_len, bsz * nhead, embed_dim),
value.expand(src_len, bsz * nhead, embed_dim),
attn_mask=attn_mask_2D.expand(2, tgt_len, src_len),
)
| true
| true
|
1c44af7b481705d54dd67e7ed0c411ed35f66a39
| 9,372
|
py
|
Python
|
docs/conf.py
|
MasterScott/pem
|
9d910ab8b5d1b965ad696ddb19060d100dd6aba6
|
[
"MIT"
] | 89
|
2015-01-31T20:54:34.000Z
|
2022-03-09T08:24:43.000Z
|
docs/conf.py
|
MasterScott/pem
|
9d910ab8b5d1b965ad696ddb19060d100dd6aba6
|
[
"MIT"
] | 41
|
2015-01-13T14:46:20.000Z
|
2021-04-07T15:01:29.000Z
|
docs/conf.py
|
MasterScott/pem
|
9d910ab8b5d1b965ad696ddb19060d100dd6aba6
|
[
"MIT"
] | 32
|
2015-01-09T20:45:11.000Z
|
2021-04-23T13:30:54.000Z
|
# -*- coding: utf-8 -*-
#
# pem documentation build configuration file, created by
# sphinx-quickstart on Thu Jul 9 13:12:00 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import pem
try:
import sphinx_rtd_theme
except ImportError:
sphinx_rtd_theme = None
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
linkcheck_ignore = [
r"https://github.com/.*/(issues|pull)/\d+",
]
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.doctest",
"sphinx.ext.intersphinx",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "pem"
author = "Hynek Schlawack"
copyright = "2013, " + author
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = pem.__version__
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
html_theme = "furo"
html_theme_options = {}
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = "pemdoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
# Latex figure (float) alignment
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, "pem.tex", "pem Documentation", author, "manual")
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "pem", "pem Documentation", [author], 1)]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"pem",
"pem Documentation",
author,
"pem",
"One line description of project.",
"Miscellaneous",
)
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# Refer to the Python standard library.
intersphinx_mapping = {"https://docs.python.org/3/": None}
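# Editorial note, not part of the original config: newer Sphinx releases
# prefer named intersphinx entries, e.g.
#     intersphinx_mapping = {"python": ("https://docs.python.org/3/", None)}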
| 31.662162
| 79
| 0.705079
|
import pem
try:
import sphinx_rtd_theme
except ImportError:
sphinx_rtd_theme = None
linkcheck_ignore = [
r"https://github.com/.*/(issues|pull)/\d+",
]
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.doctest",
"sphinx.ext.intersphinx",
]
templates_path = ["_templates"]
source_suffix = ".rst"
master_doc = "index"
project = "pem"
author = "Hynek Schlawack"
copyright = "2013, " + author
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = pem.__version__
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
html_theme = "furo"
html_theme_options = {}
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = "pemdoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
# Latex figure (float) alignment
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, "pem.tex", "pem Documentation", author, "manual")
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "pem", "pem Documentation", [author], 1)]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"pem",
"pem Documentation",
author,
"pem",
"One line description of project.",
"Miscellaneous",
)
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
intersphinx_mapping = {"https://docs.python.org/3/": None}
| true
| true
|
1c44afe65f59be39bef35bdf6165a67aa4f650fc
| 92
|
py
|
Python
|
app/auth/__init__.py
|
Bernicetwili/Pitch
|
35d49df19eaff05bff77af1e4c71e155165c74ad
|
[
"MIT"
] | null | null | null |
app/auth/__init__.py
|
Bernicetwili/Pitch
|
35d49df19eaff05bff77af1e4c71e155165c74ad
|
[
"MIT"
] | null | null | null |
app/auth/__init__.py
|
Bernicetwili/Pitch
|
35d49df19eaff05bff77af1e4c71e155165c74ad
|
[
"MIT"
] | null | null | null |
from flask import Blueprint
from . import views,forms
auth = Blueprint('auth',__name__)
| 11.5
| 33
| 0.75
|
from flask import Blueprint
from . import views,forms
auth = Blueprint('auth',__name__)
| true
| true
|
1c44b02f6ceca7ed58c6127bce66e10cd451cce5
| 3,984
|
py
|
Python
|
CLIDice.py
|
DNEAVES/CLIDice
|
d1cd7b069ea892f4747fc21da3e6623f2ecdacc7
|
[
"MIT"
] | null | null | null |
CLIDice.py
|
DNEAVES/CLIDice
|
d1cd7b069ea892f4747fc21da3e6623f2ecdacc7
|
[
"MIT"
] | null | null | null |
CLIDice.py
|
DNEAVES/CLIDice
|
d1cd7b069ea892f4747fc21da3e6623f2ecdacc7
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3.10
import sys
import random
import re
from pprint import pprint
from Print.help import dice_help
from Print.help2 import dice_help_two
from Print.credits import dice_credits
standard_dice = [2, 4, 6, 8, 10, 00, 12, 20, 30, 100]
def roll(dice: int, idv_mod: int, ignore_zero: bool = False):
if dice != 00:
diceroll = random.randrange(1, dice+1)+idv_mod
return diceroll
elif dice == 00:
diceroll = random.randrange(0, 100, 10) + (1 if ignore_zero else 0)
return diceroll
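# Editorial note, not part of the original script: the dice == 00 branch
# models a percentile tens die, yielding one of 0, 10, ..., 90; with
# "00 mode" toggled on (ignore_zero=True), 1 is added so 0 never occurs.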
def parse_roll(query: str, ignore_zero: bool = False):
q = query.split('d', 1)
quant = int(q[0])
which = q[1]
mod = 0
mod_each = 0
results = []
if "+" in which:
t = which.split("+")
which = t[0]
if t[1].startswith("m"):
mod += int(t[1].strip("m"))
if t[1].startswith("e"):
mod_each += int(t[1].strip("e"))
try:
if t[2].startswith("m"):
mod += int(t[2].strip("m"))
if t[2].startswith("e"):
mod_each += int(t[2].strip("e"))
except IndexError:
pass
which = int(which)
if which not in standard_dice:
print("An unconventional dice roll. I'll still roll it anyway...")
total = 0
for r in range(0, quant):
work = roll(which, mod_each, ignore_zero)
results.append(work)
total += work
total += mod
return total, results
def parse(query: str, ignore_zero: bool):
argument = query.split(" ")
results = []
    # character class is [me], not [m,e] -- a comma inside [...] matches a literal comma
    roll_pattern = re.compile(r"[0-9]+d[0-9]+\+?[me]?-?[0-9]?\+?[me]?-?[0-9]?")
mod_pattern = re.compile(r"\+t-?[0-9]?")
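    # "+t<N>" items are standalone flat bonuses folded into the session total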
for item in argument:
if roll_pattern.match(item):
p_roll, indv_rolls = parse_roll(item, ignore_zero)
if len(indv_rolls) == 1:
print(str(item) + " rolled " + str(p_roll)+"!")
else:
print(str(item) + " rolled " + str(p_roll) + "!")
print("Individual results:")
pprint(indv_rolls)
results.append(p_roll)
if mod_pattern.match(item):
results.append(int(item.strip("+t")))
print("Plus "+item.strip("+t"))
if not (roll_pattern.match(item) or mod_pattern.match(item)):
print("I think you might have entered something wrong. Check this one: "+item)
print("")
break
if sum(results) != 0:
print("The total of all rolls is: "+str(sum(results)))
print("")
else:
print("No rolls made.")
def main():
loop = True
ignore_zero = False
print("CLI Dice Roller")
print('Type out what you want rolled. Use "help" for assistance, or "quit" when done.')
print("")
while loop:
i = input(">")
match i:
case "help":
dice_help()
case "help 2":
dice_help_two()
case "credits":
dice_credits()
case "license":
file = str(sys.path[0] + "/LICENSE")
with open(file, "r") as f:
for line in f:
print(line)
case "00 mode":
ignore_zero = not ignore_zero
match ignore_zero:
case True:
print("Adding 1 to 00 rolls.")
print("")
case False:
print("Not adding 1 to 00 rolls.")
print("")
case "coinflip":
flip = roll(2, 0)
match flip:
case 1:
print("HEADS!")
case 2:
print("TAILS!")
case "quit":
loop = False
case other:
parse(other, ignore_zero)
if __name__ == '__main__':
if sys.stdin.isatty():
main()
| 30.181818
| 91
| 0.490462
|
import sys
import random
import re
from pprint import pprint
from Print.help import dice_help
from Print.help2 import dice_help_two
from Print.credits import dice_credits
standard_dice = [2, 4, 6, 8, 10, 00, 12, 20, 30, 100]
def roll(dice: int, idv_mod: int, ignore_zero: bool = False):
if dice != 00:
diceroll = random.randrange(1, dice+1)+idv_mod
return diceroll
elif dice == 00:
diceroll = random.randrange(0, 100, 10) + (1 if ignore_zero else 0)
return diceroll
def parse_roll(query: str, ignore_zero: bool = False):
q = query.split('d', 1)
quant = int(q[0])
which = q[1]
mod = 0
mod_each = 0
results = []
if "+" in which:
t = which.split("+")
which = t[0]
if t[1].startswith("m"):
mod += int(t[1].strip("m"))
if t[1].startswith("e"):
mod_each += int(t[1].strip("e"))
try:
if t[2].startswith("m"):
mod += int(t[2].strip("m"))
if t[2].startswith("e"):
mod_each += int(t[2].strip("e"))
except IndexError:
pass
which = int(which)
if which not in standard_dice:
print("An unconventional dice roll. I'll still roll it anyway...")
total = 0
for r in range(0, quant):
work = roll(which, mod_each, ignore_zero)
results.append(work)
total += work
total += mod
return total, results
def parse(query: str, ignore_zero: bool):
argument = query.split(" ")
results = []
    roll_pattern = re.compile(r"[0-9]+d[0-9]+\+?[me]?-?[0-9]?\+?[me]?-?[0-9]?")
mod_pattern = re.compile(r"\+t-?[0-9]?")
for item in argument:
if roll_pattern.match(item):
p_roll, indv_rolls = parse_roll(item, ignore_zero)
if len(indv_rolls) == 1:
print(str(item) + " rolled " + str(p_roll)+"!")
else:
print(str(item) + " rolled " + str(p_roll) + "!")
print("Individual results:")
pprint(indv_rolls)
results.append(p_roll)
if mod_pattern.match(item):
results.append(int(item.strip("+t")))
print("Plus "+item.strip("+t"))
if not (roll_pattern.match(item) or mod_pattern.match(item)):
print("I think you might have entered something wrong. Check this one: "+item)
print("")
break
if sum(results) != 0:
print("The total of all rolls is: "+str(sum(results)))
print("")
else:
print("No rolls made.")
def main():
loop = True
ignore_zero = False
print("CLI Dice Roller")
print('Type out what you want rolled. Use "help" for assistance, or "quit" when done.')
print("")
while loop:
i = input(">")
match i:
case "help":
dice_help()
case "help 2":
dice_help_two()
case "credits":
dice_credits()
case "license":
file = str(sys.path[0] + "/LICENSE")
with open(file, "r") as f:
for line in f:
print(line)
case "00 mode":
ignore_zero = not ignore_zero
match ignore_zero:
case True:
print("Adding 1 to 00 rolls.")
print("")
case False:
print("Not adding 1 to 00 rolls.")
print("")
case "coinflip":
flip = roll(2, 0)
match flip:
case 1:
print("HEADS!")
case 2:
print("TAILS!")
case "quit":
loop = False
case other:
parse(other, ignore_zero)
if __name__ == '__main__':
if sys.stdin.isatty():
main()
| true
| true
|
1c44b16a40d680be969d40f8ef3debeda1405080
| 7,935
|
py
|
Python
|
2019-2020/Lato/Sztuczna Inteligencja/Lista01/zad5.py
|
ldept/University
|
f5ec29dd1daa1c9dc2d1592c0ddab575146e80ee
|
[
"FTL"
] | null | null | null |
2019-2020/Lato/Sztuczna Inteligencja/Lista01/zad5.py
|
ldept/University
|
f5ec29dd1daa1c9dc2d1592c0ddab575146e80ee
|
[
"FTL"
] | null | null | null |
2019-2020/Lato/Sztuczna Inteligencja/Lista01/zad5.py
|
ldept/University
|
f5ec29dd1daa1c9dc2d1592c0ddab575146e80ee
|
[
"FTL"
] | null | null | null |
import random
from zad4 import opt_dist
def print_nonogram(nonogram):
print(" ", end='')
    for i in range(len(col_numbers)):
        print(col_numbers[i], end='')
print()
for index, row in enumerate(nonogram):
print(row_numbers[index], end='')
for col in row:
print("#" if col==1 else ".", end='')
print()
def print_nonogram_to_output(nonogram):
with open("zad5_output.txt", "w") as output_file:
for row in nonogram:
for col in row:
print("#" if col==1 else ".", end='', file=output_file)
print("", file=output_file)
row_numbers = []
col_numbers = []
with open("zad5_input.txt") as file:
row_size, col_size = [int(x) for x in next(file).split()]
row_break = row_size
for line in file:
row_numbers.append(int(line.split()[0]))
row_break-=1
if(row_break == 0):
break
for line in file:
col_numbers.append(int(line.split()[0]))
def init_ready_rows_cols(rows,cols):
ready_rows = [(0,-1)] * len(rows)
ready_cols = [(0,-1)] * len(cols)
for index, row in enumerate(rows):
dist = opt_dist(row, row_numbers[index])
if dist == 0:
ready_rows[index] = (1,dist)
else:
ready_rows[index] = (0,dist)
for index, col in enumerate(cols):
dist = opt_dist(col, col_numbers[index])
if dist == 0:
ready_cols[index] = (1,dist)
else:
ready_cols[index] = (0,dist)
return ready_rows, ready_cols
def update_ready_rows_cols(rows, ready_rows, cols, ready_cols, row_index, col_index):
# for i in range(len(rows)):
# row_dist = opt_dist(rows[row_index], row_numbers[row_index])
# ready_rows[row_index] = (1, row_dist) if row_dist == 0 else (0, row_dist)
# for i in range(len(cols)):
# col_dist = opt_dist(cols[col_index], col_numbers[col_index])
# ready_cols[col_index] = (1, col_dist) if col_dist == 0 else (0, col_dist)
row_dist = opt_dist(rows[row_index], row_numbers[row_index])
col_dist = opt_dist(cols[col_index], col_numbers[col_index])
ready_rows[row_index] = (1, row_dist) if row_dist == 0 else (0, row_dist)
ready_cols[col_index] = (1, col_dist) if col_dist == 0 else (0, col_dist)
def get_populated_grid():
return [[random.randint(0,1) for i in range(col_size)] for j in range(row_size)]
#return [[0]*(col_size-1) for i in range(row_size)]
def transpose(rows):
return [list(x) for x in zip(*rows)]
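# Earlier candidate-selection routine; solve() now calls find_best_col /
# find_best_row instead (see the commented-out calls further down).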
def find_best_index(ready_rows, ready_cols, rows, cols, row_index):
(col_index, max_dist) = (-1,-1)
row_dist = ready_rows[row_index][1]
cols_to_change = []
for index, (ready, col_dist) in enumerate(ready_cols):
test_col = cols[index].copy() #copy
test_col[row_index] = int(not test_col[row_index]) #change of that one (i,j) spot
prev_dist = col_dist + row_dist
if prev_dist == 0:
continue
test_row = rows[row_index].copy()
test_row[index] = int(not test_row[index])
new_dist = opt_dist(test_row, row_numbers[row_index]) + opt_dist(test_col, col_numbers[index]) #sum change in match level
if max_dist == -1 or prev_dist - new_dist > max_dist: #if new best then flush the list
cols_to_change = [index]
max_dist = prev_dist - new_dist
elif prev_dist - new_dist == max_dist and max_dist != -1: #if as good as previously checked - add to potential outcome
cols_to_change.append(index)
col_index = cols_to_change[random.randint(0,len(cols_to_change)-1)] #pick random col to change as outcome
return (col_index, max_dist) #every column in cols_to_change has the same dist difference
def init_grid():
rows = get_populated_grid()
cols = transpose(rows)
ready_rows, ready_cols = init_ready_rows_cols(rows, cols)
return rows,cols,ready_rows, ready_cols
def get_random_bad(bad_rows_or_cols):
if len(bad_rows_or_cols) == 0:
return -1, -1
    random_bad = random.randint(0, len(bad_rows_or_cols) - 1)
for index, elem in enumerate(bad_rows_or_cols):
if elem[0] == 0:
random_bad -= 1
if random_bad <= 0 and elem[0] == 0:
return index, elem[1]
def find_best_col(ready_rows, ready_cols, rows, cols, index):
row_dist = ready_rows[index][1]
best = 0
idx = 0
idx_to_change = []
for j in range(len(cols)):
col_dist = ready_cols[j][1]
test_col = cols[j].copy()
test_col[index] = int(not test_col[index])
test_row = rows[index].copy()
test_row[j] = int(not test_row[j])
new_row_dist = opt_dist(test_row, row_numbers[index])
new_col_dist = opt_dist(test_col, col_numbers[j])
#higher == better
change_indicator = (row_dist - new_row_dist) + (col_dist - new_col_dist)
if change_indicator > best:
best = change_indicator
idx_to_change = [j]
elif change_indicator == best:
idx_to_change.append(j)
return idx_to_change[random.randint(0,len(idx_to_change)-1)]
def find_best_row(ready_rows, ready_cols, rows, cols, index):
col_dist = ready_cols[index][1]
best = 0
idx = 0
idx_to_change = []
for i in range(len(rows)):
row_dist = ready_rows[i][1]
test_row = rows[i].copy()
test_row[index] = int(not test_row[index])
test_col = cols[index].copy()
test_col[i] = int(not test_col[i])
new_row_dist = opt_dist(test_row, row_numbers[i])
new_col_dist = opt_dist(test_col, col_numbers[index])
#higher == better
change_indicator = (row_dist - new_row_dist) + (col_dist - new_col_dist)
if change_indicator > best:
best = change_indicator
idx_to_change = [i]
elif change_indicator == best:
idx_to_change.append(i)
return idx_to_change[random.randint(0,len(idx_to_change)-1)]
def solve():
global row_numbers
global col_numbers
max_changes = row_size * col_size * 2
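    # Random-restart hill climbing: repeatedly flip the single cell that most
    # reduces the total row/column mismatch; after max_changes flips without
    # a solution, reinitialise the grid and start over.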
while True:
rows, cols, ready_rows, ready_cols = init_grid() #start again
print("STARTING NONOGRAM:")
print_nonogram(rows)
for i in range(max_changes):
bad_rows = [(index, "row") for index in range(len(ready_rows)) if ready_rows[index][0] == 0]
bad_cols = [(index, "col") for index in range(len(ready_cols)) if ready_cols[index][0] == 0]
bad_rows_and_cols = bad_cols + bad_rows
if len(bad_rows_and_cols) == 0:
print("ENDING NONOGRAM:")
print_nonogram(rows)
print_nonogram_to_output(rows)
return 0
#First fix all rows
if len(bad_rows) != 0:
(index, row_or_col) = bad_rows[random.randint(0,len(bad_rows)-1)]
else: #if no rows left to fix start fixing columns
(index, row_or_col) = bad_rows_and_cols[random.randint(0, len(bad_rows_and_cols) - 1)]
if(row_or_col == "col"):
row = find_best_row(ready_rows,ready_cols,rows,cols,index)# (row, dist) = find_best_index(ready_cols, ready_rows, cols, rows, index)
cols[index][row] = int(not cols[index][row])
rows[row][index] = int(not rows[row][index])
update_ready_rows_cols(rows, ready_rows, cols, ready_cols, row, index)
else:
col = find_best_col(ready_rows,ready_cols,rows,cols,index)#(col, dist) = find_best_index(ready_rows, ready_cols, rows, cols, index)
rows[index][col] = int(not rows[index][col])
cols[col][index] = int(not cols[col][index])
update_ready_rows_cols(rows, ready_rows, cols, ready_cols, index, col)
print("At least i tried")
print_nonogram(rows)
solve()
| 40.075758
| 148
| 0.616761
|
import random
from zad4 import opt_dist
def print_nonogram(nonogram):
print(" ", end='')
    for i in range(len(col_numbers)):
        print(col_numbers[i], end='')
print()
for index, row in enumerate(nonogram):
print(row_numbers[index], end='')
for col in row:
print("#" if col==1 else ".", end='')
print()
def print_nonogram_to_output(nonogram):
with open("zad5_output.txt", "w") as output_file:
for row in nonogram:
for col in row:
print("#" if col==1 else ".", end='', file=output_file)
print("", file=output_file)
row_numbers = []
col_numbers = []
with open("zad5_input.txt") as file:
row_size, col_size = [int(x) for x in next(file).split()]
row_break = row_size
for line in file:
row_numbers.append(int(line.split()[0]))
row_break-=1
if(row_break == 0):
break
for line in file:
col_numbers.append(int(line.split()[0]))
def init_ready_rows_cols(rows,cols):
ready_rows = [(0,-1)] * len(rows)
ready_cols = [(0,-1)] * len(cols)
for index, row in enumerate(rows):
dist = opt_dist(row, row_numbers[index])
if dist == 0:
ready_rows[index] = (1,dist)
else:
ready_rows[index] = (0,dist)
for index, col in enumerate(cols):
dist = opt_dist(col, col_numbers[index])
if dist == 0:
ready_cols[index] = (1,dist)
else:
ready_cols[index] = (0,dist)
return ready_rows, ready_cols
def update_ready_rows_cols(rows, ready_rows, cols, ready_cols, row_index, col_index):
row_dist = opt_dist(rows[row_index], row_numbers[row_index])
col_dist = opt_dist(cols[col_index], col_numbers[col_index])
ready_rows[row_index] = (1, row_dist) if row_dist == 0 else (0, row_dist)
ready_cols[col_index] = (1, col_dist) if col_dist == 0 else (0, col_dist)
def get_populated_grid():
return [[random.randint(0,1) for i in range(col_size)] for j in range(row_size)]
def transpose(rows):
return [list(x) for x in zip(*rows)]
def find_best_index(ready_rows, ready_cols, rows, cols, row_index):
(col_index, max_dist) = (-1,-1)
row_dist = ready_rows[row_index][1]
cols_to_change = []
for index, (ready, col_dist) in enumerate(ready_cols):
test_col = cols[index].copy()
test_col[row_index] = int(not test_col[row_index])
prev_dist = col_dist + row_dist
if prev_dist == 0:
continue
test_row = rows[row_index].copy()
test_row[index] = int(not test_row[index])
new_dist = opt_dist(test_row, row_numbers[row_index]) + opt_dist(test_col, col_numbers[index])
if max_dist == -1 or prev_dist - new_dist > max_dist:
cols_to_change = [index]
max_dist = prev_dist - new_dist
elif prev_dist - new_dist == max_dist and max_dist != -1:
cols_to_change.append(index)
col_index = cols_to_change[random.randint(0,len(cols_to_change)-1)]
return (col_index, max_dist)
def init_grid():
rows = get_populated_grid()
cols = transpose(rows)
ready_rows, ready_cols = init_ready_rows_cols(rows, cols)
return rows,cols,ready_rows, ready_cols
def get_random_bad(bad_rows_or_cols):
if len(bad_rows_or_cols) == 0:
return -1, -1
    random_bad = random.randint(0, len(bad_rows_or_cols) - 1)
for index, elem in enumerate(bad_rows_or_cols):
if elem[0] == 0:
random_bad -= 1
if random_bad <= 0 and elem[0] == 0:
return index, elem[1]
def find_best_col(ready_rows, ready_cols, rows, cols, index):
row_dist = ready_rows[index][1]
best = 0
idx = 0
idx_to_change = []
for j in range(len(cols)):
col_dist = ready_cols[j][1]
test_col = cols[j].copy()
test_col[index] = int(not test_col[index])
test_row = rows[index].copy()
test_row[j] = int(not test_row[j])
new_row_dist = opt_dist(test_row, row_numbers[index])
new_col_dist = opt_dist(test_col, col_numbers[j])
change_indicator = (row_dist - new_row_dist) + (col_dist - new_col_dist)
if change_indicator > best:
best = change_indicator
idx_to_change = [j]
elif change_indicator == best:
idx_to_change.append(j)
return idx_to_change[random.randint(0,len(idx_to_change)-1)]
def find_best_row(ready_rows, ready_cols, rows, cols, index):
col_dist = ready_cols[index][1]
best = 0
idx = 0
idx_to_change = []
for i in range(len(rows)):
row_dist = ready_rows[i][1]
test_row = rows[i].copy()
test_row[index] = int(not test_row[index])
test_col = cols[index].copy()
test_col[i] = int(not test_col[i])
new_row_dist = opt_dist(test_row, row_numbers[i])
new_col_dist = opt_dist(test_col, col_numbers[index])
change_indicator = (row_dist - new_row_dist) + (col_dist - new_col_dist)
if change_indicator > best:
best = change_indicator
idx_to_change = [i]
elif change_indicator == best:
idx_to_change.append(i)
return idx_to_change[random.randint(0,len(idx_to_change)-1)]
def solve():
global row_numbers
global col_numbers
max_changes = row_size * col_size * 2
while True:
rows, cols, ready_rows, ready_cols = init_grid()
print("STARTING NONOGRAM:")
print_nonogram(rows)
for i in range(max_changes):
bad_rows = [(index, "row") for index in range(len(ready_rows)) if ready_rows[index][0] == 0]
bad_cols = [(index, "col") for index in range(len(ready_cols)) if ready_cols[index][0] == 0]
bad_rows_and_cols = bad_cols + bad_rows
if len(bad_rows_and_cols) == 0:
print("ENDING NONOGRAM:")
print_nonogram(rows)
print_nonogram_to_output(rows)
return 0
if len(bad_rows) != 0:
(index, row_or_col) = bad_rows[random.randint(0,len(bad_rows)-1)]
else:
(index, row_or_col) = bad_rows_and_cols[random.randint(0, len(bad_rows_and_cols) - 1)]
if(row_or_col == "col"):
row = find_best_row(ready_rows,ready_cols,rows,cols,index)
cols[index][row] = int(not cols[index][row])
rows[row][index] = int(not rows[row][index])
update_ready_rows_cols(rows, ready_rows, cols, ready_cols, row, index)
else:
col = find_best_col(ready_rows,ready_cols,rows,cols,index)
rows[index][col] = int(not rows[index][col])
cols[col][index] = int(not cols[col][index])
update_ready_rows_cols(rows, ready_rows, cols, ready_cols, index, col)
print("At least i tried")
print_nonogram(rows)
solve()
| true
| true
|
1c44b1733cd51ce1847605ee462170363bfdd49d
| 1,671
|
py
|
Python
|
post/models.py
|
bluesky0960/MiniProject1-DjangoWebApp_n
|
368457dfde8ba6601b82ff218aa3bb3eed639a5a
|
[
"MIT"
] | null | null | null |
post/models.py
|
bluesky0960/MiniProject1-DjangoWebApp_n
|
368457dfde8ba6601b82ff218aa3bb3eed639a5a
|
[
"MIT"
] | null | null | null |
post/models.py
|
bluesky0960/MiniProject1-DjangoWebApp_n
|
368457dfde8ba6601b82ff218aa3bb3eed639a5a
|
[
"MIT"
] | null | null | null |
from django.db import models
from ckeditor.fields import RichTextField
from tagging.fields import TagField
from django.conf import settings
class Post(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
author = models.CharField(max_length=200, default=None)
subject = models.CharField(max_length=200)
description = models.TextField(blank=True)
content = RichTextField()
register_date = models.DateTimeField()
solved = models.BooleanField(default=False)
tags = TagField()
likes = models.ManyToManyField(settings.AUTH_USER_MODEL, related_name="like_posts", blank=True)
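    # users who liked this post; related_name exposes the reverse relation as
    # user.like_posts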
def __str__(self):
return self.subject
@property
def get_comment_cnt(self):
cmt_cnt = Comment.objects.filter(post = self.pk)
return len(cmt_cnt)
@property
def get_tag_list(self):
return self.tags.split(', ')
@property
def get_like_cnt(self):
return len(self.likes.all())
def is_like_user(self, user):
return self.likes.filter(pk=user.pk).exists()
class Meta:
ordering=['-register_date']
class Comment(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
author = models.CharField(max_length=200, default=None)
post = models.ForeignKey(Post, on_delete=models.CASCADE)
description = models.TextField(blank=True)
content = models.TextField(blank=True)
register_date = models.DateTimeField()
choice = models.BooleanField(default=False)
check = models.BooleanField(default=False)
| 34.102041
| 100
| 0.701376
|
from django.db import models
from ckeditor.fields import RichTextField
from tagging.fields import TagField
from django.conf import settings
class Post(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
author = models.CharField(max_length=200, default=None)
subject = models.CharField(max_length=200)
description = models.TextField(blank=True)
content = RichTextField()
register_date = models.DateTimeField()
solved = models.BooleanField(default=False)
tags = TagField()
likes = models.ManyToManyField(settings.AUTH_USER_MODEL, related_name="like_posts", blank=True)
def __str__(self):
return self.subject
@property
def get_comment_cnt(self):
cmt_cnt = Comment.objects.filter(post = self.pk)
return len(cmt_cnt)
@property
def get_tag_list(self):
return self.tags.split(', ')
@property
def get_like_cnt(self):
return len(self.likes.all())
def is_like_user(self, user):
return self.likes.filter(pk=user.pk).exists()
class Meta:
ordering=['-register_date']
class Comment(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
author = models.CharField(max_length=200, default=None)
post = models.ForeignKey(Post, on_delete=models.CASCADE)
description = models.TextField(blank=True)
content = models.TextField(blank=True)
register_date = models.DateTimeField()
choice = models.BooleanField(default=False)
check = models.BooleanField(default=False)
| true
| true
|
1c44b2b95aa8c78f49081c3ed36d75f8f3b16d67
| 4,321
|
py
|
Python
|
data/process_data.py
|
akiaohk/Udacity-Disaster-Response-Pipelines
|
9a6042a0d288381c1310de1948121bccf647f418
|
[
"RSA-MD"
] | null | null | null |
data/process_data.py
|
akiaohk/Udacity-Disaster-Response-Pipelines
|
9a6042a0d288381c1310de1948121bccf647f418
|
[
"RSA-MD"
] | null | null | null |
data/process_data.py
|
akiaohk/Udacity-Disaster-Response-Pipelines
|
9a6042a0d288381c1310de1948121bccf647f418
|
[
"RSA-MD"
] | null | null | null |
"""
Project: Disaster Response Pipeline
Script Syntax for execution:
> python process_data.py <path to messages csv file> <path to categories csv file> <path to sqlite destination db>
> python process_data.py disaster_messages.csv disaster_categories.csv DisasterResponse.db
"""
# Import libraries
import sys
import pandas as pd
from sqlalchemy import create_engine
def load_data(messages_filepath, categories_filepath):
"""
Load messages and categories datasets and merge using common id function.
Arguments:
messages_filepath -> csv path of file containing messages
categories_filepath -> csv path of file containing categories
Output:
df -> combined dataset of messages and categories
"""
# load messages dataset
messages = pd.read_csv(messages_filepath)
# load categories dataset
categories = pd.read_csv(categories_filepath)
# merge datasets
df = messages.merge(categories, on = ['id'])
return df
def clean_data(df):
"""
Clean Categories Data Function
Arguments:
df -> combined data containing messages and categories
Outputs:
df -> combined data containing messages and categories with categories cleaned up
"""
# split the values in the categories column on the ';'
categories = df.categories.str.split(';', expand=True)
# use the first row of categories dataframe to create column names for the categories data
row = categories.iloc[0]
category_colnames = row.map(lambda x: str(x)[:-2])
categories.columns = category_colnames
# convert category values to just numbers 0 or 1
for column in categories:
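        # each cell looks like "<category>-<0 or 1>"; keep the trailing digit
        # and cast it to int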
categories[column] = pd.Series([str(x)[-1] for x in categories[column]])
categories[column] = categories[column].astype(int)
# replace categories column in df with the new category columns
df.drop(columns=['categories'], inplace=True)
df = pd.concat([df, categories], axis=1)
df.drop_duplicates(inplace=True)
# Remove child_alone as it has all zeros
df = df.drop(['child_alone'],axis=1)
    # There is a category 2 in the 'related' column. This could be an error.
    # In the absence of any information, we assume it to be 1 as the majority class.
    df['related'] = df['related'].map(lambda x: 1 if x == 2 else x)
return df
def save_data(df, database_filename):
"""
Save the clean dataset into an sqlite database function.
Arguments:
df -> combined dataset of messages and categories cleaned
database_filename -> path to SQLite database
"""
    # save the clean dataset into an sqlite database
    engine = create_engine('sqlite:///' + database_filename)
    # write through the engine so the data lands in the database named by the
    # database_filename argument rather than in a hard-coded path
    table_name = database_filename.replace(".db", "") + "_table"
    df.to_sql(table_name, con=engine, if_exists='replace', index=False)
def main():
"""
Main function which will kick off the data processing functions. There are three primary actions taken by this function:
1) Load Messages Data with Categories
2) Clean Categories Data
3) Save Data to SQLite Database
"""
if len(sys.argv) == 4:
messages_filepath, categories_filepath, database_filepath = sys.argv[1:]
print('Loading data...\n MESSAGES: {}\n CATEGORIES: {}'
.format(messages_filepath, categories_filepath))
df = load_data(messages_filepath, categories_filepath)
print('Cleaning data...')
df = clean_data(df)
print('Saving data...\n DATABASE: {}'.format(database_filepath))
save_data(df, database_filepath)
print('Cleaned data saved to database!')
else:
print('Please provide the filepaths of the messages and categories '\
'datasets as the first and second argument respectively, as '\
'well as the filepath of the database to save the cleaned data '\
'to as the third argument. \n\nExample: python process_data.py '\
'disaster_messages.csv disaster_categories.csv '\
'DisasterResponse.db')
if __name__ == '__main__':
main()
| 35.130081
| 124
| 0.674612
|
import sys
import pandas as pd
from sqlalchemy import create_engine
def load_data(messages_filepath, categories_filepath):
messages = pd.read_csv(messages_filepath)
categories = pd.read_csv(categories_filepath)
df = messages.merge(categories, on = ['id'])
return df
def clean_data(df):
categories = df.categories.str.split(';', expand=True)
row = categories.iloc[0]
category_colnames = row.map(lambda x: str(x)[:-2])
categories.columns = category_colnames
for column in categories:
categories[column] = pd.Series([str(x)[-1] for x in categories[column]])
categories[column] = categories[column].astype(int)
df.drop(columns=['categories'], inplace=True)
df = pd.concat([df, categories], axis=1)
df.drop_duplicates(inplace=True)
df = df.drop(['child_alone'],axis=1)
df['related']=df['related'].map(lambda x: 1 if x == 2 else x)
return df
def save_data(df, database_filename):
    engine = create_engine('sqlite:///' + database_filename)
    table_name = database_filename.replace(".db", "") + "_table"
    df.to_sql(table_name, con=engine, if_exists='replace', index=False)
def main():
if len(sys.argv) == 4:
messages_filepath, categories_filepath, database_filepath = sys.argv[1:]
print('Loading data...\n MESSAGES: {}\n CATEGORIES: {}'
.format(messages_filepath, categories_filepath))
df = load_data(messages_filepath, categories_filepath)
print('Cleaning data...')
df = clean_data(df)
print('Saving data...\n DATABASE: {}'.format(database_filepath))
save_data(df, database_filepath)
print('Cleaned data saved to database!')
else:
print('Please provide the filepaths of the messages and categories '\
'datasets as the first and second argument respectively, as '\
'well as the filepath of the database to save the cleaned data '\
'to as the third argument. \n\nExample: python process_data.py '\
'disaster_messages.csv disaster_categories.csv '\
'DisasterResponse.db')
if __name__ == '__main__':
main()
| true
| true
|
1c44b2d75035252cf0824ee80a4607398785d535
| 148
|
py
|
Python
|
tests/context.py
|
tetio/green_snake
|
014d5cf4c96858abb09ee1a4bda0ee84b80b5666
|
[
"BSD-2-Clause"
] | null | null | null |
tests/context.py
|
tetio/green_snake
|
014d5cf4c96858abb09ee1a4bda0ee84b80b5666
|
[
"BSD-2-Clause"
] | null | null | null |
tests/context.py
|
tetio/green_snake
|
014d5cf4c96858abb09ee1a4bda0ee84b80b5666
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
import sys
import os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import green_snake
| 21.142857
| 82
| 0.689189
|
import sys
import os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import green_snake
| true
| true
|
1c44b32a5d9cf46b962dd89bdbf7c6993962dd46
| 9,077
|
py
|
Python
|
convokit/politeness_collections/politeness_api/features/politeness_strategies.py
|
CornellNLP/Cornell-Conversational-Analysis-Toolkit
|
0bc4d1a4baf25eec0861440dc3166c60d4cbe339
|
[
"MIT"
] | 371
|
2016-07-19T22:10:13.000Z
|
2022-03-28T08:04:32.000Z
|
convokit/politeness_collections/politeness_api/features/politeness_strategies.py
|
CornellNLP/Cornell-Conversational-Analysis-Toolkit
|
0bc4d1a4baf25eec0861440dc3166c60d4cbe339
|
[
"MIT"
] | 92
|
2017-07-25T22:04:11.000Z
|
2022-03-29T13:46:07.000Z
|
convokit/politeness_collections/politeness_api/features/politeness_strategies.py
|
CornellNLP/Cornell-Conversational-Analysis-Toolkit
|
0bc4d1a4baf25eec0861440dc3166c60d4cbe339
|
[
"MIT"
] | 105
|
2016-07-04T15:04:53.000Z
|
2022-03-30T01:36:38.000Z
|
import pkg_resources
import os
#####
# Word lists
hedges = [
"think", "thought", "thinking", "almost",
"apparent", "apparently", "appear", "appeared", "appears", "approximately", "around",
"assume", "assumed", "certain amount", "certain extent", "certain level", "claim",
"claimed", "doubt", "doubtful", "essentially", "estimate",
"estimated", "feel", "felt", "frequently", "from our perspective", "generally", "guess",
"in general", "in most cases", "in most instances", "in our view", "indicate", "indicated",
"largely", "likely", "mainly", "may", "maybe", "might", "mostly", "often", "on the whole",
"ought", "perhaps", "plausible", "plausibly", "possible", "possibly", "postulate",
"postulated", "presumable", "probable", "probably", "relatively", "roughly", "seems",
"should", "sometimes", "somewhat", "suggest", "suggested", "suppose", "suspect", "tend to",
"tends to", "typical", "typically", "uncertain", "uncertainly", "unclear", "unclearly",
"unlikely", "usually", "broadly", "tended to", "presumably", "suggests",
"from this perspective", "from my perspective", "in my view", "in this view", "in our opinion",
"in my opinion", "to my knowledge", "fairly", "quite", "rather", "argue", "argues", "argued",
"claims", "feels", "indicates", "supposed", "supposes", "suspects", "postulates"
]
# Positive and negative words from Liu
pos_filename = pkg_resources.resource_filename("convokit",
os.path.join("data", "liu-positive-words.txt"))
neg_filename = pkg_resources.resource_filename("convokit",
os.path.join("data", "liu-negative-words.txt"))
positive_words = set(map(lambda x: x.strip(), open(pos_filename).read().splitlines()))
negative_words = set(map(lambda x: x.strip(), open(neg_filename, encoding="ISO-8859-1").read().splitlines()))
#####
# Lambda Functions
please = lambda p: check_word([{"tok":"_"}] + p[1:], ["please"])
please.__name__ = "Please"
please_start = lambda p: check_word_at(p, 0, tok=["please"])
please_start.__name__ = "Please start"
has_hedge = lambda p: check_word(p, tok=hedges)
has_hedge.__name__ = "HASHEDGE"
btw = lambda p: check_word_at(p, 2, up_tok=["by"], tok=["way"], dep=["pobj"])
btw.__name__ = "Indirect (btw)"
hashedges = lambda p: check_word(p, dep=["nsubj"], up_tok=hedges)
hashedges.__name__ = "Hedges"
factuality = lambda p: combine_results([check_word(p, up_tok=["in"], tok=["fact"], dep=["pobj"]),
check_word(p, tok=["the"], up_tok=["point", "reality", "truth"], dep=["det"], precede=["point", "reality", "truth"]),
check_word(p, tok=["really","actually","honestly","surely"])])
factuality.__name__ = "Factuality"
deference = lambda p: check_word_at(p, 0, tok=["great","good","nice","good","interesting","cool","excellent","awesome"])
deference.__name__ = "Deference"
gratitude = lambda p: combine_results([check_word(p, tok=["thank","thanks"]), check_word(p, tok=["i"], up_tok=["appreciate"])])
gratitude.__name__ = "Gratitude"
apologize = lambda p: combine_results([check_word(p, tok=["sorry","woops","oops"]),
check_word(p, tok=["i"], up_tok=["apologize"], dep=["nsubj"]),
check_word(p, tok=["me"], up_tok=["forgive", "excuse"], dep=["dobj"])])
apologize.__name__ = "Apologizing"
groupidentity = lambda p: check_word(p, tok=["we", "our", "us", "ourselves"])
groupidentity.__name__ = "1st person pl."
firstperson = lambda p: check_word([{"tok":"_"}] + p[1:], tok= ["i", "my", "mine", "myself"])
firstperson.__name__ = "1st person"
firstperson_start = lambda p: check_word_at(p, 0, tok=["i","my","mine","myself"])
firstperson_start.__name__ = "1st person start"
secondperson = lambda p: check_word([{"tok":"_"}] + p[1:], tok= ["you","your","yours","yourself"])
secondperson.__name__ = "2nd person"
secondperson_start = lambda p: check_word_at(p, 0, tok=["you","your","yours","yourself"])
secondperson_start.__name__ = "2nd person start"
hello = lambda p: check_word_at(p, 0, tok=["hi","hello","hey"])
hello.__name__ = "Indirect (greeting)"
why = lambda p: check_word(p[:2], tok=["what","why","who","how"])
why.__name__ = "Direct question"
conj = lambda p: check_word_at(p, 0, tok=["so","then","and","but","or"])
conj.__name__ = "Direct start"
has_positive = lambda p: check_word(p, tok=positive_words)
has_positive.__name__ = "HASPOSITIVE"
has_negative = lambda p: check_word(p, tok=negative_words)
has_negative.__name__ = "HASNEGATIVE"
subjunctive = lambda p: check_word(p, tok=["could", "would"], precede=["you"])
subjunctive.__name__ = "SUBJUNCTIVE"
indicative = lambda p: check_word(p, tok=["can", "will"], precede=["you"])
indicative.__name__ = "INDICATIVE"
#####
# Helper functions and variables
def combine_results(lst):
"""
combines list of results
ex: [[1, ["hey", 0]], [0,[]], [1, ["you", 1]]] -> [1, [["hey", 0],["you", 1]]]
"""
a = 0; b = []
for x in lst:
a = max(a, x[0])
if x[1] != []:
b += x[1]
return a, b
def check_word_at(p, ind, tok = None, dep = None, up_tok = None, up_dep = None, precede = None):
"""
Returns an indicator and a marker
If parameters match word at index:
returns 1, [tok at ind, ind]
Else:
returns 0, []
"""
if len(p) <= ind:
return 0, []
if tok != None and p[ind]["tok"].lower() not in tok:
return 0, []
if dep != None and p[ind]["dep"] not in dep:
return 0, []
if up_tok != None and ("up" not in p[ind] or p[p[ind]["up"]]["tok"].lower() not in up_tok):
return 0, []
if up_dep != None and p[p[ind]["up"]]["dep"] not in up_dep:
return 0, []
if precede != None and (len(p) <= ind + 1 or p[ind+1]["tok"] not in precede):
return 0, []
return 1, [[(p[ind]["tok"], ind)]]
def check_word(p, tok = None, dep = None, up_tok = None, up_dep = None, precede = None):
"""
Returns an indicator and a marker
If parameters match any word in the sentence:
        returns 1, markers for each occurrence
Else:
returns 0, []
"""
toks = []
for ind, x in enumerate(p):
if tok != None and x["tok"].lower() not in tok:
continue
if dep != None and x["dep"] not in dep:
continue
if up_tok != None and ("up" not in x or p[x["up"]]["tok"].lower() not in up_tok):
continue
if up_dep != None and p[x["up"]]["dep"] not in up_dep:
continue
if precede != None and (len(p) <= ind + 1 or p[ind + 1]["tok"] not in precede):
continue
if up_tok != None:
toks += [[(x["tok"], ind), (p[x["up"]]["tok"].lower() , x["up"])]]
else:
toks += [[(x["tok"], ind)]]
if toks != []:
return 1, toks
else:
return 0, []
# Feature function list
F = [please, please_start, has_hedge, btw, hashedges, factuality, deference, gratitude, apologize, groupidentity,
firstperson, firstperson_start, secondperson, secondperson_start, hello, why, conj, has_positive, has_negative,
subjunctive, indicative]
fnc2feature_name = lambda f, keys: [key + "_==%s==" % f.__name__.replace(" ","_") for key in keys]
def get_politeness_strategy_features(parses):
"""
    :param parses: the parsed sentences of the utterance to be processed
    :type parses: list, taken from the utterance's meta; utt.meta is a
        dictionary with the following form:
{
parsed: [
{ 'rt': 5
'toks': [{'dep': 'intj', 'dn': [], 'tag': 'UH', 'tok': 'hello', 'up': 2}, #sent 1, word 1
... {sent 1 word 2} ,{sent 1 word 3}...]
},
{ 'rt': 12
'toks': [{'dep': 'nsubj', 'dn': [], 'tag': 'PRP', 'tok': 'i', 'up': 1}, # sent 2, word 1
{'dep': 'ROOT', 'dn': [0, 2, 3], 'tag': 'VBP', 'tok': 'need'},
...]
}
]
}
Returns- feature dictionary and marker dictionary
feature dictionary:
{
feature_name: 1 or 0
}
marker dictionary:
{
marker_name: list of [token, sentence index, word index]
}
"""
#build dictionary
features = {}
markers = {}
for fnc in F:
f = fnc2feature_name(fnc, ["feature_politeness", "politeness_markers"])
features[f[0]] = 0
markers[f[1]] = []
# runs lambda functions
for sent_ind, sentence in enumerate(parses):
for fnc in F:
feature, marker = fnc(sentence)
f = fnc2feature_name(fnc, ["feature_politeness", "politeness_markers"])
features[f[0]] = max(features[f[0]], feature)
# adds sent_ind to marker information
if len(marker) > 0:
for occ in marker:
markers[f[1]] += [[(mark[0], sent_ind, mark[1]) for mark in occ]]
return features, markers
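# Minimal usage sketch (an illustrative assumption, not part of the original
# module): each element of `parses` is passed straight to the strategy
# lambdas, which index it like a list of token dicts with "tok"/"dep"/"up"
# keys -- i.e. the "toks" list from the parse format documented above.
#
#   parses = [[{"tok": "please", "dep": "intj", "dn": [], "up": 1},
#              {"tok": "help", "dep": "ROOT", "dn": [0]}]]
#   features, markers = get_politeness_strategy_features(parses)
#   features["feature_politeness_==Please_start=="]  # -> 1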
| 39.125
| 157
| 0.577393
|
import pkg_resources
import os
hedges = [
"think", "thought", "thinking", "almost",
"apparent", "apparently", "appear", "appeared", "appears", "approximately", "around",
"assume", "assumed", "certain amount", "certain extent", "certain level", "claim",
"claimed", "doubt", "doubtful", "essentially", "estimate",
"estimated", "feel", "felt", "frequently", "from our perspective", "generally", "guess",
"in general", "in most cases", "in most instances", "in our view", "indicate", "indicated",
"largely", "likely", "mainly", "may", "maybe", "might", "mostly", "often", "on the whole",
"ought", "perhaps", "plausible", "plausibly", "possible", "possibly", "postulate",
"postulated", "presumable", "probable", "probably", "relatively", "roughly", "seems",
"should", "sometimes", "somewhat", "suggest", "suggested", "suppose", "suspect", "tend to",
"tends to", "typical", "typically", "uncertain", "uncertainly", "unclear", "unclearly",
"unlikely", "usually", "broadly", "tended to", "presumably", "suggests",
"from this perspective", "from my perspective", "in my view", "in this view", "in our opinion",
"in my opinion", "to my knowledge", "fairly", "quite", "rather", "argue", "argues", "argued",
"claims", "feels", "indicates", "supposed", "supposes", "suspects", "postulates"
]
pos_filename = pkg_resources.resource_filename("convokit",
os.path.join("data", "liu-positive-words.txt"))
neg_filename = pkg_resources.resource_filename("convokit",
os.path.join("data", "liu-negative-words.txt"))
positive_words = set(map(lambda x: x.strip(), open(pos_filename).read().splitlines()))
negative_words = set(map(lambda x: x.strip(), open(neg_filename, encoding="ISO-8859-1").read().splitlines()))
please = lambda p: check_word([{"tok":"_"}] + p[1:], ["please"])
please.__name__ = "Please"
please_start = lambda p: check_word_at(p, 0, tok=["please"])
please_start.__name__ = "Please start"
has_hedge = lambda p: check_word(p, tok=hedges)
has_hedge.__name__ = "HASHEDGE"
btw = lambda p: check_word_at(p, 2, up_tok=["by"], tok=["way"], dep=["pobj"])
btw.__name__ = "Indirect (btw)"
hashedges = lambda p: check_word(p, dep=["nsubj"], up_tok=hedges)
hashedges.__name__ = "Hedges"
factuality = lambda p: combine_results([check_word(p, up_tok=["in"], tok=["fact"], dep=["pobj"]),
check_word(p, tok=["the"], up_tok=["point", "reality", "truth"], dep=["det"], precede=["point", "reality", "truth"]),
check_word(p, tok=["really","actually","honestly","surely"])])
factuality.__name__ = "Factuality"
deference = lambda p: check_word_at(p, 0, tok=["great","good","nice","good","interesting","cool","excellent","awesome"])
deference.__name__ = "Deference"
gratitude = lambda p: combine_results([check_word(p, tok=["thank","thanks"]), check_word(p, tok=["i"], up_tok=["appreciate"])])
gratitude.__name__ = "Gratitude"
apologize = lambda p: combine_results([check_word(p, tok=["sorry","woops","oops"]),
check_word(p, tok=["i"], up_tok=["apologize"], dep=["nsubj"]),
check_word(p, tok=["me"], up_tok=["forgive", "excuse"], dep=["dobj"])])
apologize.__name__ = "Apologizing"
groupidentity = lambda p: check_word(p, tok=["we", "our", "us", "ourselves"])
groupidentity.__name__ = "1st person pl."
firstperson = lambda p: check_word([{"tok":"_"}] + p[1:], tok= ["i", "my", "mine", "myself"])
firstperson.__name__ = "1st person"
firstperson_start = lambda p: check_word_at(p, 0, tok=["i","my","mine","myself"])
firstperson_start.__name__ = "1st person start"
secondperson = lambda p: check_word([{"tok":"_"}] + p[1:], tok= ["you","your","yours","yourself"])
secondperson.__name__ = "2nd person"
secondperson_start = lambda p: check_word_at(p, 0, tok=["you","your","yours","yourself"])
secondperson_start.__name__ = "2nd person start"
hello = lambda p: check_word_at(p, 0, tok=["hi","hello","hey"])
hello.__name__ = "Indirect (greeting)"
why = lambda p: check_word(p[:2], tok=["what","why","who","how"])
why.__name__ = "Direct question"
conj = lambda p: check_word_at(p, 0, tok=["so","then","and","but","or"])
conj.__name__ = "Direct start"
has_positive = lambda p: check_word(p, tok=positive_words)
has_positive.__name__ = "HASPOSITIVE"
has_negative = lambda p: check_word(p, tok=negative_words)
has_negative.__name__ = "HASNEGATIVE"
subjunctive = lambda p: check_word(p, tok=["could", "would"], precede=["you"])
subjunctive.__name__ = "SUBJUNCTIVE"
indicative = lambda p: check_word(p, tok=["can", "will"], precede=["you"])
indicative.__name__ = "INDICATIVE"
def combine_results(lst):
a = 0; b = []
for x in lst:
a = max(a, x[0])
if x[1] != []:
b += x[1]
return a, b
def check_word_at(p, ind, tok = None, dep = None, up_tok = None, up_dep = None, precede = None):
if len(p) <= ind:
return 0, []
if tok != None and p[ind]["tok"].lower() not in tok:
return 0, []
if dep != None and p[ind]["dep"] not in dep:
return 0, []
if up_tok != None and ("up" not in p[ind] or p[p[ind]["up"]]["tok"].lower() not in up_tok):
return 0, []
if up_dep != None and p[p[ind]["up"]]["dep"] not in up_dep:
return 0, []
if precede != None and (len(p) <= ind + 1 or p[ind+1]["tok"] not in precede):
return 0, []
return 1, [[(p[ind]["tok"], ind)]]
def check_word(p, tok = None, dep = None, up_tok = None, up_dep = None, precede = None):
toks = []
for ind, x in enumerate(p):
if tok != None and x["tok"].lower() not in tok:
continue
if dep != None and x["dep"] not in dep:
continue
if up_tok != None and ("up" not in x or p[x["up"]]["tok"].lower() not in up_tok):
continue
if up_dep != None and p[x["up"]]["dep"] not in up_dep:
continue
if precede != None and (len(p) <= ind + 1 or p[ind + 1]["tok"] not in precede):
continue
if up_tok != None:
toks += [[(x["tok"], ind), (p[x["up"]]["tok"].lower() , x["up"])]]
else:
toks += [[(x["tok"], ind)]]
if toks != []:
return 1, toks
else:
return 0, []
F = [please, please_start, has_hedge, btw, hashedges, factuality, deference, gratitude, apologize, groupidentity,
firstperson, firstperson_start, secondperson, secondperson_start, hello, why, conj, has_positive, has_negative,
subjunctive, indicative]
fnc2feature_name = lambda f, keys: [key + "_==%s==" % f.__name__.replace(" ","_") for key in keys]
def get_politeness_strategy_features(parses):
features = {}
markers = {}
for fnc in F:
f = fnc2feature_name(fnc, ["feature_politeness", "politeness_markers"])
features[f[0]] = 0
markers[f[1]] = []
for sent_ind, sentence in enumerate(parses):
for fnc in F:
feature, marker = fnc(sentence)
f = fnc2feature_name(fnc, ["feature_politeness", "politeness_markers"])
features[f[0]] = max(features[f[0]], feature)
if len(marker) > 0:
for occ in marker:
markers[f[1]] += [[(mark[0], sent_ind, mark[1]) for mark in occ]]
return features, markers
| true
| true
|
1c44b3943158b819f95a2467b6baf9a67b1af264
| 4,385
|
py
|
Python
|
33RL/02maze/maze_env.py
|
cheerfulwang/python-tutorial
|
d0f7348e1da4ff954e3add66e1aae55d599283ee
|
[
"Apache-2.0"
] | 2
|
2021-01-04T10:44:44.000Z
|
2022-02-13T07:53:41.000Z
|
33RL/02maze/maze_env.py
|
zm79287/python-tutorial
|
d0f7348e1da4ff954e3add66e1aae55d599283ee
|
[
"Apache-2.0"
] | null | null | null |
33RL/02maze/maze_env.py
|
zm79287/python-tutorial
|
d0f7348e1da4ff954e3add66e1aae55d599283ee
|
[
"Apache-2.0"
] | 2
|
2020-11-23T08:58:51.000Z
|
2022-02-13T07:53:42.000Z
|
# -*- coding: utf-8 -*-
"""
@author:XuMing(xuming624@qq.com)
@description:
"""
"""
Reinforcement learning maze example.
Red rectangle: explorer.
Black rectangles: hells [reward = -1].
Yellow bin circle: paradise [reward = +1].
All other states: ground [reward = 0].
This script is the environment part of this example. The RL is in RL_brain.py.
View more on my tutorial page: https://morvanzhou.github.io/tutorials/
"""
import numpy as np
import time
import sys
if sys.version_info.major == 2:
import Tkinter as tk
else:
import tkinter as tk
UNIT = 40 # pixels
MAZE_H = 4 # grid height
MAZE_W = 4 # grid width
class Maze(tk.Tk, object):
def __init__(self):
super(Maze, self).__init__()
self.action_space = ['u', 'd', 'l', 'r']
self.n_actions = len(self.action_space)
self.title('maze')
self.geometry('{0}x{1}'.format(MAZE_H * UNIT, MAZE_H * UNIT))
self._build_maze()
def _build_maze(self):
self.canvas = tk.Canvas(self, bg='white',
height=MAZE_H * UNIT,
width=MAZE_W * UNIT)
# create grids
for c in range(0, MAZE_W * UNIT, UNIT):
x0, y0, x1, y1 = c, 0, c, MAZE_H * UNIT
self.canvas.create_line(x0, y0, x1, y1)
for r in range(0, MAZE_H * UNIT, UNIT):
x0, y0, x1, y1 = 0, r, MAZE_W * UNIT, r
self.canvas.create_line(x0, y0, x1, y1)
# create origin
origin = np.array([20, 20])
# hell
hell1_center = origin + np.array([UNIT * 2, UNIT])
self.hell1 = self.canvas.create_rectangle(
hell1_center[0] - 15, hell1_center[1] - 15,
hell1_center[0] + 15, hell1_center[1] + 15,
fill='black')
# hell
hell2_center = origin + np.array([UNIT, UNIT * 2])
self.hell2 = self.canvas.create_rectangle(
hell2_center[0] - 15, hell2_center[1] - 15,
hell2_center[0] + 15, hell2_center[1] + 15,
fill='black')
# create oval
oval_center = origin + UNIT * 2
self.oval = self.canvas.create_oval(
oval_center[0] - 15, oval_center[1] - 15,
oval_center[0] + 15, oval_center[1] + 15,
fill='yellow')
# create red rect
self.rect = self.canvas.create_rectangle(
origin[0] - 15, origin[1] - 15,
origin[0] + 15, origin[1] + 15,
fill='red')
# pack all
self.canvas.pack()
def reset(self):
self.update()
time.sleep(0.5)
self.canvas.delete(self.rect)
origin = np.array([20, 20])
self.rect = self.canvas.create_rectangle(
origin[0] - 15, origin[1] - 15,
origin[0] + 15, origin[1] + 15,
fill='red')
# return observation
return self.canvas.coords(self.rect)
def step(self, action):
s = self.canvas.coords(self.rect)
base_action = np.array([0, 0])
if action == 0: # up
if s[1] > UNIT:
base_action[1] -= UNIT
elif action == 1: # down
if s[1] < (MAZE_H - 1) * UNIT:
base_action[1] += UNIT
elif action == 2: # right
if s[0] < (MAZE_W - 1) * UNIT:
base_action[0] += UNIT
elif action == 3: # left
if s[0] > UNIT:
base_action[0] -= UNIT
self.canvas.move(self.rect, base_action[0], base_action[1]) # move agent
s_ = self.canvas.coords(self.rect) # next state
# reward function
if s_ == self.canvas.coords(self.oval):
reward = 1
done = True
s_ = 'terminal'
elif s_ in [self.canvas.coords(self.hell1), self.canvas.coords(self.hell2)]:
reward = -1
done = True
s_ = 'terminal'
else:
reward = 0
done = False
return s_, reward, done
def render(self):
time.sleep(0.1)
self.update()
def update():
for t in range(10):
s = env.reset()
while True:
env.render()
            # A constant "down" action can never reach a terminal cell in this
            # layout (column 0 contains neither hell nor paradise), so the loop
            # would never end; sample random actions for the demo instead.
            a = np.random.randint(0, env.n_actions)
s, r, done = env.step(a)
if done:
break
if __name__ == '__main__':
env = Maze()
env.after(100, update)
env.mainloop()
| 29.039735
| 84
| 0.51927
|
import numpy as np
import time
import sys
if sys.version_info.major == 2:
import Tkinter as tk
else:
import tkinter as tk
UNIT = 40
MAZE_H = 4
MAZE_W = 4
class Maze(tk.Tk, object):
def __init__(self):
super(Maze, self).__init__()
self.action_space = ['u', 'd', 'l', 'r']
self.n_actions = len(self.action_space)
self.title('maze')
self.geometry('{0}x{1}'.format(MAZE_H * UNIT, MAZE_H * UNIT))
self._build_maze()
def _build_maze(self):
self.canvas = tk.Canvas(self, bg='white',
height=MAZE_H * UNIT,
width=MAZE_W * UNIT)
for c in range(0, MAZE_W * UNIT, UNIT):
x0, y0, x1, y1 = c, 0, c, MAZE_H * UNIT
self.canvas.create_line(x0, y0, x1, y1)
for r in range(0, MAZE_H * UNIT, UNIT):
x0, y0, x1, y1 = 0, r, MAZE_W * UNIT, r
self.canvas.create_line(x0, y0, x1, y1)
origin = np.array([20, 20])
hell1_center = origin + np.array([UNIT * 2, UNIT])
self.hell1 = self.canvas.create_rectangle(
hell1_center[0] - 15, hell1_center[1] - 15,
hell1_center[0] + 15, hell1_center[1] + 15,
fill='black')
hell2_center = origin + np.array([UNIT, UNIT * 2])
self.hell2 = self.canvas.create_rectangle(
hell2_center[0] - 15, hell2_center[1] - 15,
hell2_center[0] + 15, hell2_center[1] + 15,
fill='black')
oval_center = origin + UNIT * 2
self.oval = self.canvas.create_oval(
oval_center[0] - 15, oval_center[1] - 15,
oval_center[0] + 15, oval_center[1] + 15,
fill='yellow')
self.rect = self.canvas.create_rectangle(
origin[0] - 15, origin[1] - 15,
origin[0] + 15, origin[1] + 15,
fill='red')
self.canvas.pack()
def reset(self):
self.update()
time.sleep(0.5)
self.canvas.delete(self.rect)
origin = np.array([20, 20])
self.rect = self.canvas.create_rectangle(
origin[0] - 15, origin[1] - 15,
origin[0] + 15, origin[1] + 15,
fill='red')
return self.canvas.coords(self.rect)
def step(self, action):
s = self.canvas.coords(self.rect)
base_action = np.array([0, 0])
if action == 0:
if s[1] > UNIT:
base_action[1] -= UNIT
elif action == 1:
if s[1] < (MAZE_H - 1) * UNIT:
base_action[1] += UNIT
elif action == 2:
if s[0] < (MAZE_W - 1) * UNIT:
base_action[0] += UNIT
elif action == 3:
if s[0] > UNIT:
base_action[0] -= UNIT
self.canvas.move(self.rect, base_action[0], base_action[1])
s_ = self.canvas.coords(self.rect)
if s_ == self.canvas.coords(self.oval):
reward = 1
done = True
s_ = 'terminal'
elif s_ in [self.canvas.coords(self.hell1), self.canvas.coords(self.hell2)]:
reward = -1
done = True
s_ = 'terminal'
else:
reward = 0
done = False
return s_, reward, done
def render(self):
time.sleep(0.1)
self.update()
def update():
for t in range(10):
s = env.reset()
while True:
env.render()
            a = np.random.randint(0, env.n_actions)
s, r, done = env.step(a)
if done:
break
if __name__ == '__main__':
env = Maze()
env.after(100, update)
env.mainloop()
| true
| true
|
1c44b5b492fbfda587fd4612218f612dc17333a7
| 1,124
|
py
|
Python
|
fuzzysort.py
|
TylerZeroMaster/Fuzzysorting
|
fc894707dd3af001e809fcdad83170b1963fbab4
|
[
"MIT"
] | null | null | null |
fuzzysort.py
|
TylerZeroMaster/Fuzzysorting
|
fc894707dd3af001e809fcdad83170b1963fbab4
|
[
"MIT"
] | null | null | null |
fuzzysort.py
|
TylerZeroMaster/Fuzzysorting
|
fc894707dd3af001e809fcdad83170b1963fbab4
|
[
"MIT"
] | null | null | null |
from random import randint
from time import time
from array import array
def clock():
start = time()
while 1:
yield time() - start
def bubble_sort(a):
n = len(a)
while n != 0:
newn = 0
i = 1
while i < n:
fir = a[i - 1]
if fir > a[i]:
a[i - 1] = a[i]
a[i] = fir
newn = i
i += 1
n = newn
def fuzzy_sort(a):
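    # Bucket sort: map each value to one of L buckets in proportion to its
    # magnitude, bubble-sort each non-empty bucket, then concatenate the
    # buckets in order.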
L = len(a)
m = (L - 1) / float(max(a))
sorted = [None] * L
for x in a:
index = int(m * x)
if sorted[index]:
sorted[index].append(x)
else:
sorted[index] = array('i', [x])
for i in range(L):
s = sorted[i]
if s:
bubble_sort(s)
sorted.extend(s)
return sorted[L:]
def main():
a = [randint(0, 100000) for i in range(1000000)]
print("Sorting...")
timer = clock()
    # advance the generator once to start the clock (Py3: next(gen), not gen.next())
    next(timer)
a = fuzzy_sort(a)
print("Sorted %d items in %f seconds" % (len(a), timer.next()))
print('\t '.join(map(str, a[:1000])))
if __name__ == "__main__":
main()
| 22.039216
| 67
| 0.456406
|
from random import randint
from time import time
from array import array
def clock():
start = time()
while 1:
yield time() - start
def bubble_sort(a):
n = len(a)
while n != 0:
newn = 0
i = 1
while i < n:
fir = a[i - 1]
if fir > a[i]:
a[i - 1] = a[i]
a[i] = fir
newn = i
i += 1
n = newn
def fuzzy_sort(a):
L = len(a)
m = (L - 1) / float(max(a))
sorted = [None] * L
for x in a:
index = int(m * x)
if sorted[index]:
sorted[index].append(x)
else:
sorted[index] = array('i', [x])
for i in range(L):
s = sorted[i]
if s:
bubble_sort(s)
sorted.extend(s)
return sorted[L:]
def main():
a = [randint(0, 100000) for i in range(1000000)]
print("Sorting...")
timer = clock()
    next(timer)
a = fuzzy_sort(a)
print("Sorted %d items in %f seconds" % (len(a), timer.next()))
print('\t '.join(map(str, a[:1000])))
if __name__ == "__main__":
main()
| true
| true
|
1c44b6516f38798433717035c3debbc803b324b5
| 1,566
|
py
|
Python
|
src/rl/AtariAgent.py
|
fronovics/AI_playground
|
ac302c0694fa2182af343c257b28a033bc4cf5b9
|
[
"Apache-2.0"
] | null | null | null |
src/rl/AtariAgent.py
|
fronovics/AI_playground
|
ac302c0694fa2182af343c257b28a033bc4cf5b9
|
[
"Apache-2.0"
] | null | null | null |
src/rl/AtariAgent.py
|
fronovics/AI_playground
|
ac302c0694fa2182af343c257b28a033bc4cf5b9
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import gym
import cv2
from random import random
from src.rl.ReplayMemory import ReplayMemory
class AtariAgent(object):
def __init__(self, env: gym.Env, net, config):
self.mem = ReplayMemory(config)
self.env = env
self.net = net
self.eps = config['eps']
self.max_reward = -np.inf
self.buf_size = 4
self.state_buf = np.zeros(shape=(1, 84, 84, self.buf_size), dtype=int)
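        # rolling window of the last 4 preprocessed 84x84 frames -- the usual
        # DQN-style frame-stacked state representation for Atari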
def act(self, env: gym.Env, s) -> int:
s = self._scale(s)
self.state_buf = np.roll(self.state_buf, shift=-1, axis=3)
self.state_buf[0, :, :, -1] = s
if random() > self.eps:
a = self.net.predict(self.state_buf)
else:
a = env.action_space.sample()
return a
def learn(self, s, a, r, ns, t):
s = self._scale(s)
self.mem.add(s, a, r, t)
if self.mem.count < self.mem.batch_size:
return
s, a, r, ns, t = self.mem.get_minibatch()
self.net.train(s, self._onehot_actions(a), r, ns, t)
def sync_target(self):
self.net.sync_target()
def reset(self):
self.state_buf = np.zeros(shape=(1, 84, 84, self.buf_size), dtype=int)
def _scale(self, s):
s = cv2.cvtColor(s, cv2.COLOR_BGR2GRAY)
return cv2.resize(s, (84, 84))
def _onehot_actions(self, actions):
size = len(actions)
onehot = np.zeros((size, self.env.action_space.n))
for i in range(size):
onehot[i, actions[i]] = 1
return onehot
| 27.964286
| 78
| 0.57599
|
import numpy as np
import gym
import cv2
from random import random
from src.rl.ReplayMemory import ReplayMemory
class AtariAgent(object):
def __init__(self, env: gym.Env, net, config):
self.mem = ReplayMemory(config)
self.env = env
self.net = net
self.eps = config['eps']
self.max_reward = -np.inf
self.buf_size = 4
self.state_buf = np.zeros(shape=(1, 84, 84, self.buf_size), dtype=int)
def act(self, env: gym.Env, s) -> int:
s = self._scale(s)
self.state_buf = np.roll(self.state_buf, shift=-1, axis=3)
self.state_buf[0, :, :, -1] = s
if random() > self.eps:
a = self.net.predict(self.state_buf)
else:
a = env.action_space.sample()
return a
def learn(self, s, a, r, ns, t):
s = self._scale(s)
self.mem.add(s, a, r, t)
if self.mem.count < self.mem.batch_size:
return
s, a, r, ns, t = self.mem.get_minibatch()
self.net.train(s, self._onehot_actions(a), r, ns, t)
def sync_target(self):
self.net.sync_target()
def reset(self):
self.state_buf = np.zeros(shape=(1, 84, 84, self.buf_size), dtype=int)
def _scale(self, s):
s = cv2.cvtColor(s, cv2.COLOR_BGR2GRAY)
return cv2.resize(s, (84, 84))
def _onehot_actions(self, actions):
size = len(actions)
onehot = np.zeros((size, self.env.action_space.n))
for i in range(size):
onehot[i, actions[i]] = 1
return onehot
| true
| true
|
1c44b8d5c17ebb7a41d93d6912bd998e1d2841bf
| 4,284
|
py
|
Python
|
src/models/model_lightning.py
|
granatb/mlops_handin
|
b0992be9667bf7f1e226efd0174289327a548efb
|
[
"MIT"
] | null | null | null |
src/models/model_lightning.py
|
granatb/mlops_handin
|
b0992be9667bf7f1e226efd0174289327a548efb
|
[
"MIT"
] | null | null | null |
src/models/model_lightning.py
|
granatb/mlops_handin
|
b0992be9667bf7f1e226efd0174289327a548efb
|
[
"MIT"
] | null | null | null |
import os
import sys
from typing import Callable, List, Optional, Tuple, Union
import matplotlib.pyplot as plt # type: ignore
import pytorch_lightning as pl
import torch
import torch.nn.functional as F
from pytorch_lightning import loggers
from torch import nn
from torch.utils.data import Dataset
sys.path.insert(1, os.path.join(sys.path[0], ".."))
import wandb
from data.make_dataset import MNISTdata
import torchdrift
import copy
class MyLightningModel(pl.LightningModule):
def __init__(self, hidden_size: int, output_size: int, drop_p: float = 0.3) -> None:
"""Builds a feedforward network with arbitrary hidden layers.
Arguments
---------
hidden_size: integer, size of dense layer
output_size: number of classes
drop_p: dropout rate
"""
super().__init__()
# Input to a hidden layer
self.num_classes = output_size
self.arch = nn.Sequential(
nn.Conv2d(
in_channels=1, out_channels=16, kernel_size=3, padding=1, stride=1
),
# convolution output dim (16, 28, 28)
nn.BatchNorm2d(16),
nn.MaxPool2d(kernel_size=2, stride=2),
# pooling output dim (16, 14, 14)
nn.ReLU(inplace=True),
nn.Conv2d(in_channels=16, out_channels=8, kernel_size=5, padding=2),
nn.Dropout2d(p=drop_p),
# convolution output dim (8, 14, 14)
nn.MaxPool2d(kernel_size=2, stride=2),
            # pooling output dim (8, 7, 7)
nn.ReLU(inplace=True),
)
# fully connected output layers
# [(W−K+2P)/S]+1
self.fc1_features = 8 * 7 * 7
self.fc1 = nn.Linear(in_features=self.fc1_features, out_features=hidden_size)
self.fc2 = nn.Linear(in_features=hidden_size, out_features=self.num_classes)
def forward(self, x):
x = self.arch(x)
x = x.view(-1, self.fc1_features)
x = F.relu(self.fc1(x))
return F.log_softmax(self.fc2(x), dim=1)
def training_step(self, batch, batch_idx):
# training_step defines the train loop. It is independent of forward
images, labels = batch
x = self.arch(images)
x = x.view(-1, self.fc1_features)
x = F.relu(self.fc1(x))
x_hat = F.log_softmax(self.fc2(x), dim=1)
loss = F.nll_loss(x_hat, labels)
self.log("train_loss", loss)
self.logger.experiment.log({"logits": wandb.Histogram(x_hat.detach().numpy())})
return loss
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), lr=1e-3)
return optimizer
def validation_step(self, batch, batch_idx):
images, labels = batch
y_hat = self(images)
val_loss = F.nll_loss(y_hat, labels)
self.log("val_loss", val_loss)
return val_loss
def corruption_function(x: torch.Tensor):
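    # Simulates a distribution shift by Gaussian-blurring inputs; used below to
    # produce out-of-distribution samples for the drift detector.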
return torchdrift.data.functional.gaussian_blur(x, severity=2)
def main():
train_data = torch.load("data/processed/train.pth")
test_data = torch.load("data/processed/test.pth")
trainloader = torch.utils.data.DataLoader(train_data, batch_size=64, shuffle=True)
testloader = torch.utils.data.DataLoader(test_data, batch_size=64, shuffle=True)
model = MyLightningModel(128, 10)
wd_logger = loggers.WandbLogger(name="test")
trainer = pl.Trainer(logger=wd_logger, max_epochs=5)
trainer.fit(model, trainloader, testloader)
inputs, _ = next(iter(testloader))
inputs_ood = corruption_function(inputs)
N = 6
model.eval()
inps = torch.cat([inputs[:N], inputs_ood[:N]])
model.cpu()
# predictions = model.predict(inps).max(1).indices
feature_extractor = copy.deepcopy(model)
    # MyLightningModel exposes `fc2` as its final linear layer (there is no
    # `classifier` attribute); replace it with an identity so hidden-layer
    # activations serve as drift-detection features.
    feature_extractor.fc2 = torch.nn.Identity()
drift_detector = torchdrift.detectors.KernelMMDDriftDetector()
torchdrift.utils.fit(trainloader, feature_extractor, drift_detector)
drift_detection_model = torch.nn.Sequential(
feature_extractor,
drift_detector
)
features = feature_extractor(inputs)
score = drift_detector(features)
p_val = drift_detector.compute_p_value(features)
print(f'score: {score}, p_val: {p_val}')
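# --- Illustrative shape check (added for exposition; not part of the original file). ---
# A minimal sketch: the network maps MNIST-shaped input (N, 1, 28, 28) to
# log-probabilities over output_size classes.
def _forward_shape_check():
    model = MyLightningModel(hidden_size=128, output_size=10)
    x = torch.randn(4, 1, 28, 28)
    log_probs = model(x)
    assert log_probs.shape == (4, 10)
    # rows of exp(log_probs) sum to 1 because of the final log-softmax
    assert torch.allclose(log_probs.exp().sum(dim=1), torch.ones(4), atol=1e-5)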
if __name__ == "__main__":
main()
| 31.733333
| 88
| 0.65056
|
import os
import sys
from typing import Callable, List, Optional, Tuple, Union
import matplotlib.pyplot as plt
import pytorch_lightning as pl
import torch
import torch.nn.functional as F
from pytorch_lightning import loggers
from torch import nn
from torch.utils.data import Dataset
sys.path.insert(1, os.path.join(sys.path[0], ".."))
import wandb
from data.make_dataset import MNISTdata
import torchdrift
import copy
class MyLightningModel(pl.LightningModule):
def __init__(self, hidden_size: int, output_size: int, drop_p: float = 0.3) -> None:
super().__init__()
self.num_classes = output_size
self.arch = nn.Sequential(
nn.Conv2d(
in_channels=1, out_channels=16, kernel_size=3, padding=1, stride=1
),
nn.BatchNorm2d(16),
nn.MaxPool2d(kernel_size=2, stride=2),
nn.ReLU(inplace=True),
nn.Conv2d(in_channels=16, out_channels=8, kernel_size=5, padding=2),
nn.Dropout2d(p=drop_p),
nn.MaxPool2d(kernel_size=2, stride=2),
nn.ReLU(inplace=True),
)
self.fc1_features = 8 * 7 * 7
self.fc1 = nn.Linear(in_features=self.fc1_features, out_features=hidden_size)
self.fc2 = nn.Linear(in_features=hidden_size, out_features=self.num_classes)
def forward(self, x):
x = self.arch(x)
x = x.view(-1, self.fc1_features)
x = F.relu(self.fc1(x))
return F.log_softmax(self.fc2(x), dim=1)
def training_step(self, batch, batch_idx):
images, labels = batch
x = self.arch(images)
x = x.view(-1, self.fc1_features)
x = F.relu(self.fc1(x))
x_hat = F.log_softmax(self.fc2(x), dim=1)
loss = F.nll_loss(x_hat, labels)
self.log("train_loss", loss)
self.logger.experiment.log({"logits": wandb.Histogram(x_hat.detach().numpy())})
return loss
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), lr=1e-3)
return optimizer
def validation_step(self, batch, batch_idx):
images, labels = batch
y_hat = self(images)
val_loss = F.nll_loss(y_hat, labels)
self.log("val_loss", val_loss)
return val_loss
def corruption_function(x: torch.Tensor):
return torchdrift.data.functional.gaussian_blur(x, severity=2)
def main():
train_data = torch.load("data/processed/train.pth")
test_data = torch.load("data/processed/test.pth")
trainloader = torch.utils.data.DataLoader(train_data, batch_size=64, shuffle=True)
testloader = torch.utils.data.DataLoader(test_data, batch_size=64, shuffle=True)
model = MyLightningModel(128, 10)
wd_logger = loggers.WandbLogger(name="test")
trainer = pl.Trainer(logger=wd_logger, max_epochs=5)
trainer.fit(model, trainloader, testloader)
inputs, _ = next(iter(testloader))
inputs_ood = corruption_function(inputs)
N = 6
model.eval()
inps = torch.cat([inputs[:N], inputs_ood[:N]])
model.cpu()
feature_extractor = copy.deepcopy(model)
    feature_extractor.fc2 = torch.nn.Identity()
drift_detector = torchdrift.detectors.KernelMMDDriftDetector()
torchdrift.utils.fit(trainloader, feature_extractor, drift_detector)
drift_detection_model = torch.nn.Sequential(
feature_extractor,
drift_detector
)
features = feature_extractor(inputs)
score = drift_detector(features)
p_val = drift_detector.compute_p_value(features)
print(f'score: {score}, p_val: {p_val}')
if __name__ == "__main__":
main()
| true
| true
|
1c44b965ae0544a2d5f9e1d40e2c8c42d789fbb0
| 13,724
|
py
|
Python
|
models/resnet.py
|
eyov7/CV_LTH_Pre-training-LLNL
|
bb18ba2093328aeb4e5ab3929f2749264ef3c981
|
[
"MIT"
] | 47
|
2020-12-15T03:40:50.000Z
|
2022-03-30T03:38:29.000Z
|
models/resnet.py
|
eyov7/CV_LTH_Pre-training-LLNL
|
bb18ba2093328aeb4e5ab3929f2749264ef3c981
|
[
"MIT"
] | null | null | null |
models/resnet.py
|
eyov7/CV_LTH_Pre-training-LLNL
|
bb18ba2093328aeb4e5ab3929f2749264ef3c981
|
[
"MIT"
] | 10
|
2021-03-17T01:28:57.000Z
|
2022-02-24T20:23:57.000Z
|
import torch
import torch.nn as nn
from advertorch.utils import NormalizeByChannelMeanStd
from torchvision.models.utils import load_state_dict_from_url
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152', 'resnext50_32x4d', 'resnext101_32x8d',
'wide_resnet50_2', 'wide_resnet101_2']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
}
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=dilation, groups=groups, bias=False, dilation=dilation)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion = 1
__constants__ = ['downsample']
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(BasicBlock, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError('BasicBlock only supports groups=1 and base_width=64')
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
__constants__ = ['downsample']
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=10, zero_init_residual=False,
groups=1, width_per_group=64, replace_stride_with_dilation=None,
norm_layer=None):
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
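        # CIFAR-style stem: 3x3 stride-1 convolution and no max-pooling, unlike
        # the 7x7/stride-2 stem of the standard ImageNet ResNet.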
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.Identity()
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
dilate=replace_stride_with_dilation[2])
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer))
return nn.Sequential(*layers)
def _forward_impl(self, x):
# See note [TorchScript super()]
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.fc(x)
return x
def forward(self, x):
return self._forward_impl(x)
def _resnet(arch, block, layers, pretrained, progress, **kwargs):
model = ResNet(block, layers, **kwargs)
if pretrained:
state_dict = load_state_dict_from_url(model_urls[arch],
progress=progress)
model.load_state_dict(state_dict)
return model
def resnet18(pretrained=False, progress=True, **kwargs):
r"""ResNet-18 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress,
**kwargs)
def resnet34(pretrained=False, progress=True, **kwargs):
r"""ResNet-34 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress,
**kwargs)
def resnet50(pretrained=False, progress=True, **kwargs):
r"""ResNet-50 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress,
**kwargs)
def resnet101(pretrained=False, progress=True, **kwargs):
r"""ResNet-101 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress,
**kwargs)
def resnet152(pretrained=False, progress=True, **kwargs):
r"""ResNet-152 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress,
**kwargs)
def resnext50_32x4d(pretrained=False, progress=True, **kwargs):
r"""ResNeXt-50 32x4d model from
`"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 4
return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3],
pretrained, progress, **kwargs)
def resnext101_32x8d(pretrained=False, progress=True, **kwargs):
r"""ResNeXt-101 32x8d model from
`"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 8
return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3],
pretrained, progress, **kwargs)
def wide_resnet50_2(pretrained=False, progress=True, **kwargs):
r"""Wide ResNet-50-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
The model is the same as ResNet except for the bottleneck number of channels
    which is twice as large in every block. The number of channels in outer 1x1
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['width_per_group'] = 64 * 2
return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3],
pretrained, progress, **kwargs)
def wide_resnet101_2(pretrained=False, progress=True, **kwargs):
r"""Wide ResNet-101-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
The model is the same as ResNet except for the bottleneck number of channels
    which is twice as large in every block. The number of channels in outer 1x1
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['width_per_group'] = 64 * 2
return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3],
pretrained, progress, **kwargs)
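# --- Illustrative usage sketch (added for exposition; not part of the original file). ---
# With the CIFAR-style stem above and the default of 10 classes, 32x32 inputs
# are the natural fit for these constructors.
if __name__ == "__main__":
    model = resnet18(num_classes=10)
    x = torch.randn(2, 3, 32, 32)
    assert model(x).shape == (2, 10)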
| 38.659155
| 107
| 0.634509
|
import torch
import torch.nn as nn
from advertorch.utils import NormalizeByChannelMeanStd
from torchvision.models.utils import load_state_dict_from_url
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152', 'resnext50_32x4d', 'resnext101_32x8d',
'wide_resnet50_2', 'wide_resnet101_2']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
}
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=dilation, groups=groups, bias=False, dilation=dilation)
def conv1x1(in_planes, out_planes, stride=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion = 1
__constants__ = ['downsample']
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(BasicBlock, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError('BasicBlock only supports groups=1 and base_width=64')
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
__constants__ = ['downsample']
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.)) * groups
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=10, zero_init_residual=False,
groups=1, width_per_group=64, replace_stride_with_dilation=None,
norm_layer=None):
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.Identity()
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
dilate=replace_stride_with_dilation[2])
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer))
return nn.Sequential(*layers)
def _forward_impl(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.fc(x)
return x
def forward(self, x):
return self._forward_impl(x)
def _resnet(arch, block, layers, pretrained, progress, **kwargs):
model = ResNet(block, layers, **kwargs)
if pretrained:
state_dict = load_state_dict_from_url(model_urls[arch],
progress=progress)
model.load_state_dict(state_dict)
return model
def resnet18(pretrained=False, progress=True, **kwargs):
return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress,
**kwargs)
def resnet34(pretrained=False, progress=True, **kwargs):
return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress,
**kwargs)
def resnet50(pretrained=False, progress=True, **kwargs):
return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress,
**kwargs)
def resnet101(pretrained=False, progress=True, **kwargs):
return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress,
**kwargs)
def resnet152(pretrained=False, progress=True, **kwargs):
return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress,
**kwargs)
def resnext50_32x4d(pretrained=False, progress=True, **kwargs):
kwargs['groups'] = 32
kwargs['width_per_group'] = 4
return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3],
pretrained, progress, **kwargs)
def resnext101_32x8d(pretrained=False, progress=True, **kwargs):
kwargs['groups'] = 32
kwargs['width_per_group'] = 8
return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3],
pretrained, progress, **kwargs)
def wide_resnet50_2(pretrained=False, progress=True, **kwargs):
kwargs['width_per_group'] = 64 * 2
return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3],
pretrained, progress, **kwargs)
def wide_resnet101_2(pretrained=False, progress=True, **kwargs):
kwargs['width_per_group'] = 64 * 2
return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3],
pretrained, progress, **kwargs)
| true
| true
|
1c44ba1d050a5c452efb2e0f80a6341a0cd138e5
| 13,908
|
py
|
Python
|
open_spiel/python/rl_environment.py
|
antonevenepoel/open_spiel
|
f2f0c786410018675fc40e9a5b82c40814555fa8
|
[
"Apache-2.0"
] | null | null | null |
open_spiel/python/rl_environment.py
|
antonevenepoel/open_spiel
|
f2f0c786410018675fc40e9a5b82c40814555fa8
|
[
"Apache-2.0"
] | null | null | null |
open_spiel/python/rl_environment.py
|
antonevenepoel/open_spiel
|
f2f0c786410018675fc40e9a5b82c40814555fa8
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Reinforcement Learning (RL) Environment for Open Spiel.
This module wraps the Open Spiel Python interface, providing an RL-friendly API. It
covers both turn-based and simultaneous move games. Interactions between agents
and the underlying game occur mostly through the `reset` and `step` methods,
which return a `TimeStep` structure (see its docstrings for more info).
The following example illustrates the interaction dynamics. Consider a 2-player
Kuhn Poker (turn-based game). Agents have access to the `observations` (a dict)
field from `TimeStep`, containing the following members:
* `info_state`: list containing the game information state for each player. The
size of the list always correspond to the number of players. E.g.:
[[0, 1, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0]].
* `legal_actions`: list containing legal action ID lists (one for each player).
E.g.: [[0, 1], [0]], which corresponds to actions 0 and 1 being valid for
player 0 (the 1st player) and action 0 being valid for player 1 (2nd player).
* `current_player`: zero-based integer representing the player to make a move.
At each `step` call, the environment expects a singleton list with the action
(as it's a turn-based game), e.g.: [1]. This (zero-based) action must correspond
to the player specified at `current_player`. The game (which is at decision
node) will process the action and take as many steps necessary to cover chance
nodes, halting at a new decision or final node. Finally, a new `TimeStep` is
returned to the agent.
Simultaneous-move games follow analogous dynamics. The only difference is the
environment expects a list of actions, one per player. Note the `current_player`
field is "irrelevant" here, admitting a constant value defined in spiel.h, which
defaults to -2 (module level constant `SIMULTANEOUS_PLAYER_ID`).
See open_spiel/python/examples/rl_example.py for example usages.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from absl import logging
import enum
import numpy as np
import pyspiel
SIMULTANEOUS_PLAYER_ID = pyspiel.PlayerId.SIMULTANEOUS
class TimeStep(
collections.namedtuple(
"TimeStep", ["observations", "rewards", "discounts", "step_type"])):
"""Returned with every call to `step` and `reset`.
A `TimeStep` contains the data emitted by a game at each step of interaction.
A `TimeStep` holds an `observation` (list of dicts, one per player),
associated lists of `rewards`, `discounts` and a `step_type`.
The first `TimeStep` in a sequence will have `StepType.FIRST`. The final
`TimeStep` will have `StepType.LAST`. All other `TimeStep`s in a sequence will
have `StepType.MID`.
Attributes:
observations: a list of dicts containing observations per player.
rewards: A list of scalars (one per player), or `None` if `step_type` is
`StepType.FIRST`, i.e. at the start of a sequence.
discounts: A list of discount values in the range `[0, 1]` (one per player),
or `None` if `step_type` is `StepType.FIRST`.
step_type: A `StepType` enum value.
"""
__slots__ = ()
def first(self):
return self.step_type == StepType.FIRST
def mid(self):
return self.step_type == StepType.MID
def last(self):
return self.step_type == StepType.LAST
def is_simultaneous_move(self):
return self.observations["current_player"] == SIMULTANEOUS_PLAYER_ID
def current_player(self):
return self.observations["current_player"]
class StepType(enum.Enum):
"""Defines the status of a `TimeStep` within a sequence."""
FIRST = 0 # Denotes the first `TimeStep` in a sequence.
MID = 1 # Denotes any `TimeStep` in a sequence that is not FIRST or LAST.
LAST = 2 # Denotes the last `TimeStep` in a sequence.
def first(self):
return self is StepType.FIRST
def mid(self):
return self is StepType.MID
def last(self):
return self is StepType.LAST
# Global pyspiel members
def registered_games():
return pyspiel.registered_games()
class ChanceEventSampler(object):
"""Default sampler for external chance events."""
def __init__(self, seed=None):
self._rng = np.random.RandomState(seed)
def __call__(self, state):
"""Sample a chance event in the given state."""
actions, probs = zip(*state.chance_outcomes())
return self._rng.choice(actions, p=probs)
class ObservationType(enum.Enum):
"""Defines what kind of observation to use."""
OBSERVATION = 0 # Use observation_tensor
INFORMATION_STATE = 1 # Use information_state_tensor
class Environment(object):
"""Open Spiel reinforcement learning environment class."""
def __init__(self,
game,
discount=1.0,
chance_event_sampler=None,
observation_type=None,
**kwargs):
"""Constructor.
Args:
game: [string, pyspiel.Game] Open Spiel game name or game instance.
discount: float, discount used in non-initial steps. Defaults to 1.0.
      chance_event_sampler: optional callable (such as `ChanceEventSampler`)
        used to sample external chance events; defaults to `ChanceEventSampler()`.
observation_type: what kind of observation to use. If not specified, will
default to INFORMATION_STATE unless the game doesn't provide it.
**kwargs: dict, additional settings passed to the Open Spiel game.
"""
self._chance_event_sampler = chance_event_sampler or ChanceEventSampler()
if isinstance(game, pyspiel.Game):
logging.info("Using game instance: %s", game.get_type().short_name)
self._game = game
elif kwargs:
game_settings = {
key: pyspiel.GameParameter(val) for (key, val) in kwargs.items()
}
logging.info("Using game settings: %s", game_settings)
self._game = pyspiel.load_game(game, game_settings)
else:
logging.info("Using game string: %s", game)
self._game = pyspiel.load_game(game)
self._num_players = self._game.num_players()
self._state = None
self._should_reset = True
# Discount returned at non-initial steps.
self._discounts = [discount] * self._num_players
# Determine what observation type to use.
if observation_type is None:
if self._game.get_type().provides_information_state_tensor:
observation_type = ObservationType.INFORMATION_STATE
else:
observation_type = ObservationType.OBSERVATION
# Check the requested observation type is supported.
if observation_type == ObservationType.OBSERVATION:
if not self._game.get_type().provides_observation_tensor:
raise ValueError("observation_tensor not supported by " + game)
elif observation_type == ObservationType.INFORMATION_STATE:
if not self._game.get_type().provides_information_state_tensor:
raise ValueError("information_state_tensor not supported by " + game)
self._use_observation = (observation_type == ObservationType.OBSERVATION)
def get_time_step(self):
"""Returns a `TimeStep` without updating the environment.
Returns:
A `TimeStep` namedtuple containing:
        observation: list of dicts containing one observation per player, each
corresponding to `observation_spec()`.
reward: list of rewards at this timestep, or None if step_type is
`StepType.FIRST`.
discount: list of discounts in the range [0, 1], or None if step_type is
`StepType.FIRST`.
step_type: A `StepType` value.
"""
observations = {"info_state": [], "legal_actions": [], "current_player": []}
rewards = []
step_type = StepType.LAST if self._state.is_terminal() else StepType.MID
self._should_reset = step_type == StepType.LAST
cur_rewards = self._state.rewards()
for player_id in range(self.num_players):
rewards.append(cur_rewards[player_id])
observations["info_state"].append(
self._state.observation_tensor(player_id) if self._use_observation
else self._state.information_state_tensor(player_id))
observations["legal_actions"].append(self._state.legal_actions(player_id))
observations["current_player"] = self._state.current_player()
return TimeStep(
observations=observations,
rewards=rewards,
discounts=self._discounts,
step_type=step_type)
def step(self, actions):
"""Updates the environment according to `actions` and returns a `TimeStep`.
If the environment returned a `TimeStep` with `StepType.LAST` at the
previous step, this call to `step` will start a new sequence and `actions`
will be ignored.
This method will also start a new sequence if called after the environment
has been constructed and `reset` has not been called. Again, in this case
`actions` will be ignored.
Args:
actions: a list containing one action per player, following specifications
defined in `action_spec()`.
Returns:
A `TimeStep` namedtuple containing:
        observation: list of dicts containing one observation per player, each
corresponding to `observation_spec()`.
reward: list of rewards at this timestep, or None if step_type is
`StepType.FIRST`.
discount: list of discounts in the range [0, 1], or None if step_type is
`StepType.FIRST`.
step_type: A `StepType` value.
"""
assert len(actions) == self.num_actions_per_step, (
"Invalid number of actions! Expected {}".format(
self.num_actions_per_step))
if self._should_reset:
return self.reset()
if self.is_turn_based:
self._state.apply_action(actions[0])
else:
self._state.apply_actions(actions)
self._sample_external_events()
return self.get_time_step()
def reset(self):
"""Starts a new sequence and returns the first `TimeStep` of this sequence.
Returns:
A `TimeStep` namedtuple containing:
        observations: list of dicts containing one observation per player, each
corresponding to `observation_spec()`.
rewards: list of rewards at this timestep, or None if step_type is
`StepType.FIRST`.
discounts: list of discounts in the range [0, 1], or None if step_type
is `StepType.FIRST`.
step_type: A `StepType` value.
"""
self._should_reset = False
self._state = self._game.new_initial_state()
self._sample_external_events()
observations = {"info_state": [], "legal_actions": [], "current_player": []}
for player_id in range(self.num_players):
observations["info_state"].append(
self._state.observation_tensor(player_id) if self._use_observation
else self._state.information_state_tensor(player_id))
observations["legal_actions"].append(self._state.legal_actions(player_id))
observations["current_player"] = self._state.current_player()
return TimeStep(
observations=observations,
rewards=None,
discounts=None,
step_type=StepType.FIRST)
def _sample_external_events(self):
"""Sample chance events until we get to a decision node."""
while self._state.is_chance_node():
outcome = self._chance_event_sampler(self._state)
self._state.apply_action(outcome)
def observation_spec(self):
"""Defines the observation per player provided by the environment.
Each dict member will contain its expected structure and shape. E.g.: for
Kuhn Poker {"info_state": (6,), "legal_actions": (2,), "current_player": ()}
Returns:
A specification dict describing the observation fields and shapes.
"""
return dict(
info_state=tuple([
self._game.observation_tensor_size() if self._use_observation else
self._game.information_state_tensor_size()
]),
legal_actions=(self._game.num_distinct_actions(),),
current_player=(),
)
def action_spec(self):
"""Defines per player action specifications.
Specifications include action boundaries and their data type.
E.g.: for Kuhn Poker {"num_actions": 2, "min": 0, "max":1, "dtype": int}
Returns:
A specification dict containing per player action properties.
"""
return dict(
num_actions=self._game.num_distinct_actions(),
min=0,
max=self._game.num_distinct_actions() - 1,
dtype=int,
)
# Game properties
@property
def name(self):
return self._game.get_type().short_name
@property
def num_players(self):
return self._game.num_players()
@property
def num_actions_per_step(self):
return 1 if self.is_turn_based else self.num_players
# New RL calls for more advanced use cases (e.g. search + RL).
@property
def is_turn_based(self):
return self._game.get_type(
).dynamics == pyspiel.GameType.Dynamics.SEQUENTIAL
@property
def max_game_length(self):
return self._game.max_game_length()
@property
def is_chance_node(self):
return self._state.is_chance_node()
@property
def game(self):
return self._game
def set_state(self, new_state):
"""Updates the game state."""
assert new_state.get_game() == self.game, (
"State must have been created by the same game.")
self._state = new_state
@property
def get_state(self):
return self._state
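# --- Illustrative usage sketch (added for exposition; not part of the original file). ---
# Mirrors the turn-based dynamics described in the module docstring, assuming
# the "kuhn_poker" game is registered with pyspiel (always picking the first
# legal action).
if __name__ == "__main__":
    env = Environment("kuhn_poker")
    time_step = env.reset()
    while not time_step.last():
        player = time_step.observations["current_player"]
        action = time_step.observations["legal_actions"][player][0]
        time_step = env.step([action])
    print("returns:", time_step.rewards)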
| 36.124675
| 80
| 0.704774
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from absl import logging
import enum
import numpy as np
import pyspiel
SIMULTANEOUS_PLAYER_ID = pyspiel.PlayerId.SIMULTANEOUS
class TimeStep(
collections.namedtuple(
"TimeStep", ["observations", "rewards", "discounts", "step_type"])):
__slots__ = ()
def first(self):
return self.step_type == StepType.FIRST
def mid(self):
return self.step_type == StepType.MID
def last(self):
return self.step_type == StepType.LAST
def is_simultaneous_move(self):
return self.observations["current_player"] == SIMULTANEOUS_PLAYER_ID
def current_player(self):
return self.observations["current_player"]
class StepType(enum.Enum):
FIRST = 0
MID = 1
LAST = 2
def first(self):
return self is StepType.FIRST
def mid(self):
return self is StepType.MID
def last(self):
return self is StepType.LAST
def registered_games():
return pyspiel.registered_games()
class ChanceEventSampler(object):
def __init__(self, seed=None):
self._rng = np.random.RandomState(seed)
def __call__(self, state):
actions, probs = zip(*state.chance_outcomes())
return self._rng.choice(actions, p=probs)
class ObservationType(enum.Enum):
OBSERVATION = 0
INFORMATION_STATE = 1
class Environment(object):
def __init__(self,
game,
discount=1.0,
chance_event_sampler=None,
observation_type=None,
**kwargs):
self._chance_event_sampler = chance_event_sampler or ChanceEventSampler()
if isinstance(game, pyspiel.Game):
logging.info("Using game instance: %s", game.get_type().short_name)
self._game = game
elif kwargs:
game_settings = {
key: pyspiel.GameParameter(val) for (key, val) in kwargs.items()
}
logging.info("Using game settings: %s", game_settings)
self._game = pyspiel.load_game(game, game_settings)
else:
logging.info("Using game string: %s", game)
self._game = pyspiel.load_game(game)
self._num_players = self._game.num_players()
self._state = None
self._should_reset = True
self._discounts = [discount] * self._num_players
if observation_type is None:
if self._game.get_type().provides_information_state_tensor:
observation_type = ObservationType.INFORMATION_STATE
else:
observation_type = ObservationType.OBSERVATION
if observation_type == ObservationType.OBSERVATION:
if not self._game.get_type().provides_observation_tensor:
raise ValueError("observation_tensor not supported by " + game)
elif observation_type == ObservationType.INFORMATION_STATE:
if not self._game.get_type().provides_information_state_tensor:
raise ValueError("information_state_tensor not supported by " + game)
self._use_observation = (observation_type == ObservationType.OBSERVATION)
def get_time_step(self):
observations = {"info_state": [], "legal_actions": [], "current_player": []}
rewards = []
step_type = StepType.LAST if self._state.is_terminal() else StepType.MID
self._should_reset = step_type == StepType.LAST
cur_rewards = self._state.rewards()
for player_id in range(self.num_players):
rewards.append(cur_rewards[player_id])
observations["info_state"].append(
self._state.observation_tensor(player_id) if self._use_observation
else self._state.information_state_tensor(player_id))
observations["legal_actions"].append(self._state.legal_actions(player_id))
observations["current_player"] = self._state.current_player()
return TimeStep(
observations=observations,
rewards=rewards,
discounts=self._discounts,
step_type=step_type)
def step(self, actions):
assert len(actions) == self.num_actions_per_step, (
"Invalid number of actions! Expected {}".format(
self.num_actions_per_step))
if self._should_reset:
return self.reset()
if self.is_turn_based:
self._state.apply_action(actions[0])
else:
self._state.apply_actions(actions)
self._sample_external_events()
return self.get_time_step()
def reset(self):
self._should_reset = False
self._state = self._game.new_initial_state()
self._sample_external_events()
observations = {"info_state": [], "legal_actions": [], "current_player": []}
for player_id in range(self.num_players):
observations["info_state"].append(
self._state.observation_tensor(player_id) if self._use_observation
else self._state.information_state_tensor(player_id))
observations["legal_actions"].append(self._state.legal_actions(player_id))
observations["current_player"] = self._state.current_player()
return TimeStep(
observations=observations,
rewards=None,
discounts=None,
step_type=StepType.FIRST)
def _sample_external_events(self):
while self._state.is_chance_node():
outcome = self._chance_event_sampler(self._state)
self._state.apply_action(outcome)
def observation_spec(self):
return dict(
info_state=tuple([
self._game.observation_tensor_size() if self._use_observation else
self._game.information_state_tensor_size()
]),
legal_actions=(self._game.num_distinct_actions(),),
current_player=(),
)
def action_spec(self):
return dict(
num_actions=self._game.num_distinct_actions(),
min=0,
max=self._game.num_distinct_actions() - 1,
dtype=int,
)
@property
def name(self):
return self._game.get_type().short_name
@property
def num_players(self):
return self._game.num_players()
@property
def num_actions_per_step(self):
return 1 if self.is_turn_based else self.num_players
@property
def is_turn_based(self):
return self._game.get_type(
).dynamics == pyspiel.GameType.Dynamics.SEQUENTIAL
@property
def max_game_length(self):
return self._game.max_game_length()
@property
def is_chance_node(self):
return self._state.is_chance_node()
@property
def game(self):
return self._game
def set_state(self, new_state):
assert new_state.get_game() == self.game, (
"State must have been created by the same game.")
self._state = new_state
@property
def get_state(self):
return self._state
| true
| true
|
1c44bc896af0886897c2340327fd5e82882c9c91
| 467
|
py
|
Python
|
malcolm/modules/builtin/parts/float64part.py
|
MattTaylorDLS/pymalcolm
|
995a8e4729bd745f8f617969111cc5a34ce1ac14
|
[
"Apache-2.0"
] | null | null | null |
malcolm/modules/builtin/parts/float64part.py
|
MattTaylorDLS/pymalcolm
|
995a8e4729bd745f8f617969111cc5a34ce1ac14
|
[
"Apache-2.0"
] | null | null | null |
malcolm/modules/builtin/parts/float64part.py
|
MattTaylorDLS/pymalcolm
|
995a8e4729bd745f8f617969111cc5a34ce1ac14
|
[
"Apache-2.0"
] | null | null | null |
from malcolm.core import method_also_takes
from malcolm.modules.builtin.vmetas import NumberMeta
from .attributepart import AttributePart
@method_also_takes(
"initialValue", NumberMeta("float64", "Initial value of attribute"), 0.0,
)
class Float64Part(AttributePart):
def get_initial_value(self):
return self.params.initialValue
def create_meta(self, description, tags):
return NumberMeta("float64", description=description, tags=tags)
| 31.133333
| 77
| 0.768737
|
from malcolm.core import method_also_takes
from malcolm.modules.builtin.vmetas import NumberMeta
from .attributepart import AttributePart
@method_also_takes(
"initialValue", NumberMeta("float64", "Initial value of attribute"), 0.0,
)
class Float64Part(AttributePart):
def get_initial_value(self):
return self.params.initialValue
def create_meta(self, description, tags):
return NumberMeta("float64", description=description, tags=tags)
| true
| true
|
1c44bde50b8bbe235553d6be40f12534a7ddeb26
| 15,857
|
py
|
Python
|
test_reporting/junit_xml_parser.py
|
vkuma82/sonic-mgmt
|
131764317fe590141b6fa38fc60f243b43bf616c
|
[
"Apache-2.0"
] | 1
|
2021-09-15T17:06:16.000Z
|
2021-09-15T17:06:16.000Z
|
test_reporting/junit_xml_parser.py
|
vkuma82/sonic-mgmt
|
131764317fe590141b6fa38fc60f243b43bf616c
|
[
"Apache-2.0"
] | null | null | null |
test_reporting/junit_xml_parser.py
|
vkuma82/sonic-mgmt
|
131764317fe590141b6fa38fc60f243b43bf616c
|
[
"Apache-2.0"
] | null | null | null |
"""Utilities for validating and parsing JUnit XML files generated by Pytest and Spytest.
This library/script should work for any test result XML file generated by Pytest or Spytest.
CLI Usage:
% python3 junit_xml_parser.py -h
usage: junit_xml_parser.py [-h] [--validate-only] [--compact] [--output-file OUTPUT_FILE] file
Validate and convert SONiC JUnit XML files into JSON.
positional arguments:
file A file to validate/parse.
optional arguments:
-h, --help show this help message and exit
--validate-only Validate without parsing the file.
--compact, -c Output the JSON in a compact form.
--output-file OUTPUT_FILE, -o OUTPUT_FILE
A file to store the JSON output in.
Examples:
python3 junit_xml_parser.py tests/files/sample_tr.xml
"""
import argparse
import glob
import json
import sys
import os
from collections import defaultdict
from datetime import datetime
import defusedxml.ElementTree as ET
TEST_REPORT_CLIENT_VERSION = (1, 1, 0)
MAXIMUM_XML_SIZE = 20e7  # 200MB (20e7 bytes)
MAXIMUM_SUMMARY_SIZE = 1024  # 1KB
# Fields found in the testsuite/root section of the JUnit XML file.
TESTSUITE_TAG = "testsuite"
REQUIRED_TESTSUITE_ATTRIBUTES = {
("time", float),
("tests", int),
("skipped", int),
("failures", int),
("errors", int)
}
# Fields found in the metadata/properties section of the JUnit XML file.
# FIXME: These are specific to pytest, needs to be extended to support spytest.
METADATA_TAG = "properties"
METADATA_PROPERTY_TAG = "property"
REQUIRED_METADATA_PROPERTIES = [
"topology",
"testbed",
"timestamp",
"host",
"asic",
"platform",
"hwsku",
"os_version",
]
# Fields found in the testcase sections of the JUnit XML file.
TESTCASE_TAG = "testcase"
REQUIRED_TESTCASE_ATTRIBUTES = [
"classname",
"file",
"line",
"name",
"time",
]
class JUnitXMLValidationError(Exception):
"""Expected errors that are thrown while validating the contents of the JUnit XML file."""
def validate_junit_xml_stream(stream):
"""Validate that a stream containing an XML document is valid JUnit XML.
Args:
stream: A string containing an XML document.
Returns:
The root of the validated XML document.
Raises:
JUnitXMLValidationError: if any of the following are true:
        - The provided stream exceeds MAXIMUM_XML_SIZE
- The provided stream is unparseable
- The provided stream is missing required fields
"""
if sys.getsizeof(stream) > MAXIMUM_XML_SIZE:
raise JUnitXMLValidationError("provided stream is too large")
try:
root = ET.fromstring(stream, forbid_dtd=True)
except Exception as e:
raise JUnitXMLValidationError(f"could not parse provided XML stream: {e}") from e
return _validate_junit_xml(root)
def validate_junit_xml_file(document_name):
"""Validate that an XML file is valid JUnit XML.
Args:
document_name: The name of the document.
Returns:
The root of the validated XML document.
Raises:
JUnitXMLValidationError: if any of the following are true:
- The provided file doesn't exist
        - The provided file exceeds MAXIMUM_XML_SIZE
- The provided file is unparseable
- The provided file is missing required fields
"""
if not os.path.exists(document_name) or not os.path.isfile(document_name):
raise JUnitXMLValidationError("file not found")
if os.path.getsize(document_name) > MAXIMUM_XML_SIZE:
raise JUnitXMLValidationError("provided file is too large")
try:
tree = ET.parse(document_name, forbid_dtd=True)
except Exception as e:
raise JUnitXMLValidationError(f"could not parse {document_name}: {e}") from e
return _validate_junit_xml(tree.getroot())
def validate_junit_xml_archive(directory_name):
"""Validate that an XML archive contains valid JUnit XML.
Args:
directory_name: The name of the directory containing XML documents.
Returns:
A list of roots of validated XML documents.
Raises:
JUnitXMLValidationError: if any of the following are true:
- The provided directory doesn't exist
        - The provided files exceed MAXIMUM_XML_SIZE in total
- Any of the provided files are unparseable
- Any of the provided files are missing required fields
"""
if not os.path.exists(directory_name) or not os.path.isdir(directory_name):
raise JUnitXMLValidationError("file not found")
roots = []
metadata_source = None
metadata = {}
doc_list = glob.glob(os.path.join(directory_name, "tr.xml"))
doc_list += glob.glob(os.path.join(directory_name, "*test*.xml"))
doc_list += glob.glob(os.path.join(directory_name, "**", "*test*.xml"), recursive=True)
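    # The glob patterns above overlap, so de-duplicate the matched paths.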
doc_list = set(doc_list)
total_size = 0
for document in doc_list:
total_size += os.path.getsize(document)
if total_size > MAXIMUM_XML_SIZE:
raise JUnitXMLValidationError("provided directory is too large")
for document in doc_list:
try:
root = validate_junit_xml_file(document)
root_metadata = {k: v for k, v in _parse_test_metadata(root).items()
if k in REQUIRED_METADATA_PROPERTIES and k != "timestamp"}
if root_metadata:
# All metadata from a single test run should be identical, so we
# just use the first one we see to validate the rest.
if not metadata_source:
metadata_source = document
metadata = root_metadata
if root_metadata != metadata:
raise JUnitXMLValidationError(f"{document} metadata differs from {metadata_source}\n"
f"{document}: {root_metadata}\n"
f"{metadata_source}: {metadata}")
roots.append(root)
except Exception as e:
raise JUnitXMLValidationError(f"could not parse {document}: {e}") from e
if not roots:
raise JUnitXMLValidationError(f"provided directory {directory_name} does not contain any XML files")
return roots
def _validate_junit_xml(root):
_validate_test_summary(root)
_validate_test_metadata(root)
_validate_test_cases(root)
return root
def _validate_test_summary(root):
if root.tag != TESTSUITE_TAG:
raise JUnitXMLValidationError(f"{TESTSUITE_TAG} tag not found on root element")
for xml_field, expected_type in REQUIRED_TESTSUITE_ATTRIBUTES:
if xml_field not in root.keys():
raise JUnitXMLValidationError(f"{xml_field} not found in <{TESTSUITE_TAG}> element")
try:
expected_type(root.get(xml_field))
except Exception as e:
raise JUnitXMLValidationError(
f"invalid type for {xml_field} in {TESTSUITE_TAG}> element: "
f"expected a number, received "
f'"{root.get(xml_field)}"'
) from e
def _validate_test_metadata(root):
properties_element = root.find("properties")
    # Explicit check: ElementTree elements are falsy when they have no children.
    if properties_element is None or len(properties_element) == 0:
return
seen_properties = []
for prop in properties_element.iterfind(METADATA_PROPERTY_TAG):
property_name = prop.get("name", None)
if not property_name:
continue
if property_name not in REQUIRED_METADATA_PROPERTIES:
continue
if property_name in seen_properties:
raise JUnitXMLValidationError(
f"duplicate metadata element: {property_name} seen more than once"
)
property_value = prop.get("value", None)
if property_value is None: # Some fields may be empty
raise JUnitXMLValidationError(
f'invalid metadata element: no "value" field provided for {property_name}'
)
seen_properties.append(property_name)
if set(seen_properties) < set(REQUIRED_METADATA_PROPERTIES):
raise JUnitXMLValidationError("missing metadata element(s)")
def _validate_test_cases(root):
def _validate_test_case(test_case):
for attribute in REQUIRED_TESTCASE_ATTRIBUTES:
if attribute not in test_case.keys():
raise JUnitXMLValidationError(
f'"{attribute}" not found in test case '
f"\"{test_case.get('name', 'Name Not Found')}\""
)
cases = root.findall(TESTCASE_TAG)
for test_case in cases:
_validate_test_case(test_case)
def parse_test_result(roots):
"""Parse a given XML document into JSON.
Args:
root: The root of the XML document to parse.
Returns:
A dict containing the parsed test result.
"""
test_result_json = defaultdict(dict)
for root in roots:
test_result_json["test_metadata"] = _update_test_metadata(test_result_json["test_metadata"],
_parse_test_metadata(root))
test_cases = _parse_test_cases(root)
test_result_json["test_cases"] = _update_test_cases(test_result_json["test_cases"], test_cases)
test_result_json["test_summary"] = _update_test_summary(test_result_json["test_summary"],
_extract_test_summary(test_cases))
return test_result_json
def _parse_test_summary(root):
test_result_summary = {}
for attribute, _ in REQUIRED_TESTSUITE_ATTRIBUTES:
test_result_summary[attribute] = root.get(attribute)
return test_result_summary
def _extract_test_summary(test_cases):
test_result_summary = defaultdict(int)
for _, cases in test_cases.items():
for case in cases:
test_result_summary["tests"] += 1
test_result_summary["failures"] += case["result"] == "failure" or case["result"] == "error"
test_result_summary["skipped"] += case["result"] == "skipped"
test_result_summary["errors"] += case["error"]
test_result_summary["time"] += float(case["time"])
test_result_summary = {k: str(v) for k, v in test_result_summary.items()}
return test_result_summary
def _parse_test_metadata(root):
properties_element = root.find(METADATA_TAG)
    if properties_element is None or len(properties_element) == 0:
return {}
test_result_metadata = {}
for prop in properties_element.iterfind("property"):
if prop.get("value"):
test_result_metadata[prop.get("name")] = prop.get("value")
return test_result_metadata
def _parse_test_cases(root):
test_case_results = defaultdict(list)
def _parse_test_case(test_case):
result = {}
# FIXME: This is specific to pytest, needs to be extended to support spytest.
test_class_tokens = test_case.get("classname").split(".")
feature = test_class_tokens[0]
for attribute in REQUIRED_TESTCASE_ATTRIBUTES:
result[attribute] = test_case.get(attribute)
# NOTE: "if failure" and "if error" does not work with the ETree library.
failure = test_case.find("failure")
error = test_case.find("error")
skipped = test_case.find("skipped")
# NOTE: "error" is unique in that it can occur alongside a succesful, failed, or skipped test result.
# Because of this, we track errors separately so that the error can be correlated with the stage it
# occurred.
#
# If there is *only* an error tag we note that as well, as this indicates that the framework
# errored out during setup or teardown.
if failure is not None:
result["result"] = "failure"
summary = failure.get("message", "")
elif skipped is not None:
result["result"] = "skipped"
summary = skipped.get("message", "")
elif error is not None:
result["result"] = "error"
summary = error.get("message", "")
else:
result["result"] = "success"
summary = ""
result["summary"] = summary[:min(len(summary), MAXIMUM_SUMMARY_SIZE)]
result["error"] = error is not None
return feature, result
for test_case in root.findall("testcase"):
feature, result = _parse_test_case(test_case)
test_case_results[feature].append(result)
return dict(test_case_results)
def _update_test_summary(current, update):
if not current:
return update.copy()
new_summary = {}
for attribute, attr_type in REQUIRED_TESTSUITE_ATTRIBUTES:
new_summary[attribute] = str(round(attr_type(current.get(attribute, 0)) + attr_type(update.get(attribute, 0)), 3))
return new_summary
def _update_test_metadata(current, update):
# Case 1: On the very first update, current will be empty since we haven't seen any results yet.
if not current:
return update.copy()
# Case 2: For test cases that are 100% skipped there will be no metadata added, so we need to
# default to current.
if not update:
return current.copy()
# Case 3: For all other cases, take the earliest timestamp and default everything else to update.
new_metadata = {}
for prop in REQUIRED_METADATA_PROPERTIES:
if prop == "timestamp":
new_metadata[prop] = str(min(datetime.strptime(current[prop], "%Y-%m-%d %H:%M:%S.%f"),
datetime.strptime(update[prop], "%Y-%m-%d %H:%M:%S.%f")))
else:
new_metadata[prop] = update[prop]
return new_metadata
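# Hedged illustration (editor's addition): merging keeps the earliest timestamp
# and takes every other required property from the update. Values are made up.
def _example_metadata_merge():
    base = {prop: "x" for prop in REQUIRED_METADATA_PROPERTIES}
    current = dict(base, timestamp="2021-01-01 10:00:00.500000")
    update = dict(base, timestamp="2021-01-01 09:00:00.500000")
    merged = _update_test_metadata(current, update)
    assert merged["timestamp"] == "2021-01-01 09:00:00.500000"
    return merged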
def _update_test_cases(current, update):
if not current:
return update.copy()
new_cases = current.copy()
for group, cases in update.items():
updated_cases = cases.copy()
if group in new_cases:
updated_cases += new_cases[group]
new_cases[group] = updated_cases
return new_cases
def _run_script():
parser = argparse.ArgumentParser(
description="Validate and convert SONiC JUnit XML files into JSON.",
formatter_class=argparse.RawTextHelpFormatter,
epilog="""
Examples:
python3 junit_xml_parser.py tests/files/sample_tr.xml
""",
)
parser.add_argument("file_name", metavar="file", type=str, help="A file to validate/parse.")
parser.add_argument(
"--validate-only", action="store_true", help="Validate without parsing the file.",
)
parser.add_argument(
"--compact", "-c", action="store_true", help="Output the JSON in a compact form.",
)
parser.add_argument(
"--output-file", "-o", type=str, help="A file to store the JSON output in.",
)
parser.add_argument(
"--directory", "-d", action="store_true", help="Provide a directory instead of a single file."
)
args = parser.parse_args()
try:
if args.directory:
roots = validate_junit_xml_archive(args.file_name)
else:
roots = [validate_junit_xml_file(args.file_name)]
except JUnitXMLValidationError as e:
print(f"XML validation failed: {e}")
sys.exit(1)
except Exception as e:
print(f"Unexpected error occured during validation: {e}")
sys.exit(2)
if args.validate_only:
print(f"{args.file_name} validated succesfully!")
sys.exit(0)
test_result_json = parse_test_result(roots)
if args.compact:
output = json.dumps(test_result_json, separators=(",", ":"), sort_keys=True)
else:
output = json.dumps(test_result_json, indent=4, sort_keys=True)
if args.output_file:
with open(args.output_file, "w+") as output_file:
output_file.write(output)
else:
print(output)
if __name__ == "__main__":
_run_script()
| 32.627572
| 122
| 0.648483
|
import argparse
import glob
import json
import sys
import os
from collections import defaultdict
from datetime import datetime
import defusedxml.ElementTree as ET
TEST_REPORT_CLIENT_VERSION = (1, 1, 0)
MAXIMUM_XML_SIZE = 20e7
MAXIMUM_SUMMARY_SIZE = 1024
TESTSUITE_TAG = "testsuite"
REQUIRED_TESTSUITE_ATTRIBUTES = {
("time", float),
("tests", int),
("skipped", int),
("failures", int),
("errors", int)
}
METADATA_TAG = "properties"
METADATA_PROPERTY_TAG = "property"
REQUIRED_METADATA_PROPERTIES = [
"topology",
"testbed",
"timestamp",
"host",
"asic",
"platform",
"hwsku",
"os_version",
]
TESTCASE_TAG = "testcase"
REQUIRED_TESTCASE_ATTRIBUTES = [
"classname",
"file",
"line",
"name",
"time",
]
class JUnitXMLValidationError(Exception):
def validate_junit_xml_stream(stream):
if sys.getsizeof(stream) > MAXIMUM_XML_SIZE:
raise JUnitXMLValidationError("provided stream is too large")
try:
root = ET.fromstring(stream, forbid_dtd=True)
except Exception as e:
raise JUnitXMLValidationError(f"could not parse provided XML stream: {e}") from e
return _validate_junit_xml(root)
def validate_junit_xml_file(document_name):
if not os.path.exists(document_name) or not os.path.isfile(document_name):
raise JUnitXMLValidationError("file not found")
if os.path.getsize(document_name) > MAXIMUM_XML_SIZE:
raise JUnitXMLValidationError("provided file is too large")
try:
tree = ET.parse(document_name, forbid_dtd=True)
except Exception as e:
raise JUnitXMLValidationError(f"could not parse {document_name}: {e}") from e
return _validate_junit_xml(tree.getroot())
def validate_junit_xml_archive(directory_name):
if not os.path.exists(directory_name) or not os.path.isdir(directory_name):
raise JUnitXMLValidationError("file not found")
roots = []
metadata_source = None
metadata = {}
doc_list = glob.glob(os.path.join(directory_name, "tr.xml"))
doc_list += glob.glob(os.path.join(directory_name, "*test*.xml"))
doc_list += glob.glob(os.path.join(directory_name, "**", "*test*.xml"), recursive=True)
doc_list = set(doc_list)
total_size = 0
for document in doc_list:
total_size += os.path.getsize(document)
if total_size > MAXIMUM_XML_SIZE:
raise JUnitXMLValidationError("provided directory is too large")
for document in doc_list:
try:
root = validate_junit_xml_file(document)
root_metadata = {k: v for k, v in _parse_test_metadata(root).items()
if k in REQUIRED_METADATA_PROPERTIES and k != "timestamp"}
if root_metadata:
if not metadata_source:
metadata_source = document
metadata = root_metadata
if root_metadata != metadata:
raise JUnitXMLValidationError(f"{document} metadata differs from {metadata_source}\n"
f"{document}: {root_metadata}\n"
f"{metadata_source}: {metadata}")
roots.append(root)
except Exception as e:
raise JUnitXMLValidationError(f"could not parse {document}: {e}") from e
if not roots:
raise JUnitXMLValidationError(f"provided directory {directory_name} does not contain any XML files")
return roots
def _validate_junit_xml(root):
_validate_test_summary(root)
_validate_test_metadata(root)
_validate_test_cases(root)
return root
def _validate_test_summary(root):
if root.tag != TESTSUITE_TAG:
raise JUnitXMLValidationError(f"{TESTSUITE_TAG} tag not found on root element")
for xml_field, expected_type in REQUIRED_TESTSUITE_ATTRIBUTES:
if xml_field not in root.keys():
raise JUnitXMLValidationError(f"{xml_field} not found in <{TESTSUITE_TAG}> element")
try:
expected_type(root.get(xml_field))
except Exception as e:
raise JUnitXMLValidationError(
f"invalid type for {xml_field} in {TESTSUITE_TAG}> element: "
f"expected a number, received "
f'"{root.get(xml_field)}"'
) from e
def _validate_test_metadata(root):
properties_element = root.find("properties")
if not properties_element:
return
seen_properties = []
for prop in properties_element.iterfind(METADATA_PROPERTY_TAG):
property_name = prop.get("name", None)
if not property_name:
continue
if property_name not in REQUIRED_METADATA_PROPERTIES:
continue
if property_name in seen_properties:
raise JUnitXMLValidationError(
f"duplicate metadata element: {property_name} seen more than once"
)
property_value = prop.get("value", None)
if property_value is None:
raise JUnitXMLValidationError(
f'invalid metadata element: no "value" field provided for {property_name}'
)
seen_properties.append(property_name)
if set(seen_properties) < set(REQUIRED_METADATA_PROPERTIES):
raise JUnitXMLValidationError("missing metadata element(s)")
def _validate_test_cases(root):
def _validate_test_case(test_case):
for attribute in REQUIRED_TESTCASE_ATTRIBUTES:
if attribute not in test_case.keys():
raise JUnitXMLValidationError(
f'"{attribute}" not found in test case '
f"\"{test_case.get('name', 'Name Not Found')}\""
)
cases = root.findall(TESTCASE_TAG)
for test_case in cases:
_validate_test_case(test_case)
def parse_test_result(roots):
test_result_json = defaultdict(dict)
for root in roots:
test_result_json["test_metadata"] = _update_test_metadata(test_result_json["test_metadata"],
_parse_test_metadata(root))
test_cases = _parse_test_cases(root)
test_result_json["test_cases"] = _update_test_cases(test_result_json["test_cases"], test_cases)
test_result_json["test_summary"] = _update_test_summary(test_result_json["test_summary"],
_extract_test_summary(test_cases))
return test_result_json
def _parse_test_summary(root):
test_result_summary = {}
for attribute, _ in REQUIRED_TESTSUITE_ATTRIBUTES:
test_result_summary[attribute] = root.get(attribute)
return test_result_summary
def _extract_test_summary(test_cases):
test_result_summary = defaultdict(int)
for _, cases in test_cases.items():
for case in cases:
test_result_summary["tests"] += 1
test_result_summary["failures"] += case["result"] == "failure" or case["result"] == "error"
test_result_summary["skipped"] += case["result"] == "skipped"
test_result_summary["errors"] += case["error"]
test_result_summary["time"] += float(case["time"])
test_result_summary = {k: str(v) for k, v in test_result_summary.items()}
return test_result_summary
def _parse_test_metadata(root):
properties_element = root.find(METADATA_TAG)
if not properties_element:
return {}
test_result_metadata = {}
for prop in properties_element.iterfind("property"):
if prop.get("value"):
test_result_metadata[prop.get("name")] = prop.get("value")
return test_result_metadata
def _parse_test_cases(root):
test_case_results = defaultdict(list)
def _parse_test_case(test_case):
result = {}
test_class_tokens = test_case.get("classname").split(".")
feature = test_class_tokens[0]
for attribute in REQUIRED_TESTCASE_ATTRIBUTES:
result[attribute] = test_case.get(attribute)
failure = test_case.find("failure")
error = test_case.find("error")
skipped = test_case.find("skipped")
if failure is not None:
result["result"] = "failure"
summary = failure.get("message", "")
elif skipped is not None:
result["result"] = "skipped"
summary = skipped.get("message", "")
elif error is not None:
result["result"] = "error"
summary = error.get("message", "")
else:
result["result"] = "success"
summary = ""
result["summary"] = summary[:min(len(summary), MAXIMUM_SUMMARY_SIZE)]
result["error"] = error is not None
return feature, result
for test_case in root.findall("testcase"):
feature, result = _parse_test_case(test_case)
test_case_results[feature].append(result)
return dict(test_case_results)
def _update_test_summary(current, update):
if not current:
return update.copy()
new_summary = {}
for attribute, attr_type in REQUIRED_TESTSUITE_ATTRIBUTES:
new_summary[attribute] = str(round(attr_type(current.get(attribute, 0)) + attr_type(update.get(attribute, 0)), 3))
return new_summary
def _update_test_metadata(current, update):
if not current:
return update.copy()
# Case 2: For test cases that are 100% skipped there will be no metadata added, so we need to
# default to current.
if not update:
return current.copy()
# Case 3: For all other cases, take the earliest timestamp and default everything else to update.
new_metadata = {}
for prop in REQUIRED_METADATA_PROPERTIES:
if prop == "timestamp":
new_metadata[prop] = str(min(datetime.strptime(current[prop], "%Y-%m-%d %H:%M:%S.%f"),
datetime.strptime(update[prop], "%Y-%m-%d %H:%M:%S.%f")))
else:
new_metadata[prop] = update[prop]
return new_metadata
def _update_test_cases(current, update):
if not current:
return update.copy()
new_cases = current.copy()
for group, cases in update.items():
updated_cases = cases.copy()
if group in new_cases:
updated_cases += new_cases[group]
new_cases[group] = updated_cases
return new_cases
def _run_script():
parser = argparse.ArgumentParser(
description="Validate and convert SONiC JUnit XML files into JSON.",
formatter_class=argparse.RawTextHelpFormatter,
epilog="""
Examples:
python3 junit_xml_parser.py tests/files/sample_tr.xml
""",
)
parser.add_argument("file_name", metavar="file", type=str, help="A file to validate/parse.")
parser.add_argument(
"--validate-only", action="store_true", help="Validate without parsing the file.",
)
parser.add_argument(
"--compact", "-c", action="store_true", help="Output the JSON in a compact form.",
)
parser.add_argument(
"--output-file", "-o", type=str, help="A file to store the JSON output in.",
)
parser.add_argument(
"--directory", "-d", action="store_true", help="Provide a directory instead of a single file."
)
args = parser.parse_args()
try:
if args.directory:
roots = validate_junit_xml_archive(args.file_name)
else:
roots = [validate_junit_xml_file(args.file_name)]
except JUnitXMLValidationError as e:
print(f"XML validation failed: {e}")
sys.exit(1)
except Exception as e:
print(f"Unexpected error occured during validation: {e}")
sys.exit(2)
if args.validate_only:
print(f"{args.file_name} validated succesfully!")
sys.exit(0)
test_result_json = parse_test_result(roots)
if args.compact:
output = json.dumps(test_result_json, separators=(",", ":"), sort_keys=True)
else:
output = json.dumps(test_result_json, indent=4, sort_keys=True)
if args.output_file:
with open(args.output_file, "w+") as output_file:
output_file.write(output)
else:
print(output)
if __name__ == "__main__":
_run_script()
| true
| true
|
1c44be78f0de124dc36c88eff98e426707185e4e
| 614
|
py
|
Python
|
src/coincheck/withdraw.py
|
coincheckjp/coincheck-python
|
85e8f9a9b9245e047a95cd33615284259e9ba399
|
[
"MIT"
] | 46
|
2017-03-29T00:18:00.000Z
|
2022-03-19T12:55:43.000Z
|
coincheck/withdraw.py
|
gamma-github/cryptCurrency
|
efb67f3a4ba0819224f73fefec53dfadcc2cbf78
|
[
"MIT"
] | 3
|
2017-08-04T05:31:29.000Z
|
2018-08-09T06:42:25.000Z
|
coincheck/withdraw.py
|
gamma-github/cryptCurrency
|
efb67f3a4ba0819224f73fefec53dfadcc2cbf78
|
[
"MIT"
] | 21
|
2017-03-11T14:31:09.000Z
|
2021-01-07T02:07:41.000Z
|
from coincheck.servicebase import ServiceBase
class Withdraw(ServiceBase):
baseUrl = '/api/withdraws'
def create(self, params = {}):
return self.coinCheck.request(ServiceBase.METHOD_POST, self.baseUrl, params)
def all(self, params = {}):
return self.coinCheck.request(ServiceBase.METHOD_GET, self.baseUrl, params)
def cancel(self, params = {}):
defaults = {
'id': ""
}
defaults.update(params)
params = defaults.copy()
return self.coinCheck.request(ServiceBase.METHOD_DELETE, self.baseUrl + '/' + str(params['id']), params)
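# Hedged usage sketch (editor's addition): ServiceBase wires in the CoinCheck
# client, so e.g. withdraw.cancel({'id': 123}) issues DELETE /api/withdraws/123.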
| 34.111111
| 112
| 0.640065
|
from coincheck.servicebase import ServiceBase
class Withdraw(ServiceBase):
baseUrl = '/api/withdraws'
def create(self, params = {}):
return self.coinCheck.request(ServiceBase.METHOD_POST, self.baseUrl, params)
def all(self, params = {}):
return self.coinCheck.request(ServiceBase.METHOD_GET, self.baseUrl, params)
def cancel(self, params = {}):
defaults = {
'id': ""
}
defaults.update(params)
params = defaults.copy()
return self.coinCheck.request(ServiceBase.METHOD_DELETE, self.baseUrl + '/' + str(params['id']), params)
| true
| true
|
1c44c0e71ba8b171137bece871c14f3ee1b13891
| 592
|
py
|
Python
|
qingmi/__about__.py
|
xiongxianzhu/qingmi
|
ae5a446abec3982ebf2c5dde8546ef72f9453137
|
[
"BSD-3-Clause"
] | 20
|
2018-05-22T09:29:40.000Z
|
2020-12-11T04:53:15.000Z
|
qingmi/__about__.py
|
xiongxianzhu/qingmi
|
ae5a446abec3982ebf2c5dde8546ef72f9453137
|
[
"BSD-3-Clause"
] | 65
|
2019-03-07T02:43:06.000Z
|
2021-01-07T03:43:52.000Z
|
qingmi/__about__.py
|
xiongxianzhu/qingmi
|
ae5a446abec3982ebf2c5dde8546ef72f9453137
|
[
"BSD-3-Clause"
] | 6
|
2019-03-08T06:39:47.000Z
|
2021-07-01T11:02:56.000Z
|
__name__ = 'qingmi'
__description__ = 'Common modules and toolsets for rapid and efficient development of flask Web.'
__url__ = 'https://github.com/xiongxianzhu/qingmi'
__version_info__ = ('0', '1', '4')
__version__ = '.'.join(__version_info__)
__fullname__ = '-'.join((__name__, __version__))
__author__ = 'zhuxiongxian'
__author_email__ = 'zhuxiongxian@gmail.com'
__maintainer__ = 'zhuxiongxian'
__maintainer_email__ = 'zhuxiongxian@gmail.com'
__license__ = 'BSD'
__copyright__ = '(c) 2018 by zhuxiongxian'
__source__ = 'https://github.com/xiongxianzhu/qingmi'
__keywords__ = 'qingmi flask'
| 42.285714
| 97
| 0.765203
|
__name__ = 'qingmi'
__description__ = 'Common modules and toolsets for rapid and efficient development of flask Web.'
__url__ = 'https://github.com/xiongxianzhu/qingmi'
__version_info__ = ('0', '1', '4')
__version__ = '.'.join(__version_info__)
__fullname__ = '-'.join((__name__, __version__))
__author__ = 'zhuxiongxian'
__author_email__ = 'zhuxiongxian@gmail.com'
__maintainer__ = 'zhuxiongxian'
__maintainer_email__ = 'zhuxiongxian@gmail.com'
__license__ = 'BSD'
__copyright__ = '(c) 2018 by zhuxiongxian'
__source__ = 'https://github.com/xiongxianzhu/qingmi'
__keywords__ = 'qingmi flask'
| true
| true
|
1c44c145c66898134fa0294e53de15df16edc466
| 3,992
|
py
|
Python
|
youtube_dl/extractor/veoh.py
|
hackarada/youtube-dl
|
2ba46715a41fe074eab2221170b2ac78fab93fad
|
[
"Unlicense"
] | 66,635
|
2019-03-10T21:34:18.000Z
|
2022-03-31T23:50:31.000Z
|
youtube_dl/extractor/veoh.py
|
hackarada/youtube-dl
|
2ba46715a41fe074eab2221170b2ac78fab93fad
|
[
"Unlicense"
] | 10,936
|
2019-03-10T21:35:47.000Z
|
2022-03-31T23:46:52.000Z
|
youtube_dl/extractor/veoh.py
|
hackarada/youtube-dl
|
2ba46715a41fe074eab2221170b2ac78fab93fad
|
[
"Unlicense"
] | 15,194
|
2019-03-10T21:09:27.000Z
|
2022-03-31T22:13:49.000Z
|
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
int_or_none,
parse_duration,
qualities,
)
class VeohIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?veoh\.com/(?:watch|embed|iphone/#_Watch)/(?P<id>(?:v|e|yapi-)[\da-zA-Z]+)'
_TESTS = [{
'url': 'http://www.veoh.com/watch/v56314296nk7Zdmz3',
'md5': '9e7ecc0fd8bbee7a69fe38953aeebd30',
'info_dict': {
'id': 'v56314296nk7Zdmz3',
'ext': 'mp4',
'title': 'Straight Backs Are Stronger',
'uploader': 'LUMOback',
'description': 'At LUMOback, we believe straight backs are stronger. The LUMOback Posture & Movement Sensor: It gently vibrates when you slouch, inspiring improved posture and mobility. Use the app to track your data and improve your posture over time. ',
},
}, {
'url': 'http://www.veoh.com/embed/v56314296nk7Zdmz3',
'only_matching': True,
}, {
'url': 'http://www.veoh.com/watch/v27701988pbTc4wzN?h1=Chile+workers+cover+up+to+avoid+skin+damage',
'md5': '4a6ff84b87d536a6a71e6aa6c0ad07fa',
'info_dict': {
'id': '27701988',
'ext': 'mp4',
'title': 'Chile workers cover up to avoid skin damage',
'description': 'md5:2bd151625a60a32822873efc246ba20d',
'uploader': 'afp-news',
'duration': 123,
},
'skip': 'This video has been deleted.',
}, {
'url': 'http://www.veoh.com/watch/v69525809F6Nc4frX',
'md5': '4fde7b9e33577bab2f2f8f260e30e979',
'note': 'Embedded ooyala video',
'info_dict': {
'id': '69525809',
'ext': 'mp4',
'title': 'Doctors Alter Plan For Preteen\'s Weight Loss Surgery',
'description': 'md5:f5a11c51f8fb51d2315bca0937526891',
'uploader': 'newsy-videos',
},
'skip': 'This video has been deleted.',
}, {
'url': 'http://www.veoh.com/watch/e152215AJxZktGS',
'only_matching': True,
}]
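    # NOTE (editor's addition): this helper appears unused by _real_extract and
    # references self._extract_formats, which does not appear to be defined here.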
def _extract_video(self, source):
return {
'id': source.get('videoId'),
'title': source.get('title'),
'description': source.get('description'),
'thumbnail': source.get('highResImage') or source.get('medResImage'),
'uploader': source.get('username'),
'duration': int_or_none(source.get('length')),
'view_count': int_or_none(source.get('views')),
'age_limit': 18 if source.get('isMature') == 'true' or source.get('isSexy') == 'true' else 0,
'formats': self._extract_formats(source),
}
def _real_extract(self, url):
video_id = self._match_id(url)
video = self._download_json(
'https://www.veoh.com/watch/getVideo/' + video_id,
video_id)['video']
title = video['title']
thumbnail_url = None
q = qualities(['HQ', 'Regular'])
formats = []
for f_id, f_url in video.get('src', {}).items():
if not f_url:
continue
if f_id == 'poster':
thumbnail_url = f_url
else:
formats.append({
'format_id': f_id,
'quality': q(f_id),
'url': f_url,
})
self._sort_formats(formats)
return {
'id': video_id,
'title': title,
'description': video.get('description'),
'thumbnail': thumbnail_url,
'uploader': video.get('author', {}).get('nickname'),
'duration': int_or_none(video.get('lengthBySec')) or parse_duration(video.get('length')),
'view_count': int_or_none(video.get('views')),
'formats': formats,
'average_rating': int_or_none(video.get('rating')),
'comment_count': int_or_none(video.get('numOfComments')),
}
| 38.384615
| 270
| 0.549098
|
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
int_or_none,
parse_duration,
qualities,
)
class VeohIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?veoh\.com/(?:watch|embed|iphone/#_Watch)/(?P<id>(?:v|e|yapi-)[\da-zA-Z]+)'
_TESTS = [{
'url': 'http://www.veoh.com/watch/v56314296nk7Zdmz3',
'md5': '9e7ecc0fd8bbee7a69fe38953aeebd30',
'info_dict': {
'id': 'v56314296nk7Zdmz3',
'ext': 'mp4',
'title': 'Straight Backs Are Stronger',
'uploader': 'LUMOback',
'description': 'At LUMOback, we believe straight backs are stronger. The LUMOback Posture & Movement Sensor: It gently vibrates when you slouch, inspiring improved posture and mobility. Use the app to track your data and improve your posture over time. ',
},
}, {
'url': 'http://www.veoh.com/embed/v56314296nk7Zdmz3',
'only_matching': True,
}, {
'url': 'http://www.veoh.com/watch/v27701988pbTc4wzN?h1=Chile+workers+cover+up+to+avoid+skin+damage',
'md5': '4a6ff84b87d536a6a71e6aa6c0ad07fa',
'info_dict': {
'id': '27701988',
'ext': 'mp4',
'title': 'Chile workers cover up to avoid skin damage',
'description': 'md5:2bd151625a60a32822873efc246ba20d',
'uploader': 'afp-news',
'duration': 123,
},
'skip': 'This video has been deleted.',
}, {
'url': 'http://www.veoh.com/watch/v69525809F6Nc4frX',
'md5': '4fde7b9e33577bab2f2f8f260e30e979',
'note': 'Embedded ooyala video',
'info_dict': {
'id': '69525809',
'ext': 'mp4',
'title': 'Doctors Alter Plan For Preteen\'s Weight Loss Surgery',
'description': 'md5:f5a11c51f8fb51d2315bca0937526891',
'uploader': 'newsy-videos',
},
'skip': 'This video has been deleted.',
}, {
'url': 'http://www.veoh.com/watch/e152215AJxZktGS',
'only_matching': True,
}]
def _extract_video(self, source):
return {
'id': source.get('videoId'),
'title': source.get('title'),
'description': source.get('description'),
'thumbnail': source.get('highResImage') or source.get('medResImage'),
'uploader': source.get('username'),
'duration': int_or_none(source.get('length')),
'view_count': int_or_none(source.get('views')),
'age_limit': 18 if source.get('isMature') == 'true' or source.get('isSexy') == 'true' else 0,
'formats': self._extract_formats(source),
}
def _real_extract(self, url):
video_id = self._match_id(url)
video = self._download_json(
'https://www.veoh.com/watch/getVideo/' + video_id,
video_id)['video']
title = video['title']
thumbnail_url = None
q = qualities(['HQ', 'Regular'])
formats = []
for f_id, f_url in video.get('src', {}).items():
if not f_url:
continue
if f_id == 'poster':
thumbnail_url = f_url
else:
formats.append({
'format_id': f_id,
'quality': q(f_id),
'url': f_url,
})
self._sort_formats(formats)
return {
'id': video_id,
'title': title,
'description': video.get('description'),
'thumbnail': thumbnail_url,
'uploader': video.get('author', {}).get('nickname'),
'duration': int_or_none(video.get('lengthBySec')) or parse_duration(video.get('length')),
'view_count': int_or_none(video.get('views')),
'formats': formats,
'average_rating': int_or_none(video.get('rating')),
'comment_count': int_or_none(video.get('numOfComments')),
}
| true
| true
|
1c44c19e674fd96d888109c3073edd9787436023
| 3,193
|
py
|
Python
|
annotation/management/commands/fix_annotation_link_transcripts.py
|
SACGF/variantgrid
|
515195e2f03a0da3a3e5f2919d8e0431babfd9c9
|
[
"RSA-MD"
] | 5
|
2021-01-14T03:34:42.000Z
|
2022-03-07T15:34:18.000Z
|
annotation/management/commands/fix_annotation_link_transcripts.py
|
SACGF/variantgrid
|
515195e2f03a0da3a3e5f2919d8e0431babfd9c9
|
[
"RSA-MD"
] | 551
|
2020-10-19T00:02:38.000Z
|
2022-03-30T02:18:22.000Z
|
annotation/management/commands/fix_annotation_link_transcripts.py
|
SACGF/variantgrid
|
515195e2f03a0da3a3e5f2919d8e0431babfd9c9
|
[
"RSA-MD"
] | null | null | null |
#!/usr/bin/env python3
import logging
from django.core.management.base import BaseCommand
from django.db.models import Q, Func, Value, F
from annotation.models import VariantAnnotation, VariantTranscriptAnnotation, VariantAnnotationVersion, \
TranscriptVersion, defaultdict
from snpdb.models.models_genome import GenomeBuild
class Command(BaseCommand):
""" This should only need to be run on legacy data, with variant annotations that were run before the transcript
versions it used were inserted. Now we ensure the gene_annotation_release is set so this shouldn't happen """
def handle(self, *args, **options):
for genome_build in GenomeBuild.builds_with_annotation():
for vav in VariantAnnotationVersion.objects.filter(genome_build=genome_build):
self.fix_variant_annotation_version(vav)
@staticmethod
def fix_variant_annotation_version(vav: VariantAnnotationVersion):
print(f"Looking for missing transcripts in {vav}")
t_qs = TranscriptVersion.objects.filter(transcript__annotation_consortium=vav.annotation_consortium,
genome_build=vav.genome_build)
transcript_versions_by_id = defaultdict(dict)
for pk, transcript_id, version in t_qs.values_list("pk", "transcript_id", "version"):
transcript_versions_by_id[transcript_id][version] = pk
for klass in [VariantAnnotation, VariantTranscriptAnnotation]:
missing_qs = klass.objects.filter(Q(transcript__isnull=True) | Q(transcript_version__isnull=True),
version=vav, hgvs_c__isnull=False)
split_func = Func(F("hgvs_c"), Value(":"), Value(1), function="split_part")
num_fixed = 0
records = []
for pk, feature in missing_qs.annotate(feature=split_func).values_list("pk", "feature"):
t_id, version = TranscriptVersion.get_transcript_id_and_version(feature)
transcript_versions = transcript_versions_by_id.get(t_id)
if transcript_versions:
transcript_id = t_id # Know it's valid to link
transcript_version_id = transcript_versions.get(version)
if transcript_version_id is None:
logging.warning(f"Have transcript '{transcript_id}' but no version: '{version}'")
records.append(klass(pk=pk, transcript_id=transcript_id, transcript_version_id=transcript_version_id))
# Need to break up into smaller runs as the big job (even with batch_size) was killed
num_records = len(records)
if num_records >= 2000:
num_fixed += num_records
klass.objects.bulk_update(records, fields=["transcript_id", "transcript_version_id"])
records = []
if records: # Any remaining
num_fixed += len(records)
klass.objects.bulk_update(records, fields=["transcript_id", "transcript_version_id"], batch_size=2000)
print(f"Fixed {num_fixed} {klass} records")
| 54.118644
| 122
| 0.656123
|
import logging
from django.core.management.base import BaseCommand
from django.db.models import Q, Func, Value, F
from annotation.models import VariantAnnotation, VariantTranscriptAnnotation, VariantAnnotationVersion, \
TranscriptVersion, defaultdict
from snpdb.models.models_genome import GenomeBuild
class Command(BaseCommand):
def handle(self, *args, **options):
for genome_build in GenomeBuild.builds_with_annotation():
for vav in VariantAnnotationVersion.objects.filter(genome_build=genome_build):
self.fix_variant_annotation_version(vav)
@staticmethod
def fix_variant_annotation_version(vav: VariantAnnotationVersion):
print(f"Looking for missing transcripts in {vav}")
t_qs = TranscriptVersion.objects.filter(transcript__annotation_consortium=vav.annotation_consortium,
genome_build=vav.genome_build)
transcript_versions_by_id = defaultdict(dict)
for pk, transcript_id, version in t_qs.values_list("pk", "transcript_id", "version"):
transcript_versions_by_id[transcript_id][version] = pk
for klass in [VariantAnnotation, VariantTranscriptAnnotation]:
missing_qs = klass.objects.filter(Q(transcript__isnull=True) | Q(transcript_version__isnull=True),
version=vav, hgvs_c__isnull=False)
split_func = Func(F("hgvs_c"), Value(":"), Value(1), function="split_part")
num_fixed = 0
records = []
for pk, feature in missing_qs.annotate(feature=split_func).values_list("pk", "feature"):
t_id, version = TranscriptVersion.get_transcript_id_and_version(feature)
transcript_versions = transcript_versions_by_id.get(t_id)
if transcript_versions:
transcript_id = t_id
transcript_version_id = transcript_versions.get(version)
if transcript_version_id is None:
logging.warning(f"Have transcript '{transcript_id}' but no version: '{version}'")
records.append(klass(pk=pk, transcript_id=transcript_id, transcript_version_id=transcript_version_id))
# Need to break up into smaller runs as the big job (even with batch_size) was killed
num_records = len(records)
if num_records >= 2000:
num_fixed += num_records
klass.objects.bulk_update(records, fields=["transcript_id", "transcript_version_id"])
records = []
if records: # Any remaining
num_fixed += len(records)
klass.objects.bulk_update(records, fields=["transcript_id", "transcript_version_id"], batch_size=2000)
print(f"Fixed {num_fixed} {klass} records")
| true
| true
|
1c44c2e440428fbf8e99bba9838287b19cd3fed5
| 195
|
py
|
Python
|
bindsnet_master/bindsnet/pipeline/__init__.py
|
Singular-Brain/ProjectBrain
|
2d22d45c13a86825c0dcaf517a59e02f2c4f6164
|
[
"MIT"
] | 6
|
2021-06-01T03:43:35.000Z
|
2022-02-11T10:41:06.000Z
|
bindsnet_master/bindsnet/pipeline/__init__.py
|
Singular-Brain/ProjectBrain
|
2d22d45c13a86825c0dcaf517a59e02f2c4f6164
|
[
"MIT"
] | 1
|
2022-03-31T03:22:14.000Z
|
2022-03-31T03:22:14.000Z
|
bindsnet_master/bindsnet/pipeline/__init__.py
|
Singular-Brain/ProjectBrain
|
2d22d45c13a86825c0dcaf517a59e02f2c4f6164
|
[
"MIT"
] | 3
|
2021-10-30T02:30:40.000Z
|
2021-11-16T04:23:12.000Z
|
from .environment_pipeline import EnvironmentPipeline
from .base_pipeline import BasePipeline
from .dataloader_pipeline import DataLoaderPipeline, TorchVisionDatasetPipeline
from . import action
| 39
| 79
| 0.887179
|
from .environment_pipeline import EnvironmentPipeline
from .base_pipeline import BasePipeline
from .dataloader_pipeline import DataLoaderPipeline, TorchVisionDatasetPipeline
from . import action
| true
| true
|
1c44c32533d248651d10865ab48ee508be5c4361
| 9,815
|
py
|
Python
|
reports/configs/only_logs_dmpnn8_1/other_config.py
|
hengwei-chan/graph_network_demo
|
542f2a59b1b9708abdc718d77db7111f3ba2df96
|
[
"MIT"
] | 1
|
2021-10-18T03:44:53.000Z
|
2021-10-18T03:44:53.000Z
|
reports/configs/only_logs_dmpnn8_1/other_config.py
|
hengwei-chan/graph_network_demo
|
542f2a59b1b9708abdc718d77db7111f3ba2df96
|
[
"MIT"
] | null | null | null |
reports/configs/only_logs_dmpnn8_1/other_config.py
|
hengwei-chan/graph_network_demo
|
542f2a59b1b9708abdc718d77db7111f3ba2df96
|
[
"MIT"
] | 1
|
2022-02-22T08:32:01.000Z
|
2022-02-22T08:32:01.000Z
|
from dataclasses import dataclass, field
from typing import List
import tensorflow as tf
from graph_networks.utilities import *
import logging
import os
ATOM_FEATURE_DIM = DGIN8_ATOM_FEATURE_DIM
EDGE_FEATURE_DIM = DGIN8_EDGE_FEATURE_DIM
@dataclass
class BasicModelConfig:
"""
Config for model1/2/3 run file.
General model parameters
"""
model_name: str = 'only_logs_dmpnn8_1' # without h_w in DGIN gin part - added h_v_0 instead
# whole train/eval split - no more double split within train data set
# random train/test split in get_data_sd - only change overall_seed
# CHANGES dgin3 10.02.2021:
# *added new bondFeaturesDGIN2 and atomFeaturesDGIN2; DGIN2_ATOM_FEATURE_DIM; DGIN2_EDGE_FEATURE_DIM
# *from project_path+'data/processed/lipo/pickled/train_frags3/' to project_path+'data/processed/lipo/pickled/test_frags3/'
# CHANGES dgin3 16.02.2021:
# *added new bondFeaturesDGIN3 and atomFeaturesDGIN3; DGIN3_ATOM_FEATURE_DIM; DGIN3_EDGE_FEATURE_DIM
# *from project_path+'data/processed/lipo/pickled/train_frags_dgin3/' to project_path+'data/processed/lipo/pickled/test_frags_dgin3/'
# CHANGES dgin4 16.02.2021:
# *added add_species bool in model1 config - previously not there; for dgin2 featurization adds the species type after the dgin
# encoding before logD prediction
# test_frags_dgin4 was added for species inclusion in model2 call()
batch_size: int =15
override_if_exists: bool = True
overall_seed: int = 2
# path to the project folder
project_path:str = "./"
retrain_model: bool = False
retrain_model_name: str = ''
retrain_model_epoch: str = ''
retrain_model_weights_dir: str = project_path+'reports/model_weights/'+retrain_model_name+'/epoch_'+retrain_model_epoch+'/checkp_'+retrain_model_epoch
train_data_dir: str = project_path+'data/processed/lipo/pickled/train_dgin8_logs/'
test_data_dir: str = project_path+'data/processed/lipo/pickled/test_dgin8_logs/'
combined_dataset: bool = False
add_train_data_dir: str = project_path+'data/processed/lipo/pickled/train_dgin8_logs/'
add_test_data_dir: str = project_path+'data/processed/lipo/pickled/test_dgin8_logs/'
test_model: bool = False
test_model_epoch: str = '887'
    # define the number of test runs for the CI.
# the mean and std of the RMSE and r^2 of the combined runs are taken as the output.
test_n_times: int = 1
# do you want to test the model with consensus mode?
# if yes, a defined ML model will be included in the consensus predictions during the testing.
consensus: bool = False
# include dropout during testing?
include_dropout: bool = False
test_model_weights_dir: str = project_path+'reports/model_weights/'+model_name+'/epoch_'+test_model_epoch+'/checkp_'+test_model_epoch
# To save the prediction values for each property set to True
# When this flag is True - the whole test dataset is taken an test_n_times is set to zero!
save_predictions: bool = False
# define the folder where you want to save the predictions.
# For each property, a file is created under the property name ("./logd.txt","./logs.txt","./logp.txt","./others.txt")
test_prediction_output_folder: str = project_path+"reports/predictions/"+model_name+"/"
encode_hidden: bool = False
log_dir: str = project_path+'reports/logs/'+model_name+'.log'
verbosity_level = logging.INFO
model_type: str = 'DMPNN' # added 31.03.2021 to compare models like 'GIN' 'DMPNN' 'DGIN' 'MLP'
plot_dir: str = project_path+'reports/figures/'+model_name+'/'
tensorboard_log_dir: str = project_path+'reports/tensorboard/'+model_name+'/'
config_log_dir: str = project_path+'reports/configs/'+model_name+'/'
model_weights_dir: str = project_path+'reports/model_weights/'+model_name+'/'
stats_log_dir: str = project_path+'reports/stats/'+model_name+'/'
@dataclass
class DGINConfig:
"""
    Config for the directed MPNN class.
"""
dropout_aggregate_dmpnn: bool = False
layernorm_aggregate_dmpnn: bool = True
dropout_passing_dmpnn: bool = False
layernorm_passing_dmpnn: bool = True
dropout_aggregate_gin: bool = False
layernorm_aggregate_gin: bool = True
dropout_passing_gin: bool = False
layernorm_passing_gin: bool = True
gin_aggregate_bias: bool = False
dmpnn_passing_bias: bool = False
init_bias: bool = False
massge_iteration_dmpnn: int = 4
message_iterations_gin: int = 4
dropout_rate: float = 0.15
input_size: int = (ATOM_FEATURE_DIM+EDGE_FEATURE_DIM) # combination of node feature len (33) and edge feature len (12)
passing_hidden_size: int = 56 # this can be changed
input_size_gin: int = (ATOM_FEATURE_DIM) # changed 31.03.2021
return_hv: bool = True # model3 parameter
@dataclass
class Model1Config:
"""
    Config for the model1 class; no subclass configs are defined here.
"""
validation_split: float = 0.90
learning_rate: float = 0.004
clip_rate: float = 0.6
optimizer = tf.keras.optimizers.Adam(learning_rate)
lipo_loss_mse = tf.keras.losses.mse
lipo_loss_mae = tf.keras.losses.mae
logP_loss_mse = tf.keras.losses.mse
logS_loss_mse = tf.keras.losses.mse
other_loss_mse = tf.keras.losses.mse
mw_loss_mse = tf.keras.losses.mse
metric = tf.keras.losses.mae
epochs: int = 1600
# define the number of epochs for each test run.
save_after_epoch: int = 3
# dropout rate for the general model - mainly the MLP for the different log predictions
dropout_rate: float = 0.15 # the overall dropout rate of the readout functions
# the seed to shuffle the training/validation dataset; For the same dataset, even when
    # combined_dataset is True, it is the same training/validation instances
train_data_seed: int = 0
hidden_readout_1: int = 32
hidden_readout_2: int = 14
activation_func_readout = tf.nn.relu
include_logD: bool = False
include_logS: bool = True
include_logP: bool = False
include_other: bool = False
include_mw: bool = False
include_rot_bond: bool = False
include_HBA: bool = False
include_HBD: bool = False
    # define the starting threshold for the RMSE of the model. When the combined RMSE
    # is below this threshold, the model weights are saved and a new threshold
    # is set. It only serves as a starting threshold so that not too many models
    # are saved. It depends on how many log endpoints are taken into
    # consideration, as three endpoints have a higher combined RMSE than only one
    # endpoint.
best_evaluation_threshold: float = 2.45 #was introduced on the 25.03.2021/
# define the individual thresholds. If one model is better, the corresponding
    # model weights are saved.
best_evaluation_threshold_logd: float = 1.85
best_evaluation_threshold_logp: float = 1.65
best_evaluation_threshold_logs: float = 2.15
best_evaluation_threshold_other: float = 2.15
# 2.45 for all_logs
# 0.70 logP
# 0.75 logD
# 1.00 logS
# 1.75 logSD
# 1.70 logSP
# 1.45 logDP
include_fragment_conv: bool = False # was introduced on the 4.12.2020
use_rmse: bool = True # uses RMSE instead of MSE for only lipo_loss
    shuffle_inside: bool = True # reshuffles the train/valid sets in each epoch (helps generalization)
add_species: bool = False # 16.02 introduction; previously not there; for dgin3 adds the species type after the dgin encoding before logD prediction
@dataclass
class FrACConfig:
"""
    Config for the fragment aggregation class; no subclass configs are defined here.
"""
input_size_gin: int = 28
layernorm_aggregate: bool = True
reduce_mean: bool = True # when false -> reduce_sum
@dataclass
class MLConfig:
"""
Configs for the ML algorithm
"""
# which algorithm do you want to use for the consensus?
# possibilities are: "SVM", "RF", "KNN" or "LR" - all are regression models!
    # SVM: Support Vector Machine; RF: Random Forest; KNN: K-Nearest Neighbors; LR: Linear Regression
algorithm: str = "SVM"
# which fingerprint to use - possibilities are: "ECFP" or "MACCS"
fp_types: str = "ECFP"
# If 'ECFP' fingerprint is used, define the number of bits - maximum is 2048!
n_bits: int = 2048
# If "ECFP" fingerprint is used, define the radius
radius: int = 4
# define if descriptors should be included into the non-GNN molecular representation
include_descriptors: bool = True
    # define if the descriptors should be standardized by scaling and centering (Sklearn)
standardize: bool = True
@dataclass
class Config():
"""
Overall config class for model2 and run file.
Includes all submodels config
"""
basic_model_config: BasicModelConfig
model1_config: Model1Config
d_gin_config: DGINConfig
frag_acc_config: FrACConfig
ml_config: MLConfig
model: str = 'model11'
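# Hedged usage sketch (editor's addition): assembling the overall Config from
# its sub-configs; every field value comes from the defaults defined above.
def _example_config():
    return Config(
        basic_model_config=BasicModelConfig(),
        model1_config=Model1Config(),
        d_gin_config=DGINConfig(),
        frag_acc_config=FrACConfig(),
        ml_config=MLConfig(),
    )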
| 44.013453
| 169
| 0.669791
|
from dataclasses import dataclass, field
from typing import List
import tensorflow as tf
from graph_networks.utilities import *
import logging
import os
ATOM_FEATURE_DIM = DGIN8_ATOM_FEATURE_DIM
EDGE_FEATURE_DIM = DGIN8_EDGE_FEATURE_DIM
@dataclass
class BasicModelConfig:
model_name: str = 'only_logs_dmpnn8_1'
batch_size: int =15
override_if_exists: bool = True
overall_seed: int = 2
project_path:str = "./"
retrain_model: bool = False
retrain_model_name: str = ''
retrain_model_epoch: str = ''
retrain_model_weights_dir: str = project_path+'reports/model_weights/'+retrain_model_name+'/epoch_'+retrain_model_epoch+'/checkp_'+retrain_model_epoch
train_data_dir: str = project_path+'data/processed/lipo/pickled/train_dgin8_logs/'
test_data_dir: str = project_path+'data/processed/lipo/pickled/test_dgin8_logs/'
combined_dataset: bool = False
add_train_data_dir: str = project_path+'data/processed/lipo/pickled/train_dgin8_logs/'
add_test_data_dir: str = project_path+'data/processed/lipo/pickled/test_dgin8_logs/'
test_model: bool = False
test_model_epoch: str = '887'
test_n_times: int = 1
consensus: bool = False
include_dropout: bool = False
test_model_weights_dir: str = project_path+'reports/model_weights/'+model_name+'/epoch_'+test_model_epoch+'/checkp_'+test_model_epoch
save_predictions: bool = False
test_prediction_output_folder: str = project_path+"reports/predictions/"+model_name+"/"
encode_hidden: bool = False
log_dir: str = project_path+'reports/logs/'+model_name+'.log'
verbosity_level = logging.INFO
model_type: str = 'DMPNN'
plot_dir: str = project_path+'reports/figures/'+model_name+'/'
tensorboard_log_dir: str = project_path+'reports/tensorboard/'+model_name+'/'
config_log_dir: str = project_path+'reports/configs/'+model_name+'/'
model_weights_dir: str = project_path+'reports/model_weights/'+model_name+'/'
stats_log_dir: str = project_path+'reports/stats/'+model_name+'/'
@dataclass
class DGINConfig:
dropout_aggregate_dmpnn: bool = False
layernorm_aggregate_dmpnn: bool = True
dropout_passing_dmpnn: bool = False
layernorm_passing_dmpnn: bool = True
dropout_aggregate_gin: bool = False
layernorm_aggregate_gin: bool = True
dropout_passing_gin: bool = False
layernorm_passing_gin: bool = True
gin_aggregate_bias: bool = False
dmpnn_passing_bias: bool = False
init_bias: bool = False
massge_iteration_dmpnn: int = 4
message_iterations_gin: int = 4
dropout_rate: float = 0.15
input_size: int = (ATOM_FEATURE_DIM+EDGE_FEATURE_DIM)
passing_hidden_size: int = 56
input_size_gin: int = (ATOM_FEATURE_DIM)
return_hv: bool = True
@dataclass
class Model1Config:
validation_split: float = 0.90
learning_rate: float = 0.004
clip_rate: float = 0.6
optimizer = tf.keras.optimizers.Adam(learning_rate)
lipo_loss_mse = tf.keras.losses.mse
lipo_loss_mae = tf.keras.losses.mae
logP_loss_mse = tf.keras.losses.mse
logS_loss_mse = tf.keras.losses.mse
other_loss_mse = tf.keras.losses.mse
mw_loss_mse = tf.keras.losses.mse
metric = tf.keras.losses.mae
epochs: int = 1600
save_after_epoch: int = 3
dropout_rate: float = 0.15
train_data_seed: int = 0
hidden_readout_1: int = 32
hidden_readout_2: int = 14
activation_func_readout = tf.nn.relu
include_logD: bool = False
include_logS: bool = True
include_logP: bool = False
include_other: bool = False
include_mw: bool = False
include_rot_bond: bool = False
include_HBA: bool = False
include_HBD: bool = False
best_evaluation_threshold: float = 2.45
best_evaluation_threshold_logd: float = 1.85
best_evaluation_threshold_logp: float = 1.65
best_evaluation_threshold_logs: float = 2.15
best_evaluation_threshold_other: float = 2.15
include_fragment_conv: bool = False
use_rmse: bool = True
shuffle_inside: bool = True
add_species: bool = False
@dataclass
class FrACConfig:
input_size_gin: int = 28
layernorm_aggregate: bool = True
reduce_mean: bool = True
@dataclass
class MLConfig:
algorithm: str = "SVM"
fp_types: str = "ECFP"
n_bits: int = 2048
radius: int = 4
include_descriptors: bool = True
standardize: bool = True
@dataclass
class Config():
basic_model_config: BasicModelConfig
model1_config: Model1Config
d_gin_config: DGINConfig
frag_acc_config: FrACConfig
ml_config: MLConfig
model: str = 'model11'
| true
| true
|
1c44c44a7fd2e2d21d12c9a07e5e73afb826cc4f
| 351
|
py
|
Python
|
app/__init__.py
|
demuk/Spare-Manager
|
670cac9a58e66cee58cd2ad3f6062d982c214903
|
[
"CC0-1.0"
] | 1
|
2021-08-25T12:13:15.000Z
|
2021-08-25T12:13:15.000Z
|
app/__init__.py
|
demuk/Spare-Manager
|
670cac9a58e66cee58cd2ad3f6062d982c214903
|
[
"CC0-1.0"
] | null | null | null |
app/__init__.py
|
demuk/Spare-Manager
|
670cac9a58e66cee58cd2ad3f6062d982c214903
|
[
"CC0-1.0"
] | null | null | null |
from flask import Flask
from config import Config
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_login import LoginManager
app = Flask(__name__)
app.config.from_object(Config)
db = SQLAlchemy(app)
migrate = Migrate(app, db)
login = LoginManager(app)
login.login_view = 'login'
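# Imported at the bottom to avoid a circular import, a common Flask pattern
# (editor's note: routes/models typically import `app` from this module).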
from app import routes, models
| 19.5
| 39
| 0.803419
|
from flask import Flask
from config import Config
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_login import LoginManager
app = Flask(__name__)
app.config.from_object(Config)
db = SQLAlchemy(app)
migrate = Migrate(app, db)
login = LoginManager(app)
login.login_view = 'login'
from app import routes, models
| true
| true
|
1c44c46aec892b9cb0523ce25ff5abdb3d142a87
| 256
|
py
|
Python
|
film/urls.py
|
mamalmaleki/kolbe_cl
|
0daf1ab55562b1f71a232be76c9e7609e8255e9a
|
[
"MIT"
] | 1
|
2020-01-02T05:51:11.000Z
|
2020-01-02T05:51:11.000Z
|
film/urls.py
|
mamalmaleki/kolbe
|
0daf1ab55562b1f71a232be76c9e7609e8255e9a
|
[
"MIT"
] | 2
|
2021-03-30T12:38:16.000Z
|
2021-09-22T18:30:59.000Z
|
film/urls.py
|
mamalmaleki/kolbe
|
0daf1ab55562b1f71a232be76c9e7609e8255e9a
|
[
"MIT"
] | 1
|
2020-12-01T09:47:12.000Z
|
2020-12-01T09:47:12.000Z
|
from django.urls import path
from film import views
app_name = 'front-film'
urlpatterns = [
path('call', views.call, name='call'),
path('omdbapi-search', views.omdbapi_search, name='omdbapi-search'),
path('', views.film_list, name='list'),
]
| 23.272727
| 72
| 0.683594
|
from django.urls import path
from film import views
app_name = 'front-film'
urlpatterns = [
path('call', views.call, name='call'),
path('omdbapi-search', views.omdbapi_search, name='omdbapi-search'),
path('', views.film_list, name='list'),
]
| true
| true
|
1c44c4774817045f4ce1c5be7b351739522ce15c
| 12,993
|
py
|
Python
|
metadata-ingestion/src/datahub/cli/docker.py
|
naboudieng/datahub
|
1a5121a5aeb3940960e9994362860d4130b840f2
|
[
"Apache-2.0"
] | null | null | null |
metadata-ingestion/src/datahub/cli/docker.py
|
naboudieng/datahub
|
1a5121a5aeb3940960e9994362860d4130b840f2
|
[
"Apache-2.0"
] | null | null | null |
metadata-ingestion/src/datahub/cli/docker.py
|
naboudieng/datahub
|
1a5121a5aeb3940960e9994362860d4130b840f2
|
[
"Apache-2.0"
] | null | null | null |
import datetime
import itertools
import logging
import os
import pathlib
import platform
import subprocess
import sys
import tempfile
import time
from typing import List, NoReturn, Optional
import click
import requests
from datahub.cli.docker_check import (
check_local_docker_containers,
get_client_with_error,
)
from datahub.ingestion.run.pipeline import Pipeline
from datahub.telemetry import telemetry
logger = logging.getLogger(__name__)
NEO4J_AND_ELASTIC_QUICKSTART_COMPOSE_FILE = (
"docker/quickstart/docker-compose.quickstart.yml"
)
ELASTIC_QUICKSTART_COMPOSE_FILE = (
"docker/quickstart/docker-compose-without-neo4j.quickstart.yml"
)
M1_QUICKSTART_COMPOSE_FILE = (
"docker/quickstart/docker-compose-without-neo4j-m1.quickstart.yml"
)
BOOTSTRAP_MCES_FILE = "metadata-ingestion/examples/mce_files/bootstrap_mce.json"
GITHUB_BASE_URL = "https://raw.githubusercontent.com/linkedin/datahub/master"
GITHUB_NEO4J_AND_ELASTIC_QUICKSTART_COMPOSE_URL = (
f"{GITHUB_BASE_URL}/{NEO4J_AND_ELASTIC_QUICKSTART_COMPOSE_FILE}"
)
GITHUB_ELASTIC_QUICKSTART_COMPOSE_URL = (
f"{GITHUB_BASE_URL}/{ELASTIC_QUICKSTART_COMPOSE_FILE}"
)
GITHUB_M1_QUICKSTART_COMPOSE_URL = f"{GITHUB_BASE_URL}/{M1_QUICKSTART_COMPOSE_FILE}"
GITHUB_BOOTSTRAP_MCES_URL = f"{GITHUB_BASE_URL}/{BOOTSTRAP_MCES_FILE}"
@click.group()
def docker() -> None:
"""Helper commands for setting up and interacting with a local
DataHub instance using Docker."""
pass
def _print_issue_list_and_exit(
issues: List[str], header: str, footer: Optional[str] = None
) -> NoReturn:
click.secho(header, fg="bright_red")
for issue in issues:
click.echo(f"- {issue}")
if footer:
click.echo()
click.echo(footer)
sys.exit(1)
def docker_check_impl() -> None:
issues = check_local_docker_containers()
if not issues:
click.secho("✔ No issues detected", fg="green")
else:
_print_issue_list_and_exit(issues, "The following issues were detected:")
@docker.command()
@telemetry.with_telemetry
def check() -> None:
"""Check that the Docker containers are healthy"""
docker_check_impl()
def is_m1() -> bool:
"""Check whether we are running on an M1 machine"""
try:
return (
platform.uname().machine == "arm64" and platform.uname().system == "Darwin"
)
except Exception:
# Catch-all
return False
def should_use_neo4j_for_graph_service(graph_service_override: Optional[str]) -> bool:
if graph_service_override is not None:
if graph_service_override == "elasticsearch":
click.echo("Starting with elasticsearch due to graph-service-impl param\n")
return False
if graph_service_override == "neo4j":
click.echo("Starting with neo4j due to graph-service-impl param\n")
return True
else:
click.secho(
graph_service_override
+ " is not a valid graph service option. Choose either `neo4j` or "
"`elasticsearch`\n",
fg="red",
)
raise ValueError(f"invalid graph service option: {graph_service_override}")
with get_client_with_error() as (client, error):
if error:
click.secho(
"Docker doesn't seem to be running. Did you start it?", fg="red"
)
raise error
if len(client.volumes.list(filters={"name": "datahub_neo4jdata"})) > 0:
click.echo(
"Datahub Neo4j volume found, starting with neo4j as graph service.\n"
"If you want to run using elastic, run `datahub docker nuke` and re-ingest your data.\n"
)
return True
click.echo(
"No Datahub Neo4j volume found, starting with elasticsearch as graph service.\n"
"To use neo4j as a graph backend, run \n"
"`datahub docker quickstart --quickstart-compose-file ./docker/quickstart/docker-compose.quickstart.yml`"
"\nfrom the root of the datahub repo\n"
)
return False
@docker.command()
@click.option(
"--version",
type=str,
default="head",
help="Datahub version to be deployed. If not set, deploy latest",
)
@click.option(
"--build-locally",
type=bool,
is_flag=True,
default=False,
help="Attempt to build the containers locally before starting",
)
@click.option(
"--quickstart-compose-file",
type=click.Path(exists=True, dir_okay=False, readable=True),
default=[],
multiple=True,
help="Use a local docker-compose file instead of pulling from GitHub",
)
@click.option(
"--dump-logs-on-failure",
type=bool,
is_flag=True,
default=False,
help="If true, the docker-compose logs will be printed to console if something fails",
)
@click.option(
"--graph-service-impl",
type=str,
is_flag=False,
default=None,
help="If set, forces docker-compose to use that graph service implementation",
)
@telemetry.with_telemetry
def quickstart(
version: str,
build_locally: bool,
quickstart_compose_file: List[pathlib.Path],
dump_logs_on_failure: bool,
graph_service_impl: Optional[str],
) -> None:
"""Start an instance of DataHub locally using docker-compose.
This command will automatically download the latest docker-compose configuration
from GitHub, pull the latest images, and bring up the DataHub system.
There are options to override the docker-compose config file, build the containers
locally, and dump logs to the console or to a file if something goes wrong.
"""
running_on_m1 = is_m1()
if running_on_m1:
click.echo("Detected M1 machine")
# Run pre-flight checks.
issues = check_local_docker_containers(preflight_only=True)
if issues:
_print_issue_list_and_exit(issues, "Unable to run quickstart:")
quickstart_compose_file = list(
quickstart_compose_file
) # convert to list from tuple
if not quickstart_compose_file:
should_use_neo4j = should_use_neo4j_for_graph_service(graph_service_impl)
if should_use_neo4j and running_on_m1:
click.secho(
"Running with neo4j on M1 is not currently supported, will be using elasticsearch as graph",
fg="red",
)
github_file = (
GITHUB_NEO4J_AND_ELASTIC_QUICKSTART_COMPOSE_URL
if should_use_neo4j and not running_on_m1
else GITHUB_ELASTIC_QUICKSTART_COMPOSE_URL
if not running_on_m1
else GITHUB_M1_QUICKSTART_COMPOSE_URL
)
with tempfile.NamedTemporaryFile(suffix=".yml", delete=False) as tmp_file:
path = pathlib.Path(tmp_file.name)
quickstart_compose_file.append(path)
click.echo(f"Fetching docker-compose file {github_file} from GitHub")
# Download the quickstart docker-compose file from GitHub.
quickstart_download_response = requests.get(github_file)
quickstart_download_response.raise_for_status()
tmp_file.write(quickstart_download_response.content)
logger.debug(f"Copied to {path}")
# set version
os.environ["DATAHUB_VERSION"] = version
base_command: List[str] = [
"docker-compose",
*itertools.chain.from_iterable(
("-f", f"{path}") for path in quickstart_compose_file
),
"-p",
"datahub",
]
# Pull and possibly build the latest containers.
subprocess.run(
[
*base_command,
"pull",
],
check=True,
)
if build_locally:
subprocess.run(
[
*base_command,
"build",
"--pull",
],
check=True,
env={
**os.environ,
"DOCKER_BUILDKIT": "1",
},
)
# Start it up! (with retries)
max_wait_time = datetime.timedelta(minutes=6)
start_time = datetime.datetime.now()
sleep_interval = datetime.timedelta(seconds=2)
up_interval = datetime.timedelta(seconds=30)
up_attempts = 0
while (datetime.datetime.now() - start_time) < max_wait_time:
# Attempt to run docker-compose up every minute.
if (datetime.datetime.now() - start_time) > up_attempts * up_interval:
click.echo()
subprocess.run(base_command + ["up", "-d", "--remove-orphans"])
up_attempts += 1
# Check docker health every few seconds.
issues = check_local_docker_containers()
if not issues:
break
# Wait until next iteration.
click.echo(".", nl=False)
time.sleep(sleep_interval.total_seconds())
else:
# Falls through if the while loop doesn't exit via break.
click.echo()
with tempfile.NamedTemporaryFile(suffix=".log", delete=False) as log_file:
ret = subprocess.run(
base_command + ["logs"],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
check=True,
)
log_file.write(ret.stdout)
if dump_logs_on_failure:
with open(log_file.name, "r") as logs:
click.echo("Dumping docker-compose logs:")
click.echo(logs.read())
click.echo()
_print_issue_list_and_exit(
issues,
header="Unable to run quickstart - the following issues were detected:",
footer="If you think something went wrong, please file an issue at https://github.com/linkedin/datahub/issues\n"
"or send a message in our Slack https://slack.datahubproject.io/\n"
f"Be sure to attach the logs from {log_file.name}",
)
# Handle success condition.
click.echo()
click.secho("✔ DataHub is now running", fg="green")
click.secho(
"Ingest some demo data using `datahub docker ingest-sample-data`,\n"
"or head to http://localhost:9002 (username: datahub, password: datahub) to play around with the frontend.",
fg="green",
)
click.secho(
"Need support? Get in touch on Slack: https://slack.datahubproject.io/",
fg="magenta",
)
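# Hedged usage sketch (editor's addition); the command and flags are the ones
# defined by the options above:
#
#   datahub docker quickstart
#   datahub docker quickstart --graph-service-impl neo4j --dump-logs-on-failure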
@docker.command()
@click.option(
"--path",
type=click.Path(exists=True, dir_okay=False),
help=f"The MCE json file to ingest. Defaults to downloading {BOOTSTRAP_MCES_FILE} from GitHub",
)
@telemetry.with_telemetry
def ingest_sample_data(path: Optional[str]) -> None:
"""Ingest sample data into a running DataHub instance."""
if path is None:
click.echo("Downloading sample data...")
with tempfile.NamedTemporaryFile(suffix=".json", delete=False) as tmp_file:
path = str(pathlib.Path(tmp_file.name))
# Download the bootstrap MCE file from GitHub.
mce_json_download_response = requests.get(GITHUB_BOOTSTRAP_MCES_URL)
mce_json_download_response.raise_for_status()
tmp_file.write(mce_json_download_response.content)
click.echo(f"Downloaded to {path}")
# Verify that docker is up.
issues = check_local_docker_containers()
if issues:
_print_issue_list_and_exit(
issues,
header="Docker is not ready:",
footer="Try running `datahub docker quickstart` first",
)
# Run ingestion.
click.echo("Starting ingestion...")
pipeline = Pipeline.create(
{
"source": {
"type": "file",
"config": {
"filename": path,
},
},
"sink": {
"type": "datahub-rest",
"config": {"server": "http://localhost:8080"},
},
}
)
pipeline.run()
ret = pipeline.pretty_print_summary()
sys.exit(ret)
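# Hedged usage sketch (editor's addition): `datahub docker ingest-sample-data`
# downloads the bootstrap MCE file unless --path points at a local MCE json.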
@docker.command()
@telemetry.with_telemetry
def nuke() -> None:
"""Remove all Docker containers, networks, and volumes associated with DataHub."""
with get_client_with_error() as (client, error):
if error:
click.secho(
"Docker doesn't seem to be running. Did you start it?", fg="red"
)
return
click.echo("Removing containers in the datahub project")
for container in client.containers.list(
all=True, filters={"label": "com.docker.compose.project=datahub"}
):
container.remove(v=True, force=True)
click.echo("Removing volumes in the datahub project")
for volume in client.volumes.list(
filters={"label": "com.docker.compose.project=datahub"}
):
volume.remove(force=True)
click.echo("Removing networks in the datahub project")
for network in client.networks.list(
filters={"label": "com.docker.compose.project=datahub"}
):
network.remove()
| 32.893671
| 124
| 0.633572
|
import datetime
import itertools
import logging
import os
import pathlib
import platform
import subprocess
import sys
import tempfile
import time
from typing import List, NoReturn, Optional
import click
import requests
from datahub.cli.docker_check import (
check_local_docker_containers,
get_client_with_error,
)
from datahub.ingestion.run.pipeline import Pipeline
from datahub.telemetry import telemetry
logger = logging.getLogger(__name__)
NEO4J_AND_ELASTIC_QUICKSTART_COMPOSE_FILE = (
"docker/quickstart/docker-compose.quickstart.yml"
)
ELASTIC_QUICKSTART_COMPOSE_FILE = (
"docker/quickstart/docker-compose-without-neo4j.quickstart.yml"
)
M1_QUICKSTART_COMPOSE_FILE = (
"docker/quickstart/docker-compose-without-neo4j-m1.quickstart.yml"
)
BOOTSTRAP_MCES_FILE = "metadata-ingestion/examples/mce_files/bootstrap_mce.json"
GITHUB_BASE_URL = "https://raw.githubusercontent.com/linkedin/datahub/master"
GITHUB_NEO4J_AND_ELASTIC_QUICKSTART_COMPOSE_URL = (
f"{GITHUB_BASE_URL}/{NEO4J_AND_ELASTIC_QUICKSTART_COMPOSE_FILE}"
)
GITHUB_ELASTIC_QUICKSTART_COMPOSE_URL = (
f"{GITHUB_BASE_URL}/{ELASTIC_QUICKSTART_COMPOSE_FILE}"
)
GITHUB_M1_QUICKSTART_COMPOSE_URL = f"{GITHUB_BASE_URL}/{M1_QUICKSTART_COMPOSE_FILE}"
GITHUB_BOOTSTRAP_MCES_URL = f"{GITHUB_BASE_URL}/{BOOTSTRAP_MCES_FILE}"
@click.group()
def docker() -> None:
pass
def _print_issue_list_and_exit(
issues: List[str], header: str, footer: Optional[str] = None
) -> NoReturn:
click.secho(header, fg="bright_red")
for issue in issues:
click.echo(f"- {issue}")
if footer:
click.echo()
click.echo(footer)
sys.exit(1)
def docker_check_impl() -> None:
issues = check_local_docker_containers()
if not issues:
click.secho("✔ No issues detected", fg="green")
else:
_print_issue_list_and_exit(issues, "The following issues were detected:")
@docker.command()
@telemetry.with_telemetry
def check() -> None:
docker_check_impl()
def is_m1() -> bool:
try:
return (
platform.uname().machine == "arm64" and platform.uname().system == "Darwin"
)
except Exception:
return False
def should_use_neo4j_for_graph_service(graph_service_override: Optional[str]) -> bool:
if graph_service_override is not None:
if graph_service_override == "elasticsearch":
click.echo("Starting with elasticsearch due to graph-service-impl param\n")
return False
if graph_service_override == "neo4j":
click.echo("Starting with neo4j due to graph-service-impl param\n")
return True
else:
click.secho(
graph_service_override
+ " is not a valid graph service option. Choose either `neo4j` or "
"`elasticsearch`\n",
fg="red",
)
raise ValueError(f"invalid graph service option: {graph_service_override}")
with get_client_with_error() as (client, error):
if error:
click.secho(
"Docker doesn't seem to be running. Did you start it?", fg="red"
)
raise error
if len(client.volumes.list(filters={"name": "datahub_neo4jdata"})) > 0:
click.echo(
"Datahub Neo4j volume found, starting with neo4j as graph service.\n"
"If you want to run using elastic, run `datahub docker nuke` and re-ingest your data.\n"
)
return True
click.echo(
"No Datahub Neo4j volume found, starting with elasticsearch as graph service.\n"
"To use neo4j as a graph backend, run \n"
"`datahub docker quickstart --quickstart-compose-file ./docker/quickstart/docker-compose.quickstart.yml`"
"\nfrom the root of the datahub repo\n"
)
return False
@docker.command()
@click.option(
"--version",
type=str,
default="head",
help="Datahub version to be deployed. If not set, deploy latest",
)
@click.option(
"--build-locally",
type=bool,
is_flag=True,
default=False,
help="Attempt to build the containers locally before starting",
)
@click.option(
"--quickstart-compose-file",
type=click.Path(exists=True, dir_okay=False, readable=True),
default=[],
multiple=True,
help="Use a local docker-compose file instead of pulling from GitHub",
)
@click.option(
"--dump-logs-on-failure",
type=bool,
is_flag=True,
default=False,
help="If true, the docker-compose logs will be printed to console if something fails",
)
@click.option(
"--graph-service-impl",
type=str,
is_flag=False,
default=None,
help="If set, forces docker-compose to use that graph service implementation",
)
@telemetry.with_telemetry
def quickstart(
version: str,
build_locally: bool,
quickstart_compose_file: List[pathlib.Path],
dump_logs_on_failure: bool,
graph_service_impl: Optional[str],
) -> None:
running_on_m1 = is_m1()
if running_on_m1:
click.echo("Detected M1 machine")
# Run pre-flight checks.
issues = check_local_docker_containers(preflight_only=True)
if issues:
_print_issue_list_and_exit(issues, "Unable to run quickstart:")
quickstart_compose_file = list(
quickstart_compose_file
) # convert to list from tuple
if not quickstart_compose_file:
should_use_neo4j = should_use_neo4j_for_graph_service(graph_service_impl)
if should_use_neo4j and running_on_m1:
click.secho(
"Running with neo4j on M1 is not currently supported, will be using elasticsearch as graph",
fg="red",
)
github_file = (
GITHUB_NEO4J_AND_ELASTIC_QUICKSTART_COMPOSE_URL
if should_use_neo4j and not running_on_m1
else GITHUB_ELASTIC_QUICKSTART_COMPOSE_URL
if not running_on_m1
else GITHUB_M1_QUICKSTART_COMPOSE_URL
)
with tempfile.NamedTemporaryFile(suffix=".yml", delete=False) as tmp_file:
path = pathlib.Path(tmp_file.name)
quickstart_compose_file.append(path)
click.echo(f"Fetching docker-compose file {github_file} from GitHub")
# Download the quickstart docker-compose file from GitHub.
quickstart_download_response = requests.get(github_file)
quickstart_download_response.raise_for_status()
tmp_file.write(quickstart_download_response.content)
logger.debug(f"Copied to {path}")
# set version
os.environ["DATAHUB_VERSION"] = version
base_command: List[str] = [
"docker-compose",
*itertools.chain.from_iterable(
("-f", f"{path}") for path in quickstart_compose_file
),
"-p",
"datahub",
]
# Pull and possibly build the latest containers.
subprocess.run(
[
*base_command,
"pull",
],
check=True,
)
if build_locally:
subprocess.run(
[
*base_command,
"build",
"--pull",
],
check=True,
env={
**os.environ,
"DOCKER_BUILDKIT": "1",
},
)
# Start it up! (with retries)
max_wait_time = datetime.timedelta(minutes=6)
start_time = datetime.datetime.now()
sleep_interval = datetime.timedelta(seconds=2)
up_interval = datetime.timedelta(seconds=30)
up_attempts = 0
while (datetime.datetime.now() - start_time) < max_wait_time:
# Attempt to run docker-compose up every minute.
if (datetime.datetime.now() - start_time) > up_attempts * up_interval:
click.echo()
subprocess.run(base_command + ["up", "-d", "--remove-orphans"])
up_attempts += 1
# Check docker health every few seconds.
issues = check_local_docker_containers()
if not issues:
break
# Wait until next iteration.
click.echo(".", nl=False)
time.sleep(sleep_interval.total_seconds())
else:
# Falls through if the while loop doesn't exit via break.
click.echo()
with tempfile.NamedTemporaryFile(suffix=".log", delete=False) as log_file:
ret = subprocess.run(
base_command + ["logs"],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
check=True,
)
log_file.write(ret.stdout)
if dump_logs_on_failure:
with open(log_file.name, "r") as logs:
click.echo("Dumping docker-compose logs:")
click.echo(logs.read())
click.echo()
_print_issue_list_and_exit(
issues,
header="Unable to run quickstart - the following issues were detected:",
footer="If you think something went wrong, please file an issue at https://github.com/linkedin/datahub/issues\n"
"or send a message in our Slack https://slack.datahubproject.io/\n"
f"Be sure to attach the logs from {log_file.name}",
)
click.echo()
click.secho("✔ DataHub is now running", fg="green")
click.secho(
"Ingest some demo data using `datahub docker ingest-sample-data`,\n"
"or head to http://localhost:9002 (username: datahub, password: datahub) to play around with the frontend.",
fg="green",
)
click.secho(
"Need support? Get in touch on Slack: https://slack.datahubproject.io/",
fg="magenta",
)
@docker.command()
@click.option(
"--path",
type=click.Path(exists=True, dir_okay=False),
help=f"The MCE json file to ingest. Defaults to downloading {BOOTSTRAP_MCES_FILE} from GitHub",
)
@telemetry.with_telemetry
def ingest_sample_data(path: Optional[str]) -> None:
if path is None:
click.echo("Downloading sample data...")
with tempfile.NamedTemporaryFile(suffix=".json", delete=False) as tmp_file:
path = str(pathlib.Path(tmp_file.name))
mce_json_download_response = requests.get(GITHUB_BOOTSTRAP_MCES_URL)
mce_json_download_response.raise_for_status()
tmp_file.write(mce_json_download_response.content)
click.echo(f"Downloaded to {path}")
issues = check_local_docker_containers()
if issues:
_print_issue_list_and_exit(
issues,
header="Docker is not ready:",
footer="Try running `datahub docker quickstart` first",
)
click.echo("Starting ingestion...")
pipeline = Pipeline.create(
{
"source": {
"type": "file",
"config": {
"filename": path,
},
},
"sink": {
"type": "datahub-rest",
"config": {"server": "http://localhost:8080"},
},
}
)
pipeline.run()
ret = pipeline.pretty_print_summary()
sys.exit(ret)
@docker.command()
@telemetry.with_telemetry
def nuke() -> None:
with get_client_with_error() as (client, error):
if error:
click.secho(
"Docker doesn't seem to be running. Did you start it?", fg="red"
)
return
click.echo("Removing containers in the datahub project")
for container in client.containers.list(
all=True, filters={"label": "com.docker.compose.project=datahub"}
):
container.remove(v=True, force=True)
click.echo("Removing volumes in the datahub project")
for volume in client.volumes.list(
filters={"label": "com.docker.compose.project=datahub"}
):
volume.remove(force=True)
click.echo("Removing networks in the datahub project")
for network in client.networks.list(
filters={"label": "com.docker.compose.project=datahub"}
):
network.remove()
| true
| true
|
1c44c8b72eadfbead01d993a5e1364203d654373
| 2,784
|
py
|
Python
|
DisFormers/disformers.py
|
spacedev-official/disformers
|
31800466741be5ddcdfb531e021638f6ee112e23
|
[
"Apache-2.0"
] | null | null | null |
DisFormers/disformers.py
|
spacedev-official/disformers
|
31800466741be5ddcdfb531e021638f6ee112e23
|
[
"Apache-2.0"
] | 14
|
2021-11-01T08:23:06.000Z
|
2022-03-31T08:32:24.000Z
|
DisFormers/disformers.py
|
spacedev-official/disformers
|
31800466741be5ddcdfb531e021638f6ee112e23
|
[
"Apache-2.0"
] | null | null | null |
"""
Adapted from:
https://www.machinecurve.com/index.php/2021/03/16/easy-chatbot-with-dialogpt-machine-learning-and-huggingface-transformers/
"""
import asyncio
from typing import Union
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
from discord import (
Message,
Client
)
from discord.ext import commands
from aioify import aioify
class DisFormersBot:
def __init__(
self,
client: Union[commands.Bot,Client],
prefix: str,
languague: str = "en",
):
if languague == "en":
model_name = 'microsoft/DialoGPT-small'
if not os.path.exists('./models/dialogpt'):
AutoModelForCausalLM.from_pretrained(model_name).save_pretrained('./models/dialogpt')
AutoTokenizer.from_pretrained(model_name).save_pretrained('./models/dialogpt')
if languague == "ko":
model_name = 'byeongal/Ko-DialoGPT'
if not os.path.exists('./models/dialogpt'):
AutoModelForCausalLM.from_pretrained(model_name).save_pretrained('./models/dialogpt')
AutoTokenizer.from_pretrained(model_name).save_pretrained('./models/dialogpt')
super().__init__()
self.model = AutoModelForCausalLM.from_pretrained('./models/dialogpt')
self.tokenizer = AutoTokenizer.from_pretrained('./models/dialogpt')
self.bot = client
self.prefix = prefix
if type(self.bot) == commands.Bot:
            self.bot.add_listener(self.__handle_messages, "on_message")
async def chat(self, inputs: str) -> str:
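        # Append the EOS token so DialoGPT treats the prompt as a completed turn,
        # then decode only the tokens generated after the prompt.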
inputs_tokenized = self.tokenizer.encode(inputs+ self.tokenizer.eos_token, return_tensors='pt')
reply_ids = self.model.generate(inputs_tokenized, max_length=1250, pad_token_id=self.tokenizer.eos_token_id)
return self.tokenizer.decode(
reply_ids[:, inputs_tokenized.shape[-1] :][0], skip_special_tokens=True
)
    async def __handle_messages(self, message: Message):
if message.author.bot:
return
if message.content.startswith(self.prefix):
async with message.channel.typing():
user_input = message.content[len(self.prefix):]
chat_ = aioify(obj=self.chat)
res = await chat_(inputs=user_input)
return await message.reply(content=res)
async def client_message(self,message:Message):
if message.author.bot:
return
if message.content.startswith(self.prefix):
async with message.channel.typing():
user_input = message.content[len(self.prefix):]
chat_ = aioify(obj=self.chat)
res = await chat_(user_input)
return await message.reply(content=res)
| 40.941176
| 123
| 0.650503
|
import asyncio
from typing import Union
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
from discord import (
Message,
Client
)
from discord.ext import commands
from aioify import aioify
class DisFormersBot:
def __init__(
self,
client: Union[commands.Bot,Client],
prefix: str,
languague: str = "en",
):
if languague == "en":
model_name = 'microsoft/DialoGPT-small'
if not os.path.exists('./models/dialogpt'):
AutoModelForCausalLM.from_pretrained(model_name).save_pretrained('./models/dialogpt')
AutoTokenizer.from_pretrained(model_name).save_pretrained('./models/dialogpt')
if languague == "ko":
model_name = 'byeongal/Ko-DialoGPT'
if not os.path.exists('./models/dialogpt'):
AutoModelForCausalLM.from_pretrained(model_name).save_pretrained('./models/dialogpt')
AutoTokenizer.from_pretrained(model_name).save_pretrained('./models/dialogpt')
super().__init__()
self.model = AutoModelForCausalLM.from_pretrained('./models/dialogpt')
self.tokenizer = AutoTokenizer.from_pretrained('./models/dialogpt')
self.bot = client
self.prefix = prefix
if type(self.bot) == commands.Bot:
            self.bot.add_listener(self.__handle_messages, "on_message")
async def chat(self, inputs: str) -> str:
inputs_tokenized = self.tokenizer.encode(inputs+ self.tokenizer.eos_token, return_tensors='pt')
reply_ids = self.model.generate(inputs_tokenized, max_length=1250, pad_token_id=self.tokenizer.eos_token_id)
return self.tokenizer.decode(
reply_ids[:, inputs_tokenized.shape[-1] :][0], skip_special_tokens=True
)
    async def __handle_messages(self, message: Message):
if message.author.bot:
return
if message.content.startswith(self.prefix):
async with message.channel.typing():
user_input = message.content[len(self.prefix):]
chat_ = aioify(obj=self.chat)
res = await chat_(inputs=user_input)
return await message.reply(content=res)
async def client_message(self,message:Message):
if message.author.bot:
return
if message.content.startswith(self.prefix):
async with message.channel.typing():
user_input = message.content[len(self.prefix):]
chat_ = aioify(obj=self.chat)
res = await chat_(user_input)
return await message.reply(content=res)
| true
| true
|
1c44c9bafb9eb7bf07b6cf5ae9faa8f6dfb48e92
| 64
|
py
|
Python
|
src/http_server/__meta__.py
|
explodingnuggets/HTTP-Server
|
adb829f14ea22f3791e90e093d0b2a1a13e80738
|
[
"MIT"
] | null | null | null |
src/http_server/__meta__.py
|
explodingnuggets/HTTP-Server
|
adb829f14ea22f3791e90e093d0b2a1a13e80738
|
[
"MIT"
] | null | null | null |
src/http_server/__meta__.py
|
explodingnuggets/HTTP-Server
|
adb829f14ea22f3791e90e093d0b2a1a13e80738
|
[
"MIT"
] | null | null | null |
__author__ = 'Matheus Bortoleto da Silva'
__version__ = '0.0.1'
| 21.333333
| 41
| 0.734375
|
__author__ = 'Matheus Bortoleto da Silva'
__version__ = '0.0.1'
| true
| true
|
1c44ca6ab4d39dcd1e39ea389b325bfce7fa4529
| 1,896
|
py
|
Python
|
A1014280203/4/4.py
|
saurabh896/python-1
|
f8d3aedf4c0fe6e24dfa3269ea7e642c9f7dd9b7
|
[
"MIT"
] | 3,976
|
2015-01-01T15:49:39.000Z
|
2022-03-31T03:47:56.000Z
|
A1014280203/4/4.py
|
dwh65416396/python
|
1a7e3edd1cd3422cc0eaa55471a0b42e004a9a1a
|
[
"MIT"
] | 97
|
2015-01-11T02:59:46.000Z
|
2022-03-16T14:01:56.000Z
|
A1014280203/4/4.py
|
dwh65416396/python
|
1a7e3edd1cd3422cc0eaa55471a0b42e004a9a1a
|
[
"MIT"
] | 3,533
|
2015-01-01T06:19:30.000Z
|
2022-03-28T13:14:54.000Z
|
import string
# simply extend word like: it's => it is
def extend_word(text):
if text.find('\'') > 0:
old2new = dict()
words = text.split()
for word in words:
if word.find('\'') > 0:
parts = word.split('\'')
if parts[1] == 'm':
parts[1] = 'am'
elif parts[1] == 's':
parts[1] = 'is'
elif parts[1] == 're':
parts[1] = 'are'
elif parts[1] == 't':
parts[1] = 'not'
elif parts[1] == 've':
parts[1] = 'have'
elif parts[1] == 'll':
parts[1] = 'will'
elif parts[1] == 'd':
if words[words.index(word) + 1] == 'better':
parts[1] = 'had'
else:
parts[1] = 'would'
if parts[0].endswith('n'):
parts[0] = parts[0][:-1]
old2new[word] = ' '.join(parts)
_text = text
for old_word in old2new.keys():
_text = _text.replace(old_word, old2new[old_word])
        return _text
    # No apostrophes to expand: return the text unchanged instead of
    # implicitly returning None (which would crash the caller's .lower()).
    return text
def return_order_key(record):
return record[1]
def show_in_order(records):
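    # Print the words sorted by occurrence count, most frequent first.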
items = sorted(records.items(), key=return_order_key, reverse=True)
for item in items:
print(item[0], item[1])
with open('subtitle.txt', 'r') as file:
article = file.read()
no_pun_text = article
_punctuation = string.punctuation.replace('\'', '')
for pun in _punctuation:
no_pun_text = no_pun_text.replace(pun, '')
complete_text = extend_word(no_pun_text)
records = dict()
for word in complete_text.lower().split():
records[word] = records.get(word, 0) + 1
show_in_order(records)
| 32.689655
| 72
| 0.456224
|
import string
def extend_word(text):
if text.find('\'') > 0:
old2new = dict()
words = text.split()
for word in words:
if word.find('\'') > 0:
parts = word.split('\'')
if parts[1] == 'm':
parts[1] = 'am'
elif parts[1] == 's':
parts[1] = 'is'
elif parts[1] == 're':
parts[1] = 'are'
elif parts[1] == 't':
parts[1] = 'not'
elif parts[1] == 've':
parts[1] = 'have'
elif parts[1] == 'll':
parts[1] = 'will'
elif parts[1] == 'd':
if words[words.index(word) + 1] == 'better':
parts[1] = 'had'
else:
parts[1] = 'would'
if parts[0].endswith('n'):
parts[0] = parts[0][:-1]
old2new[word] = ' '.join(parts)
_text = text
for old_word in old2new.keys():
_text = _text.replace(old_word, old2new[old_word])
        return _text
    return text
def return_order_key(record):
return record[1]
def show_in_order(records):
items = sorted(records.items(), key=return_order_key, reverse=True)
for item in items:
print(item[0], item[1])
with open('subtitle.txt', 'r') as file:
article = file.read()
no_pun_text = article
_punctuation = string.punctuation.replace('\'', '')
for pun in _punctuation:
no_pun_text = no_pun_text.replace(pun, '')
complete_text = extend_word(no_pun_text)
records = dict()
for word in complete_text.lower().split():
records[word] = records.get(word, 0) + 1
show_in_order(records)
| true
| true
|
1c44cb2358ef226020e523a70bf6b334447775e4
| 2,162
|
py
|
Python
|
main.py
|
AntoniosBarotsis/midi2img
|
848f54c0f3a5175ee636c693b04b6363d00ee9c8
|
[
"MIT"
] | null | null | null |
main.py
|
AntoniosBarotsis/midi2img
|
848f54c0f3a5175ee636c693b04b6363d00ee9c8
|
[
"MIT"
] | null | null | null |
main.py
|
AntoniosBarotsis/midi2img
|
848f54c0f3a5175ee636c693b04b6363d00ee9c8
|
[
"MIT"
] | null | null | null |
import sys
import os
import time
from midi2img import main_midi
from img2midi import main_img
from contextlib import contextmanager,redirect_stderr,redirect_stdout
from os import devnull
from progress.bar import Bar
# Clear log file
open('out.log', 'w').close()
# Suppress warning messages
@contextmanager
def suppress_stdout_stderr():
"""A context manager that redirects stdout and stderr to devnull"""
with open(devnull, 'w') as fnull:
with redirect_stderr(fnull) as err, redirect_stdout(fnull) as out:
yield (err, out)
# Returns true if song made it to images
def helper(i, images):
for j in images:
if i.replace(".mid", "") in j.replace(".png", ""):
return True
return False
files = os.listdir("midiFiles")
images = os.listdir("imgOut")
midiOut = os.listdir("midiOut")
midiFinal = os.listdir("midiFinal")
# Cleans up the image directory
if len(images) > 0:
with Bar('Cleaning directories', max=len(images)+len(midiOut)+len(midiFinal)) as bar:
for f in images:
os.remove(f"imgOut/{f}")
bar.next()
for f in midiOut:
os.remove(f"midiOut/{f}")
bar.next()
for f in midiFinal:
os.remove(f"midiFinal/{f}")
bar.next()
print("\033[032m✓\033[0m Done\n")
# Convert midis to images
bar = Bar('Converting midi files to images', max=len(files))
for i in range(len(files)):
with suppress_stdout_stderr():
main_midi(f"midiFiles/{files[i]}", 100)
bar.next()
bar.finish()
print("\033[032m✓\033[0m Done\n")
# Convert images to midis
images = os.listdir("imgOut")
bar = Bar('Converting filtered images to midi files', max=len(images))
for i in range(len(images)):
main_img(f"imgOut/{images[i]}", "midiFinal")
bar.next()
bar.finish()
print("\033[032m✓\033[0m Done\n\nRemoving all redundant files...")
# Removes midis that did not make it to images to save space
for i in files:
if not helper(i, images):
print(f" ∘ Removing {i}...")
os.remove(f"midiFiles/{i}")
print("\033[032m✓\033[0m Done")
if os.stat("out.log").st_size == 0:
print("out.log updated!")
| 27.025
| 89
| 0.651711
|
import sys
import os
import time
from midi2img import main_midi
from img2midi import main_img
from contextlib import contextmanager,redirect_stderr,redirect_stdout
from os import devnull
from progress.bar import Bar
open('out.log', 'w').close()
@contextmanager
def suppress_stdout_stderr():
with open(devnull, 'w') as fnull:
with redirect_stderr(fnull) as err, redirect_stdout(fnull) as out:
yield (err, out)
def helper(i, images):
for j in images:
if i.replace(".mid", "") in j.replace(".png", ""):
return True
return False
files = os.listdir("midiFiles")
images = os.listdir("imgOut")
midiOut = os.listdir("midiOut")
midiFinal = os.listdir("midiFinal")
if len(images) > 0:
with Bar('Cleaning directories', max=len(images)+len(midiOut)+len(midiFinal)) as bar:
for f in images:
os.remove(f"imgOut/{f}")
bar.next()
for f in midiOut:
os.remove(f"midiOut/{f}")
bar.next()
for f in midiFinal:
os.remove(f"midiFinal/{f}")
bar.next()
print("\033[032m✓\033[0m Done\n")
bar = Bar('Converting midi files to images', max=len(files))
for i in range(len(files)):
with suppress_stdout_stderr():
main_midi(f"midiFiles/{files[i]}", 100)
bar.next()
bar.finish()
print("\033[032m✓\033[0m Done\n")
images = os.listdir("imgOut")
bar = Bar('Converting filtered images to midi files', max=len(images))
for i in range(len(images)):
main_img(f"imgOut/{images[i]}", "midiFinal")
bar.next()
bar.finish()
print("\033[032m✓\033[0m Done\n\nRemoving all redundant files...")
for i in files:
if not helper(i, images):
print(f" ∘ Removing {i}...")
os.remove(f"midiFiles/{i}")
print("\033[032m✓\033[0m Done")
if os.stat("out.log").st_size == 0:
print("out.log updated!")
| true
| true
|
1c44ccfcfcc208fed41386c0340da79ce7f18c39
| 383
|
py
|
Python
|
c3i/c3i/wsgi.py
|
addinall/python-C3I
|
be72f026fb7c6b5084404876cd1296d3c3cb9b85
|
[
"Unlicense"
] | null | null | null |
c3i/c3i/wsgi.py
|
addinall/python-C3I
|
be72f026fb7c6b5084404876cd1296d3c3cb9b85
|
[
"Unlicense"
] | null | null | null |
c3i/c3i/wsgi.py
|
addinall/python-C3I
|
be72f026fb7c6b5084404876cd1296d3c3cb9b85
|
[
"Unlicense"
] | null | null | null |
"""
WSGI config for c3i project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "c3i.settings")
application = get_wsgi_application()
| 22.529412
| 78
| 0.780679
|
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "c3i.settings")
application = get_wsgi_application()
| true
| true
|
1c44ce797a5c460258f52bac245631d4c3a93ac6
| 258
|
py
|
Python
|
tensorflow_tts/configs/__init__.py
|
ashishpatel26/TensorflowTTS
|
bd29c3eefa51041b76fd355d94025b4c13084296
|
[
"Apache-2.0"
] | 2
|
2020-06-01T07:39:25.000Z
|
2021-11-08T09:31:33.000Z
|
tensorflow_tts/configs/__init__.py
|
ashishpatel26/TensorflowTTS
|
bd29c3eefa51041b76fd355d94025b4c13084296
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_tts/configs/__init__.py
|
ashishpatel26/TensorflowTTS
|
bd29c3eefa51041b76fd355d94025b4c13084296
|
[
"Apache-2.0"
] | 1
|
2020-10-05T06:06:20.000Z
|
2020-10-05T06:06:20.000Z
|
from tensorflow_tts.configs.fastspeech import FastSpeechConfig
from tensorflow_tts.configs.melgan import MelGANGeneratorConfig
from tensorflow_tts.configs.melgan import MelGANDiscriminatorConfig
from tensorflow_tts.configs.tacotron2 import Tacotron2Config
| 36.857143
| 67
| 0.899225
|
from tensorflow_tts.configs.fastspeech import FastSpeechConfig
from tensorflow_tts.configs.melgan import MelGANGeneratorConfig
from tensorflow_tts.configs.melgan import MelGANDiscriminatorConfig
from tensorflow_tts.configs.tacotron2 import Tacotron2Config
| true
| true
|
1c44cf402bf3cc203667af282d9dee25fdc98243
| 313
|
py
|
Python
|
pi_utils.py
|
georgeblck/creepydoll
|
7b0a5d811cfbf5bc65c91b25af56702f6077c10c
|
[
"MIT"
] | null | null | null |
pi_utils.py
|
georgeblck/creepydoll
|
7b0a5d811cfbf5bc65c91b25af56702f6077c10c
|
[
"MIT"
] | null | null | null |
pi_utils.py
|
georgeblck/creepydoll
|
7b0a5d811cfbf5bc65c91b25af56702f6077c10c
|
[
"MIT"
] | null | null | null |
from picamera import PiCamera
from datetime import datetime
import os
# One shared module-level camera handle; PiCamera supports only a single open instance.
camera = PiCamera()
def record_video(destination):
    # Timestamped filename keeps successive recordings from overwriting each other.
    filename = os.path.join(
        destination, datetime.now().strftime('%Y-%m-%d_%H.%M.%S.h264'))
    camera.start_preview()
    camera.start_recording(filename)
def finish_video():
    camera.stop_recording()
    camera.stop_preview()
| 22.357143
| 71
| 0.702875
|
from picamera import PiCamera
from datetime import datetime
import os
camera = PiCamera()
def record_video(destination):
    filename = os.path.join(
        destination, datetime.now().strftime('%Y-%m-%d_%H.%M.%S.h264'))
    camera.start_preview()
    camera.start_recording(filename)
def finish_video():
    camera.stop_recording()
    camera.stop_preview()
| true
| true
|
1c44cf439ceed23a243d6c564ce0b570a3316160
| 1,166
|
py
|
Python
|
english/data_processing/lessons/code/sphere.py
|
hrutkabence/tutorials
|
bd76294860804aee8ecda5e1445464506bf02ee0
|
[
"CC0-1.0"
] | null | null | null |
english/data_processing/lessons/code/sphere.py
|
hrutkabence/tutorials
|
bd76294860804aee8ecda5e1445464506bf02ee0
|
[
"CC0-1.0"
] | null | null | null |
english/data_processing/lessons/code/sphere.py
|
hrutkabence/tutorials
|
bd76294860804aee8ecda5e1445464506bf02ee0
|
[
"CC0-1.0"
] | null | null | null |
import numpy as np
from math import sqrt
from sys import argv
def sphere(x_, y_, z_):
"""
calculate best fitting sphere (LSM) on points
    :returns: x0, y0, z0, R
"""
n_ = x_.shape[0]
a = np.c_[x_, y_, z_, np.full(n_, 1, 'float64')]
b = -np.square(x_) - np.square(y_) - np.square(z_)
res = np.linalg.lstsq(a, b, rcond=None)[0]
return -0.5 * res[0], -0.5 * res[1], -0.5 * res[2], \
sqrt((res[0]**2 + res[1]**2 + res[2]**2) / 4 - res[3])
if __name__ == "__main__":
if len(argv) > 1:
file_names = argv[1:]
else:
file_names = ['sphere1.txt']
for file_name in file_names:
pnts = np.genfromtxt(file_name, 'float64', delimiter=',')
if pnts.shape[1] > 3:
pnts = pnts[:,1:4] # skip first column (point id)
sph = sphere(pnts[:,0], pnts[:,1], pnts[:,2])
print("x0: {:.3f} y0: {:.3f} z0: {:.3f} R: {:.3f}".format(sph[0], sph[1], sph[2], sph[3]))
dr = np.sqrt(np.sum(np.square(pnts - sph[:3]), 1)) - sph[3] # difference in radius direction
RMS = sqrt(np.sum(np.square(dr)) / pnts.shape[0])
print("RMS: {:.3f}".format(RMS))
| 37.612903
| 100
| 0.531732
|
import numpy as np
from math import sqrt
from sys import argv
def sphere(x_, y_, z_):
n_ = x_.shape[0]
a = np.c_[x_, y_, z_, np.full(n_, 1, 'float64')]
b = -np.square(x_) - np.square(y_) - np.square(z_)
res = np.linalg.lstsq(a, b, rcond=None)[0]
return -0.5 * res[0], -0.5 * res[1], -0.5 * res[2], \
sqrt((res[0]**2 + res[1]**2 + res[2]**2) / 4 - res[3])
if __name__ == "__main__":
if len(argv) > 1:
file_names = argv[1:]
else:
file_names = ['sphere1.txt']
for file_name in file_names:
pnts = np.genfromtxt(file_name, 'float64', delimiter=',')
if pnts.shape[1] > 3:
pnts = pnts[:,1:4]
sph = sphere(pnts[:,0], pnts[:,1], pnts[:,2])
print("x0: {:.3f} y0: {:.3f} z0: {:.3f} R: {:.3f}".format(sph[0], sph[1], sph[2], sph[3]))
dr = np.sqrt(np.sum(np.square(pnts - sph[:3]), 1)) - sph[3]
RMS = sqrt(np.sum(np.square(dr)) / pnts.shape[0])
print("RMS: {:.3f}".format(RMS))
| true
| true
|
1c44d06afab9a86e82ffdf346b95617bf5c67311
| 4,277
|
py
|
Python
|
test/functional/rpc_invalidateblock.py
|
HashUnlimited/chaincoin
|
9a035680d6d9b9a0524dc7524c55cfedd1a683ca
|
[
"MIT"
] | null | null | null |
test/functional/rpc_invalidateblock.py
|
HashUnlimited/chaincoin
|
9a035680d6d9b9a0524dc7524c55cfedd1a683ca
|
[
"MIT"
] | null | null | null |
test/functional/rpc_invalidateblock.py
|
HashUnlimited/chaincoin
|
9a035680d6d9b9a0524dc7524c55cfedd1a683ca
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the invalidateblock RPC."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.address import ADDRESS_BCRT1_UNSPENDABLE_DESCRIPTOR
from test_framework.util import (
assert_equal,
connect_nodes,
wait_until,
)
class InvalidateTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 3
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_network(self):
self.setup_nodes()
def run_test(self):
self.log.info("Make sure we repopulate setBlockIndexCandidates after InvalidateBlock:")
self.log.info("Mine 4 blocks on Node 0")
self.nodes[0].generatetoaddress(4, self.nodes[0].get_deterministic_priv_key().address)
assert_equal(self.nodes[0].getblockcount(), 4)
besthash_n0 = self.nodes[0].getbestblockhash()
self.log.info("Mine competing 6 blocks on Node 1")
self.nodes[1].generatetoaddress(6, self.nodes[1].get_deterministic_priv_key().address)
assert_equal(self.nodes[1].getblockcount(), 6)
self.log.info("Connect nodes to force a reorg")
connect_nodes(self.nodes[0], 1)
self.sync_blocks(self.nodes[0:2])
assert_equal(self.nodes[0].getblockcount(), 6)
badhash = self.nodes[1].getblockhash(2)
self.log.info("Invalidate block 2 on node 0 and verify we reorg to node 0's original chain")
self.nodes[0].invalidateblock(badhash)
assert_equal(self.nodes[0].getblockcount(), 4)
assert_equal(self.nodes[0].getbestblockhash(), besthash_n0)
self.log.info("Make sure we won't reorg to a lower work chain:")
connect_nodes(self.nodes[1], 2)
self.log.info("Sync node 2 to node 1 so both have 6 blocks")
self.sync_blocks(self.nodes[1:3])
assert_equal(self.nodes[2].getblockcount(), 6)
self.log.info("Invalidate block 5 on node 1 so its tip is now at 4")
self.nodes[1].invalidateblock(self.nodes[1].getblockhash(5))
assert_equal(self.nodes[1].getblockcount(), 4)
self.log.info("Invalidate block 3 on node 2, so its tip is now 2")
self.nodes[2].invalidateblock(self.nodes[2].getblockhash(3))
assert_equal(self.nodes[2].getblockcount(), 2)
self.log.info("..and then mine a block")
self.nodes[2].generate(1)
self.log.info("Verify all nodes are at the right height")
wait_until(lambda: self.nodes[2].getblockcount() == 3, timeout=5)
wait_until(lambda: self.nodes[0].getblockcount() == 4, timeout=5)
wait_until(lambda: self.nodes[1].getblockcount() == 4, timeout=5)
self.log.info("Verify that we reconsider all ancestors as well")
blocks = self.nodes[1].generatetodescriptor(10, ADDRESS_BCRT1_UNSPENDABLE_DESCRIPTOR)
assert_equal(self.nodes[1].getbestblockhash(), blocks[-1])
# Invalidate the two blocks at the tip
self.nodes[1].invalidateblock(blocks[-1])
self.nodes[1].invalidateblock(blocks[-2])
assert_equal(self.nodes[1].getbestblockhash(), blocks[-3])
# Reconsider only the previous tip
self.nodes[1].reconsiderblock(blocks[-1])
# Should be back at the tip by now
assert_equal(self.nodes[1].getbestblockhash(), blocks[-1])
self.log.info("Verify that we reconsider all descendants")
blocks = self.nodes[1].generatetodescriptor(10, ADDRESS_BCRT1_UNSPENDABLE_DESCRIPTOR)
assert_equal(self.nodes[1].getbestblockhash(), blocks[-1])
# Invalidate the two blocks at the tip
self.nodes[1].invalidateblock(blocks[-2])
self.nodes[1].invalidateblock(blocks[-4])
assert_equal(self.nodes[1].getbestblockhash(), blocks[-5])
# Reconsider only the previous tip
self.nodes[1].reconsiderblock(blocks[-4])
# Should be back at the tip by now
assert_equal(self.nodes[1].getbestblockhash(), blocks[-1])
if __name__ == '__main__':
InvalidateTest().main()
| 44.552083
| 100
| 0.680617
|
from test_framework.test_framework import BitcoinTestFramework
from test_framework.address import ADDRESS_BCRT1_UNSPENDABLE_DESCRIPTOR
from test_framework.util import (
assert_equal,
connect_nodes,
wait_until,
)
class InvalidateTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 3
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_network(self):
self.setup_nodes()
def run_test(self):
self.log.info("Make sure we repopulate setBlockIndexCandidates after InvalidateBlock:")
self.log.info("Mine 4 blocks on Node 0")
self.nodes[0].generatetoaddress(4, self.nodes[0].get_deterministic_priv_key().address)
assert_equal(self.nodes[0].getblockcount(), 4)
besthash_n0 = self.nodes[0].getbestblockhash()
self.log.info("Mine competing 6 blocks on Node 1")
self.nodes[1].generatetoaddress(6, self.nodes[1].get_deterministic_priv_key().address)
assert_equal(self.nodes[1].getblockcount(), 6)
self.log.info("Connect nodes to force a reorg")
connect_nodes(self.nodes[0], 1)
self.sync_blocks(self.nodes[0:2])
assert_equal(self.nodes[0].getblockcount(), 6)
badhash = self.nodes[1].getblockhash(2)
self.log.info("Invalidate block 2 on node 0 and verify we reorg to node 0's original chain")
self.nodes[0].invalidateblock(badhash)
assert_equal(self.nodes[0].getblockcount(), 4)
assert_equal(self.nodes[0].getbestblockhash(), besthash_n0)
self.log.info("Make sure we won't reorg to a lower work chain:")
connect_nodes(self.nodes[1], 2)
self.log.info("Sync node 2 to node 1 so both have 6 blocks")
self.sync_blocks(self.nodes[1:3])
assert_equal(self.nodes[2].getblockcount(), 6)
self.log.info("Invalidate block 5 on node 1 so its tip is now at 4")
self.nodes[1].invalidateblock(self.nodes[1].getblockhash(5))
assert_equal(self.nodes[1].getblockcount(), 4)
self.log.info("Invalidate block 3 on node 2, so its tip is now 2")
self.nodes[2].invalidateblock(self.nodes[2].getblockhash(3))
assert_equal(self.nodes[2].getblockcount(), 2)
self.log.info("..and then mine a block")
self.nodes[2].generate(1)
self.log.info("Verify all nodes are at the right height")
wait_until(lambda: self.nodes[2].getblockcount() == 3, timeout=5)
wait_until(lambda: self.nodes[0].getblockcount() == 4, timeout=5)
wait_until(lambda: self.nodes[1].getblockcount() == 4, timeout=5)
self.log.info("Verify that we reconsider all ancestors as well")
blocks = self.nodes[1].generatetodescriptor(10, ADDRESS_BCRT1_UNSPENDABLE_DESCRIPTOR)
assert_equal(self.nodes[1].getbestblockhash(), blocks[-1])
self.nodes[1].invalidateblock(blocks[-1])
self.nodes[1].invalidateblock(blocks[-2])
assert_equal(self.nodes[1].getbestblockhash(), blocks[-3])
self.nodes[1].reconsiderblock(blocks[-1])
assert_equal(self.nodes[1].getbestblockhash(), blocks[-1])
self.log.info("Verify that we reconsider all descendants")
blocks = self.nodes[1].generatetodescriptor(10, ADDRESS_BCRT1_UNSPENDABLE_DESCRIPTOR)
assert_equal(self.nodes[1].getbestblockhash(), blocks[-1])
self.nodes[1].invalidateblock(blocks[-2])
self.nodes[1].invalidateblock(blocks[-4])
assert_equal(self.nodes[1].getbestblockhash(), blocks[-5])
self.nodes[1].reconsiderblock(blocks[-4])
assert_equal(self.nodes[1].getbestblockhash(), blocks[-1])
if __name__ == '__main__':
InvalidateTest().main()
| true
| true
|
1c44d1c65c48745bd98f1e4b6e8a7460fd9bcbc8
| 10,468
|
py
|
Python
|
neural_network.py
|
marcvergees/rieffel_method
|
5377284c10010691238f10d5d6f77935c44d8f3d
|
[
"BSD-3-Clause"
] | null | null | null |
neural_network.py
|
marcvergees/rieffel_method
|
5377284c10010691238f10d5d6f77935c44d8f3d
|
[
"BSD-3-Clause"
] | null | null | null |
neural_network.py
|
marcvergees/rieffel_method
|
5377284c10010691238f10d5d6f77935c44d8f3d
|
[
"BSD-3-Clause"
] | null | null | null |
# Big Data, Neural Networks and Marketing: the key to success?
# Research project (Treball de recerca, TR)
# Marc Vergés Santiago - Escola Pia Mataró
#
#
#
# Copyright (c) 2021, Marc Vergés Santiago
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the <organization> nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY MARC VERGÉS ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <copyright holder> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder
from keras.utils import np_utils
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.callbacks import EarlyStopping
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
import matplotlib.pyplot as plt
import instaloader
from contrasenyes import usuari, contrasenya
def profile_preferences_to_NN(user):
L = instaloader.Instaloader()
L.login(usuari, contrasenya)
list_to_append_csv = []
none = 0
creators_celebrities = 0
personal_gods = 0
local_events = 0
professional_services = 0
restaurants = 0
non_profits = 0
general_interest = 0
publishers = 0
transportation_and_accomodation = 0
business_and_utility = 0
home_services = 0
auto_dealers = 0
food_and_personal_goods = 0
government_agencies = 0
content_apps = 0
grocery = 0
entities = 0
lifestyle_services = 0
geography = 0
profile = instaloader.Profile.from_username(L.context, user)
preferences = []
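    # Tally how many followed accounts fall into each Instagram business
    # category; these counts are the features later fed to the network.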
for followee in profile.get_followees():
preferences.append(followee.business_category_name)
print(followee.username + " - " + str(followee.business_category_name))
if followee.business_category_name == "None":
none += 1
if followee.business_category_name == "Creators & Celebrities":
creators_celebrities += 1
if followee.business_category_name == "Personal Goods & General Merchandise Stores":
personal_gods += 1
if followee.business_category_name == "Local Events":
local_events += 1
if followee.business_category_name == "Professional Services":
professional_services += 1
if followee.business_category_name == "Restaurants":
restaurants += 1
if followee.business_category_name == "Non-Profits & Religious Organizations":
non_profits += 1
if followee.business_category_name == "General Interest":
general_interest += 1
if followee.business_category_name == "Publishers":
publishers += 1
if followee.business_category_name == "Transportation & Accomodation Services":
transportation_and_accomodation += 1
if followee.business_category_name == "Business & Utility Services":
business_and_utility += 1
if followee.business_category_name == "Home Services":
home_services += 1
if followee.business_category_name == "Auto Dealers":
auto_dealers += 1
if followee.business_category_name == "Food & Personal Goods":
food_and_personal_goods += 1
if followee.business_category_name == "Government Agencies":
government_agencies += 1
if followee.business_category_name == "Content & Apps":
content_apps += 1
if followee.business_category_name == "Grocery & Convenience Stores":
grocery += 1
if followee.business_category_name == "Entities":
entities += 1
if followee.business_category_name == "Lifestyle Services":
lifestyle_services += 1
if followee.business_category_name == "Geography":
geography += 1
print(preferences)
print("None: " + str(none))
print("Creators & Celebrities: " + str(creators_celebrities))
print("Personal Goods & General Merchandise Stores: " + str(personal_gods))
print("Local Events: " + str(local_events))
print("Professional Services: " + str(professional_services))
print("Restaurants: " + str(restaurants))
print("Non-Profits & Religious Organizations: " + str(non_profits))
print("General Interest: " + str(general_interest))
print("Publishers: " + str(publishers))
print("Transportation & Accomodation Services: " + str(transportation_and_accomodation))
print("Business & Utility Services: " + str(business_and_utility))
print("Home Services: " + str(home_services))
print("Auto Dealers: " + str(auto_dealers))
print("Food & Personal Goods: " + str(food_and_personal_goods))
print("Government Agencies: " + str(government_agencies))
print("Content & Apps: " + str(content_apps))
print("Grocery & Convenience Stores: " + str(grocery))
print("Entities: " + str(entities))
print("Lifestyle Services: " + str(lifestyle_services))
print("Geography: " + str(geography))
followers = 0
following = 0
for follower in profile.get_followers():
followers += 1
for follower in profile.get_followees():
following += 1
return preferences
def neural_network(list):
# url = 'https://gist.githubusercontent.com/curran/a08a1080b88344b0c8a7/raw/639388c2cbc2120a14dcf466e85730eb8be498bb/iris.csv'
df = pd.read_csv("data_set3.csv")
df = df.sample(frac=1).reset_index(drop=True)
Y = df['Tematica']
print(Y) # output
X = df.drop(['Tematica'], axis=1)
print(X) # input o dataset
print(X.shape)
print(Y.shape)
X = np.array(X)
Y.head()
encoder = LabelEncoder()
encoder.fit(Y)
encoded_Y = encoder.transform(Y)
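    # One-hot encode the integer labels across the 10 output classes to match
    # the softmax output layer.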
dummy_y = np_utils.to_categorical(encoded_Y, 10)
print(encoded_Y)
print(dummy_y)
model = Sequential()
model.add(Dense(16, input_shape=(X.shape[1],), activation='relu')) # input shape is (features,)
model.add(Dense(16, input_shape=(X.shape[1],), activation='relu')) # input shape is (features,)
model.add(Dense(10, activation='softmax'))
model.summary()
# compile the model
model.compile(optimizer='rmsprop',
loss='categorical_crossentropy',
# this is different instead of binary_crossentropy (for regular classification)
metrics=['accuracy'])
es = keras.callbacks.EarlyStopping(monitor='val_loss',
mode='min',
patience=10,
restore_best_weights=True) # important - otherwise you just return the last weigths...
'''
# now we just update our model fit call
history = model.fit(X,
dummy_y,
callbacks=[es],
epochs=200, # you can set this to a big number!
batch_size=1,
shuffle=True,
validation_split=0.2,
verbose=1)
es = keras.callbacks.EarlyStopping(monitor='val_loss',
mode='min',
patience=10,
restore_best_weights=True) # important - otherwise you just return the last weigths...
'''
# now we just update our model fit call
history = model.fit(X,
dummy_y,
callbacks=[es],
epochs=50, # you can set this to a big number!
batch_size=2,
shuffle=True,
validation_split=0.2,
verbose=1)
history_dict = history.history
# learning curve
# accuracy
acc = history_dict['accuracy']
val_acc = history_dict['val_accuracy']
# loss
loss = history_dict['loss']
val_loss = history_dict['val_loss']
# range of X (no. of epochs)
epochs = range(1, len(acc) + 1)
# plot
# "r" is for "solid red line"
plt.plot(epochs, acc, 'r', label='Training accuracy')
# b is for "solid blue line"
plt.plot(epochs, val_acc, 'b', label='Validation accuracy')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.show()
preds = model.predict(X) # see how the model did!
print(preds[0]) # i'm spreading that prediction across three nodes and they sum to 1
print(np.sum(preds[0])) # sum it up! Should be 1
## [9.9999988e-01 1.3509347e-07 6.7064638e-16]
## 1.0
# Almost a perfect prediction
# actual is left, predicted is top
# names can be found by inspecting Y
matrix = confusion_matrix(dummy_y.argmax(axis=1), preds.argmax(axis=1))
matrix
## array([[50, 0, 0],
## [ 0, 46, 4],
## [ 0, 1, 49]])
# more detail on how well things were predicted
print(classification_report(dummy_y.argmax(axis=1), preds.argmax(axis=1)))
model.predict(list, batch_size=1, verbose=1)
| 40.731518
| 131
| 0.62992
|
# Research project (Treball de recerca, TR)
# Marc Vergés Santiago - Escola Pia Mataró
#
#
#
# Copyright (c) 2021, Marc Vergés Santiago
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the <organization> nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY MARC VERGÉS ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <copyright holder> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder
from keras.utils import np_utils
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.callbacks import EarlyStopping
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
import matplotlib.pyplot as plt
import instaloader
from contrasenyes import usuari, contrasenya
def profile_preferences_to_NN(user):
L = instaloader.Instaloader()
L.login(usuari, contrasenya)
list_to_append_csv = []
none = 0
creators_celebrities = 0
personal_gods = 0
local_events = 0
professional_services = 0
restaurants = 0
non_profits = 0
general_interest = 0
publishers = 0
transportation_and_accomodation = 0
business_and_utility = 0
home_services = 0
auto_dealers = 0
food_and_personal_goods = 0
government_agencies = 0
content_apps = 0
grocery = 0
entities = 0
lifestyle_services = 0
geography = 0
profile = instaloader.Profile.from_username(L.context, user)
preferences = []
for followee in profile.get_followees():
preferences.append(followee.business_category_name)
print(followee.username + " - " + str(followee.business_category_name))
if followee.business_category_name == "None":
none += 1
if followee.business_category_name == "Creators & Celebrities":
creators_celebrities += 1
if followee.business_category_name == "Personal Goods & General Merchandise Stores":
personal_gods += 1
if followee.business_category_name == "Local Events":
local_events += 1
if followee.business_category_name == "Professional Services":
professional_services += 1
if followee.business_category_name == "Restaurants":
restaurants += 1
if followee.business_category_name == "Non-Profits & Religious Organizations":
non_profits += 1
if followee.business_category_name == "General Interest":
general_interest += 1
if followee.business_category_name == "Publishers":
publishers += 1
if followee.business_category_name == "Transportation & Accomodation Services":
transportation_and_accomodation += 1
if followee.business_category_name == "Business & Utility Services":
business_and_utility += 1
if followee.business_category_name == "Home Services":
home_services += 1
if followee.business_category_name == "Auto Dealers":
auto_dealers += 1
if followee.business_category_name == "Food & Personal Goods":
food_and_personal_goods += 1
if followee.business_category_name == "Government Agencies":
government_agencies += 1
if followee.business_category_name == "Content & Apps":
content_apps += 1
if followee.business_category_name == "Grocery & Convenience Stores":
grocery += 1
if followee.business_category_name == "Entities":
entities += 1
if followee.business_category_name == "Lifestyle Services":
lifestyle_services += 1
if followee.business_category_name == "Geography":
geography += 1
print(preferences)
print("None: " + str(none))
print("Creators & Celebrities: " + str(creators_celebrities))
print("Personal Goods & General Merchandise Stores: " + str(personal_gods))
print("Local Events: " + str(local_events))
print("Professional Services: " + str(professional_services))
print("Restaurants: " + str(restaurants))
print("Non-Profits & Religious Organizations: " + str(non_profits))
print("General Interest: " + str(general_interest))
print("Publishers: " + str(publishers))
print("Transportation & Accomodation Services: " + str(transportation_and_accomodation))
print("Business & Utility Services: " + str(business_and_utility))
print("Home Services: " + str(home_services))
print("Auto Dealers: " + str(auto_dealers))
print("Food & Personal Goods: " + str(food_and_personal_goods))
print("Government Agencies: " + str(government_agencies))
print("Content & Apps: " + str(content_apps))
print("Grocery & Convenience Stores: " + str(grocery))
print("Entities: " + str(entities))
print("Lifestyle Services: " + str(lifestyle_services))
print("Geography: " + str(geography))
followers = 0
following = 0
for follower in profile.get_followers():
followers += 1
for follower in profile.get_followees():
following += 1
return preferences
def neural_network(list):
# url = 'https://gist.githubusercontent.com/curran/a08a1080b88344b0c8a7/raw/639388c2cbc2120a14dcf466e85730eb8be498bb/iris.csv'
df = pd.read_csv("data_set3.csv")
df = df.sample(frac=1).reset_index(drop=True)
Y = df['Tematica']
print(Y) # output
X = df.drop(['Tematica'], axis=1)
print(X) # input o dataset
print(X.shape)
print(Y.shape)
X = np.array(X)
Y.head()
encoder = LabelEncoder()
encoder.fit(Y)
encoded_Y = encoder.transform(Y)
dummy_y = np_utils.to_categorical(encoded_Y, 10)
print(encoded_Y)
print(dummy_y)
model = Sequential()
model.add(Dense(16, input_shape=(X.shape[1],), activation='relu')) # input shape is (features,)
model.add(Dense(16, input_shape=(X.shape[1],), activation='relu')) # input shape is (features,)
model.add(Dense(10, activation='softmax'))
model.summary()
# compile the model
model.compile(optimizer='rmsprop',
loss='categorical_crossentropy',
# this is different instead of binary_crossentropy (for regular classification)
metrics=['accuracy'])
es = keras.callbacks.EarlyStopping(monitor='val_loss',
mode='min',
patience=10,
restore_best_weights=True) # important - otherwise you just return the last weigths...
# now we just update our model fit call
history = model.fit(X,
dummy_y,
callbacks=[es],
epochs=50, # you can set this to a big number!
batch_size=2,
shuffle=True,
validation_split=0.2,
verbose=1)
history_dict = history.history
# learning curve
# accuracy
acc = history_dict['accuracy']
val_acc = history_dict['val_accuracy']
# loss
loss = history_dict['loss']
val_loss = history_dict['val_loss']
# range of X (no. of epochs)
epochs = range(1, len(acc) + 1)
# plot
# "r" is for "solid red line"
plt.plot(epochs, acc, 'r', label='Training accuracy')
# b is for "solid blue line"
plt.plot(epochs, val_acc, 'b', label='Validation accuracy')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.show()
preds = model.predict(X) # see how the model did!
print(preds[0]) # i'm spreading that prediction across three nodes and they sum to 1
print(np.sum(preds[0]))
    matrix = confusion_matrix(dummy_y.argmax(axis=1), preds.argmax(axis=1))
    matrix
    print(classification_report(dummy_y.argmax(axis=1), preds.argmax(axis=1)))
model.predict(list, batch_size=1, verbose=1)
| true
| true
|
1c44d28b5a3327e8b46e68b17d3f1e122eb9c8c0
| 140
|
py
|
Python
|
pentagraph/__init__.py
|
Penta-Game/pentagraph
|
39a84dc06466bbff0a4f9692a24166ecfb839b84
|
[
"MIT"
] | 1
|
2020-07-25T10:07:53.000Z
|
2020-07-25T10:07:53.000Z
|
pentagraph/__init__.py
|
Penta-Game/pentagraph
|
39a84dc06466bbff0a4f9692a24166ecfb839b84
|
[
"MIT"
] | 43
|
2020-07-31T05:28:08.000Z
|
2021-07-27T05:11:03.000Z
|
pentagraph/__init__.py
|
Penta-Game/pentagraph
|
39a84dc06466bbff0a4f9692a24166ecfb839b84
|
[
"MIT"
] | null | null | null |
from . import lib
__version__ = "0.0.1b5"
__author__ = "Cobalt"
__doc__ = "Graph representation and tools for programming with pentagame."
| 23.333333
| 74
| 0.757143
|
from . import lib
__version__ = "0.0.1b5"
__author__ = "Cobalt"
__doc__ = "Graph representation and tools for programming with pentagame."
| true
| true
|
1c44d343222bdd8581be68438ee3d2be5a1cb5b4
| 5,997
|
py
|
Python
|
payments/paymentswobill.py
|
lkerxhalli/tools
|
bb9391ab7e1312619e705ca4da4f8cda3c201f99
|
[
"MIT"
] | null | null | null |
payments/paymentswobill.py
|
lkerxhalli/tools
|
bb9391ab7e1312619e705ca4da4f8cda3c201f99
|
[
"MIT"
] | null | null | null |
payments/paymentswobill.py
|
lkerxhalli/tools
|
bb9391ab7e1312619e705ca4da4f8cda3c201f99
|
[
"MIT"
] | null | null | null |
### Auth: Lorenc Kerxhalli
### Creates a estimated payment schedule per week
### input:
### 1. AP Payment report exported from Netsuite
### 2. Open Bills report exported from Netsuite
###
### output:
### Estimated payment schedule per vendor per week
###
import sys
import os
import csv
import datetime
from datetime import timedelta
import re
from re import sub
from decimal import Decimal
directory = '/Users/lkerxhalli/Documents/iris/jun13/'
name = 'NPS 1.1.19 4.30.19'
csvinputfile = directory + name + '.csv'
csvoutputfile = directory + name + ' out.csv'
hName = 'Name'
hBill = 'Bills > 30' # change agingDays below if you change the number here as well
hAvg = 'Avg.'
hAvgAdjust = 'Avg. Adjusted'
extraHeaders = 4 #headers that are not weeks
agingDays = 30 #number of recent days to ignore from open bills
# TODO continue
# def isStringCompany(str):
def parseName(str):
p = re.compile('V[0-9]{5}')
m = p.search(str)
if(m):
index = m.end() + 1
return str[index:]
else:
return None
def getDate(strDate):
# check if strdate year part is 2 digit or 4
lIndex = strDate.rfind('/') + 1 # index of first digit
strYear = strDate[lIndex:]
if(len(strYear) == 2):
return datetime.datetime.strptime(strDate, "%m/%d/%y").date()
elif (len(strYear) == 4):
return datetime.datetime.strptime(strDate, "%m/%d/%Y").date()
else:
return None
def getWeekHeader(dt):
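    # Label the ISO week containing dt as "MM/DD/YY - MM/DD/YY" (Monday-Sunday).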
year = dt.isocalendar()[0]
weekNo = dt.isocalendar()[1]
strDt = "{}-W{}".format(year, weekNo)
monday = datetime.datetime.strptime(strDt + '-1', "%Y-W%W-%w")
sunday = monday + datetime.timedelta(days=6)
return monday.strftime('%m/%d/%y') + ' - ' + sunday.strftime('%m/%d/%y')
def getNumber(strMoney):
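    # Strip currency formatting; accounting-style parentheses denote a negative amount.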
value = Decimal(sub(r'[^\d.]', '', strMoney))
if strMoney.find('(') > -1:
value = value * -1
return value
def isLineFull(strLine):
arrLine = strLine.split(',')
countFields = 0
for field in arrLine:
if field:
countFields += 1
if countFields > 4:
return True
else:
return False
def isOpenBillLine(strLine):
arrLine = strLine.split(',')
if len(arrLine) > 5 and arrLine[5]:
return True
else:
return False
def removeFileHeader():
with open(csvinputfile, 'r') as fin:
data = fin.read().splitlines(True)
noLinesToSkip = 0
for line in data:
if isLineFull(line):
# hdr = line.split(',')
# strLine = ''
# for item in hdr:
# strLine += item.strip() + ','
# strLine = strLine[:-1]
# data[noLinesToSkip] = strLine
break
else:
noLinesToSkip += 1
with open(csvinputfile, 'w') as fout:
fout.writelines(data[noLinesToSkip:])
def generateHeader(firstDate, lastDate):
currentDate = firstDate
header = [hName]
while currentDate < lastDate:
header.append(getWeekHeader(currentDate))
currentDate = currentDate + timedelta(days=7)
#take care of last date
lastWeekHeader = getWeekHeader(lastDate)
if header[-1] != lastWeekHeader:
header.append(lastWeekHeader)
header.append(hBill)
header.append(hAvg)
header.append(hAvgAdjust)
return header
def main():
print ('-- Start --')
print ('-- Clean csvs --')
#let's remove any extra lines at the top (Titles etc)
removeFileHeader()
outdict = {}
firstDate = getDate('01/01/40') #initialize them
lastDate = getDate('01/01/10')
weeks = 0 # number of weeks
print ('-- reading AP --')
#now read payment csv
with open(csvinputfile, 'rU') as s_file:
csv_r = csv.DictReader(s_file)
tmpTransaction = '' # transaction string includes the vendor name
for csv_row in csv_r:
if csv_row['Transaction']:
tmpTransaction = parseName(csv_row['Transaction'])
if not tmpTransaction in outdict:
outdict[tmpTransaction] = {}
if csv_row['Bill Type'] == 'Bill Payment' or csv_row['Bill Type'] == 'JE':
dt = getDate(csv_row['Date'])
week = getWeekHeader(dt)
if dt > lastDate:
lastDate = dt
if dt < firstDate:
firstDate = dt
amount = getNumber(csv_row['Amount'])
if not week in outdict[tmpTransaction]:
outdict[tmpTransaction][week] = amount
else:
outdict[tmpTransaction][week] += amount
header = generateHeader(firstDate, lastDate)
weeks = len(header) - extraHeaders
print ('Number of weeks: {}'.format(weeks))
#calculate and add averages
for vendor in outdict:
avg = 0
for key in outdict[vendor]:
if key != hName and key != hBill:
avg += outdict[vendor][key]
if weeks > 0:
outdict[vendor][hAvg] = round(avg/weeks, 2)
if hBill in outdict[vendor]:
outdict[vendor][hAvgAdjust] = round((avg + outdict[vendor][hBill])/weeks, 2)
else:
outdict[vendor][hAvgAdjust] = outdict[vendor][hAvg]
print ('-- Completing Calculations --')
#TODO: find and Sort for TNE for the names
    # newline='' prevents the csv module writing extra blank lines on Python 3
    with open(csvoutputfile, 'w', newline='') as wfile:
csvw = csv.writer(wfile, dialect='excel')
csvw.writerow(header)
for key in outdict:
if key:
row = [key]
for col in header[1:]:
if col in outdict[key]:
row.append(outdict[key][col])
else:
row.append(0)
csvw.writerow(row)
print ('-- finito --')
if __name__ == '__main__':
main()
| 28.023364
| 92
| 0.576121
|
import os
import csv
import datetime
from datetime import timedelta
import re
from re import sub
from decimal import Decimal
directory = '/Users/lkerxhalli/Documents/iris/jun13/'
name = 'NPS 1.1.19 4.30.19'
csvinputfile = directory + name + '.csv'
csvoutputfile = directory + name + ' out.csv'
hName = 'Name'
hBill = 'Bills > 30'
hAvg = 'Avg.'
hAvgAdjust = 'Avg. Adjusted'
extraHeaders = 4
agingDays = 30
def parseName(str):
    p = re.compile('V[0-9]{5}')
    m = p.search(str)
if(m):
index = m.end() + 1
return str[index:]
else:
return None
def getDate(strDate):
lIndex = strDate.rfind('/') + 1
strYear = strDate[lIndex:]
if(len(strYear) == 2):
return datetime.datetime.strptime(strDate, "%m/%d/%y").date()
elif (len(strYear) == 4):
return datetime.datetime.strptime(strDate, "%m/%d/%Y").date()
else:
return None
def getWeekHeader(dt):
year = dt.isocalendar()[0]
weekNo = dt.isocalendar()[1]
strDt = "{}-W{}".format(year, weekNo)
    monday = datetime.datetime.strptime(strDt + '-1', "%G-W%V-%u")
sunday = monday + datetime.timedelta(days=6)
return monday.strftime('%m/%d/%y') + ' - ' + sunday.strftime('%m/%d/%y')
def getNumber(strMoney):
value = Decimal(sub(r'[^\d.]', '', strMoney))
if strMoney.find('(') > -1:
value = value * -1
return value
def isLineFull(strLine):
arrLine = strLine.split(',')
countFields = 0
for field in arrLine:
if field:
countFields += 1
if countFields > 4:
return True
else:
return False
def isOpenBillLine(strLine):
arrLine = strLine.split(',')
if len(arrLine) > 5 and arrLine[5]:
return True
else:
return False
def removeFileHeader():
with open(csvinputfile, 'r') as fin:
data = fin.read().splitlines(True)
noLinesToSkip = 0
for line in data:
if isLineFull(line):
break
else:
noLinesToSkip += 1
with open(csvinputfile, 'w') as fout:
fout.writelines(data[noLinesToSkip:])
def generateHeader(firstDate, lastDate):
currentDate = firstDate
header = [hName]
while currentDate < lastDate:
header.append(getWeekHeader(currentDate))
currentDate = currentDate + timedelta(days=7)
lastWeekHeader = getWeekHeader(lastDate)
if header[-1] != lastWeekHeader:
header.append(lastWeekHeader)
header.append(hBill)
header.append(hAvg)
header.append(hAvgAdjust)
return header
def main():
print ('-- Start --')
print ('-- Clean csvs --')
removeFileHeader()
outdict = {}
firstDate = getDate('01/01/40') #initialize them
lastDate = getDate('01/01/10')
weeks = 0 # number of weeks
print ('-- reading AP --')
#now read payment csv
    with open(csvinputfile, 'r') as s_file:
csv_r = csv.DictReader(s_file)
tmpTransaction = '' # transaction string includes the vendor name
for csv_row in csv_r:
if csv_row['Transaction']:
tmpTransaction = parseName(csv_row['Transaction'])
if not tmpTransaction in outdict:
outdict[tmpTransaction] = {}
if csv_row['Bill Type'] == 'Bill Payment' or csv_row['Bill Type'] == 'JE':
dt = getDate(csv_row['Date'])
week = getWeekHeader(dt)
if dt > lastDate:
lastDate = dt
if dt < firstDate:
firstDate = dt
amount = getNumber(csv_row['Amount'])
if not week in outdict[tmpTransaction]:
outdict[tmpTransaction][week] = amount
else:
outdict[tmpTransaction][week] += amount
header = generateHeader(firstDate, lastDate)
weeks = len(header) - extraHeaders
print ('Number of weeks: {}'.format(weeks))
#calculate and add averages
for vendor in outdict:
avg = 0
for key in outdict[vendor]:
if key != hName and key != hBill:
avg += outdict[vendor][key]
if weeks > 0:
outdict[vendor][hAvg] = round(avg/weeks, 2)
if hBill in outdict[vendor]:
outdict[vendor][hAvgAdjust] = round((avg + outdict[vendor][hBill])/weeks, 2)
else:
outdict[vendor][hAvgAdjust] = outdict[vendor][hAvg]
print ('-- Completing Calculations --')
#TODO: find and Sort for TNE for the names
    with open(csvoutputfile, 'w', newline='') as wfile:
csvw = csv.writer(wfile, dialect='excel')
csvw.writerow(header)
for key in outdict:
if key:
row = [key]
for col in header[1:]:
if col in outdict[key]:
row.append(outdict[key][col])
else:
row.append(0)
csvw.writerow(row)
print ('-- finito --')
if __name__ == '__main__':
main()
| true
| true
|
1c44d3ce85fdcbffb545fbcfd8c5b98209705ad1
| 5,068
|
py
|
Python
|
q2_feature_table/tests/test_merge.py
|
jairideout/q2-feature-table
|
494e0b8080799c746c55be2271278891798b8e56
|
[
"BSD-3-Clause"
] | null | null | null |
q2_feature_table/tests/test_merge.py
|
jairideout/q2-feature-table
|
494e0b8080799c746c55be2271278891798b8e56
|
[
"BSD-3-Clause"
] | null | null | null |
q2_feature_table/tests/test_merge.py
|
jairideout/q2-feature-table
|
494e0b8080799c746c55be2271278891798b8e56
|
[
"BSD-3-Clause"
] | null | null | null |
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2017, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import unittest
import skbio
import numpy as np
from biom.table import Table
import pandas as pd
import pandas.util.testing as pdt
from q2_feature_table import (merge, merge_seq_data,
merge_taxa_data)
from q2_feature_table._merge import _merge_feature_data
class MergeTableTests(unittest.TestCase):
def test_valid_overlapping_feature_ids(self):
t1 = Table(np.array([[0, 1, 3], [1, 1, 2]]),
['O1', 'O2'],
['S1', 'S2', 'S3'])
t2 = Table(np.array([[0, 2, 6], [2, 2, 4]]),
['O1', 'O3'],
['S4', 'S5', 'S6'])
obs = merge(t1, t2)
exp = Table(np.array([[0, 1, 3, 0, 2, 6], [1, 1, 2, 0, 0, 0],
[0, 0, 0, 2, 2, 4]]),
['O1', 'O2', 'O3'],
['S1', 'S2', 'S3', 'S4', 'S5', 'S6'])
self.assertEqual(obs, exp)
def test_valid_non_overlapping_feature_ids(self):
t1 = Table(np.array([[0, 1, 3], [1, 1, 2]]),
['O1', 'O2'],
['S1', 'S2', 'S3'])
t2 = Table(np.array([[0, 2, 6], [2, 2, 4]]),
['O3', 'O4'],
['S4', 'S5', 'S6'])
obs = merge(t1, t2)
exp = Table(np.array([[0, 1, 3, 0, 0, 0], [1, 1, 2, 0, 0, 0],
[0, 0, 0, 0, 2, 6], [0, 0, 0, 2, 2, 4]]),
['O1', 'O2', 'O3', 'O4'],
['S1', 'S2', 'S3', 'S4', 'S5', 'S6'])
self.assertEqual(obs, exp)
def test_invalid_overlapping_sample_ids(self):
t1 = Table(np.array([[0, 1, 3], [1, 1, 2]]),
['O1', 'O2'],
['S1', 'S2', 'S3'])
t2 = Table(np.array([[0, 2, 6], [2, 2, 4]]),
['O1', 'O3'],
['S1', 'S5', 'S6'])
with self.assertRaises(ValueError):
merge(t1, t2)
class MergeFeatureDataTests(unittest.TestCase):
def test_valid_overlapping_feature_ids(self):
d1 = pd.Series(['ACGT', 'ACCT'], index=['f1', 'f2'])
d2 = pd.Series(['ACGT', 'ACCA'], index=['f1', 'f3'])
obs = _merge_feature_data(d1, d2)
exp = pd.Series(['ACGT', 'ACCT', 'ACCA'], index=['f1', 'f2', 'f3'])
pdt.assert_series_equal(obs, exp)
def test_first_feature_data_retained(self):
d1 = pd.Series(['ACGT', 'ACCT'], index=['f1', 'f2'])
d2 = pd.Series(['ACGAAA', 'ACCA'], index=['f1', 'f3'])
obs = _merge_feature_data(d1, d2)
exp = pd.Series(['ACGT', 'ACCT', 'ACCA'], index=['f1', 'f2', 'f3'])
pdt.assert_series_equal(obs, exp)
# swapping input order changes f1 data
obs = _merge_feature_data(d2, d1)
exp = pd.Series(['ACGAAA', 'ACCT', 'ACCA'], index=['f1', 'f2', 'f3'])
pdt.assert_series_equal(obs, exp)
def test_valid_non_overlapping_feature_ids(self):
d1 = pd.Series(['ACGT', 'ACCT'], index=['f1', 'f2'])
d2 = pd.Series(['ACGT', 'ACCA'], index=['f3', 'f4'])
obs = _merge_feature_data(d1, d2)
exp = pd.Series(['ACGT', 'ACCT', 'ACGT', 'ACCA'],
index=['f1', 'f2', 'f3', 'f4'])
pdt.assert_series_equal(obs, exp)
class MergeFeatureSequenceTests(unittest.TestCase):
# More extensive testing is performed in MergeFeatureDataTests, which
# tests the shared private API.
def test_merge_seq_data(self):
d1 = pd.Series([skbio.DNA('ACGT', metadata={'id': 'abc'}),
skbio.DNA('ACCT', metadata={'id': 'xyz'})],
index=['f1', 'f2'])
d2 = pd.Series([skbio.DNA('ACGT', metadata={'id': 'abc'}),
skbio.DNA('ACCA', metadata={'id': 'wxy'})],
index=['f1', 'f3'])
obs = merge_seq_data(d1, d2)
exp = pd.Series([skbio.DNA('ACGT', metadata={'id': 'abc'}),
skbio.DNA('ACCT', metadata={'id': 'xyz'}),
skbio.DNA('ACCA', metadata={'id': 'wxy'})],
index=['f1', 'f2', 'f3'])
pdt.assert_series_equal(obs, exp)
class MergeFeatureTaxonomyTests(unittest.TestCase):
# More extensive testing is performed in MergeFeatureDataTests, which
# tests the shared private API.
def test_merge_taxa_data(self):
# this test calls the public API directly
d1 = pd.Series(['a;b;c;d', 'a;b;c;e'], index=['f1', 'f2'])
d2 = pd.Series(['a;b;c;d', 'a;b;c;e'], index=['f1', 'f3'])
obs = merge_taxa_data(d1, d2)
exp = pd.Series(['a;b;c;d', 'a;b;c;e', 'a;b;c;e'],
index=['f1', 'f2', 'f3'])
pdt.assert_series_equal(obs, exp)
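# Hedged sketch (added for illustration; not the library's actual implementation).
# The first-wins merge behaviour the tests above exercise could be expressed as:
def _merge_feature_data_sketch(data1, data2):
    # keep data1's values for overlapping feature ids, then append data2's new ids
    return pd.concat([data1, data2[~data2.index.isin(data1.index)]])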
if __name__ == "__main__":
unittest.main()
| 39.286822
| 78
| 0.48165
|
import unittest
import skbio
import numpy as np
from biom.table import Table
import pandas as pd
import pandas.util.testing as pdt
from q2_feature_table import (merge, merge_seq_data,
merge_taxa_data)
from q2_feature_table._merge import _merge_feature_data
class MergeTableTests(unittest.TestCase):
def test_valid_overlapping_feature_ids(self):
t1 = Table(np.array([[0, 1, 3], [1, 1, 2]]),
['O1', 'O2'],
['S1', 'S2', 'S3'])
t2 = Table(np.array([[0, 2, 6], [2, 2, 4]]),
['O1', 'O3'],
['S4', 'S5', 'S6'])
obs = merge(t1, t2)
exp = Table(np.array([[0, 1, 3, 0, 2, 6], [1, 1, 2, 0, 0, 0],
[0, 0, 0, 2, 2, 4]]),
['O1', 'O2', 'O3'],
['S1', 'S2', 'S3', 'S4', 'S5', 'S6'])
self.assertEqual(obs, exp)
def test_valid_non_overlapping_feature_ids(self):
t1 = Table(np.array([[0, 1, 3], [1, 1, 2]]),
['O1', 'O2'],
['S1', 'S2', 'S3'])
t2 = Table(np.array([[0, 2, 6], [2, 2, 4]]),
['O3', 'O4'],
['S4', 'S5', 'S6'])
obs = merge(t1, t2)
exp = Table(np.array([[0, 1, 3, 0, 0, 0], [1, 1, 2, 0, 0, 0],
[0, 0, 0, 0, 2, 6], [0, 0, 0, 2, 2, 4]]),
['O1', 'O2', 'O3', 'O4'],
['S1', 'S2', 'S3', 'S4', 'S5', 'S6'])
self.assertEqual(obs, exp)
def test_invalid_overlapping_sample_ids(self):
t1 = Table(np.array([[0, 1, 3], [1, 1, 2]]),
['O1', 'O2'],
['S1', 'S2', 'S3'])
t2 = Table(np.array([[0, 2, 6], [2, 2, 4]]),
['O1', 'O3'],
['S1', 'S5', 'S6'])
with self.assertRaises(ValueError):
merge(t1, t2)
class MergeFeatureDataTests(unittest.TestCase):
def test_valid_overlapping_feature_ids(self):
d1 = pd.Series(['ACGT', 'ACCT'], index=['f1', 'f2'])
d2 = pd.Series(['ACGT', 'ACCA'], index=['f1', 'f3'])
obs = _merge_feature_data(d1, d2)
exp = pd.Series(['ACGT', 'ACCT', 'ACCA'], index=['f1', 'f2', 'f3'])
pdt.assert_series_equal(obs, exp)
def test_first_feature_data_retained(self):
d1 = pd.Series(['ACGT', 'ACCT'], index=['f1', 'f2'])
d2 = pd.Series(['ACGAAA', 'ACCA'], index=['f1', 'f3'])
obs = _merge_feature_data(d1, d2)
exp = pd.Series(['ACGT', 'ACCT', 'ACCA'], index=['f1', 'f2', 'f3'])
pdt.assert_series_equal(obs, exp)
obs = _merge_feature_data(d2, d1)
exp = pd.Series(['ACGAAA', 'ACCT', 'ACCA'], index=['f1', 'f2', 'f3'])
pdt.assert_series_equal(obs, exp)
def test_valid_non_overlapping_feature_ids(self):
d1 = pd.Series(['ACGT', 'ACCT'], index=['f1', 'f2'])
d2 = pd.Series(['ACGT', 'ACCA'], index=['f3', 'f4'])
obs = _merge_feature_data(d1, d2)
exp = pd.Series(['ACGT', 'ACCT', 'ACGT', 'ACCA'],
index=['f1', 'f2', 'f3', 'f4'])
pdt.assert_series_equal(obs, exp)
class MergeFeatureSequenceTests(unittest.TestCase):
def test_merge_seq_data(self):
d1 = pd.Series([skbio.DNA('ACGT', metadata={'id': 'abc'}),
skbio.DNA('ACCT', metadata={'id': 'xyz'})],
index=['f1', 'f2'])
d2 = pd.Series([skbio.DNA('ACGT', metadata={'id': 'abc'}),
skbio.DNA('ACCA', metadata={'id': 'wxy'})],
index=['f1', 'f3'])
obs = merge_seq_data(d1, d2)
exp = pd.Series([skbio.DNA('ACGT', metadata={'id': 'abc'}),
skbio.DNA('ACCT', metadata={'id': 'xyz'}),
skbio.DNA('ACCA', metadata={'id': 'wxy'})],
index=['f1', 'f2', 'f3'])
pdt.assert_series_equal(obs, exp)
class MergeFeatureTaxonomyTests(unittest.TestCase):
def test_merge_taxa_data(self):
d1 = pd.Series(['a;b;c;d', 'a;b;c;e'], index=['f1', 'f2'])
d2 = pd.Series(['a;b;c;d', 'a;b;c;e'], index=['f1', 'f3'])
obs = merge_taxa_data(d1, d2)
exp = pd.Series(['a;b;c;d', 'a;b;c;e', 'a;b;c;e'],
index=['f1', 'f2', 'f3'])
pdt.assert_series_equal(obs, exp)
if __name__ == "__main__":
unittest.main()
| true
| true
|
1c44d430b2eeaaad52c4aa52a38f7b194de4bb78
| 3,626
|
py
|
Python
|
pyinfra/api/connectors/sshuserclient/client.py
|
ryanwersal/pyinfra
|
350c9053953531d1d258512f1e0761879df772fb
|
[
"MIT"
] | null | null | null |
pyinfra/api/connectors/sshuserclient/client.py
|
ryanwersal/pyinfra
|
350c9053953531d1d258512f1e0761879df772fb
|
[
"MIT"
] | null | null | null |
pyinfra/api/connectors/sshuserclient/client.py
|
ryanwersal/pyinfra
|
350c9053953531d1d258512f1e0761879df772fb
|
[
"MIT"
] | null | null | null |
'''
This file was originally part of the "sshuserclient" PyPI package. The GitHub
source has since vanished (https://github.com/tobald/sshuserclient).
'''
from os import path
from paramiko import (
AutoAddPolicy,
ProxyCommand,
SSHClient as ParamikoClient,
)
from pyinfra.api.util import memoize
from .config import SSHConfig
@memoize
def get_ssh_config():
user_config_file = path.expanduser('~/.ssh/config')
if path.exists(user_config_file):
with open(user_config_file) as f:
ssh_config = SSHConfig()
ssh_config.parse(f)
return ssh_config
class SSHClient(ParamikoClient):
'''
An SSHClient which honors ssh_config and supports proxyjumping
original idea at http://bitprophet.org/blog/2012/11/05/gateway-solutions/
'''
def connect(self, hostname, **kwargs):
self.hostname, self.config = self.parse_config(hostname)
self.config.update(kwargs)
        # connect to the hostname resolved from ssh_config, not the raw alias
        super(SSHClient, self).connect(self.hostname, **self.config)
def gateway(self, target, target_port):
transport = self.get_transport()
return transport.open_channel(
'direct-tcpip',
(target, target_port),
(self.hostname, self.config['port']))
def parse_config(self, hostname):
cfg = {'port': 22}
ssh_config = get_ssh_config()
if not ssh_config:
return hostname, cfg
host_config = ssh_config.lookup(hostname)
if 'hostname' in host_config:
hostname = host_config['hostname']
if 'user' in host_config:
cfg['username'] = host_config['user']
if 'identityfile' in host_config:
cfg['key_filename'] = host_config['identityfile']
if 'port' in host_config:
cfg['port'] = int(host_config['port'])
if 'proxycommand' in host_config:
cfg['sock'] = ProxyCommand(host_config['proxycommand'])
elif 'proxyjump' in host_config:
hops = host_config['proxyjump'].split(',')
sock = None
for i, hop in enumerate(hops):
hop_hostname, hop_config = self.derive_shorthand(hop)
c = SSHClient()
c.set_missing_host_key_policy(AutoAddPolicy())
c.connect(hop_hostname, sock=sock, **hop_config)
if i == len(hops) - 1:
target = hostname
target_config = {'port': cfg['port']}
else:
target, target_config = self.derive_shorthand(
hops[i + 1])
sock = c.gateway(target, target_config['port'])
cfg['sock'] = sock
return hostname, cfg
def derive_shorthand(self, host_string):
config = {}
user_hostport = host_string.rsplit('@', 1)
hostport = user_hostport.pop()
user = user_hostport[0] if user_hostport and user_hostport[0] else None
if user:
config['username'] = user
# IPv6: can't reliably tell where addr ends and port begins, so don't
# try (and don't bother adding special syntax either, user should avoid
# this situation by using port=).
if hostport.count(':') > 1:
host = hostport
config['port'] = 22
# IPv4: can split on ':' reliably.
else:
host_port = hostport.rsplit(':', 1)
host = host_port.pop(0) or None
if host_port and host_port[0]:
config['port'] = int(host_port[0])
else:
config['port'] = 22
return host, config
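# Hedged usage sketch (added for illustration; the alias 'bastion' is hypothetical):
#   client = SSHClient()
#   client.set_missing_host_key_policy(AutoAddPolicy())
#   client.connect('bastion')  # host/user/port/ProxyJump are resolved from ~/.ssh/config
# derive_shorthand('deploy@10.0.0.1:2222') -> ('10.0.0.1', {'username': 'deploy', 'port': 2222})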
| 33.266055
| 79
| 0.585769
|
from os import path
from paramiko import (
AutoAddPolicy,
ProxyCommand,
SSHClient as ParamikoClient,
)
from pyinfra.api.util import memoize
from .config import SSHConfig
@memoize
def get_ssh_config():
user_config_file = path.expanduser('~/.ssh/config')
if path.exists(user_config_file):
with open(user_config_file) as f:
ssh_config = SSHConfig()
ssh_config.parse(f)
return ssh_config
class SSHClient(ParamikoClient):
def connect(self, hostname, **kwargs):
self.hostname, self.config = self.parse_config(hostname)
self.config.update(kwargs)
        super(SSHClient, self).connect(self.hostname, **self.config)
def gateway(self, target, target_port):
transport = self.get_transport()
return transport.open_channel(
'direct-tcpip',
(target, target_port),
(self.hostname, self.config['port']))
def parse_config(self, hostname):
cfg = {'port': 22}
ssh_config = get_ssh_config()
if not ssh_config:
return hostname, cfg
host_config = ssh_config.lookup(hostname)
if 'hostname' in host_config:
hostname = host_config['hostname']
if 'user' in host_config:
cfg['username'] = host_config['user']
if 'identityfile' in host_config:
cfg['key_filename'] = host_config['identityfile']
if 'port' in host_config:
cfg['port'] = int(host_config['port'])
if 'proxycommand' in host_config:
cfg['sock'] = ProxyCommand(host_config['proxycommand'])
elif 'proxyjump' in host_config:
hops = host_config['proxyjump'].split(',')
sock = None
for i, hop in enumerate(hops):
hop_hostname, hop_config = self.derive_shorthand(hop)
c = SSHClient()
c.set_missing_host_key_policy(AutoAddPolicy())
c.connect(hop_hostname, sock=sock, **hop_config)
if i == len(hops) - 1:
target = hostname
target_config = {'port': cfg['port']}
else:
target, target_config = self.derive_shorthand(
hops[i + 1])
sock = c.gateway(target, target_config['port'])
cfg['sock'] = sock
return hostname, cfg
def derive_shorthand(self, host_string):
config = {}
user_hostport = host_string.rsplit('@', 1)
hostport = user_hostport.pop()
user = user_hostport[0] if user_hostport and user_hostport[0] else None
if user:
config['username'] = user
# this situation by using port=).
if hostport.count(':') > 1:
host = hostport
config['port'] = 22
# IPv4: can split on ':' reliably.
else:
host_port = hostport.rsplit(':', 1)
host = host_port.pop(0) or None
if host_port and host_port[0]:
config['port'] = int(host_port[0])
else:
config['port'] = 22
return host, config
| true
| true
|
1c44d59547bb28cda69dfd6ea00db05d926f2024
| 3,035
|
py
|
Python
|
test/test_base_processor.py
|
tienanh-1999/TensorFlowTTS
|
cd3a5e1f9915fa7dd646771fd50fe6fef94fe9fc
|
[
"Apache-2.0"
] | 1,961
|
2020-07-31T07:31:27.000Z
|
2022-03-31T20:39:29.000Z
|
test/test_base_processor.py
|
neso613/TensorFlowTTS
|
978f397c244a4987e2aa11e5db8d1e5902332826
|
[
"Apache-2.0"
] | 587
|
2020-07-31T03:24:54.000Z
|
2022-03-29T02:31:50.000Z
|
test/test_base_processor.py
|
neso613/TensorFlowTTS
|
978f397c244a4987e2aa11e5db8d1e5902332826
|
[
"Apache-2.0"
] | 483
|
2020-07-31T17:48:32.000Z
|
2022-03-31T13:55:49.000Z
|
import pytest
from tensorflow_tts.processor.base_processor import BaseProcessor, DataProcessorError
import string
from dataclasses import dataclass
from shutil import copyfile
@dataclass
class LJ(BaseProcessor):
def get_one_sample(self, item):
sample = {
"raw_text": None,
"text_ids": None,
"audio": None,
"utt_id": None,
"speaker_name": None,
"rate": None,
}
return sample
def text_to_sequence(self, text):
return ["0"]
def setup_eos_token(self):
return None
def save_pretrained(self, saved_path):
return super().save_pretrained(saved_path)
@pytest.fixture
def processor(tmpdir):
copyfile("test/files/train.txt", f"{tmpdir}/train.txt")
processor = LJ(data_dir=tmpdir, symbols=list(string.ascii_lowercase))
return processor
@pytest.fixture
def mapper_processor(tmpdir):
copyfile("test/files/train.txt", f"{tmpdir}/train.txt")
copyfile("test/files/mapper.json", f"{tmpdir}/mapper.json")
processor = LJ(data_dir=tmpdir, loaded_mapper_path=f"{tmpdir}/mapper.json")
return processor
def test_items_creation(processor):
# Check text
assert processor.items[0][0] == "in fact its just a test."
assert processor.items[1][0] == "in fact its just a speaker number one."
# Check path
assert processor.items[0][1].split("/")[-1] == "libri1.wav"
assert processor.items[1][1].split("/")[-1] == "libri2.wav"
# Check speaker name
assert processor.items[0][2] == "One"
assert processor.items[1][2] == "Two"
def test_mapper(processor):
# check symbol to id mapper
assert processor.symbol_to_id["a"] == 0
# check id to symbol mapper
assert processor.id_to_symbol[0] == "a"
# check speaker mapper
assert processor.speakers_map["One"] == 0
assert processor.speakers_map["Two"] == 1
def test_adding_symbols(processor):
# check symbol to id mapper
assert processor.symbol_to_id["a"] == 0
# check id to symbol mapper
assert processor.id_to_symbol[0] == "a"
old_processor_len = len(processor.symbols)
# Test adding new symbol
processor.add_symbol("O_O")
assert processor.symbol_to_id["a"] == 0
assert (
processor.symbol_to_id["O_O"] == len(processor.symbols) - 1
) # new symbol should have last id
assert processor.id_to_symbol[0] == "a"
assert processor.id_to_symbol[len(processor.symbols) - 1] == "O_O"
assert old_processor_len == len(processor.symbols) - 1
def test_loading_mapper(mapper_processor):
assert mapper_processor.symbol_to_id["a"] == 0
assert mapper_processor.symbol_to_id["@ph"] == 2
assert mapper_processor.speakers_map["test_one"] == 0
assert mapper_processor.speakers_map["test_two"] == 1
assert mapper_processor.id_to_symbol[0] == "a"
assert mapper_processor.id_to_symbol[2] == "@ph"
# Test failed creation
with pytest.raises(DataProcessorError):
failed = LJ(data_dir="test/files")
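# Hedged sketch (added for illustration, not the library's implementation): the
# mapping behaviour asserted above amounts to enumerating the symbol list,
#   symbol_to_id = {s: i for i, s in enumerate(symbols)}
# with add_symbol() appending, so a new symbol receives id len(symbols) - 1.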
| 28.101852
| 85
| 0.670181
|
import pytest
from tensorflow_tts.processor.base_processor import BaseProcessor, DataProcessorError
import string
from dataclasses import dataclass
from shutil import copyfile
@dataclass
class LJ(BaseProcessor):
def get_one_sample(self, item):
sample = {
"raw_text": None,
"text_ids": None,
"audio": None,
"utt_id": None,
"speaker_name": None,
"rate": None,
}
return sample
def text_to_sequence(self, text):
return ["0"]
def setup_eos_token(self):
return None
def save_pretrained(self, saved_path):
return super().save_pretrained(saved_path)
@pytest.fixture
def processor(tmpdir):
copyfile("test/files/train.txt", f"{tmpdir}/train.txt")
processor = LJ(data_dir=tmpdir, symbols=list(string.ascii_lowercase))
return processor
@pytest.fixture
def mapper_processor(tmpdir):
copyfile("test/files/train.txt", f"{tmpdir}/train.txt")
copyfile("test/files/mapper.json", f"{tmpdir}/mapper.json")
processor = LJ(data_dir=tmpdir, loaded_mapper_path=f"{tmpdir}/mapper.json")
return processor
def test_items_creation(processor):
assert processor.items[0][0] == "in fact its just a test."
assert processor.items[1][0] == "in fact its just a speaker number one."
assert processor.items[0][1].split("/")[-1] == "libri1.wav"
assert processor.items[1][1].split("/")[-1] == "libri2.wav"
assert processor.items[0][2] == "One"
assert processor.items[1][2] == "Two"
def test_mapper(processor):
assert processor.symbol_to_id["a"] == 0
assert processor.id_to_symbol[0] == "a"
assert processor.speakers_map["One"] == 0
assert processor.speakers_map["Two"] == 1
def test_adding_symbols(processor):
assert processor.symbol_to_id["a"] == 0
assert processor.id_to_symbol[0] == "a"
old_processor_len = len(processor.symbols)
processor.add_symbol("O_O")
assert processor.symbol_to_id["a"] == 0
assert (
processor.symbol_to_id["O_O"] == len(processor.symbols) - 1
)
assert processor.id_to_symbol[0] == "a"
assert processor.id_to_symbol[len(processor.symbols) - 1] == "O_O"
assert old_processor_len == len(processor.symbols) - 1
def test_loading_mapper(mapper_processor):
assert mapper_processor.symbol_to_id["a"] == 0
assert mapper_processor.symbol_to_id["@ph"] == 2
assert mapper_processor.speakers_map["test_one"] == 0
assert mapper_processor.speakers_map["test_two"] == 1
assert mapper_processor.id_to_symbol[0] == "a"
assert mapper_processor.id_to_symbol[2] == "@ph"
with pytest.raises(DataProcessorError):
failed = LJ(data_dir="test/files")
| true
| true
|
1c44d6adc5f4ee15843ce36776690e20cbf7f159
| 1,363
|
py
|
Python
|
meregistro/apps/registro/models/EstablecimientoAutoridad.py
|
MERegistro/meregistro
|
6cde3cab2bd1a8e3084fa38147de377d229391e3
|
[
"BSD-3-Clause"
] | null | null | null |
meregistro/apps/registro/models/EstablecimientoAutoridad.py
|
MERegistro/meregistro
|
6cde3cab2bd1a8e3084fa38147de377d229391e3
|
[
"BSD-3-Clause"
] | null | null | null |
meregistro/apps/registro/models/EstablecimientoAutoridad.py
|
MERegistro/meregistro
|
6cde3cab2bd1a8e3084fa38147de377d229391e3
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from django.db import models
from apps.registro.models.Establecimiento import Establecimiento
from apps.seguridad.models.TipoDocumento import TipoDocumento
from apps.registro.models.AutoridadCargo import AutoridadCargo
from django.core.exceptions import ValidationError
class EstablecimientoAutoridad(models.Model):
establecimiento = models.ForeignKey(Establecimiento, editable=False, related_name='autoridades')
apellido = models.CharField(max_length=40, null=False)
nombre = models.CharField(max_length=40, null=False)
fecha_nacimiento = models.DateField(null=True)
cargo = models.ForeignKey(AutoridadCargo, null=True, blank=True)
tipo_documento = models.ForeignKey(TipoDocumento, null=True, blank=True)
documento = models.CharField(max_length=8, null=True, blank=True)
telefono = models.CharField(max_length=30, null=True, blank=True)
celular = models.CharField(max_length=30, null=True, blank=True)
email = models.EmailField(max_length=255, null=True, blank=True)
class Meta:
app_label = 'registro'
db_table = 'registro_establecimiento_autoridades'
    def __unicode__(self):
        # cargo is nullable; guard before dereferencing (the fallback label 'Sin cargo' is an assumption)
        cargo = self.cargo.descripcion if self.cargo else u'Sin cargo'
        return cargo + ": " + self.apellido + " " + self.nombre
def __init__(self, *args, **kwargs):
super(EstablecimientoAutoridad, self).__init__(*args, **kwargs)
| 45.433333
| 100
| 0.752018
|
from django.db import models
from apps.registro.models.Establecimiento import Establecimiento
from apps.seguridad.models.TipoDocumento import TipoDocumento
from apps.registro.models.AutoridadCargo import AutoridadCargo
from django.core.exceptions import ValidationError
class EstablecimientoAutoridad(models.Model):
establecimiento = models.ForeignKey(Establecimiento, editable=False, related_name='autoridades')
apellido = models.CharField(max_length=40, null=False)
nombre = models.CharField(max_length=40, null=False)
fecha_nacimiento = models.DateField(null=True)
cargo = models.ForeignKey(AutoridadCargo, null=True, blank=True)
tipo_documento = models.ForeignKey(TipoDocumento, null=True, blank=True)
documento = models.CharField(max_length=8, null=True, blank=True)
telefono = models.CharField(max_length=30, null=True, blank=True)
celular = models.CharField(max_length=30, null=True, blank=True)
email = models.EmailField(max_length=255, null=True, blank=True)
class Meta:
app_label = 'registro'
db_table = 'registro_establecimiento_autoridades'
    def __unicode__(self):
        cargo = self.cargo.descripcion if self.cargo else u'Sin cargo'
        return cargo + ": " + self.apellido + " " + self.nombre
def __init__(self, *args, **kwargs):
super(EstablecimientoAutoridad, self).__init__(*args, **kwargs)
| true
| true
|
1c44d7fbd04a0e2096ef3888fde21248877de8f1
| 4,060
|
py
|
Python
|
python/tests/server/test_http_output.py
|
dashstander/cog
|
0aee3c9ef50ac346d053010e39c4e7becbbcb70d
|
[
"Apache-2.0"
] | null | null | null |
python/tests/server/test_http_output.py
|
dashstander/cog
|
0aee3c9ef50ac346d053010e39c4e7becbbcb70d
|
[
"Apache-2.0"
] | null | null | null |
python/tests/server/test_http_output.py
|
dashstander/cog
|
0aee3c9ef50ac346d053010e39c4e7becbbcb70d
|
[
"Apache-2.0"
] | null | null | null |
import base64
import io
import os
import tempfile
import numpy as np
from PIL import Image
import responses
from responses.matchers import multipart_matcher
from cog import BaseModel, BasePredictor, Path, File
from .test_http import make_client
def test_return_wrong_type():
class Predictor(BasePredictor):
def predict(self) -> int:
return "foo"
client = make_client(Predictor(), raise_server_exceptions=False)
resp = client.post("/predictions")
assert resp.status_code == 500
def test_path_output_path():
class Predictor(BasePredictor):
def predict(self) -> Path:
temp_dir = tempfile.mkdtemp()
temp_path = os.path.join(temp_dir, "my_file.bmp")
img = Image.new("RGB", (255, 255), "red")
img.save(temp_path)
return Path(temp_path)
client = make_client(Predictor())
res = client.post("/predictions")
assert res.status_code == 200
header, b64data = res.json()["output"].split(",", 1)
# need both image/bmp and image/x-ms-bmp until https://bugs.python.org/issue44211 is fixed
assert header in ["data:image/bmp;base64", "data:image/x-ms-bmp;base64"]
assert len(base64.b64decode(b64data)) == 195894
@responses.activate
def test_output_path_to_http():
class Predictor(BasePredictor):
def predict(self) -> Path:
temp_dir = tempfile.mkdtemp()
temp_path = os.path.join(temp_dir, "file.txt")
with open(temp_path, "w") as fh:
fh.write("hello")
return Path(temp_path)
fh = io.BytesIO(b"hello")
fh.name = "file.txt"
responses.add(
responses.PUT,
"http://example.com/upload/file.txt",
status=201,
match=[multipart_matcher({"file": fh})],
)
client = make_client(Predictor())
res = client.post(
"/predictions", json={"output_file_prefix": "http://example.com/upload/"}
)
assert res.json() == {
"status": "succeeded",
"output": "http://example.com/upload/file.txt",
}
assert res.status_code == 200
def test_path_output_file():
class Predictor(BasePredictor):
def predict(self) -> File:
return io.StringIO("hello")
client = make_client(Predictor())
res = client.post("/predictions")
assert res.status_code == 200
assert res.json() == {
"status": "succeeded",
"output": "data:application/octet-stream;base64,aGVsbG8=", # hello
}
@responses.activate
def test_output_file_to_http():
class Predictor(BasePredictor):
def predict(self) -> File:
fh = io.StringIO("hello")
fh.name = "foo.txt"
return fh
responses.add(
responses.PUT,
"http://example.com/upload/foo.txt",
status=201,
match=[multipart_matcher({"file": ("foo.txt", b"hello")})],
)
client = make_client(Predictor())
res = client.post(
"/predictions", json={"output_file_prefix": "http://example.com/upload/"}
)
assert res.json() == {
"status": "succeeded",
"output": "http://example.com/upload/foo.txt",
}
assert res.status_code == 200
def test_json_output_numpy():
class Predictor(BasePredictor):
def predict(self) -> np.float64:
return np.float64(1.0)
client = make_client(Predictor())
resp = client.post("/predictions")
assert resp.status_code == 200
assert resp.json() == {"output": 1.0, "status": "succeeded"}
def test_complex_output():
class Output(BaseModel):
text: str
file: File
class Predictor(BasePredictor):
def predict(self) -> Output:
return Output(text="hello", file=io.StringIO("hello"))
client = make_client(Predictor())
resp = client.post("/predictions")
assert resp.json() == {
"output": {
"file": "data:application/octet-stream;base64,aGVsbG8=",
"text": "hello",
},
"status": "succeeded",
}
assert resp.status_code == 200
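# Hedged sketch (added for illustration): the data URI asserted above can be
# reproduced with the standard library alone:
def _data_uri_sketch(payload=b"hello"):
    return "data:application/octet-stream;base64," + base64.b64encode(payload).decode()
# _data_uri_sketch() == "data:application/octet-stream;base64,aGVsbG8="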
| 28.391608
| 94
| 0.61133
|
import base64
import io
import os
import tempfile
import numpy as np
from PIL import Image
import responses
from responses.matchers import multipart_matcher
from cog import BaseModel, BasePredictor, Path, File
from .test_http import make_client
def test_return_wrong_type():
class Predictor(BasePredictor):
def predict(self) -> int:
return "foo"
client = make_client(Predictor(), raise_server_exceptions=False)
resp = client.post("/predictions")
assert resp.status_code == 500
def test_path_output_path():
class Predictor(BasePredictor):
def predict(self) -> Path:
temp_dir = tempfile.mkdtemp()
temp_path = os.path.join(temp_dir, "my_file.bmp")
img = Image.new("RGB", (255, 255), "red")
img.save(temp_path)
return Path(temp_path)
client = make_client(Predictor())
res = client.post("/predictions")
assert res.status_code == 200
header, b64data = res.json()["output"].split(",", 1)
assert header in ["data:image/bmp;base64", "data:image/x-ms-bmp;base64"]
assert len(base64.b64decode(b64data)) == 195894
@responses.activate
def test_output_path_to_http():
class Predictor(BasePredictor):
def predict(self) -> Path:
temp_dir = tempfile.mkdtemp()
temp_path = os.path.join(temp_dir, "file.txt")
with open(temp_path, "w") as fh:
fh.write("hello")
return Path(temp_path)
fh = io.BytesIO(b"hello")
fh.name = "file.txt"
responses.add(
responses.PUT,
"http://example.com/upload/file.txt",
status=201,
match=[multipart_matcher({"file": fh})],
)
client = make_client(Predictor())
res = client.post(
"/predictions", json={"output_file_prefix": "http://example.com/upload/"}
)
assert res.json() == {
"status": "succeeded",
"output": "http://example.com/upload/file.txt",
}
assert res.status_code == 200
def test_path_output_file():
class Predictor(BasePredictor):
def predict(self) -> File:
return io.StringIO("hello")
client = make_client(Predictor())
res = client.post("/predictions")
assert res.status_code == 200
assert res.json() == {
"status": "succeeded",
"output": "data:application/octet-stream;base64,aGVsbG8=",
}
@responses.activate
def test_output_file_to_http():
class Predictor(BasePredictor):
def predict(self) -> File:
fh = io.StringIO("hello")
fh.name = "foo.txt"
return fh
responses.add(
responses.PUT,
"http://example.com/upload/foo.txt",
status=201,
match=[multipart_matcher({"file": ("foo.txt", b"hello")})],
)
client = make_client(Predictor())
res = client.post(
"/predictions", json={"output_file_prefix": "http://example.com/upload/"}
)
assert res.json() == {
"status": "succeeded",
"output": "http://example.com/upload/foo.txt",
}
assert res.status_code == 200
def test_json_output_numpy():
class Predictor(BasePredictor):
def predict(self) -> np.float64:
return np.float64(1.0)
client = make_client(Predictor())
resp = client.post("/predictions")
assert resp.status_code == 200
assert resp.json() == {"output": 1.0, "status": "succeeded"}
def test_complex_output():
class Output(BaseModel):
text: str
file: File
class Predictor(BasePredictor):
def predict(self) -> Output:
return Output(text="hello", file=io.StringIO("hello"))
client = make_client(Predictor())
resp = client.post("/predictions")
assert resp.json() == {
"output": {
"file": "data:application/octet-stream;base64,aGVsbG8=",
"text": "hello",
},
"status": "succeeded",
}
assert resp.status_code == 200
| true
| true
|
1c44d821b673acb2343e06993bdaf8292215ca6a
| 26,336
|
py
|
Python
|
rnn.py
|
hamk-uas/HAMK_Smart_City
|
c9408ea1caac995522489a331207737b37971314
|
[
"Apache-2.0"
] | 1
|
2021-12-19T09:53:28.000Z
|
2021-12-19T09:53:28.000Z
|
rnn.py
|
hamk-uas/HAMK_Smart_City
|
c9408ea1caac995522489a331207737b37971314
|
[
"Apache-2.0"
] | null | null | null |
rnn.py
|
hamk-uas/HAMK_Smart_City
|
c9408ea1caac995522489a331207737b37971314
|
[
"Apache-2.0"
] | null | null | null |
import pandas as pd
import numpy as np
from tensorflow import Variable
from tensorflow.keras.models import Sequential, load_model
from tensorflow.keras.layers import Dense, GRU, LSTM, SimpleRNN, BatchNormalization
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import EarlyStopping
from sklearn.metrics import mean_squared_error, mean_absolute_error
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import KFold
from collections import deque
import matplotlib.pyplot as plt
import kerastuner as kt
from datetime import datetime, date
from joblib import dump, load
import os
import json
import csv
import math
class RNN:
'''
Parent class for RNN models.
'''
def __init__(self, quant=None, seq=4, fut=0, parameters=None):
'''
        All parameters for class objects are defined here; child classes do not define their own __init__ methods.
Inputs: target quantities as list, sequence length as int, future period as int, input parameters as a list.
'''
self.quant = quant
self.seq = seq
self.fut = fut
self.parameters = parameters
self.date = date.today() # For bookkeeping purposes
self.model = None # For storage of a model
self.scaler = None # For storage of feature scaler
self.name = None # Defined after training
def preprocess(self, raw_data):
'''
        Method for preprocessing downsampled data for sequence modeling.
        Input: downsampled data frame whose columns include a timestamp and the parameters listed in the class attributes.
        Output: training input data, training target data, validation input data and validation target data;
        the fitted sklearn scaler is stored in self.scaler for later inverse transformations.
'''
raw_data.iloc[:,0] = pd.to_datetime(raw_data.iloc[:,0], format='%Y-%m-%d %H:%M:%S%z')
vec = raw_data.iloc[:,0].values
        # convert the numpy datetime64 values to an array of python datetime objects
        datetimes = np.array([[vec, vec], [vec, vec]], dtype = 'M8[ms]').astype('O')[0,1]
raw_data['weekday'] = [t.timetuple().tm_wday for t in datetimes]
raw_data['hours'] = [t.hour for t in datetimes]
# Encode time parameters to cyclical features
raw_data['hours_sin'] = np.sin(2 * np.pi * raw_data['hours']/24.0)
raw_data['hours_cos'] = np.cos(2 * np.pi * raw_data['hours']/24.0)
raw_data['weekday_sin'] = np.sin(2 * np.pi * raw_data['weekday']/7)
raw_data['weekday_cos'] = np.cos(2 * np.pi * raw_data['weekday']/7)
# Extend parameter list by quantity for picking data
self.parameters.extend(self.quant)
# Split the data to training and testing sets
raw_data = raw_data[self.parameters].copy()
df_train = raw_data[int(len(raw_data)*0.2):].copy()
df_val = raw_data[:int(len(raw_data)*0.2)].copy()
# Delete the quantity from parameter list to preserve the original inputs
self.parameters = [x for x in self.parameters if x not in self.quant]
# Scale all data features to range [0,1]
self.scaler = MinMaxScaler()
df_train = self.scaler.fit_transform(df_train)
df_val = self.scaler.transform(df_val)
# Next generate a list which will hold all of the sequences for training data
sequences_train = []
sequences_val = []
prev_days_train = deque(maxlen=self.seq) # Placeholder for the sequences
prev_days_val = deque(maxlen=self.seq)
l_quant = len(self.quant)
for count, row in enumerate(pd.DataFrame(df_train).values):
prev_days_train.append([val for val in row[:-l_quant]]) # store everything but the target values
if (len(prev_days_train) == self.seq): # This checks that our sequences are of the correct length and target value is at full hour
if (any(pd.isna(pd.DataFrame(df_train).values[count-1][-l_quant:]))): # Test for 30 min data interval because of energy data gaps
continue
try:
sequences_train.append([np.array(prev_days_train), pd.DataFrame(df_train).values[count+1][-l_quant:]])
except IndexError:
break
for count, row in enumerate(pd.DataFrame(df_val).values):
prev_days_val.append([val for val in row[:-l_quant]]) # store everything but the target values
if (len(prev_days_val) == self.seq): # This checks that our sequences are of the correct length and target value is at full hour
if (any(pd.isna(pd.DataFrame(df_val).values[count-1][-l_quant:]))): # Test for 30 min data interval because of energy data gaps
continue
try:
sequences_val.append([np.array(prev_days_val), pd.DataFrame(df_val).values[count+1][-l_quant:]])
except IndexError:
break
# Iterating through the sequences in order to differentiate X and y
X_train = []
y_train = []
X_val = []
y_val = []
for seq, target in sequences_train:
X_train.append(seq)
y_train.append(target)
for seq, target in sequences_val:
X_val.append(seq)
y_val.append(target)
X_train = np.array(X_train)
y_train = np.array(y_train)
X_val = np.array(X_val)
y_val = np.array(y_val)
# Output the shapes of training and testing data.
print(f'Shape of training data: {X_train.shape}')
print(f'Shape of testing data: {X_val.shape}')
return X_train, y_train, X_val, y_val
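    # Hedged usage sketch (illustration only; the quantity and column names are hypothetical):
    #   rnn = MyGRU(quant=['energy_kwh'], seq=4, parameters=['outside_temp'])
    #   X_train, y_train, X_val, y_val = rnn.preprocess(downsampled_df)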
def inv_target(self, X, preds, y_val):
'''
        Method for inverting the scaling of the target variable.
        Inputs: 3-dimensional data matrix used to train (or validate) the model, predictions obtained
        with the model, and the validation target vector; the scaler fitted in preprocess (self.scaler) is used.
Note: the X tensor is more of a placeholder in this function used only for getting the dimensions correct.
Output: Inversely transformed predictions and validation vectors
'''
        preds = np.concatenate((X[:len(preds),-1], np.array(preds).reshape(len(preds), 1)), axis=1) # Reshape gives the 1-D prediction vector an explicit column dimension
y_val = np.concatenate((X[:len(preds),-1], np.array(y_val[:len(preds)]).reshape(len(preds), 1)), axis=1)
preds = self.scaler.inverse_transform(preds)[:,-1:]
y_val = self.scaler.inverse_transform(y_val)[:,-1:]
return preds, y_val
def plot_preds(self, preds, y_val, low=[], up=[], conf=0.9):
'''
Producing plots of predictions with the measured values as time series.
        Inputs: predicted and measured values as numpy arrays, plus optional lower/upper interval offsets and a confidence level.
'''
# Number of instances to plot.
if len(low) != 0: # Check whether the list is empty.
rounds = len(low)
else:
rounds = len(preds)
plt.figure()
plt.plot(preds[:rounds], color='navy', label='Predicted')
plt.plot(y_val[:rounds], color='darkorange', label='Measured', marker='*')
if len(low) != 0: # Check whether the list is empty.
plt.fill_between(range(rounds), (preds[:rounds,0])+(low[:,0]), (preds[:rounds,0])+(up[:,0]), color='gray', alpha=0.25, label=f'{round(conf*100)}% prediction interval')
plt.legend()
plt.grid()
plt.title(f'Predictions for {self.quant[0]} with {self.name}.')
plt.show()
def load_intervals(self, int_path, conf=0.9):
'''
Method for loading desired prediction intervals for ML forecasts.
Inputs: path to the prediction interval .csv file, confidence level as float (0.5-0.99)
'''
# Load the predictions
with open(int_path) as csvf:
read_fil = csv.reader(csvf)
percs = list(read_fil)
percs = np.array([obj for obj in percs if obj])
low_ind = round(((1-conf)/2 - 0.01) * 100)
up_ind = round((conf + (1-conf)/2 - 0.01) * 100)
# Select the desired intervals bounds. Reshape is necessary for following target inversion.
lower, upper = percs[:,low_ind].reshape(len(percs), 1), percs[:,up_ind].reshape(len(percs), 1)
return lower, upper
#plt.figure()
#
#plt.plot(preds, label='Predicted')
#plt.plot(y_val, label='Measured', marker='*')
#plt.fill_between(range(len(preds)), (preds)+(percs[:,low_ind]), (preds)+(percs[:,up_ind]), color='gray', alpha=0.25, label=f'{round(100*conf)}% prediction interval')
#plt.legend()
#
#plt.show()
def save(self, path=rf'{os.getcwd()}'):
'''
Method for saving the model, scaler, and other attributes to compatible forms.
Uses same folder as subclasses fit-method to save the information.
Input: Desired path for saving the information.
'''
# Define the folder which the results are saved to
new_fold_path = rf'{path}/{self.name}_{self.quant[0]}_{str(self.date)}'
if not os.path.exists(new_fold_path): # Test whether the directory already exists
os.makedirs(new_fold_path)
print(f'Folder created on path: {new_fold_path}.')
else:
            print(f'Saving results to {new_fold_path}.')
# Save model to folder
self.model.save(rf'{new_fold_path}/model.h5')
print('Model saved.')
# Save scaler to folder
dump(self.scaler, rf'{new_fold_path}/scaler.joblib')
print('Scaler saved.')
# Save all other variables to json format to folder
other_vars = {'name': self.name, 'quant': self.quant, 'seq': self.seq, 'fut': self.fut, 'parameters': self.parameters, 'date': str(self.date)}
with open(rf'{new_fold_path}/vars.json', 'w') as f:
json.dump(other_vars, f)
print('Other variables saved.')
def load(self, path):
'''
Loads RNN model information saved with .save method from location specified in function call.
Stores the information by updating class attributes.
Input: path of the storage directory
'''
# Load the model to class attribute
self.model = load_model(rf'{path}/model.h5')
print('Model loaded.')
# Load the scaler
self.scaler = load(rf'{path}/scaler.joblib')
print('Scaler loaded.')
# Load dictionary containing all other variables
with open(rf'{path}/vars.json', 'r') as f:
var_dict = json.load(f)
# Place the variables to correct positions
self.name = var_dict["name"]
self.quant = var_dict["quant"]
self.seq = var_dict["seq"]
self.fut = var_dict["fut"]
self.parameters = var_dict["parameters"]
self.date = var_dict["date"]
print('Other variables loaded.')
def prediction_interval(self, X_train, y_train, x0, path=rf'{os.getcwd()}'):
'''
        Compute a bootstrap prediction interval around the model's prediction on a single data point x0.
        Inputs: training input data, training output data, new input data row, path for saving the results.
        Output: appends percentiles 0-100 of the prediction interval to pred_ints.csv in the model folder.
'''
# Define output path for saving the percentile results.
new_fold_path = rf'{path}/{self.name}_{self.quant[0]}_{str(self.date)}'
if not os.path.exists(new_fold_path): # Test whether the directory already exists
os.makedirs(new_fold_path)
print(f'Folder created on path: {new_fold_path}.')
else:
            print(f'Saving prediction intervals to {new_fold_path}.')
        # Local reference to the machine learning model; needed because its weights and biases are re-initialized below.
model = self.model
# Number of training samples
n = X_train.shape[0]
# Calculate the next prediction to be output in the end
pred_x0 = model.predict(np.reshape(x0, (1, x0.shape[0], x0.shape[1])))
# Calculate training residuals
preds = model.predict(X_train)
train_res = y_train - preds
# Number of bootstrap samples
n_boots = np.sqrt(n).astype(int)
# Compute bootstrap predictions and validation residuals
boot_preds, val_res = np.empty(n_boots), []
for b in range(n_boots):
# Reset model weights, not straightforward with tensorflow Recurrent Neural Networks
for ix, layer in enumerate(model.layers):
if hasattr(self.model.layers[ix], 'recurrent_initializer'):
weight_initializer = model.layers[ix].kernel_initializer
bias_initializer = model.layers[ix].bias_initializer
recurr_init = model.layers[ix].recurrent_initializer
old_weights, old_biases, old_recurrent = model.layers[ix].get_weights()
model.layers[ix].set_weights([
weight_initializer(shape=old_weights.shape),
bias_initializer(shape=old_biases.shape),
recurr_init(shape=old_recurrent.shape)])
elif hasattr(model.layers[ix], 'kernel_initializer') and hasattr(model.layers[ix], 'bias_initializer'):
weight_initializer = model.layers[ix].kernel_initializer
bias_initializer = model.layers[ix].bias_initializer
old_weights, old_biases = model.layers[ix].get_weights()
model.layers[ix].set_weights([
weight_initializer(shape=old_weights.shape),
bias_initializer(shape=len(old_biases))])
print(f'Starting bootstrap {b+1}/{n_boots}')
train_idx = np.random.choice(range(n), size=n, replace=True) # Draw the training indexes with replacement
val_idx = np.array([idx for idx in range(n) if idx not in train_idx]) # Use the ones left after training as validation data
# Train model with training data, validate with validation data. Early Stopping stops training after validation performance
# starts to deteriorate.
model.fit(X_train[train_idx], y_train[train_idx], epochs=100, verbose=0, validation_data=(X_train[val_idx], y_train[val_idx]),
callbacks=EarlyStopping(monitor='val_loss', patience=20, restore_best_weights=True))
preds_val = model.predict(X_train[val_idx]) # Validation predictions
val_res.append(y_train[val_idx] - preds_val) # Calculate validation residuals
boot_preds[b] = model.predict(np.reshape(x0, (1, x0.shape[0], x0.shape[1]))) # Predict with bootstrapped model
boot_preds -= np.mean(boot_preds) # Center bootstrap predictions
val_res = np.concatenate(val_res, axis=None) # Flattening predictions to a single array
# Take percentiles of training and validation residuals to compare
val_res = np.percentile(val_res, q=np.arange(100))
train_res = np.percentile(train_res, q=np.arange(100))
        # .632+ bootstrap: estimate the no-information error rate and the relative overfitting rate
no_inf_err = np.mean(np.abs(np.random.permutation(y_train) - np.random.permutation(preds)))
gener = np.abs(val_res.mean() - train_res.mean())
no_inf_val = np.abs(no_inf_err - train_res)
rel_overfitting_rate = np.mean(gener / no_inf_val)
w = .632 / (1 - .368*rel_overfitting_rate)
res = (1-w) * train_res + w*val_res
# Construct interval boundaries
C = np.array([m + o for m in boot_preds for o in res])
percs = np.percentile(C, q=np.arange(0, 101))
# Saving results to model folder...
print(f'Saving results to {new_fold_path}.')
# Writing rows to file.
with open(rf'{new_fold_path}/pred_ints.csv', 'a') as f:
write = csv.writer(f)
write.writerow(percs)
print('----------------------------------------------------------------------------------------------')
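    # Hedged note on the weighting above: it follows the .632+ bootstrap rule,
    # w = .632 / (1 - .368 * R), where R is the relative overfitting rate, so
    # w tends to .632 with no overfitting and towards 1 as overfitting grows.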
class CVTuner(kt.engine.tuner.Tuner):
'''
    Class used for customizing Keras Tuner for cross-validation purposes. Inherits the Tuner base class.
By default, 5-fold CV is implemented.
'''
def run_trial(self, trial, x, y, batch_size=32, epochs=1, patience=20):
cv = KFold(5)
val_losses = []
for train_indices, test_indices in cv.split(x):
x_train, x_test = x[train_indices], x[test_indices]
y_train, y_test = y[train_indices], y[test_indices]
model = self.hypermodel.build(trial.hyperparameters)
# Define early stopping callback with patience parameter
stopper = EarlyStopping(monitor='val_loss', patience=patience, restore_best_weights=True)
model.fit(x_train, y_train, batch_size=batch_size, validation_data=(x_test, y_test), epochs=epochs, callbacks=[stopper])
val_losses.append(model.evaluate(x_test, y_test))
self.oracle.update_trial(trial.trial_id, {'val_loss': np.mean(val_losses)})
self.save_model(trial.trial_id, model)
class RNN_HyperModel(kt.HyperModel):
'''
    Class for a custom implementation of the Keras Tuner HyperModel. Two methods: initialization with parameters and construction of the hypermodel.
Inherits Keras Tuner HyperModel base class. Is used in fit-method of child classes.
Inputs: model type as string, input data shape as tuple, unit boundaries as list, layer boundaries as list,
learning rate values as list, suitable activation functions as a list.
'''
def __init__(self, mtype, input_shape, units, layers, lr, act):
self.mtype = mtype
self.input_shape = input_shape
self.units = units
self.layers = layers
self.lr = lr
self.act = act
def build(self, hp):
# Create TensorFlow sequential model
model = Sequential()
# Define hyperparameter search space
hp_units = hp.Int('units', min_value=self.units[0], max_value=self.units[1], step=10)
try:
hp_layers = hp.Int('layers', min_value=self.layers[0], max_value=self.layers[1])
except IndexError:
hp_layers = hp.Fixed('layers', value=self.layers[0])
hp_act = hp.Choice('activation function', values=self.act)
hp_lr = hp.Choice('learning rate', values=self.lr)
# Select correct implementation of layer formation based on the model type.
if self.mtype == 'SimpleRNN':
for i in range(hp_layers):
if i == 0 and max(range(hp_layers)) == 0:
model.add(SimpleRNN(units=hp_units, activation=hp_act, input_shape=self.input_shape))
elif i == 0:
model.add(SimpleRNN(units=hp_units, activation=hp_act, input_shape=self.input_shape, return_sequences=True))
model.add(BatchNormalization())
elif i < max(range(hp_layers)):
model.add(SimpleRNN(units=hp_units, activation=hp_act, return_sequences=True))
model.add(BatchNormalization())
else:
model.add(SimpleRNN(units=hp_units, activation=hp_act))
elif self.mtype == 'GRU':
for i in range(hp_layers):
if i == 0 and max(range(hp_layers)) == 0:
model.add(GRU(units=hp_units, activation=hp_act, input_shape=self.input_shape))
elif i == 0:
model.add(GRU(units=hp_units, activation=hp_act, input_shape=self.input_shape, return_sequences=True))
model.add(BatchNormalization())
elif i < max(range(hp_layers)):
model.add(GRU(units=hp_units, activation=hp_act, return_sequences=True))
model.add(BatchNormalization())
else:
model.add(GRU(units=hp_units, activation=hp_act))
elif self.mtype == 'LSTM':
for i in range(hp_layers):
if i == 0 and max(range(hp_layers)) == 0:
model.add(LSTM(units=hp_units, activation=hp_act, input_shape=self.input_shape))
elif i == 0:
model.add(LSTM(units=hp_units, activation=hp_act, input_shape=self.input_shape, return_sequences=True))
model.add(BatchNormalization())
elif i < max(range(hp_layers)):
model.add(LSTM(units=hp_units, activation=hp_act, return_sequences=True))
model.add(BatchNormalization())
else:
model.add(LSTM(units=hp_units, activation=hp_act))
# Add a single output cell with linear activation function.
model.add(Dense(1))
# Define model optimizer, here Adam is used with learning rate decided with Bayesian Optimization
opt = Adam(learning_rate=hp_lr)
# Compile the model. Mean Squared Error is used as loss function while Mean Absolute Error is calculated for illustration
model.compile(loss='mse', optimizer=opt, metrics=['mae'])
return model
class VanillaRNN(RNN):
'''
Conventional Recurrent Neural Network model.
'''
def fit(self, X, y, epochs, max_trials, units=[10, 100], act=['tanh', 'relu'], layers=[1, 2], lr=[0.1, 0.01, 0.001]):
'''
        Fitting method performing hyperparameter optimization. Bayesian Optimization guides the search
        through the hyperparameter space, while 5-fold cross-validation measures the predictive
        performance of each candidate model. Saves the model object and the name to class attributes.
        Inputs: preprocessed input and target data as numpy arrays, maximum training epochs as int,
        number of model configurations to test as int, and the hyperparameter search space with sensible defaults.
'''
        tuner = CVTuner(hypermodel=RNN_HyperModel(mtype='SimpleRNN', input_shape=(X.shape[1], X.shape[2]), units=units,
                                                  act=act, layers=layers, lr=lr),
oracle=kt.oracles.BayesianOptimization(objective='val_loss', max_trials=max_trials),
directory=os.getcwd(),
project_name=f'VanillaRNN_{self.quant[0]}_{str(date.today())}', overwrite=True)
tuner.search(X, y, epochs=epochs)
print(tuner.results_summary())
best = tuner.get_best_models(num_models=1)[0]
        self.name = 'VanillaRNN'
self.model = best
class MyGRU(RNN):
'''
Gated Recurrent Unit variant of RNN. Inherits all attributes and methods from parent class.
'''
def fit(self, X, y, epochs, max_trials, units=[10, 100], act=['tanh'], layers=[1, 2], lr=[0.1, 0.01, 0.001]):
'''
        Fitting method performing hyperparameter optimization. Bayesian Optimization guides the search
        through the hyperparameter space, while 5-fold cross-validation measures the predictive
        performance of each candidate model. Saves the model object and the name to class attributes.
        Inputs: preprocessed input and target data as numpy arrays, maximum training epochs as int,
        number of model configurations to test as int, and the hyperparameter search space with sensible defaults.
'''
        tuner = CVTuner(hypermodel=RNN_HyperModel(mtype='GRU', input_shape=(X.shape[1], X.shape[2]), units=units,
                                                  act=act, layers=layers, lr=lr),
oracle=kt.oracles.BayesianOptimization(objective='val_loss', max_trials=max_trials),
directory=os.getcwd(),
project_name=f'GRU_{self.quant[0]}_{str(date.today())}', overwrite=True)
tuner.search(X, y, epochs=epochs)
print(tuner.results_summary())
best = tuner.get_best_models(num_models=1)[0]
        self.name = 'GRU'
self.model = best
class MyLSTM(RNN):
'''
Long Short Term Memory variant of RNN. Inherits all attributes and methods from parent class.
'''
def fit(self, X, y, epochs, max_trials, units=[10, 100], act=['tanh'], layers=[1, 2], lr=[0.1, 0.01, 0.001]):
'''
        Fitting method performing hyperparameter optimization. Bayesian Optimization guides the search
        through the hyperparameter space, while 5-fold cross-validation measures the predictive
        performance of each candidate model. Saves the model object and the name to class attributes.
        Inputs: preprocessed input and target data as numpy arrays, maximum training epochs as int,
        number of model configurations to test as int, and the hyperparameter search space with sensible defaults.
'''
        tuner = CVTuner(hypermodel=RNN_HyperModel(mtype='LSTM', input_shape=(X.shape[1], X.shape[2]), units=units,
                                                  act=act, layers=layers, lr=lr),
oracle=kt.oracles.BayesianOptimization(objective='val_loss', max_trials=max_trials),
directory=os.getcwd(),
project_name=f'LSTM_{self.quant[0]}_{str(date.today())}', overwrite=True)
tuner.search(X, y, epochs=epochs)
print(tuner.results_summary())
best = tuner.get_best_models(num_models=1)[0]
        self.name = 'LSTM'
self.model = best
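# Hedged end-to-end sketch (illustration only; the file and column names are hypothetical):
#   model = MyLSTM(quant=['district_heat_kwh'], seq=4, parameters=['outside_temp'])
#   X_train, y_train, X_val, y_val = model.preprocess(pd.read_csv('downsampled.csv'))
#   model.fit(X_train, y_train, epochs=100, max_trials=10)
#   preds, y_true = model.inv_target(X_val, model.model.predict(X_val), y_val)
#   model.plot_preds(preds, y_true)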
| 48.500921
| 179
| 0.606508
|
import pandas as pd
import numpy as np
from tensorflow import Variable
from tensorflow.keras.models import Sequential, load_model
from tensorflow.keras.layers import Dense, GRU, LSTM, SimpleRNN, BatchNormalization
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import EarlyStopping
from sklearn.metrics import mean_squared_error, mean_absolute_error
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import KFold
from collections import deque
import matplotlib.pyplot as plt
import kerastuner as kt
from datetime import datetime, date
from joblib import dump, load
import os
import json
import csv
import math
class RNN:
def __init__(self, quant=None, seq=4, fut=0, parameters=None):
self.quant = quant
self.seq = seq
self.fut = fut
self.parameters = parameters
self.date = date.today()
self.model = None
self.scaler = None
self.name = None
def preprocess(self, raw_data):
raw_data.iloc[:,0] = pd.to_datetime(raw_data.iloc[:,0], format='%Y-%m-%d %H:%M:%S%z')
vec = raw_data.iloc[:,0].values
datetimes = np.array([[vec, vec], [vec, vec]], dtype = 'M8[ms]').astype('O')[0,1]
raw_data['weekday'] = [t.timetuple().tm_wday for t in datetimes]
raw_data['hours'] = [t.hour for t in datetimes]
raw_data['hours_sin'] = np.sin(2 * np.pi * raw_data['hours']/24.0)
raw_data['hours_cos'] = np.cos(2 * np.pi * raw_data['hours']/24.0)
raw_data['weekday_sin'] = np.sin(2 * np.pi * raw_data['weekday']/7)
raw_data['weekday_cos'] = np.cos(2 * np.pi * raw_data['weekday']/7)
self.parameters.extend(self.quant)
raw_data = raw_data[self.parameters].copy()
df_train = raw_data[int(len(raw_data)*0.2):].copy()
df_val = raw_data[:int(len(raw_data)*0.2)].copy()
self.parameters = [x for x in self.parameters if x not in self.quant]
self.scaler = MinMaxScaler()
df_train = self.scaler.fit_transform(df_train)
df_val = self.scaler.transform(df_val)
sequences_train = []
sequences_val = []
prev_days_train = deque(maxlen=self.seq)
prev_days_val = deque(maxlen=self.seq)
l_quant = len(self.quant)
for count, row in enumerate(pd.DataFrame(df_train).values):
prev_days_train.append([val for val in row[:-l_quant]])
if (len(prev_days_train) == self.seq):
if (any(pd.isna(pd.DataFrame(df_train).values[count-1][-l_quant:]))):
continue
try:
sequences_train.append([np.array(prev_days_train), pd.DataFrame(df_train).values[count+1][-l_quant:]])
except IndexError:
break
for count, row in enumerate(pd.DataFrame(df_val).values):
prev_days_val.append([val for val in row[:-l_quant]])
if (len(prev_days_val) == self.seq):
if (any(pd.isna(pd.DataFrame(df_val).values[count-1][-l_quant:]))):
continue
try:
sequences_val.append([np.array(prev_days_val), pd.DataFrame(df_val).values[count+1][-l_quant:]])
except IndexError:
break
X_train = []
y_train = []
X_val = []
y_val = []
for seq, target in sequences_train:
X_train.append(seq)
y_train.append(target)
for seq, target in sequences_val:
X_val.append(seq)
y_val.append(target)
X_train = np.array(X_train)
y_train = np.array(y_train)
X_val = np.array(X_val)
y_val = np.array(y_val)
print(f'Shape of training data: {X_train.shape}')
print(f'Shape of testing data: {X_val.shape}')
return X_train, y_train, X_val, y_val
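    # Illustrative note (assumption, not original code): the sliding-window
    # construction above is equivalent to pairing each window of `seq`
    # feature rows with the target that follows it, e.g. for seq=4:
    #
    #   window = deque(maxlen=4)
    #   for i, row in enumerate(features):
    #       window.append(row)
    #       if len(window) == 4 and i + 1 < len(targets):
    #           samples.append((np.array(window), targets[i + 1]))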
def inv_target(self, X, preds, y_val):
preds = np.concatenate((X[:len(preds),-1], np.array(preds).reshape(len(preds), 1)), axis=1)
y_val = np.concatenate((X[:len(preds),-1], np.array(y_val[:len(preds)]).reshape(len(preds), 1)), axis=1)
preds = self.scaler.inverse_transform(preds)[:,-1:]
y_val = self.scaler.inverse_transform(y_val)[:,-1:]
return preds, y_val
def plot_preds(self, preds, y_val, low=[], up=[], conf=0.9):
if len(low) != 0:
rounds = len(low)
else:
rounds = len(preds)
plt.figure()
plt.plot(preds[:rounds], color='navy', label='Predicted')
plt.plot(y_val[:rounds], color='darkorange', label='Measured', marker='*')
if len(low) != 0:
plt.fill_between(range(rounds), (preds[:rounds,0])+(low[:,0]), (preds[:rounds,0])+(up[:,0]), color='gray', alpha=0.25, label=f'{round(conf*100)}% prediction interval')
plt.legend()
plt.grid()
plt.title(f'Predictions for {self.quant[0]} with {self.name}.')
plt.show()
def load_intervals(self, int_path, conf=0.9):
with open(int_path) as csvf:
read_fil = csv.reader(csvf)
percs = list(read_fil)
percs = np.array([obj for obj in percs if obj])
low_ind = round(((1-conf)/2 - 0.01) * 100)
up_ind = round((conf + (1-conf)/2 - 0.01) * 100)
lower, upper = percs[:,low_ind].reshape(len(percs), 1), percs[:,up_ind].reshape(len(percs), 1)
return lower, upper
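    # Illustrative note (assumption): for conf=0.9 the indices above resolve
    # to the 4th and 94th percentile columns, i.e.
    #   low_ind = round(((1 - 0.9) / 2 - 0.01) * 100)        # -> 4
    #   up_ind  = round((0.9 + (1 - 0.9) / 2 - 0.01) * 100)  # -> 94
    # so the returned bounds approximate a 90% prediction interval.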
def save(self, path=rf'{os.getcwd()}'):
new_fold_path = rf'{path}/{self.name}_{self.quant[0]}_{str(self.date)}'
if not os.path.exists(new_fold_path):
os.makedirs(new_fold_path)
print(f'Folder created on path: {new_fold_path}.')
else:
            print(f'Saving results to {new_fold_path}.')
self.model.save(rf'{new_fold_path}/model.h5')
print('Model saved.')
dump(self.scaler, rf'{new_fold_path}/scaler.joblib')
print('Scaler saved.')
other_vars = {'name': self.name, 'quant': self.quant, 'seq': self.seq, 'fut': self.fut, 'parameters': self.parameters, 'date': str(self.date)}
with open(rf'{new_fold_path}/vars.json', 'w') as f:
json.dump(other_vars, f)
print('Other variables saved.')
def load(self, path):
self.model = load_model(rf'{path}/model.h5')
print('Model loaded.')
self.scaler = load(rf'{path}/scaler.joblib')
print('Scaler loaded.')
with open(rf'{path}/vars.json', 'r') as f:
var_dict = json.load(f)
self.name = var_dict["name"]
self.quant = var_dict["quant"]
self.seq = var_dict["seq"]
self.fut = var_dict["fut"]
self.parameters = var_dict["parameters"]
self.date = var_dict["date"]
print('Other variables loaded.')
def prediction_interval(self, X_train, y_train, x0, path=rf'{os.getcwd()}'):
new_fold_path = rf'{path}/{self.name}_{self.quant[0]}_{str(self.date)}'
if not os.path.exists(new_fold_path):
os.makedirs(new_fold_path)
print(f'Folder created on path: {new_fold_path}.')
else:
            print(f'Saving prediction intervals to {new_fold_path}.')
model = self.model
n = X_train.shape[0]
pred_x0 = model.predict(np.reshape(x0, (1, x0.shape[0], x0.shape[1])))
preds = model.predict(X_train)
train_res = y_train - preds
n_boots = np.sqrt(n).astype(int)
boot_preds, val_res = np.empty(n_boots), []
for b in range(n_boots):
for ix, layer in enumerate(model.layers):
if hasattr(self.model.layers[ix], 'recurrent_initializer'):
weight_initializer = model.layers[ix].kernel_initializer
bias_initializer = model.layers[ix].bias_initializer
recurr_init = model.layers[ix].recurrent_initializer
old_weights, old_biases, old_recurrent = model.layers[ix].get_weights()
model.layers[ix].set_weights([
weight_initializer(shape=old_weights.shape),
bias_initializer(shape=old_biases.shape),
recurr_init(shape=old_recurrent.shape)])
elif hasattr(model.layers[ix], 'kernel_initializer') and hasattr(model.layers[ix], 'bias_initializer'):
weight_initializer = model.layers[ix].kernel_initializer
bias_initializer = model.layers[ix].bias_initializer
old_weights, old_biases = model.layers[ix].get_weights()
model.layers[ix].set_weights([
weight_initializer(shape=old_weights.shape),
bias_initializer(shape=len(old_biases))])
print(f'Starting bootstrap {b+1}/{n_boots}')
train_idx = np.random.choice(range(n), size=n, replace=True)
val_idx = np.array([idx for idx in range(n) if idx not in train_idx])
model.fit(X_train[train_idx], y_train[train_idx], epochs=100, verbose=0, validation_data=(X_train[val_idx], y_train[val_idx]),
callbacks=EarlyStopping(monitor='val_loss', patience=20, restore_best_weights=True))
preds_val = model.predict(X_train[val_idx])
val_res.append(y_train[val_idx] - preds_val)
            boot_preds[b] = model.predict(np.reshape(x0, (1, x0.shape[0], x0.shape[1]))).item()
boot_preds -= np.mean(boot_preds)
val_res = np.concatenate(val_res, axis=None)
val_res = np.percentile(val_res, q=np.arange(100))
train_res = np.percentile(train_res, q=np.arange(100))
no_inf_err = np.mean(np.abs(np.random.permutation(y_train) - np.random.permutation(preds)))
gener = np.abs(val_res.mean() - train_res.mean())
no_inf_val = np.abs(no_inf_err - train_res)
rel_overfitting_rate = np.mean(gener / no_inf_val)
w = .632 / (1 - .368*rel_overfitting_rate)
res = (1-w) * train_res + w*val_res
C = np.array([m + o for m in boot_preds for o in res])
percs = np.percentile(C, q=np.arange(0, 101))
print(f'Saving results to {new_fold_path}.')
with open(rf'{new_fold_path}/pred_ints.csv', 'a') as f:
write = csv.writer(f)
write.writerow(percs)
print('----------------------------------------------------------------------------------------------')
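    # Note on the statistic above (explanatory sketch, not original code): the
    # interval blends training and out-of-bag residual percentiles with the
    # .632+ bootstrap weight
    #     R = mean(generalization gap / no-information error),
    #     w = .632 / (1 - .368 * R),
    #     res = (1 - w) * train_res + w * val_res,
    # so stronger overfitting (larger R) shifts weight toward the out-of-bag
    # (validation) residuals.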
class CVTuner(kt.engine.tuner.Tuner):
def run_trial(self, trial, x, y, batch_size=32, epochs=1, patience=20):
cv = KFold(5)
val_losses = []
for train_indices, test_indices in cv.split(x):
x_train, x_test = x[train_indices], x[test_indices]
y_train, y_test = y[train_indices], y[test_indices]
model = self.hypermodel.build(trial.hyperparameters)
stopper = EarlyStopping(monitor='val_loss', patience=patience, restore_best_weights=True)
model.fit(x_train, y_train, batch_size=batch_size, validation_data=(x_test, y_test), epochs=epochs, callbacks=[stopper])
val_losses.append(model.evaluate(x_test, y_test))
self.oracle.update_trial(trial.trial_id, {'val_loss': np.mean(val_losses)})
self.save_model(trial.trial_id, model)
class RNN_HyperModel(kt.HyperModel):
def __init__(self, mtype, input_shape, units, layers, lr, act):
self.mtype = mtype
self.input_shape = input_shape
self.units = units
self.layers = layers
self.lr = lr
self.act = act
def build(self, hp):
model = Sequential()
hp_units = hp.Int('units', min_value=self.units[0], max_value=self.units[1], step=10)
try:
hp_layers = hp.Int('layers', min_value=self.layers[0], max_value=self.layers[1])
except IndexError:
hp_layers = hp.Fixed('layers', value=self.layers[0])
hp_act = hp.Choice('activation function', values=self.act)
hp_lr = hp.Choice('learning rate', values=self.lr)
if self.mtype == 'SimpleRNN':
for i in range(hp_layers):
if i == 0 and max(range(hp_layers)) == 0:
model.add(SimpleRNN(units=hp_units, activation=hp_act, input_shape=self.input_shape))
elif i == 0:
model.add(SimpleRNN(units=hp_units, activation=hp_act, input_shape=self.input_shape, return_sequences=True))
model.add(BatchNormalization())
elif i < max(range(hp_layers)):
model.add(SimpleRNN(units=hp_units, activation=hp_act, return_sequences=True))
model.add(BatchNormalization())
else:
model.add(SimpleRNN(units=hp_units, activation=hp_act))
elif self.mtype == 'GRU':
for i in range(hp_layers):
if i == 0 and max(range(hp_layers)) == 0:
model.add(GRU(units=hp_units, activation=hp_act, input_shape=self.input_shape))
elif i == 0:
model.add(GRU(units=hp_units, activation=hp_act, input_shape=self.input_shape, return_sequences=True))
model.add(BatchNormalization())
elif i < max(range(hp_layers)):
model.add(GRU(units=hp_units, activation=hp_act, return_sequences=True))
model.add(BatchNormalization())
else:
model.add(GRU(units=hp_units, activation=hp_act))
elif self.mtype == 'LSTM':
for i in range(hp_layers):
if i == 0 and max(range(hp_layers)) == 0:
model.add(LSTM(units=hp_units, activation=hp_act, input_shape=self.input_shape))
elif i == 0:
model.add(LSTM(units=hp_units, activation=hp_act, input_shape=self.input_shape, return_sequences=True))
model.add(BatchNormalization())
elif i < max(range(hp_layers)):
model.add(LSTM(units=hp_units, activation=hp_act, return_sequences=True))
model.add(BatchNormalization())
else:
model.add(LSTM(units=hp_units, activation=hp_act))
model.add(Dense(1))
opt = Adam(learning_rate=hp_lr)
model.compile(loss='mse', optimizer=opt, metrics=['mae'])
return model
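# Illustrative usage (assumption, not original code): building one concrete
# model outside the tuner with kerastuner's HyperParameters container.
#
#   hp = kt.HyperParameters()
#   hm = RNN_HyperModel(mtype='LSTM', input_shape=(4, 6), units=[10, 100],
#                       layers=[1, 2], lr=[0.01, 0.001], act=['tanh'])
#   model = hm.build(hp)  # uses the default of each declared range/choice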
class VanillaRNN(RNN):
def fit(self, X, y, epochs, max_trials, units=[10, 100], act=['tanh', 'relu'], layers=[1, 2], lr=[0.1, 0.01, 0.001]):
        tuner = CVTuner(hypermodel=RNN_HyperModel(mtype='SimpleRNN', input_shape=(X.shape[1], X.shape[2]), units=units,
act=act, layers=layers, lr=lr),
oracle=kt.oracles.BayesianOptimization(objective='val_loss', max_trials=max_trials),
directory=os.getcwd(),
project_name=f'VanillaRNN_{self.quant[0]}_{str(date.today())}', overwrite=True)
tuner.search(X, y, epochs=epochs)
print(tuner.results_summary())
best = tuner.get_best_models(num_models=1)[0]
        self.name = 'VanillaRNN'
self.model = best
class MyGRU(RNN):
def fit(self, X, y, epochs, max_trials, units=[10, 100], act=['tanh'], layers=[1, 2], lr=[0.1, 0.01, 0.001]):
        tuner = CVTuner(hypermodel=RNN_HyperModel(mtype='GRU', input_shape=(X.shape[1], X.shape[2]), units=units,
act=act, layers=layers, lr=lr),
oracle=kt.oracles.BayesianOptimization(objective='val_loss', max_trials=max_trials),
directory=os.getcwd(),
project_name=f'GRU_{self.quant[0]}_{str(date.today())}', overwrite=True)
tuner.search(X, y, epochs=epochs)
print(tuner.results_summary())
best = tuner.get_best_models(num_models=1)[0]
        self.name = 'GRU'
self.model = best
class MyLSTM(RNN):
def fit(self, X, y, epochs, max_trials, units=[10, 100], act=['tanh'], layers=[1, 2], lr=[0.1, 0.01, 0.001]):
        tuner = CVTuner(hypermodel=RNN_HyperModel(mtype='LSTM', input_shape=(X.shape[1], X.shape[2]), units=units,
act=act, layers=layers, lr=lr),
oracle=kt.oracles.BayesianOptimization(objective='val_loss', max_trials=max_trials),
directory=os.getcwd(),
project_name=f'LSTM_{self.quant[0]}_{str(date.today())}', overwrite=True)
tuner.search(X, y, epochs=epochs)
print(tuner.results_summary())
best = tuner.get_best_models(num_models=1)[0]
        self.name = 'LSTM'
self.model = best
| true
| true
|
1c44d872e71fe39535f712ec003d271474cc86bc
| 772
|
py
|
Python
|
qcloudsdkds/ContractRecoveryRequest.py
|
f3n9/qcloudcli
|
b965a4f0e6cdd79c1245c1d0cd2ca9c460a56f19
|
[
"Apache-2.0"
] | null | null | null |
qcloudsdkds/ContractRecoveryRequest.py
|
f3n9/qcloudcli
|
b965a4f0e6cdd79c1245c1d0cd2ca9c460a56f19
|
[
"Apache-2.0"
] | null | null | null |
qcloudsdkds/ContractRecoveryRequest.py
|
f3n9/qcloudcli
|
b965a4f0e6cdd79c1245c1d0cd2ca9c460a56f19
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from qcloudsdkcore.request import Request
class ContractRecoveryRequest(Request):
def __init__(self):
super(ContractRecoveryRequest, self).__init__(
'ds', 'qcloudcliV1', 'ContractRecovery', 'ds.api.qcloud.com')
def get_contractResId(self):
return self.get_params().get('contractResId')
def set_contractResId(self, contractResId):
self.add_param('contractResId', contractResId)
def get_module(self):
return self.get_params().get('module')
def set_module(self, module):
self.add_param('module', module)
def get_operation(self):
return self.get_params().get('operation')
def set_operation(self, operation):
self.add_param('operation', operation)
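# Usage sketch (illustrative; not part of the generated SDK file).
if __name__ == '__main__':
    req = ContractRecoveryRequest()
    req.set_contractResId('res-123')        # placeholder resource id
    req.set_module('ds')
    req.set_operation('ContractRecovery')
    print(req.get_params())                 # {'contractResId': 'res-123', ...}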
| 27.571429
| 73
| 0.676166
|
from qcloudsdkcore.request import Request
class ContractRecoveryRequest(Request):
def __init__(self):
super(ContractRecoveryRequest, self).__init__(
'ds', 'qcloudcliV1', 'ContractRecovery', 'ds.api.qcloud.com')
def get_contractResId(self):
return self.get_params().get('contractResId')
def set_contractResId(self, contractResId):
self.add_param('contractResId', contractResId)
def get_module(self):
return self.get_params().get('module')
def set_module(self, module):
self.add_param('module', module)
def get_operation(self):
return self.get_params().get('operation')
def set_operation(self, operation):
self.add_param('operation', operation)
| true
| true
|
1c44d8736d714d113497f524b08d44eb57ca8a35
| 2,233
|
py
|
Python
|
feedler/feedparser.py
|
pcoder/public-health-ch
|
cebc4849653560c54238b67814074353ff7c01f3
|
[
"MIT"
] | 2
|
2020-10-29T16:27:21.000Z
|
2021-06-07T12:47:46.000Z
|
feedler/feedparser.py
|
pcoder/public-health-ch
|
cebc4849653560c54238b67814074353ff7c01f3
|
[
"MIT"
] | 11
|
2017-05-09T10:50:28.000Z
|
2021-12-15T17:01:23.000Z
|
feedler/feedparser.py
|
pcoder/public-health-ch
|
cebc4849653560c54238b67814074353ff7c01f3
|
[
"MIT"
] | 4
|
2017-04-24T13:06:55.000Z
|
2021-06-04T02:18:32.000Z
|
# -*- coding: utf-8 -*-
from datetime import datetime
from guess_language import guess_language
def parse(obj, raw, stream):
"""
    Parse a raw JSON entry from the Feedly API into the given object
"""
obj.raw = raw
obj.stream = stream
obj.entry_id = raw['id']
# Date stamp handling
ts = raw['published'] / 1000
obj.published = datetime.fromtimestamp(ts)
# Authorship and title
obj.title = raw['title'][:250]
    if 'author' in raw:
        obj.author = raw['author'][:250]
elif 'title' in raw['origin']:
obj.author = raw['origin']['title'][:250]
# Parse links and references
if len(raw['alternate']) > 0:
obj.link = raw['alternate'][0]['href'][:500]
if 'thumbnail' in raw and len(raw['thumbnail']) > 0:
if 'url' in raw['thumbnail'][0]:
obj.visual = raw['thumbnail'][0]['url'][:500]
elif 'enclosure' in raw and len(raw['enclosure']) > 0:
if 'href' in raw['enclosure'][0]:
obj.visual = raw['enclosure'][0]['href'][:500]
    elif 'visual' in raw and 'url' in raw['visual']:
        obj.visual = raw['visual']['url'][:500]
    else:
        obj.visual = ''
    if obj.visual.lower().strip() == 'none':
        obj.visual = ''
# Collect text in nested JSON content
if 'summary' in obj.raw:
if 'content' in obj.raw['summary']:
obj.content = obj.raw['summary']['content']
else:
obj.content = obj.raw['summary']
elif 'content' in obj.raw:
if 'content' in obj.raw['content']:
obj.content = obj.raw['content']['content']
else:
obj.content = obj.raw['content']
elif 'fullContent' in obj.raw:
obj.content = obj.raw['fullContent']
else:
obj.content = ''
# Detect language
try:
obj.lang = guess_language(obj.content) or ''
    except Exception:
obj.lang = ''
# Collect tags
tags = []
if 'tags' in obj.raw:
for tag in obj.raw['tags']:
if 'label' in tag:
label = tag['label'].replace(',','-')
label = label.strip().lower()
                if len(label) > 3 and label not in tags:
tags.append(label)
obj.tags = ','.join(tags)
return obj
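# Usage sketch (illustrative; `Entry` is a stand-in for whatever model object
# the caller passes as `obj` -- any object with settable attributes works).
if __name__ == '__main__':
    class Entry:
        visual = ''
    raw = {'id': 'e1', 'published': 1600000000000,
           'title': 'Example post', 'origin': {'title': 'Some Feed'},
           'alternate': [{'href': 'https://example.org/post'}],
           'summary': {'content': 'Hello world'}}
    entry = parse(Entry(), raw, stream='user/-/category/health')
    print(entry.title, entry.link, entry.lang)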
| 30.589041
| 58
| 0.545455
|
from datetime import datetime
from guess_language import guess_language
def parse(obj, raw, stream):
obj.raw = raw
obj.stream = stream
obj.entry_id = raw['id']
ts = raw['published'] / 1000
obj.published = datetime.fromtimestamp(ts)
obj.title = raw['title'][:250]
    if 'author' in raw:
        obj.author = raw['author'][:250]
elif 'title' in raw['origin']:
obj.author = raw['origin']['title'][:250]
if len(raw['alternate']) > 0:
obj.link = raw['alternate'][0]['href'][:500]
if 'thumbnail' in raw and len(raw['thumbnail']) > 0:
if 'url' in raw['thumbnail'][0]:
obj.visual = raw['thumbnail'][0]['url'][:500]
elif 'enclosure' in raw and len(raw['enclosure']) > 0:
if 'href' in raw['enclosure'][0]:
obj.visual = raw['enclosure'][0]['href'][:500]
    elif 'visual' in raw and 'url' in raw['visual']:
        obj.visual = raw['visual']['url'][:500]
    else:
        obj.visual = ''
    if obj.visual.lower().strip() == 'none':
        obj.visual = ''
if 'summary' in obj.raw:
if 'content' in obj.raw['summary']:
obj.content = obj.raw['summary']['content']
else:
obj.content = obj.raw['summary']
elif 'content' in obj.raw:
if 'content' in obj.raw['content']:
obj.content = obj.raw['content']['content']
else:
obj.content = obj.raw['content']
elif 'fullContent' in obj.raw:
obj.content = obj.raw['fullContent']
else:
obj.content = ''
try:
obj.lang = guess_language(obj.content) or ''
    except Exception:
obj.lang = ''
tags = []
if 'tags' in obj.raw:
for tag in obj.raw['tags']:
if 'label' in tag:
label = tag['label'].replace(',','-')
label = label.strip().lower()
                if len(label) > 3 and label not in tags:
tags.append(label)
obj.tags = ','.join(tags)
return obj
| true
| true
|
1c44da8cd8e14680cabbb02c8fd7a7fdb43c144d
| 393
|
py
|
Python
|
rockwell/rockwell/asgi.py
|
Xiangyongluo/Hackathon-Project
|
815eb9b4e1ea9d41d4ddc90e204bbe919b8bc2ba
|
[
"Apache-2.0"
] | null | null | null |
rockwell/rockwell/asgi.py
|
Xiangyongluo/Hackathon-Project
|
815eb9b4e1ea9d41d4ddc90e204bbe919b8bc2ba
|
[
"Apache-2.0"
] | 1
|
2021-12-04T04:35:52.000Z
|
2021-12-04T04:35:52.000Z
|
rockwell/rockwell/asgi.py
|
Xiangyongluo/Hackathon-Project
|
815eb9b4e1ea9d41d4ddc90e204bbe919b8bc2ba
|
[
"Apache-2.0"
] | null | null | null |
"""
ASGI config for rockwell project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'rockwell.settings')
application = get_asgi_application()
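# Deployment sketch (not part of the generated file): one common way to serve
# this application is with an ASGI server such as Uvicorn, e.g.
#
#   uvicorn rockwell.asgi:application --host 0.0.0.0 --port 8000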
| 23.117647
| 78
| 0.78626
|
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'rockwell.settings')
application = get_asgi_application()
| true
| true
|
1c44db39a1bbd369f3b8ca3833e06ce7a09f36e3
| 6,679
|
py
|
Python
|
dm_control/locomotion/walkers/base.py
|
h8907283/dm_control
|
fe4449606742a7b8bec81930790b98244cddc538
|
[
"Apache-2.0"
] | 1
|
2022-03-22T11:53:38.000Z
|
2022-03-22T11:53:38.000Z
|
dm_control/locomotion/walkers/base.py
|
krakhit/dm_control
|
4e1a35595124742015ae0c7a829e099a5aa100f5
|
[
"Apache-2.0"
] | null | null | null |
dm_control/locomotion/walkers/base.py
|
krakhit/dm_control
|
4e1a35595124742015ae0c7a829e099a5aa100f5
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Base class for Walkers."""
import abc
import collections
from dm_control import composer
from dm_control.composer.observation import observable
from dm_env import specs
import numpy as np
def _make_readonly_float64_copy(value):
if np.isscalar(value):
return np.float64(value)
else:
out = np.array(value, dtype=np.float64)
out.flags.writeable = False
return out
class WalkerPose(collections.namedtuple(
'WalkerPose', ('qpos', 'xpos', 'xquat'))):
"""A named tuple representing a walker's joint and Cartesian pose."""
__slots__ = ()
def __new__(cls, qpos=None, xpos=(0, 0, 0), xquat=(1, 0, 0, 0)):
"""Creates a new WalkerPose.
Args:
qpos: The joint position for the pose, or `None` if the `qpos0` values in
the `mjModel` should be used.
xpos: A Cartesian displacement, for example if the walker should be lifted
or lowered by a specific amount for this pose.
xquat: A quaternion displacement for the root body.
Returns:
A new instance of `WalkerPose`.
"""
return super(WalkerPose, cls).__new__(
cls,
qpos=_make_readonly_float64_copy(qpos) if qpos is not None else None,
xpos=_make_readonly_float64_copy(xpos),
xquat=_make_readonly_float64_copy(xquat))
def __eq__(self, other):
return (np.all(self.qpos == other.qpos) and
np.all(self.xpos == other.xpos) and
np.all(self.xquat == other.xquat))
class Walker(composer.Robot, metaclass=abc.ABCMeta):
"""Abstract base class for Walker robots."""
def create_root_joints(self, attachment_frame):
attachment_frame.add('freejoint')
def _build_observables(self):
return WalkerObservables(self)
def transform_vec_to_egocentric_frame(self, physics, vec_in_world_frame):
"""Linearly transforms a world-frame vector into walker's egocentric frame.
Note that this function does not perform an affine transformation of the
vector. In other words, the input vector is assumed to be specified with
respect to the same origin as this walker's egocentric frame. This function
can also be applied to matrices whose innermost dimensions are either 2 or
3. In this case, a matrix with the same leading dimensions is returned
where the innermost vectors are replaced by their values computed in the
egocentric frame.
Args:
physics: An `mjcf.Physics` instance.
vec_in_world_frame: A NumPy array with last dimension of shape (2,) or
(3,) that represents a vector quantity in the world frame.
Returns:
The same quantity as `vec_in_world_frame` but reexpressed in this
entity's egocentric frame. The returned np.array has the same shape as
np.asarray(vec_in_world_frame).
Raises:
ValueError: if `vec_in_world_frame` does not have shape ending with (2,)
or (3,).
"""
return super().global_vector_to_local_frame(physics, vec_in_world_frame)
def transform_xmat_to_egocentric_frame(self, physics, xmat):
"""Transforms another entity's `xmat` into this walker's egocentric frame.
This function takes another entity's (E) xmat, which is an SO(3) matrix
from E's frame to the world frame, and turns it to a matrix that transforms
from E's frame into this walker's egocentric frame.
Args:
physics: An `mjcf.Physics` instance.
xmat: A NumPy array of shape (3, 3) or (9,) that represents another
entity's xmat.
Returns:
The `xmat` reexpressed in this entity's egocentric frame. The returned
np.array has the same shape as np.asarray(xmat).
Raises:
ValueError: if `xmat` does not have shape (3, 3) or (9,).
"""
return super().global_xmat_to_local_frame(physics, xmat)
  @property
  @abc.abstractmethod
  def root_body(self):
    raise NotImplementedError
  @property
  @abc.abstractmethod
  def observable_joints(self):
    raise NotImplementedError
@property
def action_spec(self):
if not self.actuators:
minimum, maximum = (), ()
else:
minimum, maximum = zip(*[
a.ctrlrange if a.ctrlrange is not None else (-1., 1.)
for a in self.actuators
])
return specs.BoundedArray(
shape=(len(self.actuators),),
dtype=float,
minimum=minimum,
maximum=maximum,
name='\t'.join([actuator.name for actuator in self.actuators]))
def apply_action(self, physics, action, random_state):
"""Apply action to walker's actuators."""
del random_state
physics.bind(self.actuators).ctrl = action
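# Illustrative sketch (assumption): sampling a random control within the
# bounds of `action_spec` and applying it; `walker` and `physics` are
# hypothetical, pre-built objects.
#
#   spec = walker.action_spec
#   action = np.random.uniform(spec.minimum, spec.maximum, size=spec.shape)
#   walker.apply_action(physics, action, random_state=None)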
class WalkerObservables(composer.Observables):
"""Base class for Walker obserables."""
@composer.observable
def joints_pos(self):
return observable.MJCFFeature('qpos', self._entity.observable_joints)
@composer.observable
def sensors_gyro(self):
return observable.MJCFFeature('sensordata',
self._entity.mjcf_model.sensor.gyro)
@composer.observable
def sensors_accelerometer(self):
return observable.MJCFFeature('sensordata',
self._entity.mjcf_model.sensor.accelerometer)
@composer.observable
def sensors_framequat(self):
return observable.MJCFFeature('sensordata',
self._entity.mjcf_model.sensor.framequat)
# Semantic groupings of Walker observables.
def _collect_from_attachments(self, attribute_name):
out = []
for entity in self._entity.iter_entities(exclude_self=True):
out.extend(getattr(entity.observables, attribute_name, []))
return out
@property
def proprioception(self):
return ([self.joints_pos] +
self._collect_from_attachments('proprioception'))
@property
def kinematic_sensors(self):
return ([self.sensors_gyro,
self.sensors_accelerometer,
self.sensors_framequat] +
self._collect_from_attachments('kinematic_sensors'))
@property
def dynamic_sensors(self):
return self._collect_from_attachments('dynamic_sensors')
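# Subclassing sketch (illustrative, not from the original file): a concrete
# walker mainly has to provide the abstract properties; `_build` and the MJCF
# asset below are hypothetical.
#
#   class MyWalker(Walker):
#     def _build(self):
#       self._mjcf_root = mjcf.from_path('my_walker.xml')
#     @property
#     def mjcf_model(self):
#       return self._mjcf_root
#     @property
#     def root_body(self):
#       return self._mjcf_root.find('body', 'torso')
#     @property
#     def observable_joints(self):
#       return self._mjcf_root.find_all('joint')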
| 33.562814
| 80
| 0.690972
|
import abc
import collections
from dm_control import composer
from dm_control.composer.observation import observable
from dm_env import specs
import numpy as np
def _make_readonly_float64_copy(value):
if np.isscalar(value):
return np.float64(value)
else:
out = np.array(value, dtype=np.float64)
out.flags.writeable = False
return out
class WalkerPose(collections.namedtuple(
'WalkerPose', ('qpos', 'xpos', 'xquat'))):
__slots__ = ()
def __new__(cls, qpos=None, xpos=(0, 0, 0), xquat=(1, 0, 0, 0)):
return super(WalkerPose, cls).__new__(
cls,
qpos=_make_readonly_float64_copy(qpos) if qpos is not None else None,
xpos=_make_readonly_float64_copy(xpos),
xquat=_make_readonly_float64_copy(xquat))
def __eq__(self, other):
return (np.all(self.qpos == other.qpos) and
np.all(self.xpos == other.xpos) and
np.all(self.xquat == other.xquat))
class Walker(composer.Robot, metaclass=abc.ABCMeta):
def create_root_joints(self, attachment_frame):
attachment_frame.add('freejoint')
def _build_observables(self):
return WalkerObservables(self)
def transform_vec_to_egocentric_frame(self, physics, vec_in_world_frame):
return super().global_vector_to_local_frame(physics, vec_in_world_frame)
def transform_xmat_to_egocentric_frame(self, physics, xmat):
return super().global_xmat_to_local_frame(physics, xmat)
  @property
  @abc.abstractmethod
  def root_body(self):
    raise NotImplementedError
  @property
  @abc.abstractmethod
  def observable_joints(self):
    raise NotImplementedError
@property
def action_spec(self):
if not self.actuators:
minimum, maximum = (), ()
else:
minimum, maximum = zip(*[
a.ctrlrange if a.ctrlrange is not None else (-1., 1.)
for a in self.actuators
])
return specs.BoundedArray(
shape=(len(self.actuators),),
dtype=float,
minimum=minimum,
maximum=maximum,
name='\t'.join([actuator.name for actuator in self.actuators]))
def apply_action(self, physics, action, random_state):
del random_state
physics.bind(self.actuators).ctrl = action
class WalkerObservables(composer.Observables):
@composer.observable
def joints_pos(self):
return observable.MJCFFeature('qpos', self._entity.observable_joints)
@composer.observable
def sensors_gyro(self):
return observable.MJCFFeature('sensordata',
self._entity.mjcf_model.sensor.gyro)
@composer.observable
def sensors_accelerometer(self):
return observable.MJCFFeature('sensordata',
self._entity.mjcf_model.sensor.accelerometer)
@composer.observable
def sensors_framequat(self):
return observable.MJCFFeature('sensordata',
self._entity.mjcf_model.sensor.framequat)
def _collect_from_attachments(self, attribute_name):
out = []
for entity in self._entity.iter_entities(exclude_self=True):
out.extend(getattr(entity.observables, attribute_name, []))
return out
@property
def proprioception(self):
return ([self.joints_pos] +
self._collect_from_attachments('proprioception'))
@property
def kinematic_sensors(self):
return ([self.sensors_gyro,
self.sensors_accelerometer,
self.sensors_framequat] +
self._collect_from_attachments('kinematic_sensors'))
@property
def dynamic_sensors(self):
return self._collect_from_attachments('dynamic_sensors')
| true
| true
|
1c44dce31e003e663b608bdf55046a067adb45b9
| 6,765
|
py
|
Python
|
fonts/connect3d_infineon_8x16.py
|
ccccmagicboy/st7735_mpy
|
b15f1bde69fbe6e0eb4931c57e71c136d8e7f024
|
[
"MIT"
] | 6
|
2020-07-11T16:59:19.000Z
|
2021-07-16T19:32:49.000Z
|
ports/esp32/user_modules/st7735_mpy/fonts/connect3d_infineon_8x16.py
|
d4niele/micropython
|
a1f7b37d392bf46b28045ce215ae899fda8d8c38
|
[
"MIT"
] | 1
|
2020-04-14T03:14:45.000Z
|
2020-04-14T03:14:45.000Z
|
fonts/connect3d_infineon_8x16.py
|
ccccmagicboy/st7735_mpy
|
b15f1bde69fbe6e0eb4931c57e71c136d8e7f024
|
[
"MIT"
] | null | null | null |
"""converted from ..\fonts\Connect3d_Infineon_8x16.bin """
WIDTH = 8
HEIGHT = 16
FIRST = 0x20
LAST = 0x7f
_FONT =\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x18\x3c\x3c\x3c\x3c\x18\x18\x00\x18\x18\x00\x00\x00\x00'\
b'\x00\x36\x36\x36\x36\x14\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x6c\x6c\x6c\xfe\x6c\x6c\xfe\x6c\x6c\x6c\x00\x00\x00\x00'\
b'\x00\x00\x18\x18\x7c\xc6\xc0\x78\x3c\x06\xc6\x7c\x18\x18\x00\x00'\
b'\x00\x00\x00\x00\x00\x62\x66\x0c\x18\x30\x66\xc6\x00\x00\x00\x00'\
b'\x00\x00\x38\x6c\x38\x30\x76\x7e\xcc\xcc\xcc\x76\x00\x00\x00\x00'\
b'\x00\x0c\x0c\x0c\x18\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x0c\x18\x30\x30\x30\x30\x30\x30\x18\x0c\x00\x00\x00\x00'\
b'\x00\x00\x30\x18\x0c\x0c\x0c\x0c\x0c\x0c\x18\x30\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x6c\x38\xfe\x38\x6c\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x18\x18\x7e\x18\x18\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0c\x0c\x0c\x18\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\xfe\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x18\x18\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x02\x06\x0c\x18\x30\x60\xc0\x80\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\xce\xde\xf6\xe6\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x18\x78\x18\x18\x18\x18\x18\x18\x18\x7e\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\x06\x0c\x18\x30\x60\xc6\xfe\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\x06\x06\x3c\x06\x06\x06\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x0c\x1c\x3c\x6c\xcc\xcc\xfe\x0c\x0c\x1e\x00\x00\x00\x00'\
b'\x00\x00\xfe\xc0\xc0\xc0\xfc\x06\x06\x06\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc0\xc0\xfc\xc6\xc6\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\xfe\xc6\x06\x0c\x18\x30\x30\x30\x30\x30\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\xc6\x7c\xc6\xc6\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\xc6\xc6\x7e\x06\x06\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x0c\x0c\x00\x00\x0c\x0c\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x0c\x0c\x00\x00\x0c\x0c\x0c\x18\x00\x00\x00'\
b'\x00\x00\x00\x0c\x18\x30\x60\xc0\x60\x30\x18\x0c\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\xfe\x00\xfe\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x60\x30\x18\x0c\x06\x0c\x18\x30\x60\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\x0c\x18\x18\x18\x00\x18\x18\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\xc6\xde\xde\xde\xdc\xc0\x7e\x00\x00\x00\x00'\
b'\x00\x00\x38\x6c\xc6\xc6\xc6\xfe\xc6\xc6\xc6\xc6\x00\x00\x00\x00'\
b'\x00\x00\xfc\x66\x66\x66\x7c\x66\x66\x66\x66\xfc\x00\x00\x00\x00'\
b'\x00\x00\x3c\x66\xc2\xc0\xc0\xc0\xc0\xc2\x66\x3c\x00\x00\x00\x00'\
b'\x00\x00\xf8\x6c\x66\x66\x66\x66\x66\x66\x6c\xf8\x00\x00\x00\x00'\
b'\x00\x00\xfe\x66\x60\x64\x7c\x64\x60\x60\x66\xfe\x00\x00\x00\x00'\
b'\x00\x00\xfe\x66\x60\x64\x7c\x64\x60\x60\x60\xf0\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\xc0\xc0\xc0\xce\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\xc6\xc6\xc6\xc6\xfe\xc6\xc6\xc6\xc6\xc6\x00\x00\x00\x00'\
b'\x00\x00\x3c\x18\x18\x18\x18\x18\x18\x18\x18\x3c\x00\x00\x00\x00'\
b'\x00\x00\x3c\x18\x18\x18\x18\x18\x18\xd8\xd8\x70\x00\x00\x00\x00'\
b'\x00\x00\xc6\xc6\xcc\xd8\xf0\xf0\xd8\xcc\xc6\xc6\x00\x00\x00\x00'\
b'\x00\x00\xf0\x60\x60\x60\x60\x60\x60\x62\x66\xfe\x00\x00\x00\x00'\
b'\x00\x00\xc6\xc6\xee\xee\xfe\xd6\xd6\xd6\xc6\xc6\x00\x00\x00\x00'\
b'\x00\x00\xc6\xc6\xe6\xe6\xf6\xde\xce\xce\xc6\xc6\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\xc6\xc6\xc6\xc6\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\xfc\x66\x66\x66\x66\x7c\x60\x60\x60\xf0\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\xc6\xc6\xc6\xc6\xd6\xd6\x7c\x06\x00\x00\x00'\
b'\x00\x00\xfc\x66\x66\x66\x7c\x78\x6c\x66\x66\xe6\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc0\xc0\x70\x1c\x06\x06\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x7e\x5a\x18\x18\x18\x18\x18\x18\x18\x3c\x00\x00\x00\x00'\
b'\x00\x00\xc6\xc6\xc6\xc6\xc6\xc6\xc6\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\xc6\xc6\xc6\xc6\xc6\xc6\xc6\x6c\x38\x10\x00\x00\x00\x00'\
b'\x00\x00\xc6\xc6\xc6\xd6\xd6\xd6\xfe\xee\xc6\xc6\x00\x00\x00\x00'\
b'\x00\x00\xc6\xc6\xc6\x6c\x38\x38\x6c\xc6\xc6\xc6\x00\x00\x00\x00'\
b'\x00\x00\x66\x66\x66\x66\x66\x3c\x18\x18\x18\x3c\x00\x00\x00\x00'\
b'\x00\x00\xfe\xc6\x86\x0c\x18\x30\x60\xc2\xc6\xfe\x00\x00\x00\x00'\
b'\x00\x00\x7c\x60\x60\x60\x60\x60\x60\x60\x60\x7c\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x80\xc0\x60\x30\x18\x0c\x06\x02\x00\x00\x00\x00'\
b'\x00\x00\x7c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x7c\x00\x00\x00\x00'\
b'\x00\x10\x38\x6c\xc6\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\x00\x00'\
b'\x00\x18\x18\x18\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x78\x0c\x7c\xcc\xcc\xdc\x76\x00\x00\x00\x00'\
b'\x00\x00\xe0\x60\x60\x7c\x66\x66\x66\x66\x66\xfc\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x7c\xc6\xc0\xc0\xc0\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x1c\x0c\x0c\x7c\xcc\xcc\xcc\xcc\xcc\x7e\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x7c\xc6\xc6\xfe\xc0\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x1c\x36\x30\x30\xfc\x30\x30\x30\x30\x78\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x76\xce\xc6\xc6\xce\x76\x06\xc6\x7c\x00\x00'\
b'\x00\x00\xe0\x60\x60\x7c\x66\x66\x66\x66\x66\xe6\x00\x00\x00\x00'\
b'\x00\x00\x18\x18\x00\x38\x18\x18\x18\x18\x18\x3c\x00\x00\x00\x00'\
b'\x00\x00\x0c\x0c\x00\x1c\x0c\x0c\x0c\x0c\x0c\xcc\xcc\x78\x00\x00'\
b'\x00\x00\xe0\x60\x60\x66\x66\x6c\x78\x6c\x66\xe6\x00\x00\x00\x00'\
b'\x00\x00\x18\x18\x18\x18\x18\x18\x18\x18\x18\x1c\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x6c\xfe\xd6\xd6\xc6\xc6\xc6\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xdc\x66\x66\x66\x66\x66\x66\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x7c\xc6\xc6\xc6\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xdc\x66\x66\x66\x66\x7c\x60\x60\xf0\x00\x00'\
b'\x00\x00\x00\x00\x00\x76\xcc\xcc\xcc\xcc\x7c\x0c\x0c\x1e\x00\x00'\
b'\x00\x00\x00\x00\x00\xdc\x66\x60\x60\x60\x60\xf0\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x7c\xc6\xc0\x7c\x06\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x30\x30\x30\xfc\x30\x30\x30\x30\x36\x1c\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xcc\xcc\xcc\xcc\xcc\xcc\x76\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xc6\xc6\xc6\xc6\x6c\x38\x10\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xc6\xc6\xd6\xd6\xd6\xfe\x6c\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xc6\xc6\x6c\x38\x6c\xc6\xc6\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xc6\xc6\xc6\xc6\xce\x76\x06\xc6\x7c\x00\x00'\
b'\x00\x00\x00\x00\x00\xfe\x86\x0c\x18\x30\x62\xfe\x00\x00\x00\x00'\
b'\x00\x00\x0e\x18\x18\x18\x70\x18\x18\x18\x18\x0e\x00\x00\x00\x00'\
b'\x00\x00\x18\x18\x18\x18\x00\x18\x18\x18\x18\x18\x00\x00\x00\x00'\
b'\x00\x00\x70\x18\x18\x18\x0e\x18\x18\x18\x18\x70\x00\x00\x00\x00'\
b'\x00\x00\x76\xdc\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x10\x38\x38\x6c\x6c\xfe\x00\x00\x00\x00\x00'\
FONT = memoryview(_FONT)
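# Usage sketch (illustrative, not part of the converted font): each glyph
# occupies HEIGHT consecutive bytes, one byte per row with the MSB as the
# leftmost pixel, so a character bitmap can be sliced out directly.
def _glyph(ch):
    # Return the HEIGHT-byte bitmap for a printable character.
    i = (ord(ch) - FIRST) * HEIGHT
    return FONT[i:i + HEIGHT]
# Example: render 'A' as ASCII art.
# for row in _glyph('A'):
#     print(''.join('#' if row & (0x80 >> b) else '.' for b in range(WIDTH)))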
| 64.428571
| 68
| 0.709978
|
WIDTH = 8
HEIGHT = 16
FIRST = 0x20
LAST = 0x7f
_FONT =\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x18\x3c\x3c\x3c\x3c\x18\x18\x00\x18\x18\x00\x00\x00\x00'\
b'\x00\x36\x36\x36\x36\x14\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x6c\x6c\x6c\xfe\x6c\x6c\xfe\x6c\x6c\x6c\x00\x00\x00\x00'\
b'\x00\x00\x18\x18\x7c\xc6\xc0\x78\x3c\x06\xc6\x7c\x18\x18\x00\x00'\
b'\x00\x00\x00\x00\x00\x62\x66\x0c\x18\x30\x66\xc6\x00\x00\x00\x00'\
b'\x00\x00\x38\x6c\x38\x30\x76\x7e\xcc\xcc\xcc\x76\x00\x00\x00\x00'\
b'\x00\x0c\x0c\x0c\x18\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x0c\x18\x30\x30\x30\x30\x30\x30\x18\x0c\x00\x00\x00\x00'\
b'\x00\x00\x30\x18\x0c\x0c\x0c\x0c\x0c\x0c\x18\x30\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x6c\x38\xfe\x38\x6c\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x18\x18\x7e\x18\x18\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0c\x0c\x0c\x18\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\xfe\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x18\x18\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x02\x06\x0c\x18\x30\x60\xc0\x80\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\xce\xde\xf6\xe6\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x18\x78\x18\x18\x18\x18\x18\x18\x18\x7e\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\x06\x0c\x18\x30\x60\xc6\xfe\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\x06\x06\x3c\x06\x06\x06\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x0c\x1c\x3c\x6c\xcc\xcc\xfe\x0c\x0c\x1e\x00\x00\x00\x00'\
b'\x00\x00\xfe\xc0\xc0\xc0\xfc\x06\x06\x06\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc0\xc0\xfc\xc6\xc6\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\xfe\xc6\x06\x0c\x18\x30\x30\x30\x30\x30\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\xc6\x7c\xc6\xc6\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\xc6\xc6\x7e\x06\x06\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x0c\x0c\x00\x00\x0c\x0c\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x0c\x0c\x00\x00\x0c\x0c\x0c\x18\x00\x00\x00'\
b'\x00\x00\x00\x0c\x18\x30\x60\xc0\x60\x30\x18\x0c\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\xfe\x00\xfe\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x60\x30\x18\x0c\x06\x0c\x18\x30\x60\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\x0c\x18\x18\x18\x00\x18\x18\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\xc6\xde\xde\xde\xdc\xc0\x7e\x00\x00\x00\x00'\
b'\x00\x00\x38\x6c\xc6\xc6\xc6\xfe\xc6\xc6\xc6\xc6\x00\x00\x00\x00'\
b'\x00\x00\xfc\x66\x66\x66\x7c\x66\x66\x66\x66\xfc\x00\x00\x00\x00'\
b'\x00\x00\x3c\x66\xc2\xc0\xc0\xc0\xc0\xc2\x66\x3c\x00\x00\x00\x00'\
b'\x00\x00\xf8\x6c\x66\x66\x66\x66\x66\x66\x6c\xf8\x00\x00\x00\x00'\
b'\x00\x00\xfe\x66\x60\x64\x7c\x64\x60\x60\x66\xfe\x00\x00\x00\x00'\
b'\x00\x00\xfe\x66\x60\x64\x7c\x64\x60\x60\x60\xf0\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\xc0\xc0\xc0\xce\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\xc6\xc6\xc6\xc6\xfe\xc6\xc6\xc6\xc6\xc6\x00\x00\x00\x00'\
b'\x00\x00\x3c\x18\x18\x18\x18\x18\x18\x18\x18\x3c\x00\x00\x00\x00'\
b'\x00\x00\x3c\x18\x18\x18\x18\x18\x18\xd8\xd8\x70\x00\x00\x00\x00'\
b'\x00\x00\xc6\xc6\xcc\xd8\xf0\xf0\xd8\xcc\xc6\xc6\x00\x00\x00\x00'\
b'\x00\x00\xf0\x60\x60\x60\x60\x60\x60\x62\x66\xfe\x00\x00\x00\x00'\
b'\x00\x00\xc6\xc6\xee\xee\xfe\xd6\xd6\xd6\xc6\xc6\x00\x00\x00\x00'\
b'\x00\x00\xc6\xc6\xe6\xe6\xf6\xde\xce\xce\xc6\xc6\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\xc6\xc6\xc6\xc6\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\xfc\x66\x66\x66\x66\x7c\x60\x60\x60\xf0\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\xc6\xc6\xc6\xc6\xd6\xd6\x7c\x06\x00\x00\x00'\
b'\x00\x00\xfc\x66\x66\x66\x7c\x78\x6c\x66\x66\xe6\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc0\xc0\x70\x1c\x06\x06\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x7e\x5a\x18\x18\x18\x18\x18\x18\x18\x3c\x00\x00\x00\x00'\
b'\x00\x00\xc6\xc6\xc6\xc6\xc6\xc6\xc6\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\xc6\xc6\xc6\xc6\xc6\xc6\xc6\x6c\x38\x10\x00\x00\x00\x00'\
b'\x00\x00\xc6\xc6\xc6\xd6\xd6\xd6\xfe\xee\xc6\xc6\x00\x00\x00\x00'\
b'\x00\x00\xc6\xc6\xc6\x6c\x38\x38\x6c\xc6\xc6\xc6\x00\x00\x00\x00'\
b'\x00\x00\x66\x66\x66\x66\x66\x3c\x18\x18\x18\x3c\x00\x00\x00\x00'\
b'\x00\x00\xfe\xc6\x86\x0c\x18\x30\x60\xc2\xc6\xfe\x00\x00\x00\x00'\
b'\x00\x00\x7c\x60\x60\x60\x60\x60\x60\x60\x60\x7c\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x80\xc0\x60\x30\x18\x0c\x06\x02\x00\x00\x00\x00'\
b'\x00\x00\x7c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x7c\x00\x00\x00\x00'\
b'\x00\x10\x38\x6c\xc6\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\x00\x00'\
b'\x00\x18\x18\x18\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x78\x0c\x7c\xcc\xcc\xdc\x76\x00\x00\x00\x00'\
b'\x00\x00\xe0\x60\x60\x7c\x66\x66\x66\x66\x66\xfc\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x7c\xc6\xc0\xc0\xc0\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x1c\x0c\x0c\x7c\xcc\xcc\xcc\xcc\xcc\x7e\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x7c\xc6\xc6\xfe\xc0\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x1c\x36\x30\x30\xfc\x30\x30\x30\x30\x78\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x76\xce\xc6\xc6\xce\x76\x06\xc6\x7c\x00\x00'\
b'\x00\x00\xe0\x60\x60\x7c\x66\x66\x66\x66\x66\xe6\x00\x00\x00\x00'\
b'\x00\x00\x18\x18\x00\x38\x18\x18\x18\x18\x18\x3c\x00\x00\x00\x00'\
b'\x00\x00\x0c\x0c\x00\x1c\x0c\x0c\x0c\x0c\x0c\xcc\xcc\x78\x00\x00'\
b'\x00\x00\xe0\x60\x60\x66\x66\x6c\x78\x6c\x66\xe6\x00\x00\x00\x00'\
b'\x00\x00\x18\x18\x18\x18\x18\x18\x18\x18\x18\x1c\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x6c\xfe\xd6\xd6\xc6\xc6\xc6\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xdc\x66\x66\x66\x66\x66\x66\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x7c\xc6\xc6\xc6\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xdc\x66\x66\x66\x66\x7c\x60\x60\xf0\x00\x00'\
b'\x00\x00\x00\x00\x00\x76\xcc\xcc\xcc\xcc\x7c\x0c\x0c\x1e\x00\x00'\
b'\x00\x00\x00\x00\x00\xdc\x66\x60\x60\x60\x60\xf0\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x7c\xc6\xc0\x7c\x06\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x30\x30\x30\xfc\x30\x30\x30\x30\x36\x1c\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xcc\xcc\xcc\xcc\xcc\xcc\x76\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xc6\xc6\xc6\xc6\x6c\x38\x10\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xc6\xc6\xd6\xd6\xd6\xfe\x6c\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xc6\xc6\x6c\x38\x6c\xc6\xc6\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xc6\xc6\xc6\xc6\xce\x76\x06\xc6\x7c\x00\x00'\
b'\x00\x00\x00\x00\x00\xfe\x86\x0c\x18\x30\x62\xfe\x00\x00\x00\x00'\
b'\x00\x00\x0e\x18\x18\x18\x70\x18\x18\x18\x18\x0e\x00\x00\x00\x00'\
b'\x00\x00\x18\x18\x18\x18\x00\x18\x18\x18\x18\x18\x00\x00\x00\x00'\
b'\x00\x00\x70\x18\x18\x18\x0e\x18\x18\x18\x18\x70\x00\x00\x00\x00'\
b'\x00\x00\x76\xdc\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x10\x38\x38\x6c\x6c\xfe\x00\x00\x00\x00\x00'\
FONT = memoryview(_FONT)
| true
| true
|
1c44ddb53005f0348306ada9b2eb5fc4041f1299
| 6,990
|
py
|
Python
|
src/connectedvmware/azext_connectedvmware/vendored_sdks/models/virtual_machine_template.py
|
haroonf/azure-cli-extensions
|
61c044d34c224372f186934fa7c9313f1cd3a525
|
[
"MIT"
] | 207
|
2017-11-29T06:59:41.000Z
|
2022-03-31T10:00:53.000Z
|
src/connectedvmware/azext_connectedvmware/vendored_sdks/models/virtual_machine_template.py
|
haroonf/azure-cli-extensions
|
61c044d34c224372f186934fa7c9313f1cd3a525
|
[
"MIT"
] | 4,061
|
2017-10-27T23:19:56.000Z
|
2022-03-31T23:18:30.000Z
|
src/connectedvmware/azext_connectedvmware/vendored_sdks/models/virtual_machine_template.py
|
haroonf/azure-cli-extensions
|
61c044d34c224372f186934fa7c9313f1cd3a525
|
[
"MIT"
] | 802
|
2017-10-11T17:36:26.000Z
|
2022-03-31T22:24:32.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class VirtualMachineTemplate(Model):
"""Define the virtualMachineTemplate.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:ivar uuid: Gets or sets a unique identifier for this resource.
:vartype uuid: str
:param v_center_id: Gets or sets the ARM Id of the vCenter resource in
which this template resides.
:type v_center_id: str
:param mo_ref_id: Gets or sets the vCenter MoRef (Managed Object
Reference) ID for the virtual machine
template.
:type mo_ref_id: str
:param inventory_item_id: Gets or sets the inventory Item ID for the
virtual machine template.
:type inventory_item_id: str
:ivar mo_name: Gets or sets the vCenter Managed Object name for the
virtual machine template.
:vartype mo_name: str
:ivar memory_size_mb: Gets or sets memory size in MBs for the template.
:vartype memory_size_mb: int
:ivar num_cp_us: Gets or sets the number of vCPUs for the template.
:vartype num_cp_us: int
:ivar num_cores_per_socket: Gets or sets the number of cores per socket
for the template.
Defaults to 1 if unspecified.
:vartype num_cores_per_socket: int
:ivar os_type: Gets or sets the type of the os. Possible values include:
'Windows', 'Linux', 'Other'
:vartype os_type: str or
~azure.mgmt.vmware.v2020_10_01_preview.models.OsType
:ivar os_name: Gets or sets os name.
:vartype os_name: str
:ivar folder_path: Gets or sets the folder path of the template.
:vartype folder_path: str
:ivar network_interfaces: Gets or sets the network interfaces of the
template.
:vartype network_interfaces:
list[~azure.mgmt.vmware.v2020_10_01_preview.models.NetworkInterface]
    :ivar disks: Gets or sets the disks of the template.
:vartype disks:
list[~azure.mgmt.vmware.v2020_10_01_preview.models.VirtualDisk]
:ivar custom_resource_name: Gets the name of the corresponding resource in
Kubernetes.
:vartype custom_resource_name: str
:ivar provisioning_state: Gets or sets the provisioning state.
:vartype provisioning_state: str
:param location: Required. Gets or sets the location.
:type location: str
:param extended_location: Gets or sets the extended location.
:type extended_location:
~azure.mgmt.vmware.v2020_10_01_preview.models.ExtendedLocation
:param system_data: The system data.
:type system_data:
~azure.mgmt.vmware.v2020_10_01_preview.models.SystemData
:param tags: Gets or sets the Resource tags.
:type tags: dict[str, str]
:ivar name: Gets or sets the name.
:vartype name: str
:ivar id: Gets or sets the Id.
:vartype id: str
:ivar type: Gets or sets the type of the resource.
:vartype type: str
:param kind: Metadata used by portal/tooling/etc to render different UX
experiences for resources of the same type; e.g. ApiApps are a kind of
Microsoft.Web/sites type. If supported, the resource provider must
validate and persist this value.
:type kind: str
"""
_validation = {
'uuid': {'readonly': True},
'mo_name': {'readonly': True},
'memory_size_mb': {'readonly': True},
'num_cp_us': {'readonly': True},
'num_cores_per_socket': {'readonly': True},
'os_type': {'readonly': True},
'os_name': {'readonly': True},
'folder_path': {'readonly': True},
'network_interfaces': {'readonly': True},
'disks': {'readonly': True},
'custom_resource_name': {'readonly': True},
'provisioning_state': {'readonly': True},
'location': {'required': True},
'name': {'readonly': True},
'id': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'uuid': {'key': 'properties.uuid', 'type': 'str'},
'v_center_id': {'key': 'properties.vCenterId', 'type': 'str'},
'mo_ref_id': {'key': 'properties.moRefId', 'type': 'str'},
'inventory_item_id': {'key': 'properties.inventoryItemId', 'type': 'str'},
'mo_name': {'key': 'properties.moName', 'type': 'str'},
'memory_size_mb': {'key': 'properties.memorySizeMB', 'type': 'int'},
'num_cp_us': {'key': 'properties.numCPUs', 'type': 'int'},
'num_cores_per_socket': {'key': 'properties.numCoresPerSocket', 'type': 'int'},
'os_type': {'key': 'properties.osType', 'type': 'str'},
'os_name': {'key': 'properties.osName', 'type': 'str'},
'folder_path': {'key': 'properties.folderPath', 'type': 'str'},
'network_interfaces': {'key': 'properties.networkInterfaces', 'type': '[NetworkInterface]'},
'disks': {'key': 'properties.disks', 'type': '[VirtualDisk]'},
'custom_resource_name': {'key': 'properties.customResourceName', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'extended_location': {'key': 'extendedLocation', 'type': 'ExtendedLocation'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'tags': {'key': 'tags', 'type': '{str}'},
'name': {'key': 'name', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
}
def __init__(self, **kwargs):
super(VirtualMachineTemplate, self).__init__(**kwargs)
self.uuid = None
self.v_center_id = kwargs.get('v_center_id', None)
self.mo_ref_id = kwargs.get('mo_ref_id', None)
self.inventory_item_id = kwargs.get('inventory_item_id', None)
self.mo_name = None
self.memory_size_mb = None
self.num_cp_us = None
self.num_cores_per_socket = None
self.os_type = None
self.os_name = None
self.folder_path = None
self.network_interfaces = None
self.disks = None
self.custom_resource_name = None
self.provisioning_state = None
self.location = kwargs.get('location', None)
self.extended_location = kwargs.get('extended_location', None)
self.system_data = kwargs.get('system_data', None)
self.tags = kwargs.get('tags', None)
self.name = None
self.id = None
self.type = None
self.kind = kwargs.get('kind', None)
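# Usage sketch (illustrative): only writable attributes are accepted as
# kwargs; read-only attributes are populated by the service on response
# deserialization.
if __name__ == '__main__':
    template = VirtualMachineTemplate(
        location='eastus',
        v_center_id='/subscriptions/xxx/.../vcenters/vc1',  # placeholder id
        mo_ref_id='vm-template-42',
        tags={'env': 'dev'})
    print(template.location, template.tags)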
| 43.962264
| 100
| 0.634621
|
from msrest.serialization import Model
class VirtualMachineTemplate(Model):
_validation = {
'uuid': {'readonly': True},
'mo_name': {'readonly': True},
'memory_size_mb': {'readonly': True},
'num_cp_us': {'readonly': True},
'num_cores_per_socket': {'readonly': True},
'os_type': {'readonly': True},
'os_name': {'readonly': True},
'folder_path': {'readonly': True},
'network_interfaces': {'readonly': True},
'disks': {'readonly': True},
'custom_resource_name': {'readonly': True},
'provisioning_state': {'readonly': True},
'location': {'required': True},
'name': {'readonly': True},
'id': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'uuid': {'key': 'properties.uuid', 'type': 'str'},
'v_center_id': {'key': 'properties.vCenterId', 'type': 'str'},
'mo_ref_id': {'key': 'properties.moRefId', 'type': 'str'},
'inventory_item_id': {'key': 'properties.inventoryItemId', 'type': 'str'},
'mo_name': {'key': 'properties.moName', 'type': 'str'},
'memory_size_mb': {'key': 'properties.memorySizeMB', 'type': 'int'},
'num_cp_us': {'key': 'properties.numCPUs', 'type': 'int'},
'num_cores_per_socket': {'key': 'properties.numCoresPerSocket', 'type': 'int'},
'os_type': {'key': 'properties.osType', 'type': 'str'},
'os_name': {'key': 'properties.osName', 'type': 'str'},
'folder_path': {'key': 'properties.folderPath', 'type': 'str'},
'network_interfaces': {'key': 'properties.networkInterfaces', 'type': '[NetworkInterface]'},
'disks': {'key': 'properties.disks', 'type': '[VirtualDisk]'},
'custom_resource_name': {'key': 'properties.customResourceName', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'extended_location': {'key': 'extendedLocation', 'type': 'ExtendedLocation'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'tags': {'key': 'tags', 'type': '{str}'},
'name': {'key': 'name', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
}
def __init__(self, **kwargs):
super(VirtualMachineTemplate, self).__init__(**kwargs)
self.uuid = None
self.v_center_id = kwargs.get('v_center_id', None)
self.mo_ref_id = kwargs.get('mo_ref_id', None)
self.inventory_item_id = kwargs.get('inventory_item_id', None)
self.mo_name = None
self.memory_size_mb = None
self.num_cp_us = None
self.num_cores_per_socket = None
self.os_type = None
self.os_name = None
self.folder_path = None
self.network_interfaces = None
self.disks = None
self.custom_resource_name = None
self.provisioning_state = None
self.location = kwargs.get('location', None)
self.extended_location = kwargs.get('extended_location', None)
self.system_data = kwargs.get('system_data', None)
self.tags = kwargs.get('tags', None)
self.name = None
self.id = None
self.type = None
self.kind = kwargs.get('kind', None)
| true
| true
|
1c44ddf468ea1fa13eb77cec4667348d4bc60c09
| 7,181
|
py
|
Python
|
main.py
|
zhangchenkai/piwise_segmentation
|
3dfecaae32cde9097d9c312e3373a834b0884319
|
[
"BSD-3-Clause"
] | null | null | null |
main.py
|
zhangchenkai/piwise_segmentation
|
3dfecaae32cde9097d9c312e3373a834b0884319
|
[
"BSD-3-Clause"
] | null | null | null |
main.py
|
zhangchenkai/piwise_segmentation
|
3dfecaae32cde9097d9c312e3373a834b0884319
|
[
"BSD-3-Clause"
] | null | null | null |
import os
import sys
from argparse import ArgumentParser
import numpy as np
import pandas as pd
import torch
from torch.autograd import Variable
from torch.optim import Adam
from torch.utils.data import DataLoader
from torchvision.transforms import Compose, Normalize
from torchvision.transforms import ToTensor, ToPILImage, Resize
sys.path.append("/home/nico/PycharmProjects/project-marvel/defect-detection")
from defect_detection.evaluator.evaluation import save_metrics_on_results
from piwise.criterion import CrossEntropyLoss2d
from piwise.dataset import VOCTrain, VOCTest
from piwise.network import FCN8, FCN16, FCN32, UNet, PSPNet, SegNet
from piwise.transform import ToLabel, Colorize
from piwise.visualize import Dashboard
NUM_CHANNELS = 3
NUM_CLASSES = 16
color_transform = Colorize(n=NUM_CLASSES)
image_transform = ToPILImage()
input_transform = Compose([
Resize(256),
ToTensor(),
# Normalize([.485, .456, .406], [.229, .224, .225]),
Normalize([.5, .5, .5], [.5, .5, .5]),
])
target_transform = Compose([
Resize(256),
ToLabel(),
# Relabel(255, 21),
])
def train(args, model):
model.train()
weight = torch.ones(NUM_CLASSES)
weight[0] = 0.1
loader = DataLoader(VOCTrain(args.datadir, 'train', input_transform, target_transform),
num_workers=args.num_workers, batch_size=args.batch_size, shuffle=True)
if args.cuda:
criterion = CrossEntropyLoss2d(weight.cuda())
else:
criterion = CrossEntropyLoss2d(weight)
optimizer = Adam(model.parameters(), lr=1e-5)
# if args.model.startswith('FCN'):
# optimizer = SGD(model.parameters(), 1e-4, .9, 2e-5)
# if args.model.startswith('PSP'):
# optimizer = SGD(model.parameters(), 1e-2, .9, 1e-4)
# if args.model.startswith('Seg'):
# optimizer = SGD(model.parameters(), 1e-3, .9)
if args.steps_plot > 0:
board = Dashboard(args.port)
for epoch in range(1, args.num_epochs + 1):
epoch_loss = []
for step, (images, labels) in enumerate(loader):
if args.cuda:
images = images.cuda()
labels = labels.cuda()
inputs = Variable(images)
targets = Variable(labels)
outputs = model(inputs)
optimizer.zero_grad()
loss = criterion(outputs, targets[:, 0])
loss.backward()
optimizer.step()
epoch_loss.append(loss.item())
if args.steps_plot > 0 and step % args.steps_plot == 0:
image = inputs[0].cpu().data
image[0] = image[0] * .5 + .5
image[1] = image[1] * .5 + .5
image[2] = image[2] * .5 + .5
board.image(image,
f'input (epoch: {epoch}, step: {step})')
board.image(color_transform(outputs[0].cpu().max(0, keepdim=True)[1].data),
f'output (epoch: {epoch}, step: {step})')
board.image(color_transform(targets[0].cpu().data),
f'target (epoch: {epoch}, step: {step})')
if args.steps_loss > 0 and step % args.steps_loss == 0:
average = sum(epoch_loss) / len(epoch_loss)
print(f'loss: {average} (epoch: {epoch}, step: {step})')
if args.steps_save > 0 and step % args.steps_save == 0:
filename = f'{args.model}-{epoch:03}-{step:04}.pth'
torch.save(model.state_dict(), filename)
print(f'save: {filename} (epoch: {epoch}, step: {step})')
def evaluate(args, model):
save_dir = '/home/nico/Desktop/FCN-8s/'
os.makedirs(save_dir, exist_ok=True)
model.eval()
all_metrics_list = []
for p_id in range(1, 16):
print('=====pattern %d=====' % p_id)
loader = DataLoader(VOCTest(args.datadir, p_id, input_transform, target_transform),
num_workers=args.num_workers, batch_size=args.batch_size, shuffle=False)
targets = []
preds = []
for step, (image, label) in enumerate(loader):
if args.cuda:
image = image.cuda()
# inputs = Variable(image)
targets.append(label.numpy().astype(np.uint8))
outputs = model(image)
pred = outputs.detach().cpu().numpy().argmax(axis=1)
preds.append(pred.astype(np.uint8))
targets = np.concatenate(targets).flatten() == p_id
preds = np.concatenate(preds).flatten() == p_id
print('======start evaluation======')
metrics_dict = save_metrics_on_results(label_pred=None, label_true=None,
binary_result=preds, binary_mask=targets,
model_name='fcn8s-p%d' % p_id, save_dir=save_dir)
all_metrics_list.append(metrics_dict)
df = pd.DataFrame(all_metrics_list)
df.to_csv('~/Desktop/all_metrics_of_%s.csv' % 'fcn8s')
def main(args):
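    # map the --model string to the corresponding network class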
Net = None
if args.model == 'fcn8':
Net = FCN8
if args.model == 'fcn16':
Net = FCN16
if args.model == 'fcn32':
Net = FCN32
if args.model == 'unet':
Net = UNet
if args.model == 'pspnet':
Net = PSPNet
if args.model == 'segnet':
Net = SegNet
assert Net is not None, f'model {args.model} not available'
model = Net(NUM_CLASSES)
if args.cuda:
model = model.cuda()
if args.state:
try:
model.load_state_dict(torch.load(args.state))
except AssertionError:
model.load_state_dict(torch.load(args.state,
map_location=lambda storage, loc: storage))
if args.mode == 'eval':
evaluate(args, model)
if args.mode == 'train':
train(args, model)
if __name__ == '__main__':
os.environ["CUDA_VISIBLE_DEVICES"] = '1'
parser = ArgumentParser()
parser.add_argument('--cuda', action='store_true')
parser.add_argument('--model', required=True)
parser.add_argument('--state')
subparsers = parser.add_subparsers(dest='mode')
subparsers.required = True
parser_eval = subparsers.add_parser('eval')
parser_eval.add_argument('--datadir', default='data')
parser_eval.add_argument('--batch-size', type=int, default=4)
parser_eval.add_argument('--num-workers', type=int, default=4)
# parser_eval.add_argument('image')
# parser_eval.add_argument('label')
parser_train = subparsers.add_parser('train')
parser_train.add_argument('--datadir', default='data')
parser_train.add_argument('--port', type=int, default=5000)
parser_train.add_argument('--num-epochs', type=int, default=32)
parser_train.add_argument('--num-workers', type=int, default=4)
parser_train.add_argument('--batch-size', type=int, default=4)
parser_train.add_argument('--steps-loss', type=int, default=50)
parser_train.add_argument('--steps-plot', type=int, default=100)
parser_train.add_argument('--steps-save', type=int, default=500)
main(parser.parse_args())
| 35.374384
| 100
| 0.604512
|
import os
import sys
from argparse import ArgumentParser
import numpy as np
import pandas as pd
import torch
from torch.autograd import Variable
from torch.optim import Adam
from torch.utils.data import DataLoader
from torchvision.transforms import Compose, Normalize
from torchvision.transforms import ToTensor, ToPILImage, Resize
sys.path.append("/home/nico/PycharmProjects/project-marvel/defect-detection")
from defect_detection.evaluator.evaluation import save_metrics_on_results
from piwise.criterion import CrossEntropyLoss2d
from piwise.dataset import VOCTrain, VOCTest
from piwise.network import FCN8, FCN16, FCN32, UNet, PSPNet, SegNet
from piwise.transform import ToLabel, Colorize
from piwise.visualize import Dashboard
NUM_CHANNELS = 3
NUM_CLASSES = 16
color_transform = Colorize(n=NUM_CLASSES)
image_transform = ToPILImage()
input_transform = Compose([
Resize(256),
ToTensor(),
Normalize([.5, .5, .5], [.5, .5, .5]),
])
target_transform = Compose([
Resize(256),
ToLabel(),
])
def train(args, model):
model.train()
weight = torch.ones(NUM_CLASSES)
weight[0] = 0.1
loader = DataLoader(VOCTrain(args.datadir, 'train', input_transform, target_transform),
num_workers=args.num_workers, batch_size=args.batch_size, shuffle=True)
if args.cuda:
criterion = CrossEntropyLoss2d(weight.cuda())
else:
criterion = CrossEntropyLoss2d(weight)
optimizer = Adam(model.parameters(), lr=1e-5)
if args.steps_plot > 0:
board = Dashboard(args.port)
for epoch in range(1, args.num_epochs + 1):
epoch_loss = []
for step, (images, labels) in enumerate(loader):
if args.cuda:
images = images.cuda()
labels = labels.cuda()
inputs = Variable(images)
targets = Variable(labels)
outputs = model(inputs)
optimizer.zero_grad()
loss = criterion(outputs, targets[:, 0])
loss.backward()
optimizer.step()
epoch_loss.append(loss.item())
if args.steps_plot > 0 and step % args.steps_plot == 0:
image = inputs[0].cpu().data
image[0] = image[0] * .5 + .5
image[1] = image[1] * .5 + .5
image[2] = image[2] * .5 + .5
board.image(image,
f'input (epoch: {epoch}, step: {step})')
board.image(color_transform(outputs[0].cpu().max(0, keepdim=True)[1].data),
f'output (epoch: {epoch}, step: {step})')
board.image(color_transform(targets[0].cpu().data),
f'target (epoch: {epoch}, step: {step})')
if args.steps_loss > 0 and step % args.steps_loss == 0:
average = sum(epoch_loss) / len(epoch_loss)
print(f'loss: {average} (epoch: {epoch}, step: {step})')
if args.steps_save > 0 and step % args.steps_save == 0:
filename = f'{args.model}-{epoch:03}-{step:04}.pth'
torch.save(model.state_dict(), filename)
print(f'save: {filename} (epoch: {epoch}, step: {step})')
def evaluate(args, model):
save_dir = '/home/nico/Desktop/FCN-8s/'
os.makedirs(save_dir, exist_ok=True)
model.eval()
all_metrics_list = []
for p_id in range(1, 16):
print('=====pattern %d=====' % p_id)
loader = DataLoader(VOCTest(args.datadir, p_id, input_transform, target_transform),
num_workers=args.num_workers, batch_size=args.batch_size, shuffle=False)
targets = []
preds = []
for step, (image, label) in enumerate(loader):
if args.cuda:
image = image.cuda()
targets.append(label.numpy().astype(np.uint8))
outputs = model(image)
pred = outputs.detach().cpu().numpy().argmax(axis=1)
preds.append(pred.astype(np.uint8))
targets = np.concatenate(targets).flatten() == p_id
preds = np.concatenate(preds).flatten() == p_id
print('======start evaluation======')
metrics_dict = save_metrics_on_results(label_pred=None, label_true=None,
binary_result=preds, binary_mask=targets,
model_name='fcn8s-p%d' % p_id, save_dir=save_dir)
all_metrics_list.append(metrics_dict)
df = pd.DataFrame(all_metrics_list)
df.to_csv('~/Desktop/all_metrics_of_%s.csv' % 'fcn8s')
def main(args):
Net = None
if args.model == 'fcn8':
Net = FCN8
if args.model == 'fcn16':
Net = FCN16
if args.model == 'fcn32':
Net = FCN32
if args.model == 'unet':
Net = UNet
if args.model == 'pspnet':
Net = PSPNet
if args.model == 'segnet':
Net = SegNet
assert Net is not None, f'model {args.model} not available'
model = Net(NUM_CLASSES)
if args.cuda:
model = model.cuda()
if args.state:
try:
model.load_state_dict(torch.load(args.state))
except AssertionError:
model.load_state_dict(torch.load(args.state,
map_location=lambda storage, loc: storage))
if args.mode == 'eval':
evaluate(args, model)
if args.mode == 'train':
train(args, model)
if __name__ == '__main__':
os.environ["CUDA_VISIBLE_DEVICES"] = '1'
parser = ArgumentParser()
parser.add_argument('--cuda', action='store_true')
parser.add_argument('--model', required=True)
parser.add_argument('--state')
subparsers = parser.add_subparsers(dest='mode')
subparsers.required = True
parser_eval = subparsers.add_parser('eval')
parser_eval.add_argument('--datadir', default='data')
parser_eval.add_argument('--batch-size', type=int, default=4)
parser_eval.add_argument('--num-workers', type=int, default=4)
parser_train = subparsers.add_parser('train')
parser_train.add_argument('--datadir', default='data')
parser_train.add_argument('--port', type=int, default=5000)
parser_train.add_argument('--num-epochs', type=int, default=32)
parser_train.add_argument('--num-workers', type=int, default=4)
parser_train.add_argument('--batch-size', type=int, default=4)
parser_train.add_argument('--steps-loss', type=int, default=50)
parser_train.add_argument('--steps-plot', type=int, default=100)
parser_train.add_argument('--steps-save', type=int, default=500)
main(parser.parse_args())
| true
| true
|
1c44de44632df44a415f68c307e8e4ac5cfde4f8
| 183
|
py
|
Python
|
PAT/pythonSrc/PAT-B1011.A+B和C.py
|
OS-EDU/KO--CSP
|
615a3a02853be6832f0e958969a2cb26106d3908
|
[
"Apache-2.0"
] | 30
|
2020-11-07T06:56:26.000Z
|
2022-02-21T09:12:39.000Z
|
PAT/pythonSrc/PAT-B1011.A+B和C.py
|
OS-EDU/KO--CSP
|
615a3a02853be6832f0e958969a2cb26106d3908
|
[
"Apache-2.0"
] | 166
|
2020-11-05T07:28:15.000Z
|
2022-03-28T04:00:08.000Z
|
PAT/pythonSrc/PAT-B1011.A+B和C.py
|
OS-EDU/KO--CSP
|
615a3a02853be6832f0e958969a2cb26106d3908
|
[
"Apache-2.0"
] | 28
|
2020-11-07T06:56:29.000Z
|
2021-09-14T11:20:27.000Z
|
n = int(input())  # number of test cases
i = 1  # loop counter
while i <= n:
a, b, c = map(int, input().split())
print("Case #%d: "%i, end="")
if a + b > c:
print("true")
else:
print("false")
i += 1
| 18.3
| 36
| 0.508197
|
n = int(input())
i = 1
while i <= n:
a, b, c = map(int, input().split())
print("Case #%d: "%i, end="")
if a + b > c:
print("true")
else:
print("false")
i += 1
| true
| true
|
1c44de9cdbe7f0da12017ec35e71d4438341fccc
| 2,577
|
py
|
Python
|
03_train_model/source_dir/training_debug.py
|
alar0330/amazon-sagemaker-build-train-deploy
|
b476c5ba5b3bd55a99709e7788079763fa498682
|
[
"Apache-2.0"
] | 23
|
2020-03-30T08:02:48.000Z
|
2022-02-01T16:16:43.000Z
|
03_train_model/source_dir/training_debug.py
|
alar0330/amazon-sagemaker-build-train-deploy
|
b476c5ba5b3bd55a99709e7788079763fa498682
|
[
"Apache-2.0"
] | null | null | null |
03_train_model/source_dir/training_debug.py
|
alar0330/amazon-sagemaker-build-train-deploy
|
b476c5ba5b3bd55a99709e7788079763fa498682
|
[
"Apache-2.0"
] | 11
|
2020-04-04T09:01:27.000Z
|
2021-06-02T12:10:21.000Z
|
import argparse
import json
import os
import random
import pandas as pd
import glob
import pickle as pkl
import xgboost
from smdebug import SaveConfig
from smdebug.xgboost import Hook
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--max_depth", type=int, default=5)
parser.add_argument("--eta", type=float, default=0.05)
parser.add_argument("--gamma", type=int, default=4)
parser.add_argument("--min_child_weight", type=int, default=6)
parser.add_argument("--silent", type=int, default=0)
parser.add_argument("--objective", type=str, default="reg:logistic")
parser.add_argument("--num_round", type=int, default=10)
parser.add_argument('--train', type=str, default=os.environ.get('SM_CHANNEL_TRAIN'))
parser.add_argument('--validation', type=str, default=os.environ.get('SM_CHANNEL_VALIDATION'))
args = parser.parse_args()
return args
def main():
args = parse_args()
train_files_path, validation_files_path = args.train, args.validation
train_features_path = os.path.join(args.train, 'train_features.csv')
train_labels_path = os.path.join(args.train, 'train_labels.csv')
val_features_path = os.path.join(args.validation, 'val_features.csv')
val_labels_path = os.path.join(args.validation, 'val_labels.csv')
print('Loading training dataframes...')
df_train_features = pd.read_csv(train_features_path)
df_train_labels = pd.read_csv(train_labels_path)
print('Loading validation dataframes...')
df_val_features = pd.read_csv(val_features_path)
df_val_labels = pd.read_csv(val_labels_path)
X = df_train_features.values
y = df_train_labels.values
val_X = df_val_features.values
val_y = df_val_labels.values
dtrain = xgboost.DMatrix(X, label=y)
dval = xgboost.DMatrix(val_X, label=val_y)
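    # smdebug hook built from the SageMaker debugger JSON config; attaching the
    # DMatrices lets the hook record tensors from these datasets during boosting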
hook = Hook.create_from_json_file()
hook.train_data = dtrain
hook.validation_data = dval
watchlist = [(dtrain, "train"), (dval, "validation")]
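    # hyperparameters passed through from the command line / SageMaker estimator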
params = {
"max_depth": args.max_depth,
"eta": args.eta,
"gamma": args.gamma,
"min_child_weight": args.min_child_weight,
"silent": args.silent,
"objective": args.objective
}
bst = xgboost.train(
params=params,
dtrain=dtrain,
evals=watchlist,
num_boost_round=args.num_round,
callbacks=[hook])
model_dir = os.environ.get('SM_MODEL_DIR')
    with open(model_dir + '/model.bin', 'wb') as f:
        pkl.dump(bst, f)
if __name__ == "__main__":
main()
| 29.62069
| 98
| 0.684517
|
import argparse
import json
import os
import random
import pandas as pd
import glob
import pickle as pkl
import xgboost
from smdebug import SaveConfig
from smdebug.xgboost import Hook
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--max_depth", type=int, default=5)
parser.add_argument("--eta", type=float, default=0.05)
parser.add_argument("--gamma", type=int, default=4)
parser.add_argument("--min_child_weight", type=int, default=6)
parser.add_argument("--silent", type=int, default=0)
parser.add_argument("--objective", type=str, default="reg:logistic")
parser.add_argument("--num_round", type=int, default=10)
parser.add_argument('--train', type=str, default=os.environ.get('SM_CHANNEL_TRAIN'))
parser.add_argument('--validation', type=str, default=os.environ.get('SM_CHANNEL_VALIDATION'))
args = parser.parse_args()
return args
def main():
args = parse_args()
train_files_path, validation_files_path = args.train, args.validation
train_features_path = os.path.join(args.train, 'train_features.csv')
train_labels_path = os.path.join(args.train, 'train_labels.csv')
val_features_path = os.path.join(args.validation, 'val_features.csv')
val_labels_path = os.path.join(args.validation, 'val_labels.csv')
print('Loading training dataframes...')
df_train_features = pd.read_csv(train_features_path)
df_train_labels = pd.read_csv(train_labels_path)
print('Loading validation dataframes...')
df_val_features = pd.read_csv(val_features_path)
df_val_labels = pd.read_csv(val_labels_path)
X = df_train_features.values
y = df_train_labels.values
val_X = df_val_features.values
val_y = df_val_labels.values
dtrain = xgboost.DMatrix(X, label=y)
dval = xgboost.DMatrix(val_X, label=val_y)
hook = Hook.create_from_json_file()
hook.train_data = dtrain
hook.validation_data = dval
watchlist = [(dtrain, "train"), (dval, "validation")]
params = {
"max_depth": args.max_depth,
"eta": args.eta,
"gamma": args.gamma,
"min_child_weight": args.min_child_weight,
"silent": args.silent,
"objective": args.objective
}
bst = xgboost.train(
params=params,
dtrain=dtrain,
evals=watchlist,
num_boost_round=args.num_round,
callbacks=[hook])
model_dir = os.environ.get('SM_MODEL_DIR')
    with open(model_dir + '/model.bin', 'wb') as f:
        pkl.dump(bst, f)
if __name__ == "__main__":
main()
| true
| true
|
1c44df28fd3a2056d9abd1da319c0912766c6417
| 41
|
py
|
Python
|
sksurgerybard/ui/__init__.py
|
SciKit-Surgery/scikit-surgerybard
|
4ac2ea28acb150437361c9abd53db3e3bba6d803
|
[
"BSD-3-Clause"
] | 1
|
2021-06-30T15:55:21.000Z
|
2021-06-30T15:55:21.000Z
|
sksurgerybard/ui/__init__.py
|
UCL/scikit-surgerybard
|
7ebe4d15d3d3fa67218424c9f737a9e8d93bfbf3
|
[
"BSD-3-Clause"
] | 68
|
2020-04-30T07:29:33.000Z
|
2022-01-20T09:47:54.000Z
|
sksurgerybard/ui/__init__.py
|
SciKit-Surgery/scikit-surgerybard
|
4ac2ea28acb150437361c9abd53db3e3bba6d803
|
[
"BSD-3-Clause"
] | 1
|
2021-06-30T15:55:48.000Z
|
2021-06-30T15:55:48.000Z
|
# coding=utf-8
"""scikit-surgerybard"""
| 10.25
| 24
| 0.658537
| true
| true
|
|
1c44e010fefb96406841d2144b608e607f89b5a7
| 7,410
|
py
|
Python
|
apps/tests/test_measurement_on_request1.py
|
sanchezcarlosjr/PyK-Ar
|
13ed535b85a9025464ae85feda46d94887d75e3e
|
[
"Apache-2.0"
] | 1
|
2021-09-24T23:13:22.000Z
|
2021-09-24T23:13:22.000Z
|
apps/tests/test_measurement_on_request1.py
|
sanchezcarlosjr/PyK-Ar
|
13ed535b85a9025464ae85feda46d94887d75e3e
|
[
"Apache-2.0"
] | 10
|
2021-09-25T05:15:04.000Z
|
2021-10-04T20:02:27.000Z
|
apps/tests/test_measurement_on_request1.py
|
sanchezcarlosjr/PyK-Ar
|
13ed535b85a9025464ae85feda46d94887d75e3e
|
[
"Apache-2.0"
] | null | null | null |
import json
import sys
from datetime import datetime
from firebase_admin import initialize_app
from domain.Ar36_Ar38_composition_of_atmospheric import Ar36Ar38CompositionOfAtmospheric
from domain.Ar36_Ar38_ratio_for_tracer import Ar36Ar38RatioForTracer
from domain.Ar40_Ar38_composition_of_atmospheric import Ar40Ar38CompositionOfAtmospheric
from domain.Ar40_Ar38_ratio_for_tracer import Ar40Ar38RatioForTracer
from domain.D import D
from domain.T0 import T0
from domain.atoms_K40_divides_atomsK import AtomsK40DividesAtomsK
from domain.gramsK_divides_moleK import GramsKDividesMoleK
from domain.spectometeter_scale38_scale36_factors import SpectrometerScale38Scale36
from domain.spectometeter_scale40_scale38_factors import SpectrometerScale40Scale38
from domain.x import X
from potassium_argon_age_calculation_mock_repository import PotassiumArgonAgeCalculationMockRepository
initialize_app()
sys.path.append('../apps')
from domain.measurement import Measurement
from application.raw_mass_spectrometry_to_measurements_decorator import raw_mass_spectrometry_to_measurements
def load_measurement_from_json(file: str) -> Measurement:
with open(file) as f:
sample = json.load(f)['data']
return raw_mass_spectrometry_to_measurements(lambda m: m)(sample, {'user_id': 'A'})
def setup_measurement_request1():
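    # fixture-style helper: load the request1 sample and set the physical
    # constants and spectrometer factors needed for the K-Ar age calculation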
measurement = load_measurement_from_json("tests/request1.json")
measurement.atoms_K40_divides_atomsK = AtomsK40DividesAtomsK(0.000119)
measurement.gramsK_divides_moleK = GramsKDividesMoleK(39.1)
measurement.x = X(44)
measurement.t0 = T0(3.086e-10)
measurement.d = D(0.999)
measurement.spectrometer_scale38_scale36 = SpectrometerScale38Scale36(1000)
measurement.spectrometer_scale40_scale38 = SpectrometerScale40Scale38(2)
measurement.Ar40_Ar38_ratio_for_tracer = Ar40Ar38RatioForTracer(0.0012)
measurement.Ar36_Ar38_ratio_for_tracer = Ar36Ar38RatioForTracer(2.67e-05)
measurement.Ar36_Ar38_composition_of_atmospheric = Ar36Ar38CompositionOfAtmospheric(5.35)
measurement.Ar40_Ar38_composition_of_atmospheric = Ar40Ar38CompositionOfAtmospheric(1581)
return measurement
def test_set_measurement_id():
measurement = load_measurement_from_json("tests/request1.json")
mock = PotassiumArgonAgeCalculationMockRepository()
m = mock.save(measurement)
assert 0 <= float(m.id) <= 1, "Dict should set id"
def test_check_if_spectrum_user_name_is_formatted_as_title():
measurement = setup_measurement_request1()
assert measurement.spectrum_user_name == "Miguel"
def test_filter_corrected_cycles():
measurement = load_measurement_from_json("tests/request1.json")
e = measurement.experiments
cycles = e.filter_corrected_cycles()
for cycle in cycles:
assert cycle.measure == "Corrected"
def test_equals_experiments():
def test_raw_mass_spectrometry_to_measurements():
measurement = load_measurement_from_json("tests/request1.json")
with open("tests/request1.json") as f:
sample = json.load(f)['data']
assert measurement.experiments == sample['experiments']
test_raw_mass_spectrometry_to_measurements()
def test_convert_to_dict():
def test_raw_mass_spectrometry_to_measurements():
measurement_obj = load_measurement_from_json("tests/request1.json")
measurement = measurement_obj.to_dict()
assert 'experiments' not in measurement
assert 'blank_index' not in measurement
assert 'sample_index' not in measurement
with open("tests/request1.json") as f:
sample = json.load(f)['data']
assert measurement['id'] == sample['experiments'][1]['sample_id']
keys_to_check = [
'spectrum',
'type',
'file_name',
]
for key in keys_to_check:
assert measurement[key] == sample['experiments'][1][key]
analysis_date = sample['experiments'][1]["analysis_date"]
assert measurement["analysis_date"] == datetime.strptime(analysis_date, '%Y-%m-%dT%H:%M:%S.%fZ')
test_raw_mass_spectrometry_to_measurements()
def test_calculate_moles_of_K40():
measurement = setup_measurement_request1()
assert measurement.atoms_K40_divides_atomsK == 0.000119
assert measurement.gramsK_divides_moleK == 39.1
assert measurement.moles_of_K40 == 0
measurement.calculate_moles_of_K40()
assert measurement.moles_of_K40 == 5.787E-8
def test_calculate_moles_Ar38_in_tracer():
measurement = setup_measurement_request1()
assert measurement.moles_Ar38_in_tracer == 0
measurement.calculate_moles_Ar38_in_tracer()
assert measurement.moles_Ar38_in_tracer == 2.976E-10
def test_calculate_Ar40_Ar38_ratio():
measurement = setup_measurement_request1()
assert measurement.Ar40_Ar38_ratio == 0
measurement.calculate_Ar38()
measurement.calculate_Ar40()
measurement.calculate_Ar40_Ar38_ratio()
assert measurement.Ar40_Ar38_ratio == 1358.1974700151454
def test_clone_experiment():
measurement = setup_measurement_request1()
old = measurement.experiments[0]
new = measurement.experiments[0].filter_corrected_cycles()
assert old != new
assert old is not new
def test_should_calculate_cycles_mean():
measurement = setup_measurement_request1()
measurement.calculate_Ar36()
assert measurement.Ar36 == 0.000114046680375
def test_should_calculate_Ar40_Ar38_ratios_in_the_gas_mixture():
measurement = setup_measurement_request1()
measurement.Ar38 = 1
measurement.Ar40 = 0.743
measurement.calculate_Ar40_Ar38_ratio()
assert measurement.Ar40_Ar38_ratio == 0.743
assert measurement.Ar40_Ar38_ratios_in_the_gas_mixture == 0
measurement.calculate_Ar40_Ar38_ratios_in_the_gas_mixture()
assert measurement.Ar40_Ar38_ratios_in_the_gas_mixture == 1.485
def test_should_calculate_Ar38_Ar36_ratios_in_the_gas_mixture():
measurement = setup_measurement_request1()
measurement.Ar38 = 1
measurement.Ar36 = 0.98814229249
measurement.calculate_Ar38_Ar36_ratio()
assert measurement.Ar38_Ar36_ratio == 1.012
assert measurement.Ar38_Ar36_ratios_in_the_gas_mixture == 0
measurement.calculate_Ar38_Ar36_ratios_in_the_gas_mixture()
assert measurement.Ar38_Ar36_ratios_in_the_gas_mixture == 1011
def test_should_calculate_total_Ar40():
measurement = setup_measurement_request1()
measurement.Ar38 = 1
measurement.Ar40 = 0.743
assert measurement.total_Ar40 == 0
measurement.calculate_total_Ar40()
assert measurement.total_Ar40 == 4.419E-10
def test_should_calculate_Ar40_rad():
measurement = setup_measurement_request1()
assert measurement.Ar36_Ar38_ratio_for_tracer == 2.67e-05
def test_should_calculate_percentage_of_Ar40_rad_in_the_analysis():
measurement = setup_measurement_request1()
measurement.Ar36 = 0.98814229249
measurement.Ar38 = 1
measurement.Ar40 = 0.743
assert measurement.percentage_of_Ar40_rad_in_the_analysis == 0
measurement.calculate_percentage_of_Ar40_rad_in_the_analysis()
assert measurement.percentage_of_Ar40_rad_in_the_analysis == 80.8
def test_should_calculate_age():
measurement = setup_measurement_request1()
measurement.Ar36 = 0.98814229249
measurement.Ar38 = 1
measurement.Ar40 = 0.743
assert measurement.age == 0
measurement.calculate_age()
assert measurement.age == 102603993.84
| 38
| 109
| 0.778812
|
import json
import sys
from datetime import datetime
from firebase_admin import initialize_app
from domain.Ar36_Ar38_composition_of_atmospheric import Ar36Ar38CompositionOfAtmospheric
from domain.Ar36_Ar38_ratio_for_tracer import Ar36Ar38RatioForTracer
from domain.Ar40_Ar38_composition_of_atmospheric import Ar40Ar38CompositionOfAtmospheric
from domain.Ar40_Ar38_ratio_for_tracer import Ar40Ar38RatioForTracer
from domain.D import D
from domain.T0 import T0
from domain.atoms_K40_divides_atomsK import AtomsK40DividesAtomsK
from domain.gramsK_divides_moleK import GramsKDividesMoleK
from domain.spectometeter_scale38_scale36_factors import SpectrometerScale38Scale36
from domain.spectometeter_scale40_scale38_factors import SpectrometerScale40Scale38
from domain.x import X
from potassium_argon_age_calculation_mock_repository import PotassiumArgonAgeCalculationMockRepository
initialize_app()
sys.path.append('../apps')
from domain.measurement import Measurement
from application.raw_mass_spectrometry_to_measurements_decorator import raw_mass_spectrometry_to_measurements
def load_measurement_from_json(file: str) -> Measurement:
with open(file) as f:
sample = json.load(f)['data']
return raw_mass_spectrometry_to_measurements(lambda m: m)(sample, {'user_id': 'A'})
def setup_measurement_request1():
measurement = load_measurement_from_json("tests/request1.json")
measurement.atoms_K40_divides_atomsK = AtomsK40DividesAtomsK(0.000119)
measurement.gramsK_divides_moleK = GramsKDividesMoleK(39.1)
measurement.x = X(44)
measurement.t0 = T0(3.086e-10)
measurement.d = D(0.999)
measurement.spectrometer_scale38_scale36 = SpectrometerScale38Scale36(1000)
measurement.spectrometer_scale40_scale38 = SpectrometerScale40Scale38(2)
measurement.Ar40_Ar38_ratio_for_tracer = Ar40Ar38RatioForTracer(0.0012)
measurement.Ar36_Ar38_ratio_for_tracer = Ar36Ar38RatioForTracer(2.67e-05)
measurement.Ar36_Ar38_composition_of_atmospheric = Ar36Ar38CompositionOfAtmospheric(5.35)
measurement.Ar40_Ar38_composition_of_atmospheric = Ar40Ar38CompositionOfAtmospheric(1581)
return measurement
def test_set_measurement_id():
measurement = load_measurement_from_json("tests/request1.json")
mock = PotassiumArgonAgeCalculationMockRepository()
m = mock.save(measurement)
assert 0 <= float(m.id) <= 1, "Dict should set id"
def test_check_if_spectrum_user_name_is_formatted_as_title():
measurement = setup_measurement_request1()
assert measurement.spectrum_user_name == "Miguel"
def test_filter_corrected_cycles():
measurement = load_measurement_from_json("tests/request1.json")
e = measurement.experiments
cycles = e.filter_corrected_cycles()
for cycle in cycles:
assert cycle.measure == "Corrected"
def test_equals_experiments():
def test_raw_mass_spectrometry_to_measurements():
measurement = load_measurement_from_json("tests/request1.json")
with open("tests/request1.json") as f:
sample = json.load(f)['data']
assert measurement.experiments == sample['experiments']
test_raw_mass_spectrometry_to_measurements()
def test_convert_to_dict():
def test_raw_mass_spectrometry_to_measurements():
measurement_obj = load_measurement_from_json("tests/request1.json")
measurement = measurement_obj.to_dict()
assert 'experiments' not in measurement
assert 'blank_index' not in measurement
assert 'sample_index' not in measurement
with open("tests/request1.json") as f:
sample = json.load(f)['data']
assert measurement['id'] == sample['experiments'][1]['sample_id']
keys_to_check = [
'spectrum',
'type',
'file_name',
]
for key in keys_to_check:
assert measurement[key] == sample['experiments'][1][key]
analysis_date = sample['experiments'][1]["analysis_date"]
assert measurement["analysis_date"] == datetime.strptime(analysis_date, '%Y-%m-%dT%H:%M:%S.%fZ')
test_raw_mass_spectrometry_to_measurements()
def test_calculate_moles_of_K40():
measurement = setup_measurement_request1()
assert measurement.atoms_K40_divides_atomsK == 0.000119
assert measurement.gramsK_divides_moleK == 39.1
assert measurement.moles_of_K40 == 0
measurement.calculate_moles_of_K40()
assert measurement.moles_of_K40 == 5.787E-8
def test_calculate_moles_Ar38_in_tracer():
measurement = setup_measurement_request1()
assert measurement.moles_Ar38_in_tracer == 0
measurement.calculate_moles_Ar38_in_tracer()
assert measurement.moles_Ar38_in_tracer == 2.976E-10
def test_calculate_Ar40_Ar38_ratio():
measurement = setup_measurement_request1()
assert measurement.Ar40_Ar38_ratio == 0
measurement.calculate_Ar38()
measurement.calculate_Ar40()
measurement.calculate_Ar40_Ar38_ratio()
assert measurement.Ar40_Ar38_ratio == 1358.1974700151454
def test_clone_experiment():
measurement = setup_measurement_request1()
old = measurement.experiments[0]
new = measurement.experiments[0].filter_corrected_cycles()
assert old != new
assert old is not new
def test_should_calculate_cycles_mean():
measurement = setup_measurement_request1()
measurement.calculate_Ar36()
assert measurement.Ar36 == 0.000114046680375
def test_should_calculate_Ar40_Ar38_ratios_in_the_gas_mixture():
measurement = setup_measurement_request1()
measurement.Ar38 = 1
measurement.Ar40 = 0.743
measurement.calculate_Ar40_Ar38_ratio()
assert measurement.Ar40_Ar38_ratio == 0.743
assert measurement.Ar40_Ar38_ratios_in_the_gas_mixture == 0
measurement.calculate_Ar40_Ar38_ratios_in_the_gas_mixture()
assert measurement.Ar40_Ar38_ratios_in_the_gas_mixture == 1.485
def test_should_calculate_Ar38_Ar36_ratios_in_the_gas_mixture():
measurement = setup_measurement_request1()
measurement.Ar38 = 1
measurement.Ar36 = 0.98814229249
measurement.calculate_Ar38_Ar36_ratio()
assert measurement.Ar38_Ar36_ratio == 1.012
assert measurement.Ar38_Ar36_ratios_in_the_gas_mixture == 0
measurement.calculate_Ar38_Ar36_ratios_in_the_gas_mixture()
assert measurement.Ar38_Ar36_ratios_in_the_gas_mixture == 1011
def test_should_calculate_total_Ar40():
measurement = setup_measurement_request1()
measurement.Ar38 = 1
measurement.Ar40 = 0.743
assert measurement.total_Ar40 == 0
measurement.calculate_total_Ar40()
assert measurement.total_Ar40 == 4.419E-10
def test_should_calculate_Ar40_rad():
measurement = setup_measurement_request1()
assert measurement.Ar36_Ar38_ratio_for_tracer == 2.67e-05
def test_should_calculate_percentage_of_Ar40_rad_in_the_analysis():
measurement = setup_measurement_request1()
measurement.Ar36 = 0.98814229249
measurement.Ar38 = 1
measurement.Ar40 = 0.743
assert measurement.percentage_of_Ar40_rad_in_the_analysis == 0
measurement.calculate_percentage_of_Ar40_rad_in_the_analysis()
assert measurement.percentage_of_Ar40_rad_in_the_analysis == 80.8
def test_should_calculate_age():
measurement = setup_measurement_request1()
measurement.Ar36 = 0.98814229249
measurement.Ar38 = 1
measurement.Ar40 = 0.743
assert measurement.age == 0
measurement.calculate_age()
assert measurement.age == 102603993.84
| true
| true
|
1c44e1b28f76d59aab7443a816e6b848c0913e2f
| 3,663
|
py
|
Python
|
tests/lib/test_binary.py
|
nrrpinto/construct
|
cfc980c6edfbe33c56015b736f59fb3155b51317
|
[
"MIT"
] | 629
|
2015-01-06T03:01:56.000Z
|
2022-03-23T13:13:26.000Z
|
tests/lib/test_binary.py
|
nrrpinto/construct
|
cfc980c6edfbe33c56015b736f59fb3155b51317
|
[
"MIT"
] | 897
|
2015-02-28T15:46:06.000Z
|
2022-03-30T08:19:13.000Z
|
tests/lib/test_binary.py
|
nrrpinto/construct
|
cfc980c6edfbe33c56015b736f59fb3155b51317
|
[
"MIT"
] | 151
|
2015-01-08T16:36:24.000Z
|
2022-03-10T16:59:49.000Z
|
from tests.declarativeunittest import *
from construct.lib.binary import *
def test_integer2bits():
assert integer2bits(0, 0, False) == b""
assert integer2bits(0, 0, True) == b""
assert integer2bits(19, 5) == b"\x01\x00\x00\x01\x01"
assert integer2bits(19, 8) == b"\x00\x00\x00\x01\x00\x00\x01\x01"
assert integer2bits(-13, 5, True) == b"\x01\x00\x00\x01\x01"
assert integer2bits(-13, 8, True) == b"\x01\x01\x01\x01\x00\x00\x01\x01"
assert raises(integer2bits, 0, -1) == ValueError
assert raises(integer2bits, -1, 8, False) == ValueError
assert raises(integer2bits, -2**64, 8, True) == ValueError
assert raises(integer2bits, 2**64, 8, True) == ValueError
assert raises(integer2bits, -2**64, 8, False) == ValueError
assert raises(integer2bits, 2**64, 8, False) == ValueError
def test_integer2bytes():
assert integer2bytes(0, 0, False) == b""
assert integer2bytes(0, 0, True) == b""
assert integer2bytes(0, 4) == b"\x00\x00\x00\x00"
assert integer2bytes(1, 4) == b"\x00\x00\x00\x01"
assert integer2bytes(19, 4) == b'\x00\x00\x00\x13'
assert integer2bytes(255, 1) == b"\xff"
assert integer2bytes(255, 4) == b"\x00\x00\x00\xff"
assert integer2bytes(-1, 4, True) == b"\xff\xff\xff\xff"
assert integer2bytes(-255, 4, True) == b"\xff\xff\xff\x01"
assert raises(integer2bytes, 0, -1) == ValueError
assert raises(integer2bytes, -1, 8, False) == ValueError
assert raises(integer2bytes, -2**64, 4, True) == ValueError
assert raises(integer2bytes, 2**64, 4, True) == ValueError
assert raises(integer2bytes, -2**64, 4, False) == ValueError
assert raises(integer2bytes, 2**64, 4, False) == ValueError
def test_bits2integer():
assert bits2integer(b"", False) == 0
assert bits2integer(b"", True) == 0
assert bits2integer(b"\x01\x00\x00\x01\x01", False) == 19
assert bits2integer(b"\x01\x00\x00\x01\x01", True) == -13
def test_bytes2integer():
assert bytes2integer(b"", False) == 0
assert bytes2integer(b"", True) == 0
assert bytes2integer(b"\x00") == 0
assert bytes2integer(b"\x00", True) == 0
assert bytes2integer(b"\xff") == 255
assert bytes2integer(b"\xff", True) == -1
assert bytes2integer(b'\x00\x00\x00\x13', False) == 19
assert bytes2integer(b'\x00\x00\x00\x13', True) == 19
def test_cross_integers():
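    # round-trip check: bit/byte conversions must agree with each other for
    # signed and unsigned values alike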
for i in [-300,-255,-100,-1,0,1,100,255,300]:
assert bits2integer(integer2bits(i,64,signed=(i<0)),signed=(i<0)) == i
assert bytes2integer(integer2bytes(i,8,signed=(i<0)),signed=(i<0)) == i
assert bits2bytes(integer2bits(i,64,signed=(i<0))) == integer2bytes(i,8,signed=(i<0))
assert bytes2bits(integer2bytes(i,8,signed=(i<0))) == integer2bits(i,64,signed=(i<0))
def test_bytes2bits():
assert bytes2bits(b"") == b""
assert bytes2bits(b"ab") == b"\x00\x01\x01\x00\x00\x00\x00\x01\x00\x01\x01\x00\x00\x00\x01\x00"
def test_bits2bytes():
assert bits2bytes(b"") == b""
assert bits2bytes(b"\x00\x01\x01\x00\x00\x00\x00\x01\x00\x01\x01\x00\x00\x00\x01\x00") == b"ab"
assert raises(bits2bytes, b"\x00") == ValueError
assert raises(bits2bytes, b"\x00\x00\x00\x00\x00\x00\x00") == ValueError
def test_swapbytes():
assert swapbytes(b"") == b""
assert swapbytes(b"abcd") == b"dcba"
def test_swapbytesinbits():
assert swapbytesinbits(b"") == b""
assert swapbytesinbits(b"0000000011111111") == b"1111111100000000"
assert raises(swapbytesinbits, b"1") == ValueError
def test_swapbitsinbytes():
assert swapbitsinbytes(b"") == b""
assert swapbitsinbytes(b"\xf0") == b"\x0f"
assert swapbitsinbytes(b"\xf0\x00") == b"\x0f\x00"
| 44.670732
| 99
| 0.658204
|
from tests.declarativeunittest import *
from construct.lib.binary import *
def test_integer2bits():
assert integer2bits(0, 0, False) == b""
assert integer2bits(0, 0, True) == b""
assert integer2bits(19, 5) == b"\x01\x00\x00\x01\x01"
assert integer2bits(19, 8) == b"\x00\x00\x00\x01\x00\x00\x01\x01"
assert integer2bits(-13, 5, True) == b"\x01\x00\x00\x01\x01"
assert integer2bits(-13, 8, True) == b"\x01\x01\x01\x01\x00\x00\x01\x01"
assert raises(integer2bits, 0, -1) == ValueError
assert raises(integer2bits, -1, 8, False) == ValueError
assert raises(integer2bits, -2**64, 8, True) == ValueError
assert raises(integer2bits, 2**64, 8, True) == ValueError
assert raises(integer2bits, -2**64, 8, False) == ValueError
assert raises(integer2bits, 2**64, 8, False) == ValueError
def test_integer2bytes():
assert integer2bytes(0, 0, False) == b""
assert integer2bytes(0, 0, True) == b""
assert integer2bytes(0, 4) == b"\x00\x00\x00\x00"
assert integer2bytes(1, 4) == b"\x00\x00\x00\x01"
assert integer2bytes(19, 4) == b'\x00\x00\x00\x13'
assert integer2bytes(255, 1) == b"\xff"
assert integer2bytes(255, 4) == b"\x00\x00\x00\xff"
assert integer2bytes(-1, 4, True) == b"\xff\xff\xff\xff"
assert integer2bytes(-255, 4, True) == b"\xff\xff\xff\x01"
assert raises(integer2bytes, 0, -1) == ValueError
assert raises(integer2bytes, -1, 8, False) == ValueError
assert raises(integer2bytes, -2**64, 4, True) == ValueError
assert raises(integer2bytes, 2**64, 4, True) == ValueError
assert raises(integer2bytes, -2**64, 4, False) == ValueError
assert raises(integer2bytes, 2**64, 4, False) == ValueError
def test_bits2integer():
assert bits2integer(b"", False) == 0
assert bits2integer(b"", True) == 0
assert bits2integer(b"\x01\x00\x00\x01\x01", False) == 19
assert bits2integer(b"\x01\x00\x00\x01\x01", True) == -13
def test_bytes2integer():
assert bytes2integer(b"", False) == 0
assert bytes2integer(b"", True) == 0
assert bytes2integer(b"\x00") == 0
assert bytes2integer(b"\x00", True) == 0
assert bytes2integer(b"\xff") == 255
assert bytes2integer(b"\xff", True) == -1
assert bytes2integer(b'\x00\x00\x00\x13', False) == 19
assert bytes2integer(b'\x00\x00\x00\x13', True) == 19
def test_cross_integers():
for i in [-300,-255,-100,-1,0,1,100,255,300]:
assert bits2integer(integer2bits(i,64,signed=(i<0)),signed=(i<0)) == i
assert bytes2integer(integer2bytes(i,8,signed=(i<0)),signed=(i<0)) == i
assert bits2bytes(integer2bits(i,64,signed=(i<0))) == integer2bytes(i,8,signed=(i<0))
assert bytes2bits(integer2bytes(i,8,signed=(i<0))) == integer2bits(i,64,signed=(i<0))
def test_bytes2bits():
assert bytes2bits(b"") == b""
assert bytes2bits(b"ab") == b"\x00\x01\x01\x00\x00\x00\x00\x01\x00\x01\x01\x00\x00\x00\x01\x00"
def test_bits2bytes():
assert bits2bytes(b"") == b""
assert bits2bytes(b"\x00\x01\x01\x00\x00\x00\x00\x01\x00\x01\x01\x00\x00\x00\x01\x00") == b"ab"
assert raises(bits2bytes, b"\x00") == ValueError
assert raises(bits2bytes, b"\x00\x00\x00\x00\x00\x00\x00") == ValueError
def test_swapbytes():
assert swapbytes(b"") == b""
assert swapbytes(b"abcd") == b"dcba"
def test_swapbytesinbits():
assert swapbytesinbits(b"") == b""
assert swapbytesinbits(b"0000000011111111") == b"1111111100000000"
assert raises(swapbytesinbits, b"1") == ValueError
def test_swapbitsinbytes():
assert swapbitsinbytes(b"") == b""
assert swapbitsinbytes(b"\xf0") == b"\x0f"
assert swapbitsinbytes(b"\xf0\x00") == b"\x0f\x00"
| true
| true
|
1c44e1d0468e64809d81b9a7adc2c74263251bc9
| 1,943
|
py
|
Python
|
common_config.py
|
Dervish13/bsidesns-backend
|
ba8173f2b81210a561b203973eb48d5c124870b1
|
[
"BSD-2-Clause"
] | null | null | null |
common_config.py
|
Dervish13/bsidesns-backend
|
ba8173f2b81210a561b203973eb48d5c124870b1
|
[
"BSD-2-Clause"
] | null | null | null |
common_config.py
|
Dervish13/bsidesns-backend
|
ba8173f2b81210a561b203973eb48d5c124870b1
|
[
"BSD-2-Clause"
] | 3
|
2020-01-12T13:34:35.000Z
|
2021-11-01T17:50:21.000Z
|
import os
from name import app_name
# from datetime import timedelta
SECRET_KEY = 'iQfPvB6sZaNHqVFI5CJa9rM1xOEVHKIM0LwifT04yLsPlZhSSvaDuZXOgJFSpJVq'
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
class Config:
NAME = app_name
API_TITLE = 'BSidesNS'
API_VERSION = '0'
PROJECT_ROOT = PROJECT_ROOT
DEBUG = False
SECURITY_PASSWORD_SALT = 'tilda'
SECRET_KEY = SECRET_KEY
SECURITY_TRACKABLE = False
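    # JWTs are delivered via cookies; the secure flag is relaxed in the
    # dev/test configs below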
JWT_SECRET_KEY = SECRET_KEY
JWT_TOKEN_LOCATION = ['cookies']
JWT_ACCESS_COOKIE_PATH = '/api/v0'
JWT_REFRESH_COOKIE_PATH = '/api/v0/auth/refresh'
JWT_SESSION_COOKIE = False
JWT_COOKIE_SECURE = True
# JWT_ACCESS_TOKEN_EXPIRES = timedelta(seconds=1)
# JWT_REFRESH_TOKEN_EXPIRES = timedelta(seconds=10)
OPENAPI_URL_PREFIX = '/doc'
OPENAPI_REDOC_PATH = '/redoc'
OPENAPI_SWAGGER_UI_PATH = '/swaggerui'
OPENAPI_SWAGGER_UI_URL = '/static/swaggerui/'
OPENAPI_VERSION = '2.0.0'
MEDIA_URL = '/media'
MEDIA_PATH = f'{PROJECT_ROOT}/media'
ACCOUNT_REQUEST_EXPIRY = 24 # in hours
PASSWORD_RESET_EXPIRY = 2 # in hours
DATABASE = {
'name': 'database.db',
'engine': 'SqliteDatabase',
}
MAIL = {
# 'host': 'mail.example.com',
# 'port': 587,
# 'ssl': True,
# 'username': 'someone@example.com',
# 'password': 'Sekrit',
}
FROM_EMAIL = 'office@bsidesns.org'
SUBJECTS = {
'prefix': '[BSidesNS] ',
'confirm': 'Account confirmation',
'register': 'Account registration',
}
@staticmethod
def init_app(app):
pass
class DevConfig(Config):
DEBUG = True
JWT_COOKIE_SECURE = False
SECURITY_SEND_REGISTER_EMAIL = False
class TestConfig(Config):
TESTING = True
JWT_COOKIE_SECURE = False
DATABASE = {
'name': 'test.db',
'engine': 'SqliteDatabase',
}
class ProdConfig(Config):
pass
| 25.233766
| 79
| 0.647967
|
import os
from name import app_name
SECRET_KEY = 'iQfPvB6sZaNHqVFI5CJa9rM1xOEVHKIM0LwifT04yLsPlZhSSvaDuZXOgJFSpJVq'
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
class Config:
NAME = app_name
API_TITLE = 'BSidesNS'
API_VERSION = '0'
PROJECT_ROOT = PROJECT_ROOT
DEBUG = False
SECURITY_PASSWORD_SALT = 'tilda'
SECRET_KEY = SECRET_KEY
SECURITY_TRACKABLE = False
JWT_SECRET_KEY = SECRET_KEY
JWT_TOKEN_LOCATION = ['cookies']
JWT_ACCESS_COOKIE_PATH = '/api/v0'
JWT_REFRESH_COOKIE_PATH = '/api/v0/auth/refresh'
JWT_SESSION_COOKIE = False
JWT_COOKIE_SECURE = True
OPENAPI_URL_PREFIX = '/doc'
OPENAPI_REDOC_PATH = '/redoc'
OPENAPI_SWAGGER_UI_PATH = '/swaggerui'
OPENAPI_SWAGGER_UI_URL = '/static/swaggerui/'
OPENAPI_VERSION = '2.0.0'
MEDIA_URL = '/media'
MEDIA_PATH = f'{PROJECT_ROOT}/media'
ACCOUNT_REQUEST_EXPIRY = 24
PASSWORD_RESET_EXPIRY = 2
DATABASE = {
'name': 'database.db',
'engine': 'SqliteDatabase',
}
MAIL = {
}
FROM_EMAIL = 'office@bsidesns.org'
SUBJECTS = {
'prefix': '[BSidesNS] ',
'confirm': 'Account confirmation',
'register': 'Account registration',
}
@staticmethod
def init_app(app):
pass
class DevConfig(Config):
DEBUG = True
JWT_COOKIE_SECURE = False
SECURITY_SEND_REGISTER_EMAIL = False
class TestConfig(Config):
TESTING = True
JWT_COOKIE_SECURE = False
DATABASE = {
'name': 'test.db',
'engine': 'SqliteDatabase',
}
class ProdConfig(Config):
pass
| true
| true
|
1c44e32562aa9115a0e38ce8b781e766efe4afaa
| 4,287
|
py
|
Python
|
pirates/effects/SteamCloud.py
|
itsyaboyrocket/pirates
|
6ca1e7d571c670b0d976f65e608235707b5737e3
|
[
"BSD-3-Clause"
] | 3
|
2021-02-25T06:38:13.000Z
|
2022-03-22T07:00:15.000Z
|
pirates/effects/SteamCloud.py
|
itsyaboyrocket/pirates
|
6ca1e7d571c670b0d976f65e608235707b5737e3
|
[
"BSD-3-Clause"
] | null | null | null |
pirates/effects/SteamCloud.py
|
itsyaboyrocket/pirates
|
6ca1e7d571c670b0d976f65e608235707b5737e3
|
[
"BSD-3-Clause"
] | 1
|
2021-02-25T06:38:17.000Z
|
2021-02-25T06:38:17.000Z
|
# uncompyle6 version 3.2.0
# Python bytecode 2.4 (62061)
# Decompiled from: Python 2.7.14 (v2.7.14:84471935ed, Sep 16 2017, 20:19:30) [MSC v.1500 32 bit (Intel)]
# Embedded file name: pirates.effects.SteamCloud
from pandac.PandaModules import *
from direct.interval.IntervalGlobal import *
from direct.particles import ParticleEffect
from direct.particles import Particles
from direct.particles import ForceGroup
from pirates.piratesgui.GameOptions import Options
from EffectController import EffectController
from PooledEffect import PooledEffect
import random
class SteamCloud(PooledEffect, EffectController):
__module__ = __name__
cardScale = 64.0
def __init__(self, parent=None):
PooledEffect.__init__(self)
EffectController.__init__(self)
if parent is not None:
self.reparentTo(parent)
if not SteamCloud.particleDummy:
SteamCloud.particleDummy = base.effectsRoot.attachNewNode(ModelNode('SteamCloudParticleDummy'))
SteamCloud.particleDummy.setDepthWrite(0)
SteamCloud.particleDummy.setLightOff()
SteamCloud.particleDummy.setBin('fixed', 120)
self.f = ParticleEffect.ParticleEffect('SteamCloud')
self.f.reparentTo(self)
model = loader.loadModel('models/effects/particleMaps')
self.card = model.find('**/particleWhiteSteam')
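        # sprite particle system emitted from a rectangle, textured with the steam card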
self.p0 = Particles.Particles('particles-1')
self.p0.setFactory('PointParticleFactory')
self.p0.setRenderer('SpriteParticleRenderer')
self.p0.setEmitter('RectangleEmitter')
self.f.addParticles(self.p0)
self.p0.setPoolSize(8)
self.p0.setBirthRate(0.5)
self.p0.setLitterSize(1)
self.p0.setLitterSpread(1)
self.p0.setSystemLifespan(0.0)
self.p0.setLocalVelocityFlag(1)
self.p0.setSystemGrowsOlderFlag(0)
self.p0.setFloorZ(-10.0)
self.p0.factory.setLifespanBase(10.0)
self.p0.factory.setLifespanSpread(4.0)
self.p0.factory.setMassBase(1.0)
self.p0.factory.setMassSpread(0.0)
self.p0.factory.setTerminalVelocityBase(400.0)
self.p0.factory.setTerminalVelocitySpread(0.0)
self.p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAOUT)
self.p0.renderer.setUserAlpha(0.25)
self.p0.renderer.setFromNode(self.card)
self.p0.renderer.setColor(Vec4(1.0, 1.0, 1.0, 1.0))
self.p0.renderer.setXScaleFlag(1)
self.p0.renderer.setYScaleFlag(1)
self.p0.renderer.setAnimAngleFlag(0)
self.p0.renderer.setNonanimatedTheta(0.0)
self.p0.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPBLENDLINEAR)
self.p0.renderer.setAlphaDisable(0)
self.p0.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
self.p0.emitter.setExplicitLaunchVector(Vec3(1.0, 0.0, 0.0))
self.p0.emitter.setRadiateOrigin(Point3(0.0, 0.0, 0.0))
self.setEffectScale(1.0)
return
def createTrack(self, lod=Options.SpecialEffectsHigh):
self.startEffect = Sequence(Func(self.p0.setBirthRate, 0.75), Func(self.p0.clearToInitial), Func(self.f.start, self, self.particleDummy), Func(self.f.reparentTo, self))
self.endEffect = Sequence(Func(self.p0.setBirthRate, 0.0), Wait(1.0), Func(self.cleanUpEffect))
self.track = Sequence(self.startEffect, Wait(1.0), self.endEffect)
def setScale(self, scale=VBase3(1, 1, 1)):
self.setEffectScale(scale[0])
def setEffectScale(self, scale):
self.p0.renderer.setInitialXScale(0.5 * self.cardScale * scale)
self.p0.renderer.setFinalXScale(3.0 * self.cardScale * scale)
self.p0.renderer.setInitialYScale(0.5 * self.cardScale * scale)
self.p0.renderer.setFinalYScale(3.0 * self.cardScale * scale)
self.p0.emitter.setAmplitude(1.0 * scale)
self.p0.emitter.setAmplitudeSpread(0.0)
self.p0.emitter.setOffsetForce(Vec3(0.0, -6.0, 17.0) * scale)
self.p0.emitter.setMinBound(Point2(-15.0, -1.0) * scale)
self.p0.emitter.setMaxBound(Point2(15.0, 1.0) * scale)
def cleanUpEffect(self):
EffectController.cleanUpEffect(self)
self.checkInEffect(self)
def destroy(self):
EffectController.destroy(self)
PooledEffect.destroy(self)
| 46.096774
| 176
| 0.696991
|
from pandac.PandaModules import *
from direct.interval.IntervalGlobal import *
from direct.particles import ParticleEffect
from direct.particles import Particles
from direct.particles import ForceGroup
from pirates.piratesgui.GameOptions import Options
from EffectController import EffectController
from PooledEffect import PooledEffect
import random
class SteamCloud(PooledEffect, EffectController):
__module__ = __name__
cardScale = 64.0
def __init__(self, parent=None):
PooledEffect.__init__(self)
EffectController.__init__(self)
if parent is not None:
self.reparentTo(parent)
if not SteamCloud.particleDummy:
SteamCloud.particleDummy = base.effectsRoot.attachNewNode(ModelNode('SteamCloudParticleDummy'))
SteamCloud.particleDummy.setDepthWrite(0)
SteamCloud.particleDummy.setLightOff()
SteamCloud.particleDummy.setBin('fixed', 120)
self.f = ParticleEffect.ParticleEffect('SteamCloud')
self.f.reparentTo(self)
model = loader.loadModel('models/effects/particleMaps')
self.card = model.find('**/particleWhiteSteam')
self.p0 = Particles.Particles('particles-1')
self.p0.setFactory('PointParticleFactory')
self.p0.setRenderer('SpriteParticleRenderer')
self.p0.setEmitter('RectangleEmitter')
self.f.addParticles(self.p0)
self.p0.setPoolSize(8)
self.p0.setBirthRate(0.5)
self.p0.setLitterSize(1)
self.p0.setLitterSpread(1)
self.p0.setSystemLifespan(0.0)
self.p0.setLocalVelocityFlag(1)
self.p0.setSystemGrowsOlderFlag(0)
self.p0.setFloorZ(-10.0)
self.p0.factory.setLifespanBase(10.0)
self.p0.factory.setLifespanSpread(4.0)
self.p0.factory.setMassBase(1.0)
self.p0.factory.setMassSpread(0.0)
self.p0.factory.setTerminalVelocityBase(400.0)
self.p0.factory.setTerminalVelocitySpread(0.0)
self.p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAOUT)
self.p0.renderer.setUserAlpha(0.25)
self.p0.renderer.setFromNode(self.card)
self.p0.renderer.setColor(Vec4(1.0, 1.0, 1.0, 1.0))
self.p0.renderer.setXScaleFlag(1)
self.p0.renderer.setYScaleFlag(1)
self.p0.renderer.setAnimAngleFlag(0)
self.p0.renderer.setNonanimatedTheta(0.0)
self.p0.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPBLENDLINEAR)
self.p0.renderer.setAlphaDisable(0)
self.p0.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
self.p0.emitter.setExplicitLaunchVector(Vec3(1.0, 0.0, 0.0))
self.p0.emitter.setRadiateOrigin(Point3(0.0, 0.0, 0.0))
self.setEffectScale(1.0)
return
def createTrack(self, lod=Options.SpecialEffectsHigh):
self.startEffect = Sequence(Func(self.p0.setBirthRate, 0.75), Func(self.p0.clearToInitial), Func(self.f.start, self, self.particleDummy), Func(self.f.reparentTo, self))
self.endEffect = Sequence(Func(self.p0.setBirthRate, 0.0), Wait(1.0), Func(self.cleanUpEffect))
self.track = Sequence(self.startEffect, Wait(1.0), self.endEffect)
def setScale(self, scale=VBase3(1, 1, 1)):
self.setEffectScale(scale[0])
def setEffectScale(self, scale):
self.p0.renderer.setInitialXScale(0.5 * self.cardScale * scale)
self.p0.renderer.setFinalXScale(3.0 * self.cardScale * scale)
self.p0.renderer.setInitialYScale(0.5 * self.cardScale * scale)
self.p0.renderer.setFinalYScale(3.0 * self.cardScale * scale)
self.p0.emitter.setAmplitude(1.0 * scale)
self.p0.emitter.setAmplitudeSpread(0.0)
self.p0.emitter.setOffsetForce(Vec3(0.0, -6.0, 17.0) * scale)
self.p0.emitter.setMinBound(Point2(-15.0, -1.0) * scale)
self.p0.emitter.setMaxBound(Point2(15.0, 1.0) * scale)
def cleanUpEffect(self):
EffectController.cleanUpEffect(self)
self.checkInEffect(self)
def destroy(self):
EffectController.destroy(self)
PooledEffect.destroy(self)
| true
| true
|
1c44e3c2f33006b5c3dda27583e1334edfeee1e7
| 3,581
|
py
|
Python
|
examples_progress_bar.py
|
ElCap1tan/ProgressPrinter
|
fd144e94543175b87a4d16234b05220a65c0140b
|
[
"MIT"
] | null | null | null |
examples_progress_bar.py
|
ElCap1tan/ProgressPrinter
|
fd144e94543175b87a4d16234b05220a65c0140b
|
[
"MIT"
] | 2
|
2019-09-10T21:48:21.000Z
|
2019-09-28T16:28:36.000Z
|
examples_progress_bar.py
|
ElCap1tan/ProgressPrinter
|
fd144e94543175b87a4d16234b05220a65c0140b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from time import sleep
from ProgressPrinter import ProgressBar
def main():
"""
Choose which examples to run in this method
"""
ex1()
ex2()
ex3()
ex4()
ex5()
def ex1():
"""
**Example 1**
::
pb1 = ProgressBar(100, '%', pre='Downloading file', post='Download finished', length=25)
pb1.print_progress() # Prints the initial empty progress bar
for mb in range(1, 101):
pb1.print_progress(mb)
sleep(0.15)
**Output:**
::
Downloading file
[========================>] - Finished 100 % of 100 %
Download finished
"""
pb1 = ProgressBar(100, '%', pre='Downloading file', post='Download finished', length=25)
pb1.print_progress() # Prints the initial empty progress bar
for mb in range(1, 101):
pb1.print_progress(mb)
sleep(0.15)
def ex2():
"""
**Example 2**
::
pb2 = ProgressBar(500, 'MB', pre='Downloading file', post='Download finished', head='#')
        pb2.print_progress() # Prints the initial empty progress bar
for mb in range(1, 501):
pb2.print_progress(mb)
sleep(0.02)
**Output:**
::
Downloading file
[=================================================#] - Finished 500 MB of 500 MB
Download finished
"""
pb2 = ProgressBar(500, 'MB', pre='Downloading file', post='Download finished', head='#')
pb2.print_progress() # Prints the initial empty progress bar
for mb in range(1, 501):
pb2.print_progress(mb)
sleep(0.02)
def ex3():
"""
**Example 3**
::
pb3 = ProgressBar(1000.12, 'MB', pre='Downloading file', post='Download finished', length=100)
pb3.print_progress() # Prints the initial empty progress bar
for mb in range(1, 1001):
if mb != 1000 and mb % 2 == 0:
mb = mb + 0.5
elif mb != 1000:
mb = mb + 0.25
else:
mb = mb + 0.12
pb3.print_progress(mb)
sleep(0.025)
**Output:**
::
Downloading file
[===================================================================================================>] - Finished 1000.12 MB of 1000.12 MB
Download finished
"""
pb3 = ProgressBar(1000.12, 'MB', pre='Downloading file', post='Download finished', length=100)
pb3.print_progress() # Prints the initial empty progress bar
for mb in range(1, 1001):
if mb != 1000 and mb % 2 == 0:
mb = mb + 0.5
elif mb != 1000:
mb = mb + 0.25
else:
mb = mb + 0.12
pb3.print_progress(mb)
sleep(0.025)
def ex4():
pb4 = ProgressBar(5, 'files', pre='Deleting files', post='Finished!', length=25, empty='*', fill='#')
pb4.print_progress() # Prints the initial empty progress bar
for file in range(1, 6):
pb4.print_progress(file, pre="Deleting file file{}.txt".format(file))
sleep(1)
def ex5():
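    # reads example.txt line by line, using the file's line count as the bar total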
with open('example.txt', 'r') as f:
pb5 = ProgressBar(len(f.readlines()), 'lines', pre="Reading lines from file {}".format(f.name), post='Finished reading file!')
f.seek(0) # Return to start of line after obtaining line count
pb5.print_progress() # Prints the initial empty progress bar
for lineno, line in enumerate(f, start=1):
pb5.print_progress(lineno, pre=line.replace('\n', ''))
sleep(1)
if __name__ == '__main__':
main()
| 27.335878
| 146
| 0.527506
|
from time import sleep
from ProgressPrinter import ProgressBar
def main():
ex1()
ex2()
ex3()
ex4()
ex5()
def ex1():
pb1 = ProgressBar(100, '%', pre='Downloading file', post='Download finished', length=25)
pb1.print_progress()
for mb in range(1, 101):
pb1.print_progress(mb)
sleep(0.15)
def ex2():
pb2 = ProgressBar(500, 'MB', pre='Downloading file', post='Download finished', head='#')
pb2.print_progress()
for mb in range(1, 501):
pb2.print_progress(mb)
sleep(0.02)
def ex3():
pb3 = ProgressBar(1000.12, 'MB', pre='Downloading file', post='Download finished', length=100)
pb3.print_progress()
for mb in range(1, 1001):
if mb != 1000 and mb % 2 == 0:
mb = mb + 0.5
elif mb != 1000:
mb = mb + 0.25
else:
mb = mb + 0.12
pb3.print_progress(mb)
sleep(0.025)
def ex4():
pb4 = ProgressBar(5, 'files', pre='Deleting files', post='Finished!', length=25, empty='*', fill='#')
pb4.print_progress()
for file in range(1, 6):
pb4.print_progress(file, pre="Deleting file file{}.txt".format(file))
sleep(1)
def ex5():
with open('example.txt', 'r') as f:
pb5 = ProgressBar(len(f.readlines()), 'lines', pre="Reading lines from file {}".format(f.name), post='Finished reading file!')
f.seek(0)
pb5.print_progress()
for lineno, line in enumerate(f, start=1):
pb5.print_progress(lineno, pre=line.replace('\n', ''))
sleep(1)
if __name__ == '__main__':
main()
| true
| true
|
1c44e4c3291a1e3a80c99161ea4d923297721848
| 415
|
py
|
Python
|
grakel/kernels/_isomorphism/__init__.py
|
vishalbelsare/GraKeL
|
1330c7ee4e66e6b273bcad83fff5be5df3230128
|
[
"BSD-3-Clause"
] | null | null | null |
grakel/kernels/_isomorphism/__init__.py
|
vishalbelsare/GraKeL
|
1330c7ee4e66e6b273bcad83fff5be5df3230128
|
[
"BSD-3-Clause"
] | null | null | null |
grakel/kernels/_isomorphism/__init__.py
|
vishalbelsare/GraKeL
|
1330c7ee4e66e6b273bcad83fff5be5df3230128
|
[
"BSD-3-Clause"
] | null | null | null |
"""Init file for the _isomorphism submodule project."""
# Author: Ioannis Siglidis <y.siglidis@gmail.com>
# This file is a modification and extension of the [GNU LPGL] licensed
# PyBliss which can be found at: http://www.tcs.hut.fi/Software/bliss/
# PyBliss and Bliss are copyright of their respective owners.
# License: BSD 3 clause
from grakel.kernels._isomorphism.bliss import Graph
__all__ = [
"Graph",
]
| 34.583333
| 70
| 0.749398
|
from grakel.kernels._isomorphism.bliss import Graph
__all__ = [
"Graph",
]
| true
| true
|
1c44e4e779a113971e0da7c7ed64813dda5530f5
| 11,232
|
py
|
Python
|
custom_components/hacs/hacsbase/hacs.py
|
Lucstricke/integration
|
1543686f3d99c8f16ec4fc37b2edd70b2a3e29a5
|
[
"MIT"
] | 1
|
2021-12-12T18:19:48.000Z
|
2021-12-12T18:19:48.000Z
|
custom_components/hacs/hacsbase/hacs.py
|
Lucstricke/integration
|
1543686f3d99c8f16ec4fc37b2edd70b2a3e29a5
|
[
"MIT"
] | null | null | null |
custom_components/hacs/hacsbase/hacs.py
|
Lucstricke/integration
|
1543686f3d99c8f16ec4fc37b2edd70b2a3e29a5
|
[
"MIT"
] | null | null | null |
"""Initialize the HACS base."""
from datetime import timedelta
from aiogithubapi import GitHubException
from aiogithubapi.exceptions import GitHubNotModifiedException
from custom_components.hacs.helpers import HacsHelpers
from custom_components.hacs.helpers.functions.get_list_from_default import (
async_get_list_from_default,
)
from custom_components.hacs.helpers.functions.register_repository import (
register_repository,
)
from custom_components.hacs.helpers.functions.store import (
async_load_from_store,
async_save_to_store,
)
from custom_components.hacs.share import (
get_removed,
is_removed,
list_removed_repositories,
)
from ..base import HacsBase
from ..enums import HacsCategory, HacsStage
from ..exceptions import HacsExecutionStillInProgress
from ..share import get_factory, get_queue
from ..utils.queue_manager import QueueManager
class Hacs(HacsBase, HacsHelpers):
"""The base class of HACS, nested throughout the project."""
factory = get_factory()
queue = get_queue()
def async_set_repository_id(self, repository, repo_id):
"""Update a repository id."""
existing_repo_id = str(repository.data.id)
if existing_repo_id == repo_id:
return
if existing_repo_id != "0":
raise ValueError(
f"The repo id for {repository.data.full_name_lower} is already set to {existing_repo_id}"
)
repository.data.id = repo_id
self.repositories.register(repository)
@property
def sorted_by_name(self):
"""Return a sorted(by name) list of repository objects."""
return sorted(self.repositories.list_all, key=lambda x: x.display_name)
@property
def sorted_by_repository_name(self):
"""Return a sorted(by repository_name) list of repository objects."""
return sorted(self.repositories.list_all, key=lambda x: x.data.full_name)
async def register_repository(self, full_name, category, check=True):
"""Register a repository."""
await register_repository(full_name, category, check=check)
async def startup_tasks(self, _event=None):
"""Tasks that are started after startup."""
await self.async_set_stage(HacsStage.STARTUP)
self.status.background_task = True
self.hass.bus.async_fire("hacs/status", {})
await self.handle_critical_repositories_startup()
await self.async_load_default_repositories()
await self.clear_out_removed_repositories()
self.recuring_tasks.append(
self.hass.helpers.event.async_track_time_interval(
self.recurring_tasks_installed, timedelta(hours=2)
)
)
self.recuring_tasks.append(
self.hass.helpers.event.async_track_time_interval(
self.recurring_tasks_all, timedelta(hours=25)
)
)
self.recuring_tasks.append(
self.hass.helpers.event.async_track_time_interval(
self.prosess_queue, timedelta(minutes=10)
)
)
self.hass.bus.async_fire("hacs/reload", {"force": True})
await self.recurring_tasks_installed()
await self.prosess_queue()
self.status.startup = False
self.status.background_task = False
self.hass.bus.async_fire("hacs/status", {})
await self.async_set_stage(HacsStage.RUNNING)
async def handle_critical_repositories_startup(self):
"""Handled critical repositories during startup."""
alert = False
critical = await async_load_from_store(self.hass, "critical")
if not critical:
return
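        # Alert if any stored critical repository has not yet been
        # acknowledged by the user.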
for repo in critical:
if not repo["acknowledged"]:
alert = True
if alert:
self.log.critical("URGENT!: Check the HACS panel!")
self.hass.components.persistent_notification.create(
title="URGENT!", message="**Check the HACS panel!**"
)
async def handle_critical_repositories(self):
"""Handled critical repositories during runtime."""
# Get critical repositories
critical_queue = QueueManager()
instored = []
critical = []
was_installed = False
try:
critical = await self.async_github_get_hacs_default_file("critical")
except GitHubNotModifiedException:
return
except GitHubException:
pass
if not critical:
self.log.debug("No critical repositories")
return
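        # Compare against what is already stored so only newly flagged
        # critical repositories trigger an uninstall and an unacknowledged
        # alert entry.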
stored_critical = await async_load_from_store(self.hass, "critical")
for stored in stored_critical or []:
instored.append(stored["repository"])
stored_critical = []
for repository in critical:
removed_repo = get_removed(repository["repository"])
removed_repo.removal_type = "critical"
repo = self.repositories.get_by_full_name(repository["repository"])
stored = {
"repository": repository["repository"],
"reason": repository["reason"],
"link": repository["link"],
"acknowledged": True,
}
if repository["repository"] not in instored:
if repo is not None and repo.installed:
self.log.critical(
"Removing repository %s, it is marked as critical",
repository["repository"],
)
was_installed = True
stored["acknowledged"] = False
# Remove from HACS
                    critical_queue.add(repo.uninstall())
repo.remove()
stored_critical.append(stored)
removed_repo.update_data(stored)
# Uninstall
await critical_queue.execute()
# Save to FS
await async_save_to_store(self.hass, "critical", stored_critical)
# Restart HASS
if was_installed:
self.log.critical("Resarting Home Assistant")
self.hass.async_create_task(self.hass.async_stop(100))
async def prosess_queue(self, _notarealarg=None):
"""Recurring tasks for installed repositories."""
if not self.queue.has_pending_tasks:
self.log.debug("Nothing in the queue")
return
if self.queue.running:
self.log.debug("Queue is already running")
return
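        # async_can_update (defined on HacsBase) caps how many queued tasks
        # may run this round, presumably to respect the GitHub API rate limit.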
can_update = await self.async_can_update()
self.log.debug(
"Can update %s repositories, items in queue %s",
can_update,
self.queue.pending_tasks,
)
if can_update != 0:
self.status.background_task = True
self.hass.bus.async_fire("hacs/status", {})
try:
await self.queue.execute(can_update)
except HacsExecutionStillInProgress:
pass
self.status.background_task = False
self.hass.bus.async_fire("hacs/status", {})
async def recurring_tasks_installed(self, _notarealarg=None):
"""Recurring tasks for installed repositories."""
self.log.debug("Starting recurring background task for installed repositories")
self.status.background_task = True
self.hass.bus.async_fire("hacs/status", {})
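        # Queue a safe update for every installed repository; HACS itself
        # ("hacs/integration") is skipped while startup is still running.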
for repository in self.repositories.list_all:
if self.status.startup and repository.data.full_name == "hacs/integration":
continue
if repository.data.installed and repository.data.category in self.common.categories:
self.queue.add(self.factory.safe_update(repository))
await self.handle_critical_repositories()
self.status.background_task = False
self.hass.bus.async_fire("hacs/status", {})
await self.data.async_write()
self.log.debug("Recurring background task for installed repositories done")
async def recurring_tasks_all(self, _notarealarg=None):
"""Recurring tasks for all repositories."""
self.log.debug("Starting recurring background task for all repositories")
self.status.background_task = True
self.hass.bus.async_fire("hacs/status", {})
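        # Queue a common update for every known repository in the enabled
        # categories, then refresh the default and removed lists.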
for repository in self.repositories.list_all:
if repository.data.category in self.common.categories:
self.queue.add(self.factory.safe_common_update(repository))
await self.async_load_default_repositories()
await self.clear_out_removed_repositories()
self.status.background_task = False
await self.data.async_write()
self.hass.bus.async_fire("hacs/status", {})
self.hass.bus.async_fire("hacs/repository", {"action": "reload"})
self.log.debug("Recurring background task for all repositories done")
async def clear_out_removed_repositories(self):
"""Clear out blaclisted repositories."""
need_to_save = False
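        # Repositories removed upstream: installed ones are only warned about
        # (unless the removal was critical), everything else is unregistered
        # on the spot.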
for removed in list_removed_repositories():
repository = self.repositories.get_by_full_name(removed.repository)
if repository is not None:
if repository.data.installed and removed.removal_type != "critical":
self.log.warning(
f"You have {repository.data.full_name} installed with HACS "
+ "this repository has been removed, please consider removing it. "
+ f"Removal reason ({removed.removal_type})"
)
else:
need_to_save = True
repository.remove()
if need_to_save:
await self.data.async_write()
async def async_load_default_repositories(self):
"""Load known repositories."""
self.log.info("Loading known repositories")
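        # Load the removal metadata first so the category loading below can
        # skip repositories that have been removed from the default lists.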
for item in await async_get_list_from_default(HacsCategory.REMOVED):
removed = get_removed(item["repository"])
removed.reason = item.get("reason")
removed.link = item.get("link")
removed.removal_type = item.get("removal_type")
for category in self.common.categories or []:
self.queue.add(self.async_get_category_repositories(HacsCategory(category)))
await self.prosess_queue()
async def async_get_category_repositories(self, category: HacsCategory):
"""Get repositories from category."""
repositories = await async_get_list_from_default(category)
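        # Skip entries that were renamed, removed, or archived upstream.
        # Repositories that are already registered only get their id recorded
        # in the default list; new ones are queued for registration.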
for repo in repositories:
if self.common.renamed_repositories.get(repo):
repo = self.common.renamed_repositories[repo]
if is_removed(repo):
continue
if repo in self.common.archived_repositories:
continue
repository = self.repositories.get_by_full_name(repo)
            if repository is not None:
                if str(repository.data.id) not in self.common.default:
                    self.common.default.append(str(repository.data.id))
                continue
self.queue.add(self.factory.safe_register(repo, category))
| 38.597938
| 105
| 0.631855
|
from datetime import timedelta
from aiogithubapi import GitHubException
from aiogithubapi.exceptions import GitHubNotModifiedException
from custom_components.hacs.helpers import HacsHelpers
from custom_components.hacs.helpers.functions.get_list_from_default import (
async_get_list_from_default,
)
from custom_components.hacs.helpers.functions.register_repository import (
register_repository,
)
from custom_components.hacs.helpers.functions.store import (
async_load_from_store,
async_save_to_store,
)
from custom_components.hacs.share import (
get_removed,
is_removed,
list_removed_repositories,
)
from ..base import HacsBase
from ..enums import HacsCategory, HacsStage
from ..exceptions import HacsExecutionStillInProgress
from ..share import get_factory, get_queue
from ..utils.queue_manager import QueueManager
class Hacs(HacsBase, HacsHelpers):
factory = get_factory()
queue = get_queue()
def async_set_repository_id(self, repository, repo_id):
existing_repo_id = str(repository.data.id)
if existing_repo_id == repo_id:
return
if existing_repo_id != "0":
raise ValueError(
f"The repo id for {repository.data.full_name_lower} is already set to {existing_repo_id}"
)
repository.data.id = repo_id
self.repositories.register(repository)
@property
def sorted_by_name(self):
return sorted(self.repositories.list_all, key=lambda x: x.display_name)
@property
def sorted_by_repository_name(self):
return sorted(self.repositories.list_all, key=lambda x: x.data.full_name)
async def register_repository(self, full_name, category, check=True):
await register_repository(full_name, category, check=check)
async def startup_tasks(self, _event=None):
await self.async_set_stage(HacsStage.STARTUP)
self.status.background_task = True
self.hass.bus.async_fire("hacs/status", {})
await self.handle_critical_repositories_startup()
await self.async_load_default_repositories()
await self.clear_out_removed_repositories()
self.recuring_tasks.append(
self.hass.helpers.event.async_track_time_interval(
self.recurring_tasks_installed, timedelta(hours=2)
)
)
self.recuring_tasks.append(
self.hass.helpers.event.async_track_time_interval(
self.recurring_tasks_all, timedelta(hours=25)
)
)
self.recuring_tasks.append(
self.hass.helpers.event.async_track_time_interval(
self.prosess_queue, timedelta(minutes=10)
)
)
self.hass.bus.async_fire("hacs/reload", {"force": True})
await self.recurring_tasks_installed()
await self.prosess_queue()
self.status.startup = False
self.status.background_task = False
self.hass.bus.async_fire("hacs/status", {})
await self.async_set_stage(HacsStage.RUNNING)
async def handle_critical_repositories_startup(self):
alert = False
critical = await async_load_from_store(self.hass, "critical")
if not critical:
return
for repo in critical:
if not repo["acknowledged"]:
alert = True
if alert:
self.log.critical("URGENT!: Check the HACS panel!")
self.hass.components.persistent_notification.create(
title="URGENT!", message="**Check the HACS panel!**"
)
async def handle_critical_repositories(self):
critical_queue = QueueManager()
instored = []
critical = []
was_installed = False
try:
critical = await self.async_github_get_hacs_default_file("critical")
except GitHubNotModifiedException:
return
except GitHubException:
pass
if not critical:
self.log.debug("No critical repositories")
return
stored_critical = await async_load_from_store(self.hass, "critical")
for stored in stored_critical or []:
instored.append(stored["repository"])
stored_critical = []
for repository in critical:
removed_repo = get_removed(repository["repository"])
removed_repo.removal_type = "critical"
repo = self.repositories.get_by_full_name(repository["repository"])
stored = {
"repository": repository["repository"],
"reason": repository["reason"],
"link": repository["link"],
"acknowledged": True,
}
if repository["repository"] not in instored:
if repo is not None and repo.installed:
self.log.critical(
"Removing repository %s, it is marked as critical",
repository["repository"],
)
was_installed = True
stored["acknowledged"] = False
                    critical_queue.add(repo.uninstall())
repo.remove()
stored_critical.append(stored)
removed_repo.update_data(stored)
await critical_queue.execute()
await async_save_to_store(self.hass, "critical", stored_critical)
if was_installed:
self.log.critical("Resarting Home Assistant")
self.hass.async_create_task(self.hass.async_stop(100))
async def prosess_queue(self, _notarealarg=None):
if not self.queue.has_pending_tasks:
self.log.debug("Nothing in the queue")
return
if self.queue.running:
self.log.debug("Queue is already running")
return
can_update = await self.async_can_update()
self.log.debug(
"Can update %s repositories, items in queue %s",
can_update,
self.queue.pending_tasks,
)
if can_update != 0:
self.status.background_task = True
self.hass.bus.async_fire("hacs/status", {})
try:
await self.queue.execute(can_update)
except HacsExecutionStillInProgress:
pass
self.status.background_task = False
self.hass.bus.async_fire("hacs/status", {})
async def recurring_tasks_installed(self, _notarealarg=None):
self.log.debug("Starting recurring background task for installed repositories")
self.status.background_task = True
self.hass.bus.async_fire("hacs/status", {})
for repository in self.repositories.list_all:
if self.status.startup and repository.data.full_name == "hacs/integration":
continue
if repository.data.installed and repository.data.category in self.common.categories:
self.queue.add(self.factory.safe_update(repository))
await self.handle_critical_repositories()
self.status.background_task = False
self.hass.bus.async_fire("hacs/status", {})
await self.data.async_write()
self.log.debug("Recurring background task for installed repositories done")
async def recurring_tasks_all(self, _notarealarg=None):
self.log.debug("Starting recurring background task for all repositories")
self.status.background_task = True
self.hass.bus.async_fire("hacs/status", {})
for repository in self.repositories.list_all:
if repository.data.category in self.common.categories:
self.queue.add(self.factory.safe_common_update(repository))
await self.async_load_default_repositories()
await self.clear_out_removed_repositories()
self.status.background_task = False
await self.data.async_write()
self.hass.bus.async_fire("hacs/status", {})
self.hass.bus.async_fire("hacs/repository", {"action": "reload"})
self.log.debug("Recurring background task for all repositories done")
async def clear_out_removed_repositories(self):
need_to_save = False
for removed in list_removed_repositories():
repository = self.repositories.get_by_full_name(removed.repository)
if repository is not None:
if repository.data.installed and removed.removal_type != "critical":
self.log.warning(
f"You have {repository.data.full_name} installed with HACS "
+ "this repository has been removed, please consider removing it. "
+ f"Removal reason ({removed.removal_type})"
)
else:
need_to_save = True
repository.remove()
if need_to_save:
await self.data.async_write()
async def async_load_default_repositories(self):
self.log.info("Loading known repositories")
for item in await async_get_list_from_default(HacsCategory.REMOVED):
removed = get_removed(item["repository"])
removed.reason = item.get("reason")
removed.link = item.get("link")
removed.removal_type = item.get("removal_type")
for category in self.common.categories or []:
self.queue.add(self.async_get_category_repositories(HacsCategory(category)))
await self.prosess_queue()
async def async_get_category_repositories(self, category: HacsCategory):
repositories = await async_get_list_from_default(category)
for repo in repositories:
if self.common.renamed_repositories.get(repo):
repo = self.common.renamed_repositories[repo]
if is_removed(repo):
continue
if repo in self.common.archived_repositories:
continue
repository = self.repositories.get_by_full_name(repo)
            if repository is not None:
                if str(repository.data.id) not in self.common.default:
                    self.common.default.append(str(repository.data.id))
                continue
self.queue.add(self.factory.safe_register(repo, category))
| true
| true
|