Dataset schema (29 columns; ⌀ marks a nullable column):

| column | type | range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 1 – 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 – 239 |
| max_stars_repo_name | string | length 5 – 130 |
| max_stars_repo_head_hexsha | string | length 40 – 78 |
| max_stars_repo_licenses | list | length 1 – 10 |
| max_stars_count ⌀ | int64 | 1 – 191k |
| max_stars_repo_stars_event_min_datetime ⌀ | string | length 24 |
| max_stars_repo_stars_event_max_datetime ⌀ | string | length 24 |
| max_issues_repo_path | string | length 3 – 239 |
| max_issues_repo_name | string | length 5 – 130 |
| max_issues_repo_head_hexsha | string | length 40 – 78 |
| max_issues_repo_licenses | list | length 1 – 10 |
| max_issues_count ⌀ | int64 | 1 – 67k |
| max_issues_repo_issues_event_min_datetime ⌀ | string | length 24 |
| max_issues_repo_issues_event_max_datetime ⌀ | string | length 24 |
| max_forks_repo_path | string | length 3 – 239 |
| max_forks_repo_name | string | length 5 – 130 |
| max_forks_repo_head_hexsha | string | length 40 – 78 |
| max_forks_repo_licenses | list | length 1 – 10 |
| max_forks_count ⌀ | int64 | 1 – 105k |
| max_forks_repo_forks_event_min_datetime ⌀ | string | length 24 |
| max_forks_repo_forks_event_max_datetime ⌀ | string | length 24 |
| content | string | length 1 – 1.03M |
| avg_line_length | float64 | 1 – 958k |
| max_line_length | int64 | 1 – 1.03M |
| alphanum_fraction | float64 | 0 – 1 |
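This is the column layout of a Hugging Face dataset preview for a code corpus. As a minimal sketch of how records with this schema could be consumed, assuming the data is published as a Hugging Face dataset (the dataset name `bigcode/the-stack` and the `data/python` subset below are assumptions, not something this dump confirms):

```python
# Minimal sketch: stream rows with the schema above and filter them.
# Dataset name and data_dir are assumptions; substitute the actual source.
from datasets import load_dataset

rows = load_dataset("bigcode/the-stack", data_dir="data/python",
                    split="train", streaming=True)
for row in rows:
    # keep small, MIT-licensed files
    if row["size"] < 10_000 and "MIT" in row["max_stars_repo_licenses"]:
        print(row["max_stars_repo_path"], row["max_stars_count"])
```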

---
hexsha: 794ab32b5932561453d6667cdf03981f5834b496 | size: 54 | ext: py | lang: Python
max_stars: bin/programmanager.py @ pyDarkVOS/InarkOS (head 21b57a7f0e5a06d276e18e25ac8c30232eb0d8de), licenses ["Apache-2.0"], count 3, events 2022-02-20T17:22:18.000Z → 2022-03-20T02:56:07.000Z
max_issues: bin/programmanager.py @ InarkVOS/InarkOS (head 21b57a7f0e5a06d276e18e25ac8c30232eb0d8de), licenses ["Apache-2.0"], count null
max_forks: bin/programmanager.py @ InarkVOS/InarkOS (head 21b57a7f0e5a06d276e18e25ac8c30232eb0d8de), licenses ["Apache-2.0"], count 2, events 2022-03-11T13:36:37.000Z → 2022-03-18T23:47:20.000Z
content:
```python
import os
def run(p):
os.system(f"python3 {p}")
```
avg_line_length: 10.8 | max_line_length: 27 | alphanum_fraction: 0.592593
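The last three columns of each record are derived from `content`. A sketch of plausible definitions follows; the exact formulas the dataset used are an assumption (in particular, whether `avg_line_length` divides the character count by the number of lines is not recoverable from this dump):

```python
# Sketch of the derived columns; definitions are assumed, not confirmed.
def derived_stats(content: str):
    lines = content.splitlines()
    avg_line_length = len(content) / max(len(lines), 1)
    max_line_length = max((len(line) for line in lines), default=0)
    alphanum_fraction = sum(c.isalnum() for c in content) / max(len(content), 1)
    return avg_line_length, max_line_length, alphanum_fraction
```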

---
hexsha: 794ab40206460f242570eab47b2398553de8af36 | size: 7,985 | ext: py | lang: Python
max_stars: Collections-a-installer/community-general-2.4.0/plugins/modules/remote_management/redfish/idrac_redfish_facts.py @ d-amien-b/simple-getwordpress (head da90d515a0aa837b633d50db4d91d22b031c04a2), licenses ["MIT"], count 17, events 2017-06-07T23:15:01.000Z → 2021-08-30T14:32:36.000Z
max_issues: same path @ d-amien-b/simple-getwordpress (head da90d515a0aa837b633d50db4d91d22b031c04a2), licenses ["MIT"], count 9, events 2017-06-25T03:31:52.000Z → 2021-05-17T23:43:12.000Z
max_forks: same path @ d-amien-b/simple-getwordpress (head da90d515a0aa837b633d50db4d91d22b031c04a2), licenses ["MIT"], count 3, events 2018-05-26T21:31:22.000Z → 2019-09-28T17:00:45.000Z
content:
```python
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2019 Dell EMC Inc.
# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: idrac_redfish_info
short_description: Gather PowerEdge server information through iDRAC using Redfish APIs
description:
- Builds Redfish URIs locally and sends them to remote iDRAC controllers to
get information back.
- For use with Dell EMC iDRAC operations that require Redfish OEM extensions
- This module was called C(idrac_redfish_facts) before Ansible 2.9, returning C(ansible_facts).
Note that the M(community.general.idrac_redfish_info) module no longer returns C(ansible_facts)!
options:
category:
required: true
description:
- Category to execute on iDRAC controller
type: str
command:
required: true
description:
- List of commands to execute on iDRAC controller
- C(GetManagerAttributes) returns the list of dicts containing iDRAC,
LifecycleController and System attributes
type: list
baseuri:
required: true
description:
- Base URI of iDRAC controller
type: str
username:
required: true
description:
- User for authentication with iDRAC controller
type: str
password:
required: true
description:
- Password for authentication with iDRAC controller
type: str
timeout:
description:
- Timeout in seconds for URL requests to OOB controller
default: 10
type: int
author: "Jose Delarosa (@jose-delarosa)"
'''
EXAMPLES = '''
- name: Get Manager attributes with a default of 20 seconds
community.general.idrac_redfish_info:
category: Manager
command: GetManagerAttributes
baseuri: "{{ baseuri }}"
username: "{{ username }}"
password: "{{ password }}"
timeout: 20
register: result
# Examples to display the value of all or a single iDRAC attribute
- name: Store iDRAC attributes as a fact variable
ansible.builtin.set_fact:
idrac_attributes: "{{ result.redfish_facts.entries | selectattr('Id', 'defined') | selectattr('Id', 'equalto', 'iDRACAttributes') | list | first }}"
- name: Display all iDRAC attributes
ansible.builtin.debug:
var: idrac_attributes
- name: Display the value of 'Syslog.1.SysLogEnable' iDRAC attribute
ansible.builtin.debug:
var: idrac_attributes['Syslog.1.SysLogEnable']
# Examples to display the value of all or a single LifecycleController attribute
- name: Store LifecycleController attributes as a fact variable
ansible.builtin.set_fact:
lc_attributes: "{{ result.redfish_facts.entries | selectattr('Id', 'defined') | selectattr('Id', 'equalto', 'LCAttributes') | list | first }}"
- name: Display LifecycleController attributes
ansible.builtin.debug:
var: lc_attributes
- name: Display the value of 'CollectSystemInventoryOnRestart' attribute
ansible.builtin.debug:
var: lc_attributes['LCAttributes.1.CollectSystemInventoryOnRestart']
# Examples to display the value of all or a single System attribute
- name: Store System attributes as a fact variable
ansible.builtin.set_fact:
system_attributes: "{{ result.redfish_facts.entries | selectattr('Id', 'defined') | selectattr('Id', 'equalto', 'SystemAttributes') | list | first }}"
- name: Display System attributes
ansible.builtin.debug:
var: system_attributes
- name: Display the value of 'PSRedPolicy'
ansible.builtin.debug:
var: system_attributes['ServerPwr.1.PSRedPolicy']
'''
RETURN = '''
msg:
description: different results depending on task
returned: always
type: dict
sample: List of Manager attributes
'''
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils
from ansible.module_utils._text import to_native
class IdracRedfishUtils(RedfishUtils):
def get_manager_attributes(self):
result = {}
manager_attributes = []
properties = ['Attributes', 'Id']
response = self.get_request(self.root_uri + self.manager_uri)
if response['ret'] is False:
return response
data = response['data']
# Manager attributes are supported as part of iDRAC OEM extension
# Attributes are supported only on iDRAC9
try:
for members in data[u'Links'][u'Oem'][u'Dell'][u'DellAttributes']:
attributes_uri = members[u'@odata.id']
response = self.get_request(self.root_uri + attributes_uri)
if response['ret'] is False:
return response
data = response['data']
attributes = {}
for prop in properties:
if prop in data:
attributes[prop] = data.get(prop)
if attributes:
manager_attributes.append(attributes)
result['ret'] = True
except (AttributeError, KeyError) as e:
result['ret'] = False
result['msg'] = "Failed to find attribute/key: " + str(e)
result["entries"] = manager_attributes
return result
CATEGORY_COMMANDS_ALL = {
"Manager": ["GetManagerAttributes"]
}
def main():
result = {}
module = AnsibleModule(
argument_spec=dict(
category=dict(required=True),
command=dict(required=True, type='list'),
baseuri=dict(required=True),
username=dict(required=True),
password=dict(required=True, no_log=True),
timeout=dict(type='int', default=10)
),
supports_check_mode=False
)
is_old_facts = module._name in ('idrac_redfish_facts', 'community.general.idrac_redfish_facts')
if is_old_facts:
module.deprecate("The 'idrac_redfish_facts' module has been renamed to 'idrac_redfish_info', "
"and the renamed one no longer returns ansible_facts",
version='3.0.0', collection_name='community.general') # was Ansible 2.13
category = module.params['category']
command_list = module.params['command']
# admin credentials used for authentication
creds = {'user': module.params['username'],
'pswd': module.params['password']}
# timeout
timeout = module.params['timeout']
# Build root URI
root_uri = "https://" + module.params['baseuri']
rf_utils = IdracRedfishUtils(creds, root_uri, timeout, module)
# Check that Category is valid
if category not in CATEGORY_COMMANDS_ALL:
module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, CATEGORY_COMMANDS_ALL.keys())))
# Check that all commands are valid
for cmd in command_list:
# Fail if even one command given is invalid
if cmd not in CATEGORY_COMMANDS_ALL[category]:
module.fail_json(msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category])))
# Organize by Categories / Commands
if category == "Manager":
# execute only if we find a Manager resource
result = rf_utils._find_managers_resource()
if result['ret'] is False:
module.fail_json(msg=to_native(result['msg']))
for command in command_list:
if command == "GetManagerAttributes":
result = rf_utils.get_manager_attributes()
# Return data back or fail with proper message
if result['ret'] is True:
del result['ret']
if is_old_facts:
module.exit_json(ansible_facts=dict(redfish_facts=result))
else:
module.exit_json(redfish_facts=result)
else:
module.fail_json(msg=to_native(result['msg']))
if __name__ == '__main__':
main()
```
avg_line_length: 33.691983 | max_line_length: 156 | alphanum_fraction: 0.665373

---
hexsha: 794ab4049b71b24e9ef1ef43cc06677b29a78388 | size: 2,995 | ext: py | lang: Python
max_stars: scripts/p600/main_p600_low_res.py @ sakurakhadag/escp2-client (head f8d58bdaedc4f7ca811769538586b759c37eb355), licenses ["MIT"], count null
max_issues: scripts/p600/main_p600_low_res.py @ sakurakhadag/escp2-client (head f8d58bdaedc4f7ca811769538586b759c37eb355), licenses ["MIT"], count 5, events 2019-10-10T13:53:48.000Z → 2019-10-16T19:09:28.000Z
max_forks: scripts/p600/main_p600_low_res.py @ sakurakhadag/escp2-client (head f8d58bdaedc4f7ca811769538586b759c37eb355), licenses ["MIT"], count 2, events 2019-10-11T17:56:31.000Z → 2021-01-15T11:33:58.000Z
content:
```python
import sys
import os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__),
'..', '..')))
import binascii
import math
from hex_functions import *
from esc_functions import *
from characters import *
import numpy as np
# cd to project base directory
abspath = os.path.abspath(__file__)
dname = os.path.dirname(abspath)
os.chdir(os.path.join(dname, '..', '..'))
# SPECIFY FILENAME, PRINTERNAME AND OUTPUTFOLDER
filename = 'test_p600_low_res'
# one of the printers for which the header and footer files are available in
# the 'prns' folder
printer = 'p600'
outputfolder = 'output'
# SET PARAMETERS
# These parameters depend on the specific printer
# printer units can be found by parsing the prn file
# Same with color codes, print a file with all colors and parse it
# other specs can be found by looking in spec sheet or service manual (if
# available)
# Shown parameters should work with R2400 / P600 / R3000
# unit parameters
pmgmt = 720
vert = 720
hor = 720
mbase = 2880
nozzles = 180
# set nozzle row numbers (def black = 00)
# Should work with R2400 and P600
black = b'\x00'
lightBlack = b'\x10'
lightLightBlack = b'\x30'
cyan = b'\x02'
lightCyan = b'\x12'
magenta = b'\x01'
lightMagenta = b'\x11'
yellow = b'\x04'
# select dot size
d = b'\x10'
# set page method ID
esc_m = ESC_m(b'\x20')
# set uni or bi directional mode
unim = b'\x00' # 01 uni, 00 bi
# CREATE THE RASTERDATA
# initialize empty byte string containing the rasterdata
raster = b''
# location of raster (in inches)
x = 1 # one inch from left edge of paper
y = 1 # one inch from top edge of paper
# Create the matrix
# width of the matrix (number of droplets in printhead travel direction)
width = 100
matrix = np.zeros((nozzles, width)) # init the matrix as all zeros
# set all rows of the matrix to 3's (large droplets), except for the last 2
# rows
matrix[0:58, :] = 3
# Create the raster,
# First set the x position of the printhead,
# Print the matrix
raster += ESC_dollar(hor, x) + ESC_i_matrix(black, matrix, spacing=0, fan=0)
# First set the vertical position on the paper, then print the raster as
# composed in the previous step, add a linefeed
rasterdata = ESC_v(pmgmt, y) + raster + b'\x0c'
# LOAD HEADER AND FOOTER FOR SELECTED PRINTER
header = load_prn_file('prns/' + printer + '/' + printer + '-header.prn')
footer = load_prn_file('prns/' + printer + '/' + printer + '-footer.prn')
# COMPOSE BODY
body = ESC_Graph() + ESC_Units(pmgmt, vert, hor, mbase) + ESC_Kmode() + \
ESC_imode(n=b'\x00') + ESC_Umode(unim) + ESC_edot(d) + \
ESC_Dras(v=240 / 3, h=120 / 3) + ESC_C(pmgmt) + ESC_c(pmgmt) + ESC_S(pmgmt) # + esc_m
# COMBINE
total = header + body + rasterdata + footer
# CREATE OUTPUT DIR
filename = outputfolder + '/' + filename + '.prn'
if not os.path.exists(outputfolder):
    os.makedirs(outputfolder)
# SAVE PRN FILE
save_prn_file(total, filename)
print('DONE!')
print('path: ' + filename)
```
avg_line_length: 27.990654 | max_line_length: 90 | alphanum_fraction: 0.691152

---
hexsha: 794ab41008d0ac086e0964a2a75758193146f1df | size: 3,699 | ext: py | lang: Python
max_stars: lib/galaxy/managers/taggable.py @ KyleL1998/galaxy (head 10be2cd8ac05680f8291eea7996f4d3fc76197de), licenses ["CC-BY-3.0"], count null
max_issues: lib/galaxy/managers/taggable.py @ KyleL1998/galaxy (head 10be2cd8ac05680f8291eea7996f4d3fc76197de), licenses ["CC-BY-3.0"], count null
max_forks: lib/galaxy/managers/taggable.py @ KyleL1998/galaxy (head 10be2cd8ac05680f8291eea7996f4d3fc76197de), licenses ["CC-BY-3.0"], count null
content:
```python
"""
Mixins for Taggable model managers and serializers.
"""
# from galaxy import exceptions as galaxy_exceptions
import logging
from galaxy.util import unicodify
log = logging.getLogger(__name__)
# TODO: work out the relation between serializers and managers and then fold these into the parent of the two
def _tag_str_gen(item):
# TODO: which user is this? all?
for tag in item.tags:
tag_str = tag.user_tname
if tag.value is not None:
tag_str += ":" + tag.user_value
yield tag_str
def _tags_to_strings(item):
if not hasattr(item, 'tags'):
return None
return sorted(list(_tag_str_gen(item)))
def _tags_from_strings(item, tag_handler, new_tags_list, user=None):
# TODO: have to assume trans.user here...
if not user:
# raise galaxy_exceptions.RequestParameterMissingException( 'User required for tags on ' + str( item ) )
# TODO: this becomes a 'silent failure' - no tags are set. This is a questionable approach but
# I haven't found a better one for anon users copying items with tags
return
# TODO: duped from tags manager - de-dupe when moved to taggable mixin
tag_handler.delete_item_tags(user, item)
new_tags_str = ','.join(new_tags_list)
tag_handler.apply_item_tags(user, item, unicodify(new_tags_str, 'utf-8'))
# TODO:!! does the creation of new_tags_list mean there are now more and more unused tag rows in the db?
class TaggableManagerMixin(object):
#: class of TagAssociation (e.g. HistoryTagAssociation)
tag_assoc = None
# TODO: most of this can be done by delegating to the GalaxyTagHandler?
def get_tags(self, item):
"""
Return a list of tag strings.
"""
return _tags_to_strings(item)
def set_tags(self, item, new_tags, user=None):
"""
Set an `item`'s tags from a list of strings.
"""
return _tags_from_strings(item, self.app.tag_handler, new_tags, user=user)
# def tags_by_user( self, user, **kwargs ):
# TODO: here or GalaxyTagHandler
# pass
class TaggableSerializerMixin(object):
def add_serializers(self):
self.serializers['tags'] = self.serialize_tags
def serialize_tags(self, item, key, **context):
"""
Return tags as a list of strings.
"""
return _tags_to_strings(item)
class TaggableDeserializerMixin(object):
def add_deserializers(self):
self.deserializers['tags'] = self.deserialize_tags
def deserialize_tags(self, item, key, val, user=None, **context):
"""
Make sure `val` is a valid list of tag strings and assign them.
Note: this will erase any previous tags.
"""
new_tags_list = self.validate.basestring_list(key, val)
_tags_from_strings(item, self.app.tag_handler, new_tags_list, user=user)
return item.tags
class TaggableFilterMixin(object):
def filter_has_partial_tag(self, item, val):
"""
Return True if any tag partially contains `val`.
"""
for tag_str in _tag_str_gen(item):
if val in tag_str:
return True
return False
def filter_has_tag(self, item, val):
"""
Return True if any tag exactly equals `val`.
"""
for tag_str in _tag_str_gen(item):
if val == tag_str:
return True
return False
def _add_parsers(self):
self.fn_filter_parsers.update({
'tag': {
'op': {
'eq' : self.filter_has_tag,
'has' : self.filter_has_partial_tag,
}
}
})
```
avg_line_length: 30.073171 | max_line_length: 112 | alphanum_fraction: 0.633685

---
hexsha: 794ab6d682d48d1fd11d8eb4e7b620152d9e3259 | size: 514 | ext: py | lang: Python
max_stars: load.py @ TranslatorIIPrototypes/NodeNormalization (head 52461e0940e618984452b3fdf8cc13698ec71390), licenses ["MIT"], count 2, events 2021-01-12T19:34:38.000Z → 2022-03-08T22:40:20.000Z
max_issues: load.py @ TranslatorSRI/NodeNormalization (head 3e8ca8440dd4bf831c901c048bc334661ca9aa5a), licenses ["MIT"], count 71, events 2020-10-02T12:35:49.000Z → 2022-03-28T20:58:46.000Z
max_forks: load.py @ TranslatorSRI/NodeNormalization (head 3e8ca8440dd4bf831c901c048bc334661ca9aa5a), licenses ["MIT"], count 2, events 2020-10-06T16:01:50.000Z → 2021-07-19T21:24:36.000Z
content:
```python
from node_normalizer.loader import NodeLoader
import asyncio
async def load_redis():
# instantiate the class that does all the work
loader = NodeLoader()
# call to load redis instances with normalized node data
success: bool = await loader.load(1_000)
# check the return
    if not success:
        loader.print_debug_msg('Failed to load node normalization data.', True)
    else:
        loader.print_debug_msg('Success', True)
if __name__ == '__main__':
asyncio.run(load_redis())
```
avg_line_length: 24.47619 | max_line_length: 80 | alphanum_fraction: 0.70428

---
hexsha: 794ab75caaf75c071f6775f075f5c26ed0f8ddad | size: 4,049 | ext: py | lang: Python
max_stars: src/robotide/application/releasenotes.py @ veryl-technologies/t24-tests-ide (head 16cd803895916a785c0e1fec3f71f9388c21edc9), licenses ["ECL-2.0", "Apache-2.0"], count null
max_issues: src/robotide/application/releasenotes.py @ veryl-technologies/t24-tests-ide (head 16cd803895916a785c0e1fec3f71f9388c21edc9), licenses ["ECL-2.0", "Apache-2.0"], count null
max_forks: src/robotide/application/releasenotes.py @ veryl-technologies/t24-tests-ide (head 16cd803895916a785c0e1fec3f71f9388c21edc9), licenses ["ECL-2.0", "Apache-2.0"], count null
content:
```python
# Copyright 2008-2012 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import wx
from wx.lib.ClickableHtmlWindow import PyClickableHtmlWindow
from robotide.version import VERSION
from robotide.pluginapi import ActionInfo
class ReleaseNotes(object):
"""Shows release notes of the current version.
The release notes tab will automatically be shown once per release.
The user can also view them on demand by selecting "Release Notes"
from the help menu.
"""
def __init__(self, application):
self.application = application
settings = application.settings
self.version_shown = settings.get('version_shown', '')
self._view = None
self.enable()
def enable(self):
self.application.frame.actions.register_action(ActionInfo('Help', 'Release Notes', self.show,
doc='Show the release notes'))
self.show_if_updated()
def show_if_updated(self):
if self.version_shown != VERSION:
self.show()
self.application.settings['version_shown'] = VERSION
def show(self, event=None):
if not self._view:
self._view = self._create_view()
self.application.frame.notebook.AddPage(self._view, "Release Notes", select=False)
self.application.frame.notebook.show_tab(self._view)
def bring_to_front(self):
if self._view:
self.application.frame.notebook.show_tab(self._view)
def _create_view(self):
panel = wx.Panel(self.application.frame.notebook)
html_win = PyClickableHtmlWindow(panel, -1)
html_win.SetStandardFonts()
html_win.SetPage(WELCOME_TEXT + RELEASE_NOTES)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(html_win, 1, wx.EXPAND|wx.ALL, border=8)
panel.SetSizer(sizer)
return panel
WELCOME_TEXT = """
<h2>Welcome to use RIDE version %s</h2>
<p>Thank you for using the Robot Framework IDE (RIDE).</p>
<p>Visit RIDE on the web:</p>
<ul>
<li><a href="https://github.com/robotframework/RIDE">
RIDE project page on github</a></li>
<li><a href="https://github.com/robotframework/RIDE/wiki/Installation-Instructions">
Installation instructions</a></li>
<li><a href="https://github.com/robotframework/RIDE/wiki/Release-notes">
Release notes</a></li>
</ul>
""" % VERSION
# *** DO NOT EDIT THE CODE BELOW MANUALLY ***
# Release notes are updated automatically by package.py script whenever
# a numbered distribution is created.
RELEASE_NOTES = """
<h2>Release notes for 1.2.3</h2>
<table border="1">
<tr>
<td><p><b>ID</b></p></td>
<td><p><b>Type</b></p></td>
<td><p><b>Priority</b></p></td>
<td><p><b>Summary</b></p></td>
</tr>
<tr>
<td><a href="http://code.google.com/p/robotframework-ride/issues/detail?id=1290">Issue 1290</a></td>
<td>Defect</td>
<td>Medium</td>
<td>RIDE runs not selected (with checkboxes) tests</td>
</tr>
<tr>
<td><a href="http://code.google.com/p/robotframework-ride/issues/detail?id=1306">Issue 1306</a></td>
<td>Defect</td>
<td>Medium</td>
<td>[RIDE 1.2.2 running on Python 2.7.5.] - Unable to insert cell in RIDE, if the TC contains FOR loop.</td>
</tr>
<tr>
<td><a href="http://code.google.com/p/robotframework-ride/issues/detail?id=1307">Issue 1307</a></td>
<td>Enhancement</td>
<td>Medium</td>
<td>Possibility to create new folder with right click</td>
</tr>
</table>
<p>Altogether 3 issues.</p>
"""
```
avg_line_length: 34.313559 | max_line_length: 108 | alphanum_fraction: 0.663621

---
hexsha: 794aba662fded38d8df0dd71a2abc06dceceb757 | size: 106 | ext: py | lang: Python
max_stars: lbry/__init__.py @ SNOmad1/lbry-sdk (head 268decd6556619625151c574890eba9f8dc2c90d), licenses ["MIT"], count 2, events 2021-12-24T18:29:49.000Z → 2021-12-26T02:04:57.000Z
max_issues: lbry/__init__.py @ SNOmad1/lbry-sdk (head 268decd6556619625151c574890eba9f8dc2c90d), licenses ["MIT"], count null
max_forks: lbry/__init__.py @ SNOmad1/lbry-sdk (head 268decd6556619625151c574890eba9f8dc2c90d), licenses ["MIT"], count null
content:
```python
__version__ = "0.102.0"
version = tuple(map(int, __version__.split('.'))) # pylint: disable=invalid-name
```
avg_line_length: 35.333333 | max_line_length: 81 | alphanum_fraction: 0.698113

---
hexsha: 794aba680c0c281a148a75220cc54e9ca3b83457 | size: 1,056 | ext: py | lang: Python
max_stars: scripts/examples/OpenMV/14-WiFi-Shield/http_client_ssl.py @ jiskra/openmv (head a0f321836f77f94d8118910598dcdb79eb784d58), licenses ["MIT"], count 1,761, events 2015-07-10T23:14:17.000Z → 2022-03-30T07:49:49.000Z
max_issues: same path @ jiskra/openmv (head a0f321836f77f94d8118910598dcdb79eb784d58), licenses ["MIT"], count 487, events 2015-07-07T23:21:20.000Z → 2022-03-30T17:13:22.000Z
max_forks: same path @ jiskra/openmv (head a0f321836f77f94d8118910598dcdb79eb784d58), licenses ["MIT"], count 882, events 2015-08-01T08:34:19.000Z → 2022-03-30T07:36:23.000Z
content:
```python
# Simple HTTPS client example.
import network, usocket, ussl
# AP info
SSID="" # Network SSID
KEY="" # Network key
PORT = 443
HOST = "www.google.com"
# Init wlan module and connect to network
print("Trying to connect... (may take a while)...")
wlan = network.WINC()
wlan.connect(SSID, key=KEY, security=wlan.WPA_PSK)
# We should have a valid IP now via DHCP
print(wlan.ifconfig())
# Get addr info via DNS
addr = usocket.getaddrinfo(HOST, PORT)[0][4]
print(addr)
# Create a new socket and connect to addr
client = usocket.socket(usocket.AF_INET, usocket.SOCK_STREAM)
client.connect(addr)
# Set timeout
client.settimeout(3.0)
client = ussl.wrap_socket(client, server_hostname=HOST)
# Send HTTP request and recv response
request = "GET / HTTP/1.1\r\n"
request += "HOST: %s\r\n"
request += "User-Agent: Mozilla/5.0\r\n"
request += "Connection: keep-alive\r\n\r\n"
# Add more headers if needed.
client.write(request%(HOST)+"\r\n")
response = client.read(1024)
for l in response.split(b"\r\n"):
print(l.decode())
# Close socket
client.close()
```
avg_line_length: 21.55102 | max_line_length: 61 | alphanum_fraction: 0.708333

---
hexsha: 794abb083955bc5d92be1f7fc8a92b93ac3b8745 | size: 3,994 | ext: py | lang: Python
max_stars: src/python/dxpy/templating/templates/python/parallelized/src/code.py @ yesimon/dx-toolkit (head c13a16d570a55bde7778d6db9268f5c3fca81d0f), licenses ["Apache-2.0"], count null
max_issues: src/python/dxpy/templating/templates/python/parallelized/src/code.py @ yesimon/dx-toolkit (head c13a16d570a55bde7778d6db9268f5c3fca81d0f), licenses ["Apache-2.0"], count null
max_forks: src/python/dxpy/templating/templates/python/parallelized/src/code.py @ yesimon/dx-toolkit (head c13a16d570a55bde7778d6db9268f5c3fca81d0f), licenses ["Apache-2.0"], count null
content:
```python
#!/usr/bin/env python
# DX_APP_WIZARD_NAME DX_APP_WIZARD_VERSION
# Generated by dx-app-wizard.
#
# Parallelized execution pattern: Your app will generate multiple jobs
# to perform some computation in parallel, followed by a final
# "postprocess" stage that will perform any additional computations as
# necessary.
#
# See https://wiki.dnanexus.com/Developer-Portal for documentation and
# tutorials on how to modify this file.
#
# DNAnexus Python Bindings (dxpy) documentation:
# http://autodoc.dnanexus.com/bindings/python/current/
import os
import dxpy
@dxpy.entry_point("postprocess")
def postprocess(process_outputs):
# Change the following to process whatever input this stage
# receives. You may also want to copy and paste the logic to download
# and upload files here as well if this stage receives file input
# and/or makes file output.
for output in process_outputs:
pass
return { "answer": "placeholder value" }
@dxpy.entry_point("process")
def process(input1):
# Change the following to process whatever input this stage
# receives. You may also want to copy and paste the logic to download
# and upload files here as well if this stage receives file input
# and/or makes file output.
print(input1)
return { "output": "placeholder value" }
@dxpy.entry_point("main")
def main(DX_APP_WIZARD_INPUT_SIGNATURE):
DX_APP_WIZARD_INITIALIZE_INPUTDX_APP_WIZARD_DOWNLOAD_ANY_FILES
# Split your work into parallel tasks. As an example, the
# following generates 10 subjobs running with the same dummy
# input.
subjobs = []
for i in range(10):
subjob_input = { "input1": True }
subjobs.append(dxpy.new_dxjob(subjob_input, "process"))
# The following line creates the job that will perform the
# "postprocess" step of your app. We've given it an input field
# that is a list of job-based object references created from the
# "process" jobs we just created. Assuming those jobs have an
# output field called "output", these values will be passed to the
# "postprocess" job. Because these values are not ready until the
# "process" jobs finish, the "postprocess" job WILL NOT RUN until
# all job-based object references have been resolved (i.e. the
# jobs they reference have finished running).
#
# If you do not plan to have the "process" jobs create output that
# the "postprocess" job will require, then you can explicitly list
# the dependencies to wait for those jobs to finish by setting the
# "depends_on" field to the list of subjobs to wait for (it
# accepts either dxpy handlers or string IDs in the list). We've
# included this parameter in the line below as well for
# completeness, though it is unnecessary if you are providing
# job-based object references in the input that refer to the same
# set of jobs.
postprocess_job = dxpy.new_dxjob(fn_input={ "process_outputs": [subjob.get_output_ref("output") for subjob in subjobs] },
fn_name="postprocess",
depends_on=subjobs)
DX_APP_WIZARD_UPLOAD_ANY_FILES
# If you would like to include any of the output fields from the
# postprocess_job as the output of your app, you should return it
# here using a job-based object reference. If the output field in
# the postprocess function is called "answer", you can pass that
# on here as follows:
#
# return { "app_output_field": postprocess_job.get_output_ref("answer"), ...}
#
# Tip: you can include in your output at this point any open
# objects (such as files) which will be closed by a job that
# finishes later. The system will check to make sure that the
# output object is closed and will attempt to clone it out as
# output into the parent container only after all subjobs have
# finished.
output = {}
DX_APP_WIZARD_OUTPUT
return output
dxpy.run()
```
avg_line_length: 40.755102 | max_line_length: 125 | alphanum_fraction: 0.710566

---
hexsha: 794abb215412ad2a470d84ea519053f7e0e39ef7 | size: 4,248 | ext: py | lang: Python
max_stars: src/transformers/models/electra/__init__.py @ kct22aws/transformers (head 04cddaf402591e9f5bdb5f116a111d829a0ce4f4), licenses ["Apache-2.0"], count 5, events 2020-10-30T13:07:02.000Z → 2021-03-17T12:18:30.000Z
max_issues: src/transformers/models/electra/__init__.py @ guang7400613/transformers (head 28e091430eea9e0d40839e56fd0d57aec262f5f9), licenses ["Apache-2.0"], count 1, events 2022-01-17T03:24:35.000Z → 2022-01-17T03:24:35.000Z
max_forks: src/transformers/models/electra/__init__.py @ guang7400613/transformers (head 28e091430eea9e0d40839e56fd0d57aec262f5f9), licenses ["Apache-2.0"], count 1, events 2022-02-08T19:37:39.000Z → 2022-02-08T19:37:39.000Z
content:
```python
# flake8: noqa
# There's no way to ignore "F401 '...' imported but unused" warnings in this
# module, but to preserve other warnings. So, don't check this module at all.
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig"],
"tokenization_electra": ["ElectraTokenizer"],
}
if is_tokenizers_available():
_import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]
if is_torch_available():
_import_structure["modeling_electra"] = [
"ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"ElectraForCausalLM",
"ElectraForMaskedLM",
"ElectraForMultipleChoice",
"ElectraForPreTraining",
"ElectraForQuestionAnswering",
"ElectraForSequenceClassification",
"ElectraForTokenClassification",
"ElectraModel",
"ElectraPreTrainedModel",
"load_tf_weights_in_electra",
]
if is_tf_available():
_import_structure["modeling_tf_electra"] = [
"TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFElectraForMaskedLM",
"TFElectraForMultipleChoice",
"TFElectraForPreTraining",
"TFElectraForQuestionAnswering",
"TFElectraForSequenceClassification",
"TFElectraForTokenClassification",
"TFElectraModel",
"TFElectraPreTrainedModel",
]
if is_flax_available():
_import_structure["modeling_flax_electra"] = [
"FlaxElectraForMaskedLM",
"FlaxElectraForMultipleChoice",
"FlaxElectraForPreTraining",
"FlaxElectraForQuestionAnswering",
"FlaxElectraForSequenceClassification",
"FlaxElectraForTokenClassification",
"FlaxElectraModel",
"FlaxElectraPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig
from .tokenization_electra import ElectraTokenizer
if is_tokenizers_available():
from .tokenization_electra_fast import ElectraTokenizerFast
if is_torch_available():
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
if is_tf_available():
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
if is_flax_available():
from .modeling_flax_electra import (
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
```
avg_line_length: 34.258065 | max_line_length: 118 | alphanum_fraction: 0.703154
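The `else` branch at the end of the electra record above is the interesting move: at import time the module replaces itself in `sys.modules` with a `_LazyModule`, so the heavyweight torch/TF/Flax submodules load only when one of their symbols is first accessed. A hypothetical illustration of the effect (exact behavior depends on the installed `transformers` version):

```python
# Hypothetical illustration of the lazy-import effect of _LazyModule.
import sys
from transformers.models.electra import ElectraConfig  # resolves configuration_electra only

# The modeling submodule (and torch) should still be unloaded at this point.
print("transformers.models.electra.modeling_electra" in sys.modules)  # expected: False
```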

---
hexsha: 794abb80a002be7f7bff494974c6d6c89f0ff17c | size: 11,493 | ext: py | lang: Python
max_stars: tests/test_elements.py @ gatling-nrl/scikit-fem (head 04730d80d612470b7e802eed4c21dd96b89cef61), licenses ["BSD-3-Clause"], count 1, events 2019-12-07T15:28:13.000Z → 2019-12-07T15:28:13.000Z
max_issues: tests/test_elements.py @ gatling-nrl/scikit-fem (head 04730d80d612470b7e802eed4c21dd96b89cef61), licenses ["BSD-3-Clause"], count 3, events 2022-01-07T00:56:47.000Z → 2022-01-12T20:06:34.000Z
max_forks: tests/test_elements.py @ gdmcbain/scikit-fem (head 73890816c2142385abf4a9ffcd8d233e2d25e865), licenses ["BSD-3-Clause"], count null
content:
```python
from unittest import TestCase, main
import numpy as np
from numpy.testing import assert_allclose, assert_array_equal
import pytest
from skfem.element import (
ElementHex1,
ElementHexS2,
ElementLineP0,
ElementLineP1,
ElementLineP2,
ElementLinePp,
ElementLineMini,
ElementQuad0,
ElementQuad1,
ElementQuad2,
ElementQuadP,
ElementQuadRT0,
ElementQuadS2,
ElementTetMini,
ElementTetP0,
ElementTetP1,
ElementTetP2,
ElementTriMini,
ElementTriP0,
ElementTriP1,
ElementTriP2,
ElementTriP3,
ElementTriP4,
ElementTriRT0,
ElementVectorH1,
ElementHex2,
ElementQuadBFS,
ElementTriCR,
ElementTriCCR,
ElementTetCR,
ElementTetCCR,
ElementTriHermite,
ElementTriMorley,
ElementTriArgyris,
ElementTriDG,
ElementTetDG,
ElementQuadDG,
ElementHexDG,
ElementWedge1,
)
from skfem.mesh import MeshHex, MeshLine, MeshQuad, MeshTet, MeshTri
from skfem.assembly import InteriorBasis, Functional
from skfem.mapping import MappingAffine
class TestNodality(TestCase):
"""Test for Element.doflocs."""
elems = [
ElementLineP0(),
ElementLineP1(),
ElementLineP2(),
ElementLinePp(1),
ElementLinePp(3),
ElementLineMini(),
ElementTriP0(),
ElementTriP1(),
ElementTriP2(),
ElementTriP3(),
ElementTriP4(),
ElementTriMini(),
ElementQuad0(),
ElementQuad1(),
ElementQuad2(),
ElementQuadS2(),
ElementQuadP(1),
ElementQuadP(3),
ElementTetP0(),
ElementTetP1(),
ElementTetP2(),
ElementTetMini(),
ElementHex1(),
ElementHexS2(),
ElementHex2(),
ElementTetCR(),
ElementTetCCR(),
ElementTriCR(),
ElementTriCCR(),
ElementWedge1(),
]
def runTest(self):
for e in self.elems:
N = e.doflocs.shape[0]
Ih = np.zeros((N, N))
for itr in range(N):
Ih[itr] = e.lbasis(e.doflocs.T, itr)[0]
# Remove nan-rows: test nodality only on non-nan doflocs.
#
# Some elements, such as ElementTriMini might have a combination
# of nodal dofs and non-nodal dofs.
#
# Nodal dof is defined so that there exists a point where the
# corresponding basis function is one, and other basis functions
# are zero. Non-nodal dof does not satisfy this property.
ix = np.isnan(np.sum(Ih, axis=1))
Nnan = np.sum(ix)
ixs = np.nonzero(~ix)[0]
Ih = Ih[ixs].T[ixs].T
assert_allclose(Ih, np.eye(N - Nnan), atol=1e-13,
err_msg="{}".format(type(e)))
class TestNodalityTriRT0(TestCase):
elem = ElementTriRT0()
def runTest(self):
e = self.elem
N = e.doflocs.shape[0]
Ih = np.zeros((N, N))
normals = np.array([[0., -1.],
[1 / np.sqrt(2), 1 / np.sqrt(2)],
[-1., 0.]]).T
for itr in range(N):
# calculate integral of normal component over edge
A = np.sum(e.lbasis(e.doflocs.T, itr)[0] * normals, axis=0)
n = np.array([1., np.sqrt(2), 1.])
Ih[itr] = A * n
assert_allclose(Ih, np.eye(N),
err_msg="{}".format(type(e)))
class TestNodalityQuadRT0(TestCase):
elem = ElementQuadRT0()
def runTest(self):
e = self.elem
N = e.doflocs.shape[0]
Ih = np.zeros((N, N))
normals = np.array([[0., -1.],
[1., 0.],
[0., 1.],
[-1., 0.]]).T
for itr in range(N):
# calculate integral of normal component over edge
A = np.sum(e.lbasis(e.doflocs.T, itr)[0] * normals, axis=0)
n = np.ones(4)
Ih[itr] = A * n
assert_allclose(Ih, np.eye(N),
err_msg="{}".format(type(e)))
class TestComposite(TestCase):
def runTest(self):
from skfem.element.element_composite import ElementComposite
self.check_equivalence(
ElementComposite(ElementTriP1(),
ElementTriP1()),
ElementVectorH1(ElementTriP1())
)
def check_equivalence(self, ec, ev):
X = np.array([[0.125, 0.1111], [0.0555, 0.6]])
m = MeshTri.init_refdom()
mapping = MappingAffine(m)
for k in range(6):
for i in [0, 1]:
# accessing i'th component looks slightly different
if ec.gbasis(mapping, X, k)[i].is_zero():
continue
assert_array_equal(
ev.gbasis(mapping, X, k)[0].value[i],
ec.gbasis(mapping, X, k)[i].value
)
for j in [0, 1]:
assert_array_equal(
ev.gbasis(mapping, X, k)[0].grad[i][j],
ec.gbasis(mapping, X, k)[i].grad[j]
)
class TestCompositeMul(TestComposite):
def runTest(self):
self.check_equivalence(
ElementTriP1() * ElementTriP1(),
ElementVectorH1(ElementTriP1())
)
class TestCompatibilityWarning(TestCase):
meshes = [
MeshTet,
MeshQuad,
MeshHex,
MeshLine,
]
elem = ElementTriP1
def runTest(self):
for m in self.meshes:
def init_incompatible():
return InteriorBasis(m(), self.elem())
self.assertRaises(ValueError, init_incompatible)
class TestDerivatives(TestCase):
"""Test values of derivatives."""
elems = [
ElementLineP0(),
ElementLineP1(),
ElementLineP2(),
ElementLineMini(),
ElementTriP0(),
ElementTriP1(),
ElementTriP2(),
ElementTriP3(),
ElementTriP4(),
ElementTriMini(),
ElementQuad0(),
ElementQuad1(),
ElementQuad2(),
ElementQuadS2(),
ElementTetP0(),
ElementTetP1(),
ElementTetP2(),
ElementTetMini(),
ElementHex1(),
ElementHexS2(),
ElementHex2(),
ElementTriCR(),
ElementTriCCR(),
ElementTetCR(),
ElementTetCCR(),
ElementWedge1(),
]
def runTest(self):
for elem in self.elems:
eps = 1e-6
for base in [0., .3, .6, .9]:
if elem.dim == 1:
y = np.array([[base, base + eps]])
elif elem.dim == 2:
y = np.array([[base, base + eps, base, base],
[base, base, base, base + eps]])
elif elem.dim == 3:
y = np.array([[base, base + eps, base, base, base, base],
[base, base, base, base + eps, base, base],
[base, base, base, base, base, base + eps]])
i = 0
while True:
try:
out = elem.lbasis(y, i)
except ValueError:
break
diff = (out[0][1] - out[0][0]) / eps
errmsg = 'x-derivative for {}th bfun failed for {}'
self.assertAlmostEqual(diff, out[1][0][0], delta=1e-3,
msg=errmsg.format(i, elem))
if elem.dim > 1:
diff = (out[0][3] - out[0][2]) / eps
errmsg = 'y-derivative for {}th bfun failed for {}'
self.assertAlmostEqual(diff, out[1][1][3], delta=1e-3,
msg=errmsg.format(i, elem))
if elem.dim == 3:
diff = (out[0][5] - out[0][4]) / eps
errmsg = 'z-derivative for {}th bfun failed for {}'
self.assertAlmostEqual(diff, out[1][2][4], delta=1e-3,
msg=errmsg.format(i, elem))
i += 1
class TestPartitionofUnity(TestCase):
"""Test that elements form a partition of unity."""
elems = [
ElementLineP1(),
ElementLineP2(),
ElementTriP1(),
ElementTriP2(),
ElementTriP3(),
ElementTriP4(),
ElementQuad1(),
ElementQuad2(),
ElementQuadS2(),
ElementTetP1(),
ElementTetP2(),
ElementHex1(),
ElementHexS2(),
ElementHex2(),
ElementTetCR(),
ElementTetCCR(),
ElementTriCR(),
ElementTriCCR(),
ElementWedge1(),
]
def runTest(self):
for elem in self.elems:
if elem.dim == 1:
y = np.array([[.15]])
elif elem.dim == 2:
y = np.array([[.15],
[.15]])
elif elem.dim == 3:
y = np.array([[.15],
[.15],
[.15]])
out = 0.
for i in range(elem.doflocs.shape[0]):
out += elem.lbasis(y, i)[0][0]
self.assertAlmostEqual(out, 1, msg='failed for {}'.format(elem))
class TestElementLinePp(TestCase):
def test_p_less_than_1_error(self):
"""Tests that exception is thrown when initializing with p < 1."""
with self.assertRaises(ValueError):
ElementLinePp(0)
class TestElementQuadBFS(TestCase):
def test_throw_index_error(self):
"""Tests that exception is thrown when i % 4 not in (0, 1, 2, 3)."""
element = ElementQuadBFS()
with self.assertRaises(ValueError):
element.gdof(0, 0, -1)
with self.assertRaises(ValueError):
element.gdof(0, 0, 16)
@pytest.mark.parametrize(
"m,e,edg",
[
(MeshTri().refined(), ElementTriP1(), ElementTriDG),
(MeshTri().refined(), ElementTriP2(), ElementTriDG),
(MeshTet().refined(), ElementTetP1(), ElementTetDG),
(MeshTet().refined(), ElementTetP2(), ElementTetDG),
(MeshTri().refined(), ElementTriArgyris(), ElementTriDG),
(MeshTri().refined(), ElementTriMorley(), ElementTriDG),
(MeshTri().refined(), ElementTriHermite(), ElementTriDG),
(MeshHex().refined(), ElementHex1(), ElementHexDG),
(MeshQuad().refined(), ElementQuad1(), ElementQuadDG),
]
)
def test_dg_element(m, e, edg):
edg = edg(e)
@Functional
def square(w):
return w['random'] ** 2
basis = InteriorBasis(m, e)
basisdg = InteriorBasis(m, edg)
assert_allclose(
square.assemble(
basis,
random=basis.interpolate(
basis.zeros() + 1)),
square.assemble(
basisdg,
random=basisdg.interpolate(
basisdg.zeros() + 1)),
)
@pytest.mark.parametrize(
"e,edg",
[
(ElementTriP1(), ElementTriDG),
(ElementTetP2(), ElementTetDG),
(ElementTriArgyris(), ElementTriDG),
(ElementQuad1(), ElementQuadDG),
(ElementQuadP(4), ElementQuadDG),
(ElementHex2(), ElementHexDG),
]
)
def test_initialize_dg_composite_elements(e, edg):
E = edg(e) * e
```
avg_line_length: 28.876884 | max_line_length: 78 | alphanum_fraction: 0.507178

---
hexsha: 794abbe2fa47dc251d96ee54829be7ee1b916ab6 | size: 305 | ext: py | lang: Python
max_stars: 30_of_codes/Day 3 - Intro to Conditional Statements/2-conditionalStatements.py @ Kani712/Hacker_Rank (head d208ef88ac33c056b89785688cf43d90275d00da), licenses ["MIT"], count null
max_issues: 30_of_codes/Day 3 - Intro to Conditional Statements/2-conditionalStatements.py @ Kani712/Hacker_Rank (head d208ef88ac33c056b89785688cf43d90275d00da), licenses ["MIT"], count null
max_forks: 30_of_codes/Day 3 - Intro to Conditional Statements/2-conditionalStatements.py @ Kani712/Hacker_Rank (head d208ef88ac33c056b89785688cf43d90275d00da), licenses ["MIT"], count null
content:
```python
import math
import os
import random
import re
import sys
if __name__ == '__main__':
N = int(input())
if N % 2 != 0:
print("Weird")
if N % 2 == 0 and N in range(2, 6):
print("Not Weird")
if N % 2 == 0 and N in range(6, 21):
print("Weird")
if N % 2 == 0 and N > 20:
print("Not Weird")
```
avg_line_length: 16.944444 | max_line_length: 36 | alphanum_fraction: 0.567213
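The four `if` statements in the record above are mutually exclusive once N's parity is known, so the same behavior can be written as a single if/elif chain; a sketch, assuming the usual HackerRank constraint of 1 ≤ N ≤ 100:

```python
# Equivalent if/elif form of the record above (same output for 1 <= N <= 100).
N = int(input())
if N % 2 != 0:
    print("Weird")
elif 2 <= N <= 5:
    print("Not Weird")
elif 6 <= N <= 20:
    print("Weird")
else:
    print("Not Weird")
```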

---
hexsha: 794abd0ba787033014c9661e708d6f864f34373b | size: 655 | ext: py | lang: Python
max_stars: src/python-version/src/main.py @ mopsfl/RLO-Jail-Time-Converter (head 0d5966eb77f702b44e2a8fd2c72aa63355595f10), licenses ["Apache-2.0"], count 1, events 2021-11-17T20:59:22.000Z → 2021-11-17T20:59:22.000Z
max_issues: src/python-version/src/main.py @ mopsfl/RLO-Jail-Time-Converter (head 0d5966eb77f702b44e2a8fd2c72aa63355595f10), licenses ["Apache-2.0"], count null
max_forks: src/python-version/src/main.py @ mopsfl/RLO-Jail-Time-Converter (head 0d5966eb77f702b44e2a8fd2c72aa63355595f10), licenses ["Apache-2.0"], count null
content:
```python
import os
print("mopsfl - Real Life Online - Jail Time Convert - v.0.1\n")
try:
months = int(input("Enter the months: "))
method = input("Enter to what you want to convert it (hour, day): ")
if method == "hour":
print("\nResults: " + str(months/60) + " hour(s)")
print("Results rounded: " + str(round(months/60)) + " hour(s)\n\n")
elif method == "day":
print("\nResults: " + str(months/1440) + " day(s)")
print("Results rounded: " + str(round(months/1440)) + " day(s)\n\n")
else:
print("Invalid method")
os.system('pause')
except ValueError:
print("Invalid input")
os.system('pause')
```
avg_line_length: 27.291667 | max_line_length: 76 | alphanum_fraction: 0.564885

---
hexsha: 794abd1670254d8b032ba59ef5f59d6dfd13f283 | size: 1,689 | ext: py | lang: Python
max_stars: google/cloud/essentialcontacts/v1/essentialcontacts-v1-py/google/cloud/essential_contacts_v1/__init__.py @ googleapis/googleapis-gen (head d84824c78563d59b0e58d5664bfaa430e9ad7e7a), licenses ["Apache-2.0"], count 7, events 2021-02-21T10:39:41.000Z → 2021-12-07T07:31:28.000Z
max_issues: same path @ googleapis/googleapis-gen (head d84824c78563d59b0e58d5664bfaa430e9ad7e7a), licenses ["Apache-2.0"], count 6, events 2021-02-02T23:46:11.000Z → 2021-11-15T01:46:02.000Z
max_forks: same path @ googleapis/googleapis-gen (head d84824c78563d59b0e58d5664bfaa430e9ad7e7a), licenses ["Apache-2.0"], count 4, events 2021-01-28T23:25:45.000Z → 2021-08-30T01:55:16.000Z
content:
```python
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .services.essential_contacts_service import EssentialContactsServiceClient
from .services.essential_contacts_service import EssentialContactsServiceAsyncClient
from .types.enums import NotificationCategory
from .types.enums import ValidationState
from .types.service import ComputeContactsRequest
from .types.service import ComputeContactsResponse
from .types.service import Contact
from .types.service import CreateContactRequest
from .types.service import DeleteContactRequest
from .types.service import GetContactRequest
from .types.service import ListContactsRequest
from .types.service import ListContactsResponse
from .types.service import SendTestMessageRequest
from .types.service import UpdateContactRequest
__all__ = (
'EssentialContactsServiceAsyncClient',
'ComputeContactsRequest',
'ComputeContactsResponse',
'Contact',
'CreateContactRequest',
'DeleteContactRequest',
'EssentialContactsServiceClient',
'GetContactRequest',
'ListContactsRequest',
'ListContactsResponse',
'NotificationCategory',
'SendTestMessageRequest',
'UpdateContactRequest',
'ValidationState',
)
```
avg_line_length: 34.469388 | max_line_length: 84 | alphanum_fraction: 0.818828

---
hexsha: 794abe94168e7c232d9581f529ed568b88ececf3 | size: 491 | ext: py | lang: Python
max_stars: plotly/validators/layout/mapbox/layer/symbol/_icon.py @ gnestor/plotly.py (head a8ae062795ddbf9867b8578fe6d9e244948c15ff), licenses ["MIT"], count 12, events 2020-04-18T18:10:22.000Z → 2021-12-06T10:11:15.000Z
max_issues: plotly/validators/layout/mapbox/layer/symbol/_icon.py @ Vesauza/plotly.py (head e53e626d59495d440341751f60aeff73ff365c28), licenses ["MIT"], count 27, events 2020-04-28T21:23:12.000Z → 2021-06-25T15:36:38.000Z
max_forks: plotly/validators/layout/mapbox/layer/symbol/_icon.py @ Vesauza/plotly.py (head e53e626d59495d440341751f60aeff73ff365c28), licenses ["MIT"], count 6, events 2020-04-18T23:07:08.000Z → 2021-11-18T07:53:06.000Z
content:
```python
import _plotly_utils.basevalidators
class IconValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self,
plotly_name='icon',
parent_name='layout.mapbox.layer.symbol',
**kwargs
):
super(IconValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop('edit_type', 'plot'),
role=kwargs.pop('role', 'style'),
**kwargs
)
```
avg_line_length: 25.842105 | max_line_length: 66 | alphanum_fraction: 0.602851

---
hexsha: 794abec225ffcfad4f89aa5eab00f36dde0406ec | size: 9,142 | ext: py | lang: Python
max_stars: salika/views/store_views.py @ BarisSari/django_crud (head ce9586c10da2f865d29d9a18e9ff5582abe5e3a0), licenses ["MIT"], count null
max_issues: salika/views/store_views.py @ BarisSari/django_crud (head ce9586c10da2f865d29d9a18e9ff5582abe5e3a0), licenses ["MIT"], count null
max_forks: salika/views/store_views.py @ BarisSari/django_crud (head ce9586c10da2f865d29d9a18e9ff5582abe5e3a0), licenses ["MIT"], count null
content:
```python
from django.views.generic.detail import DetailView
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.views.generic.list import ListView
from ..models import Store
from ..forms import StoreForm
from django.urls import reverse_lazy
from django.urls import reverse
from django.http import Http404
class StoreListView(ListView):
model = Store
template_name = "salika/store_list.html"
paginate_by = 20
context_object_name = "store_list"
allow_empty = True
page_kwarg = 'page'
paginate_orphans = 0
def __init__(self, **kwargs):
return super(StoreListView, self).__init__(**kwargs)
def dispatch(self, *args, **kwargs):
return super(StoreListView, self).dispatch(*args, **kwargs)
def get(self, request, *args, **kwargs):
return super(StoreListView, self).get(request, *args, **kwargs)
def get_queryset(self):
return super(StoreListView, self).get_queryset()
def get_allow_empty(self):
return super(StoreListView, self).get_allow_empty()
def get_context_data(self, *args, **kwargs):
ret = super(StoreListView, self).get_context_data(*args, **kwargs)
return ret
def get_paginate_by(self, queryset):
return super(StoreListView, self).get_paginate_by(queryset)
def get_context_object_name(self, object_list):
return super(StoreListView, self).get_context_object_name(object_list)
def paginate_queryset(self, queryset, page_size):
return super(StoreListView, self).paginate_queryset(queryset, page_size)
def get_paginator(self, queryset, per_page, orphans=0, allow_empty_first_page=True):
        return super(StoreListView, self).get_paginator(queryset, per_page, orphans=orphans, allow_empty_first_page=allow_empty_first_page)
def render_to_response(self, context, **response_kwargs):
return super(StoreListView, self).render_to_response(context, **response_kwargs)
def get_template_names(self):
return super(StoreListView, self).get_template_names()
class StoreDetailView(DetailView):
model = Store
template_name = "salika/store_detail.html"
context_object_name = "store"
slug_field = 'slug'
slug_url_kwarg = 'slug'
pk_url_kwarg = 'pk'
def __init__(self, **kwargs):
return super(StoreDetailView, self).__init__(**kwargs)
def dispatch(self, *args, **kwargs):
return super(StoreDetailView, self).dispatch(*args, **kwargs)
def get(self, request, *args, **kwargs):
return super(StoreDetailView, self).get(request, *args, **kwargs)
def get_object(self, queryset=None):
return super(StoreDetailView, self).get_object(queryset)
def get_queryset(self):
return super(StoreDetailView, self).get_queryset()
def get_slug_field(self):
return super(StoreDetailView, self).get_slug_field()
def get_context_data(self, **kwargs):
ret = super(StoreDetailView, self).get_context_data(**kwargs)
return ret
def get_context_object_name(self, obj):
return super(StoreDetailView, self).get_context_object_name(obj)
def render_to_response(self, context, **response_kwargs):
return super(StoreDetailView, self).render_to_response(context, **response_kwargs)
def get_template_names(self):
return super(StoreDetailView, self).get_template_names()
class StoreCreateView(CreateView):
model = Store
form_class = StoreForm
# fields = ['store_id', 'manager_staff', 'address', 'last_update']
template_name = "salika/store_create.html"
success_url = reverse_lazy("store_list")
def __init__(self, **kwargs):
return super(StoreCreateView, self).__init__(**kwargs)
def dispatch(self, request, *args, **kwargs):
return super(StoreCreateView, self).dispatch(request, *args, **kwargs)
def get(self, request, *args, **kwargs):
return super(StoreCreateView, self).get(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
return super(StoreCreateView, self).post(request, *args, **kwargs)
def get_form_class(self):
return super(StoreCreateView, self).get_form_class()
def get_form(self, form_class=None):
return super(StoreCreateView, self).get_form(form_class)
def get_form_kwargs(self, **kwargs):
return super(StoreCreateView, self).get_form_kwargs(**kwargs)
def get_initial(self):
return super(StoreCreateView, self).get_initial()
def form_invalid(self, form):
return super(StoreCreateView, self).form_invalid(form)
def form_valid(self, form):
obj = form.save(commit=False)
obj.save()
return super(StoreCreateView, self).form_valid(form)
def get_context_data(self, **kwargs):
ret = super(StoreCreateView, self).get_context_data(**kwargs)
return ret
def render_to_response(self, context, **response_kwargs):
return super(StoreCreateView, self).render_to_response(context, **response_kwargs)
def get_template_names(self):
return super(StoreCreateView, self).get_template_names()
def get_success_url(self):
return reverse("salika:store_detail", args=(self.object.pk,))
class StoreUpdateView(UpdateView):
model = Store
form_class = StoreForm
# fields = ['store_id', 'manager_staff', 'address', 'last_update']
template_name = "salika/store_update.html"
initial = {}
slug_field = 'slug'
slug_url_kwarg = 'slug'
pk_url_kwarg = 'pk'
context_object_name = "store"
def __init__(self, **kwargs):
return super(StoreUpdateView, self).__init__(**kwargs)
def dispatch(self, *args, **kwargs):
return super(StoreUpdateView, self).dispatch(*args, **kwargs)
def get(self, request, *args, **kwargs):
return super(StoreUpdateView, self).get(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
return super(StoreUpdateView, self).post(request, *args, **kwargs)
def get_object(self, queryset=None):
return super(StoreUpdateView, self).get_object(queryset)
def get_queryset(self):
return super(StoreUpdateView, self).get_queryset()
def get_slug_field(self):
return super(StoreUpdateView, self).get_slug_field()
def get_form_class(self):
return super(StoreUpdateView, self).get_form_class()
def get_form(self, form_class=None):
return super(StoreUpdateView, self).get_form(form_class)
def get_form_kwargs(self, **kwargs):
return super(StoreUpdateView, self).get_form_kwargs(**kwargs)
def get_initial(self):
return super(StoreUpdateView, self).get_initial()
def form_invalid(self, form):
return super(StoreUpdateView, self).form_invalid(form)
def form_valid(self, form):
obj = form.save(commit=False)
obj.save()
return super(StoreUpdateView, self).form_valid(form)
def get_context_data(self, **kwargs):
ret = super(StoreUpdateView, self).get_context_data(**kwargs)
return ret
def get_context_object_name(self, obj):
return super(StoreUpdateView, self).get_context_object_name(obj)
def render_to_response(self, context, **response_kwargs):
return super(StoreUpdateView, self).render_to_response(context, **response_kwargs)
def get_template_names(self):
return super(StoreUpdateView, self).get_template_names()
def get_success_url(self):
return reverse("salika:store_detail", args=(self.object.pk,))
class StoreDeleteView(DeleteView):
model = Store
template_name = "salika/store_delete.html"
slug_field = 'slug'
slug_url_kwarg = 'slug'
pk_url_kwarg = 'pk'
context_object_name = "store"
def __init__(self, **kwargs):
return super(StoreDeleteView, self).__init__(**kwargs)
def dispatch(self, *args, **kwargs):
return super(StoreDeleteView, self).dispatch(*args, **kwargs)
def get(self, request, *args, **kwargs):
raise Http404
def post(self, request, *args, **kwargs):
return super(StoreDeleteView, self).post(request, *args, **kwargs)
def delete(self, request, *args, **kwargs):
return super(StoreDeleteView, self).delete(request, *args, **kwargs)
def get_object(self, queryset=None):
return super(StoreDeleteView, self).get_object(queryset)
def get_queryset(self):
return super(StoreDeleteView, self).get_queryset()
def get_slug_field(self):
return super(StoreDeleteView, self).get_slug_field()
def get_context_data(self, **kwargs):
ret = super(StoreDeleteView, self).get_context_data(**kwargs)
return ret
def get_context_object_name(self, obj):
return super(StoreDeleteView, self).get_context_object_name(obj)
def render_to_response(self, context, **response_kwargs):
return super(StoreDeleteView, self).render_to_response(context, **response_kwargs)
def get_template_names(self):
return super(StoreDeleteView, self).get_template_names()
def get_success_url(self):
return reverse("salika:store_list")
```
avg_line_length: 34.2397 | max_line_length: 115 | alphanum_fraction: 0.698644

---
hexsha: 794abf88eb9add26c41f62c631e3b661446de974 | size: 16,233 | ext: py | lang: Python
max_stars: mindquantum/core/parameterresolver/parameterresolver.py @ mindspore-ai/mindquantum (head 785150e6b44bb79b37f2fa4a3d86edc0ab3c83ce), licenses ["Apache-2.0"], count 13, events 2021-06-04T00:47:53.000Z → 2022-03-20T14:30:38.000Z
max_issues: mindquantum/core/parameterresolver/parameterresolver.py @ mindspore-ai/mindquantum (head 785150e6b44bb79b37f2fa4a3d86edc0ab3c83ce), licenses ["Apache-2.0"], count null
max_forks: mindquantum/core/parameterresolver/parameterresolver.py @ mindspore-ai/mindquantum (head 785150e6b44bb79b37f2fa4a3d86edc0ab3c83ce), licenses ["Apache-2.0"], count 4, events 2022-01-17T02:43:34.000Z → 2022-02-20T16:03:44.000Z
content:
```python
# -*- coding: utf-8 -*-
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Parameter resolver."""
from collections.abc import Iterable
from copy import deepcopy
import numpy as np
import sympy as sp
from mindquantum import mqbackend as mb
from mindquantum.utils.type_value_check import _num_type
class ParameterResolver(dict):
"""
    A ParameterResolver sets the parameters of a parameterized quantum gate or
    a parameterized quantum circuit.
    By specifying which parameters require gradients, the PQC operator will
    calculate gradients only for those parameters.
Args:
data (dict): initial parameter names and its values. Default: None.
Examples:
>>> from mindquantum.core import ParameterResolver
>>> pr = ParameterResolver({'a': 0.3})
>>> pr['b'] = 0.5
>>> pr.no_grad_part('a')
>>> pr *= 2
>>> pr
{'a': 0.6, 'b': 1.0}
>>> pr.no_grad_parameters
{'a'}
"""
def __init__(self, data=None):
if data is None:
data = {}
if not isinstance(data, (dict, ParameterResolver)):
raise TypeError("Data require a dict or a ParameterResolver, but get {}!".format(type(data)))
for k, v in data.items():
if not isinstance(k, str):
raise TypeError("Parameter name should be a string, but get {}!".format(type(k)))
if not isinstance(v, _num_type):
raise TypeError("Require a number, but get {}, which is {}!".format(v, type(v)))
super(ParameterResolver, self).__init__(data)
self.no_grad_parameters = set()
self.requires_grad_parameters = set(self.params_name)
def get_cpp_obj(self):
"""Get cpp obj of this parameter resolver"""
return mb.parameter_resolver(self, self.no_grad_parameters, self.requires_grad_parameters)
def __setitem__(self, keys, values):
"""
        Set a parameter or a list of parameters of this parameter resolver.
By default, the parameter you set requires gradient.
Args:
keys (Union[str, list[str]]): The name of parameters.
values (Union[number, list[number]]): The value of parameters.
Raises:
            TypeError: If the key that you set is not a string or an iterable
                of strings.
"""
if isinstance(keys, str):
if not isinstance(values, _num_type):
raise TypeError("Parameter value should be a number, but get {}, which is {}!".format(
values, type(values)))
super().__setitem__(keys, values)
self.requires_grad_parameters.add(keys)
elif isinstance(keys, Iterable):
if not isinstance(values, Iterable):
raise ValueError("Values should be iterable.")
if len(values) != len(keys):
raise ValueError("Size of keys and values do not match.")
for i, k in enumerate(keys):
self.__setitem__(k, values[i])
else:
raise TypeError("Parameter name should be a string, but get {}!".format(type(keys)))
def __add__(self, pr):
"""
        Add another parameter resolver to this one.
        Returns:
            ParameterResolver, parameter resolver after adding.
        Args:
            pr (ParameterResolver): The parameter resolver to add.
Examples:
>>> from mindquantum import ParameterResolver
>>> pr1 = ParameterResolver({'a': 1})
>>> pr2 = ParameterResolver({'a': 2, 'b': 3})
>>> (pr1 + pr2).expression()
3*a + 3*b
"""
if not isinstance(pr, ParameterResolver):
raise ValueError('Require a parameter resolver, but get {}.'.format(type(pr)))
res = self * 1
pr = pr * 1
for k, v in pr.items():
if k in res:
res[k] += v
pr[k] = res[k]
res.update(pr)
return res
def __sub__(self, pr):
"""
        Subtract another parameter resolver from this one.
        Returns:
            :class:`mindquantum.core.parameterresolver.ParameterResolver`
        Args:
            pr (ParameterResolver): The parameter resolver to subtract.
Examples:
>>> from mindquantum import ParameterResolver
>>> pr1 = ParameterResolver({'a': 1})
>>> pr2 = ParameterResolver({'a': 2, 'b': 3})
>>> (pr1 - pr2).expression()
-a - 3*b
"""
return self + (-1 * pr)
def __neg__(self):
"""
Get the negative version of this parameter resolver.
Returns:
ParameterResolver, the negative version.
Examples:
>>> from mindquantum import ParameterResolver
>>> pr1 = ParameterResolver({'a': 1})
>>> (-pr1).expression()
-a
"""
return -1 * self
def __imul__(self, num):
"""
        In-place multiplication of this parameter resolver by a number.
Returns:
:class:`mindquantum.core.parameterresolver.ParameterResolver`
Args:
num (number): Multiply factor.
Examples:
>>> from mindquantum import ParameterResolver
>>> pr = ParameterResolver({'a': 1, 'b': 2})
>>> pr *= 2
>>> pr
{'a': 2, 'b': 4}
"""
no_grad_parameters = deepcopy(self.no_grad_parameters)
requires_grad_parameters = deepcopy(self.requires_grad_parameters)
for k in self.keys():
self[k] = self[k] * num
self.no_grad_parameters = no_grad_parameters
self.requires_grad_parameters = requires_grad_parameters
return self
def __mul__(self, num):
"""
        Multiply every value of the parameter resolver by num.
Returns:
:class:`mindquantum.core.parameterresolver.ParameterResolver`
Args:
num (number): Multiply factor.
Examples:
>>> from mindquantum import ParameterResolver
>>> pr1 = ParameterResolver({'a': 1, 'b': 2})
>>> pr2 = pr1 * 2
>>> pr2
{'a': 2, 'b': 4}
"""
no_grad_parameters = deepcopy(self.no_grad_parameters)
requires_grad_parameters = deepcopy(self.requires_grad_parameters)
out = deepcopy(self)
out *= num
out.no_grad_parameters = no_grad_parameters
out.requires_grad_parameters = requires_grad_parameters
return out
def __rmul__(self, num):
"""
See :class:`mindquantum.core.parameterresolver.ParameterResolver.__mul__`.
"""
return self.__mul__(num)
def __eq__(self, other):
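        """Two resolvers are equal only if their values and both gradient-flag sets match."""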
_check_pr_type(other)
no_grad_eq = self.no_grad_parameters == other.no_grad_parameters
requires_grad_eq = self.requires_grad_parameters == other.requires_grad_parameters
return super().__eq__(other) and no_grad_eq and requires_grad_eq
@property
def params_name(self):
"""
        Get the parameter names.
        Returns:
            list, a list of parameter names.
Examples:
>>> from mindquantum import ParameterResolver
>>> pr = ParameterResolver({'a': 1, 'b': 2})
>>> pr.params_name
['a', 'b']
"""
return list(self.keys())
@property
def para_value(self):
"""
        Get the parameter values.
        Returns:
            list, a list of parameter values.
Examples:
>>> from mindquantum import ParameterResolver
>>> pr = ParameterResolver({'a': 1, 'b': 2})
>>> pr.para_value
[1, 2]
"""
return list(self.values())
def requires_grad(self):
"""
Set all parameters of this parameter resolver to require gradient
calculation. Inplace operation.
Returns:
ParameterResolver, the parameter resolver itself.
Examples:
>>> from mindquantum import ParameterResolver
>>> pr = ParameterResolver({'a': 1, 'b': 2})
>>> pr.no_grad_part('a')
>>> pr.requires_grad()
>>> pr.requires_grad_parameters
{'a', 'b'}
"""
self.no_grad_parameters = set()
self.requires_grad_parameters = set(self.params_name)
return self
def no_grad(self):
"""
Set all parameters to not require gradient calculation. Inplace operation.
Returns:
ParameterResolver, the parameter resolver itself.
Examples:
>>> from mindquantum import ParameterResolver
>>> pr = ParameterResolver({'a': 1, 'b': 2})
>>> pr.no_grad()
>>> pr.requires_grad_parameters
set()
"""
self.no_grad_parameters = set(self.params_name)
self.requires_grad_parameters = set()
return self
def requires_grad_part(self, *names):
"""
        Set part of the parameters to require grad. Inplace operation.
        Args:
            names (tuple[str]): Parameters that require grad.
Returns:
ParameterResolver, the parameter resolver itself.
Examples:
>>> from mindquantum import ParameterResolver
>>> pr = ParameterResolver({'a': 1, 'b': 2})
>>> pr.no_grad()
>>> pr.requires_grad_part('a')
>>> pr.requires_grad_parameters
{'a'}
"""
for name in names:
if not isinstance(name, str):
raise TypeError("name should be a string, but get {}!".format(type(name)))
if name not in self:
raise KeyError("Parameter {} not in this parameter resolver!".format(name))
while name in self.no_grad_parameters:
self.no_grad_parameters.remove(name)
while name not in self.requires_grad_parameters:
self.requires_grad_parameters.add(name)
return self
def no_grad_part(self, *names):
"""
        Set part of the parameters to not require grad. Inplace operation.
        Args:
            names (tuple[str]): Parameters that do not require grad.
Returns:
ParameterResolver, the parameter resolver itself.
Examples:
>>> from mindquantum import ParameterResolver
>>> pr = ParameterResolver({'a': 1, 'b': 2})
>>> pr.no_grad_part('a')
>>> pr.requires_grad_parameters
{'b'}
"""
for name in names:
if not isinstance(name, str):
raise TypeError("name should be a string, but get {}!".format(type(name)))
if name not in self:
raise KeyError("Parameter {} not in this parameter resolver!".format(name))
while name not in self.no_grad_parameters:
self.no_grad_parameters.add(name)
while name in self.requires_grad_parameters:
self.requires_grad_parameters.remove(name)
return self
def update(self, others):
"""
        Update this parameter resolver with another parameter resolver.
        Args:
            others (ParameterResolver): the other parameter resolver.
Raises:
ValueError: If some parameters require grad and not require grad in
other parameter resolver and vice versa.
Examples:
>>> from mindquantum import ParameterResolver
>>> pr1 = ParameterResolver({'a': 1})
>>> pr2 = ParameterResolver({'b': 2})
>>> pr2.no_grad()
>>> pr1.update(pr2)
>>> pr1
{'a': 1, 'b': 2}
>>> pr1.no_grad_parameters
{'b'}
"""
_check_pr_type(others)
super().update(others)
conflict = (self.no_grad_parameters & others.requires_grad_parameters) | (others.no_grad_parameters
& self.requires_grad_parameters)
if conflict:
raise ValueError("Parameter conflict, {} require grad in some parameter \
resolver and not require grad in other parameter resolver ".format(conflict))
self.no_grad_parameters.update(others.no_grad_parameters)
self.requires_grad_parameters.update(others.requires_grad_parameters)
def expression(self):
"""
Get the expression of this parameter resolver.
Returns:
sympy.Expr, the symbol expression of this parameter resolver.
Examples:
>>> from mindquantum.core.parameterresolver import ParameterResolver as PR
>>> pr = PR({'a' : 2, 'b' : 0.3})
>>> pr.expression()
2*a + 0.3*b
"""
res = 0
for k, v in self.items():
res += sp.Symbol(k) * v
return res
def conjugate(self):
"""
Get the conjugate of the parameter resolver.
Returns:
ParameterResolver, the conjugate version of this parameter resolver.
Examples:
>>> from mindquantum.core.parameterresolver import ParameterResolver as PR
>>> pr = PR({'a' : 1, 'b': 1j})
>>> pr.conjugate().expression()
a - 1.0*I*b
"""
out = 1 * self
for k, v in out.items():
out[k] = np.conj(v)
return out
def combination(self, pr):
"""
        Compute the linear combination of this parameter resolver with the input pr.
Args:
pr (Union[dict, ParameterResolver]): The parameter resolver you
want to do linear combination.
Returns:
numbers.Number, the combination result.
Examples:
>>> from mindquantum import ParameterResolver
>>> pr1 = ParameterResolver({'a': 1, 'b': 2})
>>> pr2 = ParameterResolver({'a': 2, 'b': 3})
>>> pr1.combination(pr2)
8
"""
if not isinstance(pr, (ParameterResolver, dict)):
raise ValueError('Require a parameter resolver or a dict, but get {}.'.format(type(pr)))
res = 0
for k, v in self.items():
if k not in pr:
raise KeyError('{} not in input parameter resolver'.format(k))
res += v * pr[k]
return res
@property
def real(self):
"""
        Get the real part of this parameter resolver.
        Returns:
            ParameterResolver, the real part of this parameter resolver.
        Examples:
            >>> from mindquantum.core.parameterresolver import ParameterResolver as PR
            >>> pr = PR({'a': 1.2 + 1.3j})
            >>> pr.real
{'a': 1.2}
"""
out = 1 * self
for k, v in self.items():
out[k] = np.real(v)
return out
@property
def imag(self):
"""
        Get the imaginary part of this parameter resolver.
        Returns:
            ParameterResolver, the imaginary part of this parameter resolver.
        Examples:
            >>> from mindquantum.core.parameterresolver import ParameterResolver as PR
            >>> pr = PR({'a': 1.2 + 1.3j})
            >>> pr.imag
{'a': 1.3}
"""
out = 1 * self
for k, v in self.items():
out[k] = np.imag(v)
return out
def _check_pr_type(pr):
if not isinstance(pr, ParameterResolver):
raise TypeError("Require a ParameterResolver, but get {}".format(type(pr)))
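# Hedged self-check sketch (not part of the original module), exercising the
# public API defined above; numbers are chosen so the assert is easy to verify.
if __name__ == '__main__':
    demo = ParameterResolver({'a': 1.0, 'b': 2.0})
    demo.no_grad_part('b')                    # exclude 'b' from gradient calculation
    doubled = 2 * demo                        # scalar multiply keeps the grad flags
    assert doubled.combination({'a': 3, 'b': 4}) == 2 * 3 + 4 * 4  # 6 + 16 = 22
    print(doubled.expression())               # 2.0*a + 4.0*b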
| 33.608696
| 114
| 0.562558
|
794abfa34738f78f99c260e408b150aa5d1b526d
| 974
|
py
|
Python
|
RotSite/polls/views.py
|
Mot93/Website-Rotaract-Valle-del-Savena
|
3d409f2d77978331cae5fde82616f47de2b8d59b
|
[
"MIT"
] | null | null | null |
RotSite/polls/views.py
|
Mot93/Website-Rotaract-Valle-del-Savena
|
3d409f2d77978331cae5fde82616f47de2b8d59b
|
[
"MIT"
] | null | null | null |
RotSite/polls/views.py
|
Mot93/Website-Rotaract-Valle-del-Savena
|
3d409f2d77978331cae5fde82616f47de2b8d59b
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
from django.http import HttpResponse
from django.template import loader
from django.http import Http404
# Models I created
from .models import Question
def index(request):
latest_question_list = Question.objects.order_by('-pub_date')[:5]
template = loader.get_template('polls/index.html')
context = {
'latest_question_list': latest_question_list,
}
return HttpResponse(template.render(context, request))
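    # equivalent shortcut: return render(request, 'polls/index.html', context)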
def detail(request, question_id):
try:
question = Question.objects.get(pk=question_id)
except Question.DoesNotExist:
raise Http404("Question does not exist")
return render(request, 'polls/detail.html', {'question': question})
def results(request, question_id):
response = "You're looking at the results of question %s."
return HttpResponse(response % question_id)
def vote(request, question_id):
return HttpResponse("You're voting on question %s." % question_id)
| 34.785714
| 71
| 0.738193
|
794abfb9c8e1de418909af3b10b9c4e677cb140f
| 461
|
py
|
Python
|
python/test/test.py
|
mrondin1/StarSpace
|
fc2cd39472b3aadf00b6254edf00e099e13e598f
|
[
"MIT"
] | 2,914
|
2017-10-04T21:55:41.000Z
|
2022-03-31T04:25:31.000Z
|
python/test/test.py
|
mrondin1/StarSpace
|
fc2cd39472b3aadf00b6254edf00e099e13e598f
|
[
"MIT"
] | 215
|
2017-10-06T14:12:13.000Z
|
2022-03-18T06:56:27.000Z
|
python/test/test.py
|
mrondin1/StarSpace
|
fc2cd39472b3aadf00b6254edf00e099e13e598f
|
[
"MIT"
] | 483
|
2017-10-07T20:09:47.000Z
|
2022-03-01T02:23:20.000Z
|
import starwrap as sw
import numpy as np
arg = sw.args()
arg.trainFile = './input.txt'
arg.testFile = './input.txt'
arg.trainMode = 5  # trainMode 5: unsupervised word-embedding training (per StarSpace docs)
sp = sw.starSpace(arg)
sp.init()
sp.train()
# sp.evaluate()
# query the trained model for the 10 nearest neighbors of a piece of text
sp.nearestNeighbor('some text', 10)
# persist the model in binary and TSV form, then reload from each
sp.saveModel('model')
sp.saveModelTsv('model.tsv')
sp.initFromSavedModel('model')
sp.initFromTsv('model.tsv')
# getDocVector embeds a document; the second argument is the token separator
print(np.array(sp.getDocVector('this\tis\ttest', '\t')))
print(np.array(sp.getDocVector('this is test', ' ')))
| 19.208333
| 56
| 0.691974
|
794ac0413d710f7c3106974ec8a0798ef6422ab1
| 64,517
|
py
|
Python
|
tests/unit/pypyr/context_test.py
|
FooBarQuaxx/pypyr
|
ebe56b2200a53e2f38c78bbb42d466bb1556c37c
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/pypyr/context_test.py
|
FooBarQuaxx/pypyr
|
ebe56b2200a53e2f38c78bbb42d466bb1556c37c
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/pypyr/context_test.py
|
FooBarQuaxx/pypyr
|
ebe56b2200a53e2f38c78bbb42d466bb1556c37c
|
[
"Apache-2.0"
] | null | null | null |
"""context.py unit tests."""
from collections.abc import MutableMapping
from pypyr.context import Context, ContextItemInfo
from pypyr.dsl import PyString, SicString
from pypyr.errors import (
ContextError,
KeyInContextHasNoValueError,
KeyNotInContextError)
import pytest
import typing
# ------------------- behaves like a dictionary-------------------------------#
def test_context_is_dictionary_like():
"""Context should behave like a dictionary."""
# initializes to empty
d = Context()
assert d is not None
# len is not a given on custom implementations
assert len(d) == 0
# dict ctor "just works"
d = Context({'k1': 'v1', 'k2': 'v2'})
assert d
assert len(d) == 2
assert d['k1'] == 'v1'
assert d['k2'] == 'v2'
# __set_item__ assignment add and update works
d['k1'] = 'value 1'
d['k2'] = 'value 2'
d['k3'] = ['one list', 'two list', 'three list']
d['k4'] = {'kk1': 'vv1', 'kk2': 'vv2', 'kk3': 'vv3'}
d['k5'] = True
d['k6'] = ('thing', False, ['1', '2', '3'], 6)
d['k7'] = 77
assert d['k5']
    # isinstance resolves to dict - this test might become invalid if refactored
    # to a custom MutableMapping object
assert isinstance(d, dict)
assert isinstance(d, MutableMapping)
assert len(d) == 7
# items() can iterate
for k, v in d.items():
if k == 'k4':
assert isinstance(v, dict)
if k == 'k6':
assert isinstance(v, tuple)
# values() can iterate
for v in d.values():
assert v
# __get_item__ works
assert d['k1'] == 'value 1'
# update merging works
mergedic = {'k1': 'NEWVALUE'}
d.update(mergedic)
assert d['k1'] == 'NEWVALUE'
# del and clear
original_length = len(d)
del d['k1']
assert 'k1' not in d
assert len(d) == original_length - 1
d.clear()
assert len(d) == 0
def test_context_missing_override():
"""Subclass of dict should override __missing__ on KeyNotFound."""
context = Context({'arbkey': 'arbvalue'})
with pytest.raises(KeyNotInContextError):
context['notindict']
def test_context_missing_raise_key_error():
"""Context should raise error compatible with dict KeyError."""
context = Context({'arbkey': 'arbvalue'})
with pytest.raises(KeyError):
context['notindict']
# ------------------- behaves like a dictionary-------------------------------#
# ------------------- asserts ------------------------------------------------#
def test_assert_child_key_has_value_passes():
"""Pass if [parent][child] has value."""
context = Context({
'parent': {
'child': 1
}
})
context.assert_child_key_has_value('parent', 'child', 'arb')
def test_assert_child_key_has_value_raises_no_parent():
"""Raise if [parent] doesn't exist."""
context = Context({
'parent': {
'child': 1
}
})
with pytest.raises(KeyNotInContextError):
context.assert_child_key_has_value('XparentX', 'child', 'arb')
def test_assert_child_key_has_value_raises_no_child():
"""Raise if [parent][child] doesn't exist."""
context = Context({
'parent': {
'child': 1
}
})
with pytest.raises(KeyNotInContextError) as err:
context.assert_child_key_has_value('parent', 'XchildX', 'arb')
assert str(err.value) == (
"context['parent']['XchildX'] doesn't exist. It must exist for arb.")
def test_assert_child_key_has_value_raises_child_none():
"""Raise if [parent][child] is None."""
context = Context({
'parent': {
'child': None
}
})
with pytest.raises(KeyInContextHasNoValueError) as err:
context.assert_child_key_has_value('parent', 'child', 'arb')
assert str(err.value) == (
"context['parent']['child'] must have a value for arb.")
def test_assert_child_key_has_value_raises_parent_none():
"""Raise if [parent] is None."""
context = Context({
'parent': None
})
with pytest.raises(KeyInContextHasNoValueError) as err:
context.assert_child_key_has_value('parent', 'child', 'arb')
assert str(err.value) == ("context['parent'] must have a value for arb.")
def test_assert_child_key_has_value_raises_parent_not_iterable():
"""Raise if [parent] is not iterable."""
context = Context({
'parent': 1
})
with pytest.raises(ContextError) as err:
context.assert_child_key_has_value('parent', 'child', 'arb')
assert str(err.value) == ("context['parent'] must be iterable and contain "
"'child' for arb. argument of type 'int' is not "
"iterable")
def test_assert_key_exists_raises():
"""Raise KeyNotInContextError if key doesn't exist."""
context = Context({'key1': 'value1'})
with pytest.raises(KeyNotInContextError):
context.assert_key_exists('notindict', None)
def test_assert_key_exists_passes_value_none():
"""assert_key_has_value passes if context dictionary key value is None."""
context = Context({'key1': None})
context.assert_key_exists('key1', None)
def test_assert_key_exists_passes_string_values():
"""assert_key_has_value passes if context dictionary key value is None."""
context = Context({'key1': 'something', 'key2': 'other', 'key3': False})
context.assert_key_exists('key2', None)
context.assert_key_exists('key3', None)
def test_assert_keys_exist_passes():
"""Pass if list of keys all found in context dictionary."""
context = Context({'key1': 'value1', 'key2': 'value2', 'key3': 'value3'})
context.assert_keys_exist(None, 'key1', 'key3')
def test_assert_keys_exists_with_values_fails():
"""Raise KeyNotInContextError if list of keys not all found in context."""
with pytest.raises(KeyNotInContextError):
context = Context({'key1': 'value1',
'key2': 'value2',
'key3': 'value3'})
context.assert_keys_exist(None,
'key1',
'key4',
'key2',
)
def test_assert_key_has_value_fails_on_context_empty():
"""Expect KeyNotInContextError if context empty."""
context = Context()
with pytest.raises(KeyNotInContextError):
context.assert_key_has_value('key', 'desc')
def test_assert_key_has_value_fails_on_key_none():
"""Expect AssertionError if assert key is None."""
context = Context({'key1': 'value1'})
with pytest.raises(AssertionError):
context.assert_key_has_value(None, None)
def test_assert_key_has_value_fails_key_not_found():
"""Raise KeyNotInContextError if context doesn't have key on assert."""
context = Context({'key1': 'value1'})
with pytest.raises(KeyNotInContextError):
context.assert_key_has_value('notindict', None)
def test_assert_key_has_value__empty():
"""No KeyNotInContextError if key exists but value empty (not None)."""
context = Context({'key': ''})
    # must not raise: an empty string still counts as a value
context.assert_key_has_value('key', None)
def test_assert_key_has_value_fails_key_error_message():
"""Raise KeyNotInContextError if missing key, assert message correct."""
context = Context({'key1': 'value1'})
with pytest.raises(KeyNotInContextError) as err_info:
context.assert_key_has_value('notindict', 'mydesc')
assert str(err_info.value) == ("context['notindict'] "
"doesn't exist. It must exist for "
"mydesc.")
def test_assert_key_has_value_fails_key_empty():
"""Raise KeyInContextHasNoValueError if context dict key value is None."""
context = Context({'key1': None})
with pytest.raises(KeyInContextHasNoValueError):
context.assert_key_has_value('key1', None)
def test_assert_key_has_value_passes():
"""Pass if key_in_dict_has_value dictionary key has value."""
context = Context({'key1': 'value1'})
context.assert_key_has_value('key1', None)
def test_assert_key_has_bool_true_passes():
"""Pass if key_in_dict_has_value dictionary key has bool True value."""
context = Context({'key1': True})
context.assert_key_has_value('key1', None)
def test_assert_key_has_bool_false_passes():
"""Pass if key_in_dict_has_value dictionary key has bool False value."""
context = Context({'key1': False})
context.assert_key_has_value('key1', None)
def test_assert_keys_have_values_passes():
"""Pass if list of keys all found in context dictionary."""
context = Context({'key1': 'value1', 'key2': 'value2', 'key3': 'value3'})
context.assert_keys_have_values(None, 'key1', 'key3')
def test_assert_keys_have_values_fails():
"""Raise KeyNotInContextError if list of keys don't all have values."""
with pytest.raises(KeyNotInContextError):
context = Context({'key1': 'value1',
'key2': 'value2',
'key3': 'value3'})
context.assert_keys_have_values(None,
'key1',
'key4',
'key2',
)
def test_assert_key_type_value_passes():
"""assert_key_type_value passes if key exists, has value and type right."""
info = ContextItemInfo(key='key1',
key_in_context=True,
expected_type=str,
is_expected_type=True,
has_value=True)
Context().assert_key_type_value(info, None)
def test_assert_key_type_value_no_key_raises():
"""assert_key_type_value fails if key doesn't exist."""
info = ContextItemInfo(key='key1',
key_in_context=False,
expected_type=str,
is_expected_type=True,
has_value=True)
with pytest.raises(KeyNotInContextError) as err_info:
Context().assert_key_type_value(info, 'mydesc')
assert str(err_info.value) == "mydesc couldn't find key1 in context."
def test_assert_key_type_value_no_key_raises_extra_text():
"""assert_key_type_value fails if key doesn't exist."""
info = ContextItemInfo(key='key1',
key_in_context=False,
expected_type=str,
is_expected_type=True,
has_value=True)
with pytest.raises(KeyNotInContextError) as err_info:
Context().assert_key_type_value(info, 'mydesc', 'extra text here')
assert str(err_info.value) == (
"mydesc couldn't find key1 in context. extra text here")
def test_assert_key_type_value_no_value_raises():
"""assert_key_type_value fails if no value."""
info = ContextItemInfo(key='key1',
key_in_context=True,
expected_type=str,
is_expected_type=True,
has_value=False)
with pytest.raises(KeyInContextHasNoValueError) as err_info:
Context().assert_key_type_value(info, 'mydesc')
assert str(err_info.value) == ("mydesc found key1 in context but it "
"doesn\'t have a value.")
def test_assert_key_type_value_no_value_raises_extra_text():
"""assert_key_type_value fails if no value."""
info = ContextItemInfo(key='key1',
key_in_context=True,
expected_type=str,
is_expected_type=True,
has_value=False)
with pytest.raises(KeyInContextHasNoValueError) as err_info:
Context().assert_key_type_value(info, 'mydesc', 'extra text here')
assert str(err_info.value) == ("mydesc found key1 in context but it "
"doesn\'t have a value. extra text here")
def test_assert_key_type_value_wrong_type_raises():
"""assert_key_type_value fails if wrong type."""
info = ContextItemInfo(key='key1',
key_in_context=True,
expected_type=str,
is_expected_type=False,
has_value=True)
with pytest.raises(KeyInContextHasNoValueError) as err_info:
Context().assert_key_type_value(info, 'mydesc')
assert str(err_info.value) == ("mydesc found key1 in context, but "
"it\'s not a <class 'str'>.")
def test_assert_key_type_value_wrong_type_raises_with_extra_error_text():
"""assert_key_type_value fails if wrong type."""
info = ContextItemInfo(key='key1',
key_in_context=True,
expected_type=str,
is_expected_type=False,
has_value=True)
with pytest.raises(KeyInContextHasNoValueError) as err_info:
Context().assert_key_type_value(info, 'mydesc', 'extra text here')
assert str(err_info.value) == (
"mydesc found key1 in context, but "
"it\'s not a <class 'str'>. extra text here")
def test_assert_keys_type_value_passes():
"""assert_keys_type_value passes if all keys, types, values correct."""
info1 = ContextItemInfo(key='key1',
key_in_context=True,
expected_type=str,
is_expected_type=True,
has_value=True)
info2 = ContextItemInfo(key='key2',
key_in_context=True,
expected_type=str,
is_expected_type=True,
has_value=True)
info3 = ContextItemInfo(key='key3',
key_in_context=True,
expected_type=str,
is_expected_type=True,
has_value=True)
Context().assert_keys_type_value(None, '', info1, info2, info3)
def test_assert_keys_type_value_raises():
"""assert_keys_type_value raises if issue with one in the middle."""
info1 = ContextItemInfo(key='key1',
key_in_context=True,
expected_type=str,
is_expected_type=True,
has_value=True)
info2 = ContextItemInfo(key='key2',
key_in_context=True,
expected_type=str,
is_expected_type=True,
has_value=False)
info3 = ContextItemInfo(key='key3',
key_in_context=True,
expected_type=str,
is_expected_type=True,
has_value=True)
with pytest.raises(KeyInContextHasNoValueError) as err_info:
Context().assert_keys_type_value('mydesc', None, info1, info2, info3)
assert str(err_info.value) == ("mydesc found key2 in context but it "
"doesn\'t have a value.")
def test_assert_keys_type_value_raises_with_extra_error_text():
"""assert_keys_type_value raises if issue with one in the middle."""
info1 = ContextItemInfo(key='key1',
key_in_context=True,
expected_type=str,
is_expected_type=True,
has_value=True)
info2 = ContextItemInfo(key='key2',
key_in_context=True,
expected_type=str,
is_expected_type=True,
has_value=False)
info3 = ContextItemInfo(key='key3',
key_in_context=True,
expected_type=str,
is_expected_type=True,
has_value=True)
with pytest.raises(KeyInContextHasNoValueError) as err_info:
Context().assert_keys_type_value('mydesc',
'extra text here',
info1,
info2,
info3)
assert str(err_info.value) == ("mydesc found key2 in context but it "
"doesn\'t have a value. extra text here")
# ------------------- asserts ------------------------------------------------#
# ------------------- get_eval -----------------------------------------------#
def test_get_eval_string_bool():
"""Bool eval."""
context = Context({'key1': 'down', 'key2': 'valleys', 'key3': 'value3'})
input_string = 'key1 == \'down\''
output = context.get_eval_string(input_string)
assert isinstance(output, bool)
assert output
def test_get_eval_string_builtins():
"""Built-in on eval."""
context = Context({'key1': 'down', 'key2': 'valleys', 'key3': 'value3'})
input_string = 'len(key1)'
assert context.get_eval_string(input_string) == 4
# ------------------- end get_eval--------------------------------------------#
# ------------------- formats ------------------------------------------------#
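# Note: get_formatted('key') formats the value stored at context['key'], while
# get_formatted_string(s) formats a literal input string against the context.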
def test_string_interpolate_works():
"""Interpolate works."""
context = Context({'key1': 'down', 'key2': 'valleys', 'key3': 'value3'})
context['input_string'] = 'Piping {key1} the {key2} wild'
output = context.get_formatted('input_string')
assert output == 'Piping down the valleys wild', (
"string interpolation incorrect")
def test_string_interpolate_works_with_no_swaps():
"""Interpolate no swap."""
context = Context({'key1': 'down', 'key2': 'valleys', 'key3': 'value3'})
context['input_string'] = 'Piping down the valleys wild'
output = context.get_formatted('input_string')
assert output == 'Piping down the valleys wild', (
"string interpolation incorrect")
def test_string_interpolate_escapes_double_curly():
"""Interpolate double curly escape."""
context = Context({'key1': 'down', 'key2': 'valleys', 'key3': 'value3'})
context['input_string'] = 'Piping {{ down the valleys wild'
output = context.get_formatted('input_string')
assert output == 'Piping { down the valleys wild', (
"string interpolation incorrect")
def test_string_interpolate_escapes_double_curly_pair():
"""Interpolate double double curly."""
context = Context({'key1': 'down', 'key2': 'valleys', 'key3': 'value3'})
context['input_string'] = 'Piping {{down}} the valleys wild'
output = context.get_formatted('input_string')
assert output == 'Piping {down} the valleys wild', (
"string interpolation incorrect")
def test_string_interpolate_sic():
"""Interpolate ignore sic."""
context = Context({'key1': 'down', 'key2': 'valleys', 'key3': 'value3'})
context['input_string'] = SicString("Piping {key1} the {key2} wild")
output = context.get_formatted('input_string')
assert output == 'Piping {key1} the {key2} wild', (
"string interpolation incorrect")
def test_string_interpolate_py():
"""Interpolate do py."""
context = Context({'key1': 'down', 'key2': 'valleys', 'key3': 'value3'})
context['input_string'] = PyString("len(key1) + len(key2)")
output = context.get_formatted('input_string')
assert output == 11, (
"string interpolation incorrect")
def test_single_curly_should_throw():
"""Interpolate single curly raise."""
with pytest.raises(ValueError):
context = Context({'key1': 'value1'})
context['input_string'] = '{key1} this { is {key2} string'
context.get_formatted('input_string')
def test_tag_not_in_context_should_throw():
"""Interpolate key not in context raises."""
with pytest.raises(KeyNotInContextError) as err:
context = Context({'key1': 'value1'})
context['input_string'] = '{key1} this is {key2} string'
context.get_formatted('input_string')
assert str(err.value) == (
"Unable to format '{key1} this is "
"{key2} string' at context['input_string'], because "
"key2 not found in the pypyr context.")
def test_context_item_not_a_string_should_return_as_is():
"""Interpolate non-string."""
context = Context({'key1': 'value1'})
context['input_string'] = 77
val = context.get_formatted('input_string')
assert val == 77
def test_context_item_list_should_iterate():
"""Interpolate iterable."""
context = Context({'key1': 'value1'})
context['input_string'] = ['string1', '{key1}', 'string3']
val = context.get_formatted('input_string')
assert val == ['string1', 'value1', 'string3']
def test_input_string_interpolate_works():
"""Interpolate strings."""
context = Context({'key1': 'down', 'key2': 'valleys', 'key3': 'value3'})
input_string = 'Piping {key1} the {key2} wild'
output = context.get_formatted_string(input_string)
assert output == 'Piping down the valleys wild', (
"string interpolation incorrect")
def test_input_string_tag_not_in_context_should_throw():
"""Interpolate not in context."""
with pytest.raises(KeyNotInContextError) as err_info:
context = Context({'key1': 'value1'})
input_string = '{key1} this is {key2} string'
context.get_formatted_string(input_string)
assert str(err_info.value) == (
"Unable to format '{key1} this is {key2} "
"string' because key2 not found in the pypyr context.")
def test_input_string_interpolate_sic():
"""Interpolate sic."""
context = Context({'key1': 'down', 'key2': 'valleys', 'key3': 'value3'})
input_string = SicString("Piping {key1} the {key2} wild")
output = context.get_formatted_string(input_string)
assert output == "Piping {key1} the {key2} wild", (
"string interpolation incorrect")
def test_input_string_interpolate_sic_singlequote():
"""Interpolate sic with quotes."""
context = Context({'key1': 'down', 'key2': 'valleys', 'key3': 'value3'})
input_string = SicString('Piping {key1} the {key2} wild')
output = context.get_formatted_string(input_string)
assert output == "Piping {key1} the {key2} wild", (
"string interpolation incorrect")
def test_input_string_interpolate_py_singlequote():
"""Interpolate py single quotes."""
context = Context({'key1': 'down', 'key2': 'valleys', 'key3': 'value3'})
input_string = PyString('len(key1) * len(key2)')
output = context.get_formatted_string(input_string)
assert output == 28, (
"string interpolation incorrect")
def test_input_string_not_a_string_throw():
"""Interpolate takes string."""
with pytest.raises(TypeError) as err_info:
context = Context({'key1': 'value1'})
input_string = 77
context.get_formatted_string(input_string)
assert str(err_info.value) == (
"can only format on strings. 77 is a <class 'int'> instead.")
def test_get_formatted_iterable_list():
"""Simple list."""
input_obj = ['k1', 'k2', '{ctx3}', True, False, 44]
context = Context(
{'ctx1': 'ctxvalue1', 'ctx2': 'ctxvalue2', 'ctx3': 'ctxvalue3'})
output = context.get_formatted_iterable(input_obj)
assert output is not input_obj
assert output[0] == 'k1'
assert output[1] == 'k2'
assert output[2] == 'ctxvalue3'
assert output[3]
assert not output[4]
assert output[5] == 44
def test_get_formatted_iterable_tuple():
"""Simple tuple."""
input_obj = ('k1', 'k2', '{ctx3}', True, False, 44)
context = Context(
{'ctx1': 'ctxvalue1', 'ctx2': 'ctxvalue2', 'ctx3': 'ctxvalue3'})
output = context.get_formatted_iterable(input_obj)
assert output is not input_obj
assert output[0] == 'k1'
assert output[1] == 'k2'
assert output[2] == 'ctxvalue3'
assert output[3]
assert not output[4]
assert output[5] == 44
def test_get_formatted_iterable_set():
"""Simple set."""
input_obj = {'k1', 'k2', '{ctx3}', True, False, 44}
context = Context(
{'ctx1': 'ctxvalue1', 'ctx2': 'ctxvalue2', 'ctx3': 'ctxvalue3'})
output = context.get_formatted_iterable(input_obj)
assert output is not input_obj
assert len(output) == len(input_obj)
diffs = output - input_obj
assert len(diffs) == 1
assert 'ctxvalue3' in diffs
def test_get_formatted_immutable_mapping():
"""Simple read-only mapping test."""
class ReadOnlyMapping(typing.Mapping):
def __init__(self, *args, **kwargs):
self._data = dict(*args, **kwargs)
def __getitem__(self, key):
return self._data[key]
def __len__(self):
return len(self._data)
def __iter__(self):
return iter(self._data)
input_obj = {'key': '{ctx}'}
context = Context(
{'ctx': ReadOnlyMapping({'arb': 1})})
output = context.get_formatted_iterable(input_obj)
assert output is not input_obj
assert isinstance(output['key'], ReadOnlyMapping)
assert output['key'] == {'arb': 1}
def test_get_formatted_iterable_nested():
"""Straight deepish copy with no formatting."""
# dict containing dict, list, dict-list-dict, tuple, dict-tuple-list
input_obj = {'k1': 'v1',
'k2': 'v2',
'k3': 'v3',
'k4': [
1,
2,
'3here',
{'key4.1': 'value4.1',
'key4.2': 'value4.2',
'key4.3': {
'4.3.1': '4.3.1value',
'4.3.2': '4.3.2value'}}
],
'k5': {'key5.1': 'value5.1', 'key5.2': 'value5.2'},
'k6': ('six6.1', False, [0, 1, 2], 77, 'sixend'),
'k7': 'simple string to close 7'
}
context = Context(
{'ctx1': 'ctxvalue1', 'ctx2': 'ctxvalue2', 'ctx3': 'ctxvalue3'})
output = context.get_formatted_iterable(input_obj)
assert output == input_obj
assert output is not context
    # verify this was a deep copy - obj refs have to be different for nested
assert id(output['k4']) != id(input_obj['k4'])
assert id(output['k4'][3]['key4.3']) != id(input_obj['k4'][3]['key4.3'])
assert id(output['k5']) != id(input_obj['k5'])
assert id(output['k6']) != id(input_obj['k6'])
assert id(output['k6'][2]) != id(input_obj['k6'][2])
assert id(output['k7']) == id(input_obj['k7'])
# and proving the theory: mutating output does not touch input
assert output['k4'][1] == 2
output['k4'][1] = 88
assert input_obj['k4'][1] == 2
assert output['k4'][1] == 88
def test_get_formatted_iterable_nested_with_formatting():
"""Straight deepish copy with formatting."""
# dict containing dict, list, dict-list-dict, tuple, dict-tuple-list, bytes
input_obj = {'k1': 'v1',
'k2': 'v2_{ctx1}',
'k3': bytes('v3{ctx1}', encoding='utf-8'),
'k4': [
1,
2,
'3_{ctx4}here',
{'key4.1': 'value4.1',
'{ctx2}_key4.2': 'value_{ctx3}_4.2',
'key4.3': {
'4.3.1': '4.3.1value',
'4.3.2': '4.3.2_{ctx1}_value'}}
],
'k5': {'key5.1': 'value5.1', 'key5.2': 'value5.2'},
'k6': ('six6.1', False, [0, 1, 2], 77, 'six_{ctx1}_end'),
'k7': 'simple string to close 7'
}
context = Context(
{'ctx1': 'ctxvalue1',
'ctx2': 'ctxvalue2',
'ctx3': 'ctxvalue3',
'ctx4': 'ctxvalue4'})
output = context.get_formatted_iterable(input_obj)
assert output != input_obj
# verify formatted strings
assert input_obj['k2'] == 'v2_{ctx1}'
assert output['k2'] == 'v2_ctxvalue1'
assert input_obj['k3'] == b'v3{ctx1}'
assert output['k3'] == b'v3{ctx1}'
assert input_obj['k4'][2] == '3_{ctx4}here'
assert output['k4'][2] == '3_ctxvalue4here'
assert input_obj['k4'][3]['{ctx2}_key4.2'] == 'value_{ctx3}_4.2'
assert output['k4'][3]['ctxvalue2_key4.2'] == 'value_ctxvalue3_4.2'
assert input_obj['k4'][3]['key4.3']['4.3.2'] == '4.3.2_{ctx1}_value'
assert output['k4'][3]['key4.3']['4.3.2'] == '4.3.2_ctxvalue1_value'
assert input_obj['k6'][4] == 'six_{ctx1}_end'
assert output['k6'][4] == 'six_ctxvalue1_end'
    # verify this was a deep copy - obj refs have to be different for nested
assert id(output['k4']) != id(input_obj['k4'])
assert id(output['k4'][3]['key4.3']) != id(input_obj['k4'][3]['key4.3'])
assert id(output['k5']) != id(input_obj['k5'])
assert id(output['k6']) != id(input_obj['k6'])
assert id(output['k6'][2]) != id(input_obj['k6'][2])
# strings are interned in python, so id is the same
assert id(output['k7']) == id(input_obj['k7'])
output['k7'] = 'mutate 7 on new'
assert input_obj['k7'] == 'simple string to close 7'
assert output['k7'] == 'mutate 7 on new'
def test_get_formatted_iterable_nested_with_sic():
"""Straight deepish copy with formatting."""
# dict containing dict, list, dict-list-dict, tuple, dict-tuple-list, bytes
input_obj = {'k1': 'v1',
'k2': 'v2_{ctx1}',
'k3': bytes('v3{ctx1}', encoding='utf-8'),
'k4': [
1,
2,
'3_{ctx4}here',
{'key4.1': 'value4.1',
'{ctx2}_key4.2': SicString("value_{ctx3}_4.2"),
'key4.3': {
'4.3.1': '4.3.1value',
'4.3.2': '4.3.2_{ctx1}_value'}}
],
'k5': {'key5.1': 'value5.1', 'key5.2': 'value5.2'},
'k6': ('six6.1', False, [0, 1, 2], 77, 'six_{ctx1}_end'),
'k7': 'simple string to close 7'
}
context = Context(
{'ctx1': 'ctxvalue1',
'ctx2': 'ctxvalue2',
'ctx3': 'ctxvalue3',
'ctx4': 'ctxvalue4'})
output = context.get_formatted_iterable(input_obj)
assert output != input_obj
# verify formatted strings
assert input_obj['k2'] == 'v2_{ctx1}'
assert output['k2'] == 'v2_ctxvalue1'
assert input_obj['k3'] == b'v3{ctx1}'
assert output['k3'] == b'v3{ctx1}'
assert input_obj['k4'][2] == '3_{ctx4}here'
assert output['k4'][2] == '3_ctxvalue4here'
assert input_obj['k4'][3]['{ctx2}_key4.2'] == SicString("value_{ctx3}_4.2")
assert output['k4'][3]['ctxvalue2_key4.2'] == 'value_{ctx3}_4.2'
assert input_obj['k4'][3]['key4.3']['4.3.2'] == '4.3.2_{ctx1}_value'
assert output['k4'][3]['key4.3']['4.3.2'] == '4.3.2_ctxvalue1_value'
assert input_obj['k6'][4] == 'six_{ctx1}_end'
assert output['k6'][4] == 'six_ctxvalue1_end'
    # verify this was a deep copy - obj refs have to be different for nested
assert id(output['k4']) != id(input_obj['k4'])
assert id(output['k4'][3]['key4.3']) != id(input_obj['k4'][3]['key4.3'])
assert id(output['k5']) != id(input_obj['k5'])
assert id(output['k6']) != id(input_obj['k6'])
assert id(output['k6'][2]) != id(input_obj['k6'][2])
# strings are interned in python, so id is the same
assert id(output['k7']) == id(input_obj['k7'])
output['k7'] = 'mutate 7 on new'
assert input_obj['k7'] == 'simple string to close 7'
assert output['k7'] == 'mutate 7 on new'
def test_get_formatted_iterable_non_string_key():
"""Format context with non-strings in keys."""
input_obj = {'k1': 'v1',
'k2': 'v2_{ctx1}',
'k3': bytes('v3{ctx1}', encoding='utf-8'),
'k4': [
1,
2,
'3_{ctx4}here',
{'key4.1': 'value4.1',
'{ctx2}_key4.2': SicString("value_{ctx3}_4.2"),
'key4.3': {
'4.3.1': '4.3.1value',
'4.3.2': '4.3.2_{ctx1}_value',
7: '4.3.3_{ctx4}_value'}}
],
'k5': {'key5.1': 'value5.1', 'key5.2': 'value5.2'},
'k6': ('six6.1', False, [0, 1, 2], 77, 'six_{ctx1}_end'),
'k7': 'simple string to close 7',
6: {7, 89}
}
context = Context(
{'ctx1': 'ctxvalue1',
'ctx2': 'ctxvalue2',
'ctx3': 'ctxvalue3',
'ctx4': 'ctxvalue4',
5: [1, 2, 3]})
output = context.get_formatted_iterable(input_obj)
assert output != input_obj
assert output == {'k1': 'v1',
'k2': 'v2_ctxvalue1',
'k3': bytes('v3{ctx1}', encoding='utf-8'),
'k4': [
1,
2,
'3_ctxvalue4here',
{'key4.1': 'value4.1',
'ctxvalue2_key4.2': "value_{ctx3}_4.2",
'key4.3': {
'4.3.1': '4.3.1value',
'4.3.2': '4.3.2_ctxvalue1_value',
7: '4.3.3_ctxvalue4_value'}}
],
'k5': {'key5.1': 'value5.1', 'key5.2': 'value5.2'},
'k6': ('six6.1', False, [0, 1, 2], 77,
'six_ctxvalue1_end'),
'k7': 'simple string to close 7',
6: {7, 89}
}
def test_get_formatted_iterable_with_memo():
"""Straight deepish copy with formatting."""
arb_dict = {'key4.1': 'value4.1',
'{ctx2}_key4.2': 'value_{ctx3}_4.2',
'key4.3': {
'4.3.1': '4.3.1value',
'4.3.2': '4.3.2_{ctx1}_value'}}
arb_list = [0, 1, 2]
arb_string = 'arb string'
arb_string_with_formatting = 'a {ctx1} string'
input_obj = {'k1': arb_string,
'k2': 'v2_{ctx1}',
'k3': arb_list,
'k4': [
arb_dict,
2,
'3_{ctx4}here',
arb_dict
],
'k5': {'key5.1': arb_string,
'key5.2': arb_string_with_formatting},
'k6': ('six6.1', False, arb_list, 77, 'six_{ctx1}_end'),
'k7': 'simple string to close 7',
'k8': arb_string_with_formatting
}
context = Context(
{'ctx1': 'ctxvalue1',
'ctx2': 'ctxvalue2',
'ctx3': 'ctxvalue3',
'ctx4': 'ctxvalue4'})
output = context.get_formatted_iterable(input_obj)
# same obj re-used at different levels of the hierarchy
assert id(input_obj['k3']) == id(input_obj['k6'][2])
assert id(input_obj['k4'][0]) == id(input_obj['k4'][3])
assert output != input_obj
# verify formatted strings
assert input_obj['k2'] == 'v2_{ctx1}'
assert output['k2'] == 'v2_ctxvalue1'
assert input_obj['k4'][2] == '3_{ctx4}here'
assert output['k4'][2] == '3_ctxvalue4here'
assert input_obj['k4'][3]['{ctx2}_key4.2'] == 'value_{ctx3}_4.2'
assert output['k4'][3]['ctxvalue2_key4.2'] == 'value_ctxvalue3_4.2'
assert input_obj['k4'][3]['key4.3']['4.3.2'] == '4.3.2_{ctx1}_value'
assert output['k4'][3]['key4.3']['4.3.2'] == '4.3.2_ctxvalue1_value'
assert input_obj['k6'][4] == 'six_{ctx1}_end'
assert output['k6'][4] == 'six_ctxvalue1_end'
    # verify this was a deep copy - obj refs have to be different for nested
assert id(output['k4']) != id(input_obj['k4'])
assert id(output['k4'][3]['key4.3']) != id(input_obj['k4'][3]['key4.3'])
assert id(output['k5']) != id(input_obj['k5'])
assert id(output['k6']) != id(input_obj['k6'])
assert id(output['k6'][2]) != id(input_obj['k6'][2])
assert id(output['k7']) == id(input_obj['k7'])
output['k7'] = 'mutate 7 on new'
assert input_obj['k7'] == 'simple string to close 7'
assert input_obj['k8'] == arb_string_with_formatting
assert output['k8'] == 'a ctxvalue1 string'
    # memo caused object re-use, so the same obj appears at different levels
    # of the hierarchy
assert id(output['k3']) == id(output['k6'][2])
assert id(output['k4']) != id(input_obj['k4'])
assert id(output['k4'][0]) == id(output['k4'][3])
assert output['k5']['key5.1'] == input_obj['k5']['key5.1'] == arb_string
assert id(output['k5']['key5.1']) == id(
input_obj['k5']['key5.1']) == id(arb_string)
assert id(output['k8']) == id(output['k5']['key5.2'])
assert id(output['k8']) != id(arb_string_with_formatting)
def test_iter_formatted():
"""On iter_formatted yields a formatted string on each loop."""
context = Context(
{'ctx1': 'ctxvalue1',
'ctx2': 'ctxvalue2',
'ctx3': 'ctxvalue3',
'ctx4': 'ctxvalue4'})
input_strings = [
"this {ctx1} is {ctx2} line 1",
"this is {ctx3} line 2",
"this is line 3",
"this {ctx4} is line 4"
]
output = list(context.iter_formatted_strings(input_strings))
assert output[0] == "this ctxvalue1 is ctxvalue2 line 1"
assert output[1] == "this is ctxvalue3 line 2"
assert output[2] == "this is line 3"
assert output[3] == "this ctxvalue4 is line 4"
def test_get_formatted_as_type_string_to_bool_no_subst():
"""On get_formatted_as_type returns bool no formatting."""
context = Context()
result = context.get_formatted_as_type('False', out_type=bool)
assert isinstance(result, bool)
assert not result
def test_get_formatted_as_type_string_to_true_bool_no_subst():
"""On get_formatted_as_type returns bool no formatting."""
context = Context()
result = context.get_formatted_as_type('True', out_type=bool)
assert isinstance(result, bool)
assert result
def test_get_formatted_as_type_bool_false_no_subst():
"""On get_formatted_as_type returns bool no formatting."""
context = Context()
result = context.get_formatted_as_type(False, out_type=bool)
assert isinstance(result, bool)
assert not result
def test_get_formatted_as_type_bool_true_no_subst():
"""On get_formatted_as_type returns bool no formatting."""
context = Context()
result = context.get_formatted_as_type(None, True, out_type=bool)
assert isinstance(result, bool)
assert result
def test_get_formatted_as_type_bool_false_with_subst():
"""On get_formatted_as_type returns bool with formatting."""
context = Context({'k1': False})
result = context.get_formatted_as_type(None, '{k1}', out_type=bool)
assert isinstance(result, bool)
assert not result
def test_get_formatted_as_type_bool_true_with_subst():
"""On get_formatted_as_type returns bool with formatting."""
context = Context({'k1': True})
result = context.get_formatted_as_type(None, '{k1}', out_type=bool)
assert isinstance(result, bool)
assert result
def test_get_formatted_as_type_bool_true_with_list_input():
"""On get_formatted_as_type returns bool True with arbitrary input."""
context = Context({'k1': True})
result = context.get_formatted_as_type([0, 1, 2], out_type=bool)
assert isinstance(result, bool)
assert result
def test_get_formatted_as_type_bool_false_with_empty_list_input():
"""On get_formatted_as_type returns bool false with empty input."""
context = Context({'k1': True})
result = context.get_formatted_as_type([], out_type=bool)
assert isinstance(result, bool)
assert not result
def test_get_formatted_as_type_bool_false_with_0_input():
"""On get_formatted_as_type returns bool False with 0 input."""
context = Context({'k1': True})
result = context.get_formatted_as_type(0, out_type=bool)
assert isinstance(result, bool)
assert not result
def test_get_formatted_as_type_bool_false_with_string_capital_false():
"""On get_formatted_as_type returns bool False with string FALSE."""
context = Context({'k1': True})
result = context.get_formatted_as_type('FALSE', out_type=bool)
assert isinstance(result, bool)
assert not result
def test_get_formatted_as_type_bool_true_with_1_input():
"""On get_formatted_as_type returns bool True with int 1 input."""
context = Context({'k1': True})
result = context.get_formatted_as_type(1, out_type=bool)
assert isinstance(result, bool)
assert result
def test_get_formatted_as_type_bool_true_with_decimal_input():
"""On get_formatted_as_type returns bool True with decimal input."""
context = Context({'k1': True})
result = context.get_formatted_as_type(1.1, out_type=bool)
assert isinstance(result, bool)
assert result
def test_get_formatted_as_type_bool_true_with_str_true():
"""On get_formatted_as_type returns bool True with string true."""
context = Context({'k1': True})
result = context.get_formatted_as_type('true', out_type=bool)
assert isinstance(result, bool)
assert result
def test_get_formatted_as_type_bool_true_with_str_capital_true():
"""On get_formatted_as_type returns bool True with string TRUE."""
context = Context({'k1': True})
result = context.get_formatted_as_type('TRUE', out_type=bool)
assert isinstance(result, bool)
assert result
def test_get_formatted_as_type_bool_true_with_str_1_true():
"""On get_formatted_as_type returns bool True with string 1."""
context = Context({'k1': True})
result = context.get_formatted_as_type('1', out_type=bool)
assert isinstance(result, bool)
assert result
def test_get_formatted_as_type_bool_true_with_pystring_true():
"""On get_formatted_as_type returns bool True with py string True."""
context = Context({'k1': True})
result = context.get_formatted_as_type(PyString('k1 and True'),
out_type=bool)
assert isinstance(result, bool)
assert result
def test_get_formatted_as_type_bool_false_with_pystring_false():
"""On get_formatted_as_type returns bool True with py string True."""
context = Context({'k1': True})
result = context.get_formatted_as_type(PyString('not k1'), out_type=bool)
assert isinstance(result, bool)
assert not result
def test_get_formatted_as_type_int_no_subst():
"""On get_formatted_as_type returns int no formatting."""
context = Context()
result = context.get_formatted_as_type('10', out_type=int)
assert isinstance(result, int)
assert result == 10
def test_get_formatted_as_type_int_with_subst():
"""On get_formatted_as_type returns int no formatting."""
context = Context({'k1': 10})
result = context.get_formatted_as_type('{k1}', out_type=int)
assert isinstance(result, int)
assert result == 10
def test_get_formatted_as_type_float_no_subst():
"""On get_formatted_as_type returns float no formatting."""
context = Context()
result = context.get_formatted_as_type('10.1', out_type=float)
assert isinstance(result, float)
assert result == 10.1
def test_get_formatted_as_type_default_no_subst():
"""On get_formatted_as_type returns default no formatting."""
context = Context()
result = context.get_formatted_as_type(None, default=10, out_type=int)
assert isinstance(result, int)
assert result == 10
def test_get_formatted_as_type_default_with_subst():
"""On get_formatted_as_type returns default with formatting."""
context = Context({'k1': 10})
result = context.get_formatted_as_type(
None, default='{k1}', out_type=int)
assert isinstance(result, int)
assert result == 10
def test_get_formatted_as_type_default_with_subst_str():
"""On get_formatted_as_type returns default with formatting."""
context = Context({'k1': 10})
result = context.get_formatted_as_type(
None, default='xx{k1}xx')
assert isinstance(result, str)
assert result == 'xx10xx'
def test_get_formatted_value_string():
"""Format input strings."""
context = Context({'k1': 10})
assert context.get_formatted_value('{k1}') == 10
def test_get_formatted_value_int():
"""Format input int."""
context = Context({'k1': 10})
assert context.get_formatted_value(11) == 11
def test_get_formatted_value_pystring():
"""Format input pystring."""
context = Context({'k1': 10})
out = context.get_formatted_value(PyString('11'))
assert out == 11
assert isinstance(out, int)
def test_get_formatted_value_bool():
"""Format input int."""
context = Context({'k1': 10})
assert not context.get_formatted_value(False)
def test_get_formatted_value_dict():
"""Format input dict."""
context = Context({'k1': 10})
assert context.get_formatted_value({'{k1}', 12}) == {10, 12}
def test_get_formatted_value_list():
"""Format input list."""
context = Context({'k1': 10})
assert context.get_formatted_value(['{k1}', 12, 13]) == [10, 12, 13]
def test_get_processed_string_no_interpolation():
"""On get_processed_string on plain string returns plain."""
context = Context(
{'ctx1': 'ctxvalue1',
'ctx2': 'ctxvalue2',
'ctx3': 'ctxvalue3',
'ctx4': 'ctxvalue4'})
input_string = 'test string here'
output = context.get_processed_string(input_string)
assert input_string == output
def test_get_processed_string_with_interpolation():
"""Process string with interpolation."""
context = Context({'key1': 'down', 'key2': 'valleys', 'key3': 'value3'})
input_string = 'Piping {key1} the {key2} wild'
output = context.get_processed_string(input_string)
assert output == 'Piping down the valleys wild', (
"string interpolation incorrect")
def test_get_processed_string_shorter_than_6_with_interpolation():
"""Process string with interpolation."""
context = Context({'k': 'down', 'key2': 'valleys', 'key3': 'value3'})
input_string = '{k}'
output = context.get_processed_string(input_string)
assert output == 'down', (
"string interpolation incorrect")
def test_get_processed_string_shorter_than_6_no_interpolation():
"""Process string with no interpolation."""
context = Context()
input_string = 'k'
output = context.get_processed_string(input_string)
assert output == 'k', (
"string interpolation incorrect")
def test_get_processed_string_sic_skips_interpolation():
"""Process string with sic interpolation."""
context = Context({'key1': 'down', 'key2': 'valleys', 'key3': 'value3'})
input_string = SicString("Piping {key1} the {key2} wild")
output = context.get_processed_string(input_string)
assert output == 'Piping {key1} the {key2} wild', (
"string interpolation incorrect")
def test_get_processed_string_pystring_double_quote():
"""Process string with double quotes interpolation."""
context = Context({'key1': 'down', 'key2': 'valleys', 'key3': 'value3'})
input_string = PyString("key1 == 'down'")
output = context.get_processed_string(input_string)
assert isinstance(output, bool)
assert output
def test_get_processed_string_pystring_single_quote():
"""Process string with py string interpolation."""
context = Context({'key1': 2, 'key2': -3, 'key3': 'value3'})
input_string = PyString('abs(key1+key2)')
output = context.get_processed_string(input_string)
assert isinstance(output, int)
assert output == 1
def test_get_processed_string_single_expression_keeps_type():
"""Process string with interpolation honors type."""
context = Context(
{'ctx1': 'ctxvalue1',
'ctx2': 'ctxvalue2',
'ctx3': [0, 1, 3],
'ctx4': 'ctxvalue4'})
input_string = '{ctx3}'
output = context.get_processed_string(input_string)
assert output == [0, 1, 3]
assert isinstance(output, list)
def test_get_processed_string_single_expression_keeps_type_and_iterates():
"""Process string with interpolation on iterable."""
context = Context(
{'ctx1': 'ctxvalue1',
'ctx2': 'ctxvalue2',
'ctx3': [0,
{'s1': 'v1',
'{ctx1}': '{ctx2}',
's3': [0, '{ctx4}']}, 3],
'ctx4': 'ctxvalue4'})
input_string = '{ctx3}'
output = context.get_processed_string(input_string)
assert output == [0,
{'s1': 'v1',
'ctxvalue1': 'ctxvalue2',
's3': [0, 'ctxvalue4']}, 3]
def test_get_processed_string_leading_literal():
"""Process string with interpolation leading literal."""
context = Context({'k': 'down', 'key2': 'valleys', 'key3': 'value3'})
input_string = 'leading literal{k}'
output = context.get_processed_string(input_string)
assert output == 'leading literaldown', (
"string interpolation incorrect")
def test_get_processed_string_following_literal():
"""Process string with interpolation literal end."""
context = Context({'k': 'down', 'key2': 'valleys', 'key3': 'value3'})
input_string = '{k}following literal'
output = context.get_processed_string(input_string)
assert output == 'downfollowing literal', (
"string interpolation incorrect")
# ------------------- formats ------------------------------------------------#
# ------------------- key info -----------------------------------------------#
def test_key_in_context():
"""Assert key in context."""
context = Context({'k1': 'v1', 'k2': False, 'k3': ['one', 'two']})
k1, = context.keys_exist('k1')
assert k1
k1, k2, k3 = context.keys_exist('k1', 'k2', 'k3')
assert k1 and k2 and k3
k4, k2, k1 = context.keys_exist('k4', 'k2', 'k1')
assert k1 and k2 and not k4
def test_keys_of_type_exist_single():
"""Assert key in context."""
"""return a single tuple."""
context = Context({'k1': 'v1', 'k2': False, 'k3': ['one', 'two']})
k1, = context.keys_of_type_exist(('k1', str),)
assert k1
assert k1.key == 'k1'
assert k1.key_in_context
assert k1.expected_type is str
assert k1.is_expected_type
assert k1.has_value
def test_keys_of_type_exist_triple():
"""Assert key in context."""
context = Context({'k1': 'v1', 'k2': False, 'k3': ['one', 'two']})
k3, k2, k1 = context.keys_of_type_exist(
('k3', list),
('k2', list),
('k1', str)
)
assert k1
assert k1.key == 'k1'
assert k1.key_in_context
assert k1.expected_type is str
assert k1.is_expected_type
assert k1.has_value
assert k2
assert k2.key == 'k2'
assert k2.key_in_context
assert k2.expected_type is list
assert not k2.is_expected_type
assert k2.has_value
assert k3
assert k3.key == 'k3'
assert k3.key_in_context
assert k3.expected_type is list
assert k3.is_expected_type
assert k3.has_value
def test_keys_none_exist():
"""Assert key not in context."""
context = Context({'k1': 'v1', 'k2': False, 'k3': ['one', 'two']})
k4, = context.keys_of_type_exist(
('k4', list)
)
k5, k6 = context.keys_of_type_exist(
('k5', bool),
('k6', list),
)
assert k4
assert k4.key == 'k4'
assert not k4.key_in_context
assert k4.expected_type is list
assert k4.is_expected_type is None
assert not k4.has_value
assert k5
assert k5.key == 'k5'
assert not k5.key_in_context
assert k5.expected_type is bool
assert k5.is_expected_type is None
assert not k5.has_value
assert k6
assert k6.key == 'k6'
assert not k6.key_in_context
assert k6.expected_type is list
assert k6.is_expected_type is None
assert not k6.has_value
# ------------------- key info -----------------------------------------------#
# ------------------- merge --------------------------------------------------#
def test_merge_pass_no_substitutions():
"""Merge success case with no substitutions."""
context = Context({
'key1': 'value1',
'key2': 'value2',
'key3': 'value3',
6: 6
})
add_me = {
'key2': 'value4',
'key4': 'value5'
}
context.merge(add_me)
assert context['key1'] == 'value1'
assert context['key2'] == 'value4'
assert context['key3'] == 'value3'
assert context['key4'] == 'value5'
assert context[6] == 6
def test_merge_pass_nested_with_substitutions():
"""Merge success case with nested hierarchy and substitutions."""
context = Context({
'key1': 'value1',
'key2': 'value2',
'key3': {
'k31': 'value31',
'k32': 'value32',
},
'key5': False,
15: 16
})
add_me = {
'key2': 'value4',
'key3': {
'k33': 'value33'
},
'key4': '444_{key1}_444',
'key5': {
'k51': PyString('key1')
},
13: 14,
15: 17
}
context.merge(add_me)
assert context == {
'key1': 'value1',
'key2': 'value4',
'key3': {
'k31': 'value31',
'k32': 'value32',
'k33': 'value33'
},
'key4': '444_value1_444',
'key5': {
'k51': 'value1'
},
13: 14,
15: 17
}
def test_merge_pass_no_recognized_type():
"""Merge success case where type not known mergable."""
arb_obj = TimeoutError('blah')
context = Context({
'key1': 'value1',
'key2': 'value2',
'key3': {
'k31': 'value31',
'k32': 'value32',
},
'key5': TimeoutError('boom')})
add_me = {
'key2': 'value4',
'key3': {
'k33': 'value33'
},
'key4': '444_{key1}_444',
'key5': arb_obj
}
context.merge(add_me)
assert context == {
'key1': 'value1',
'key2': 'value4',
'key3': {
'k31': 'value31',
'k32': 'value32',
'k33': 'value33'
},
'key4': '444_value1_444',
'key5': arb_obj
}
def test_merge_pass_nested_with_types():
"""Merge success case with nested hierarchy, substitutions, diff types."""
context = Context({
'k1': 'v1',
'k2': 'v2_{ctx1}',
'k3': bytes('v3{ctx1}', encoding='utf-8'),
'k4': [
1,
2,
'3_{ctx4}here',
{'key4.1': 'value4.1',
'{ctx2}_key4.2': 'value_{ctx3}_4.2',
'key4.3': {
'4.3.1': '4.3.1value',
'4.3.2': '4.3.2_{ctx1}_value'}}
],
'k5': {'key5.1': {'kv511': 'value5.1'}, 'key5.2': 'value5.2'},
'k6': ('six6.1', False, [0, 1, 2], 77, 'six_{ctx1}_end'),
'k7': 'simple string to close 7',
'k8': ('tuple1', 'tuple2'),
'k9': {'set1', 'set2'},
'k10': (
1,
2,
{'10.1': '10.1v',
'10.2': '{10.2v}',
},
3),
'k11': {
'k11.1': '11.1v',
'k11.2': {
'k11.2.1': '11.2.1v',
'k11.2.2': {
'k11.2.2.1': '11.2.2.1v'
},
},
},
'k12': 'end'
}
)
add_me = {
'k4': [
4.4,
{'key4.3': {
'4.3.1': 'merged value for 4.3.1'
}
}
],
'k5': {
'key5.1': {
'kv522': 'kv522 from merge {k1}'
}},
'k8': ('tuple3', ),
'k9': {'set3', },
'k10': ({
'{k1}': [0,
1,
2,
(
'tuple in list in dict in tuple in dict',
'hello {k2}',
{'k1': '{k1}'}
),
[0, 1, 2, '{k1}', 3, (True, False), ['00', '{k1}']],
4]
},
4),
'k11': {
'k11.2': {
'k11.2.2': {
'add me': '{k1}'
},
},
},
}
context.merge(add_me)
assert context == {
'k1': 'v1',
'k2': 'v2_{ctx1}',
'k3': bytes('v3{ctx1}', encoding='utf-8'),
'k4': [
1,
2,
'3_{ctx4}here',
{'key4.1': 'value4.1',
'{ctx2}_key4.2': 'value_{ctx3}_4.2',
'key4.3': {
'4.3.1': '4.3.1value',
'4.3.2': '4.3.2_{ctx1}_value'}},
4.4,
{'key4.3': {
'4.3.1': 'merged value for 4.3.1'
}
}
],
'k5': {
'key5.1': {
'kv511': 'value5.1',
'kv522': 'kv522 from merge v1'
},
'key5.2': 'value5.2'},
'k6': ('six6.1', False, [0, 1, 2], 77, 'six_{ctx1}_end'),
'k7': 'simple string to close 7',
'k8': ('tuple1', 'tuple2', 'tuple3'),
'k9': {'set1', 'set2', 'set3'},
'k10': (
1,
2,
{'10.1': '10.1v',
'10.2': '{10.2v}',
},
3,
{'v1': [0,
1,
2,
(
'tuple in list in dict in tuple in dict',
'hello v2_{ctx1}',
{'k1': 'v1'}
),
[0, 1, 2, 'v1', 3, (True, False), ['00', 'v1']],
4]
},
4
),
'k11': {
'k11.1': '11.1v',
'k11.2': {
'k11.2.1': '11.2.1v',
'k11.2.2': {
'k11.2.2.1': '11.2.2.1v',
'add me': 'v1'
},
},
},
'k12': 'end'
}
def test_merge_interpolate_py():
"""Merge with interpolate."""
context = Context()
context.merge({"key": PyString("True")})
assert context["key"] is True
def test_merge_replaced_by_interpolated_py_mapping():
"""Merge with interpolate py string."""
context = Context({'key': {'b': 2}})
context.merge({"key": PyString("{'a': 1}")})
assert context["key"] == {'a': 1}
def test_merge_interpolate_py_with_substitutions():
"""Merge with interpolate substitutions."""
context = Context({"key": False})
context.merge({"key": PyString("5")})
assert context["key"] == 5
def test_merge_non_string_keys():
"""Merge when key is not string."""
context = Context({1: False, 2: 'two', 3: '{two}'})
context.merge({2: 'merged'})
assert context == {1: False, 2: 'merged', 3: '{two}'}
def test_merge_key_substitutions():
"""Merge when keys substitute."""
context = Context({'k1': 'v1', 'k2': 'k1', 'k3': 'value3'})
context.merge({'{k2}': 'newvalue', '{k1}': 'k1merged', '{k3}': '3new'})
# notice that k1 resolves to newvalue because it evaluates after k2 merge.
assert context == {'k1': 'newvalue',
'k2': 'k1',
'newvalue': 'k1merged',
'k3': 'value3',
'value3': '3new'}
# ------------------- merge --------------------------------------------------#
# ------------------- set_defaults -------------------------------------------#
def test_set_defaults_pass_no_substitutions():
"""Defaults success case with no substitutions."""
context = Context({
'key1': 'value1',
'key2': 'value2',
'key3': 'value3',
})
add_me = {
'key2': 'value4',
'key4': 'value5'
}
context.set_defaults(add_me)
assert context['key1'] == 'value1'
# since key2 exists already, shouldn't update
assert context['key2'] == 'value2'
assert context['key3'] == 'value3'
assert context['key4'] == 'value5'
def test_set_defaults_pass_nested_with_substitutions():
"""Merge success case with nested hierarchy and substitutions."""
context = Context({
'key1': 'value1',
'key2': 'value2',
'key3': {
'k31': 'value31',
'k32': 'value32',
}})
add_me = {
'key2': 'value4',
'key3': {
'k33': 'value33'
},
'key4': '444_{key1}_444'
}
context.set_defaults(add_me)
assert context == {
'key1': 'value1',
'key2': 'value2',
'key3': {
'k31': 'value31',
'k32': 'value32',
'k33': 'value33'
},
'key4': '444_value1_444'
}
def test_set_defaults_pass_nested_with_types():
"""Defaults with nested hierarchy, substitutions, diff types."""
context = Context({
'k1': 'v1',
'k2': 'v2_{ctx1}',
'k3': bytes('v3{ctx1}', encoding='utf-8'),
'k4': [
1,
2,
'3_{ctx4}here',
{'key4.1': 'value4.1',
'{ctx2}_key4.2': 'value_{ctx3}_4.2',
'key4.3': {
'4.3.1': '4.3.1value',
'4.3.2': '4.3.2_{ctx1}_value'}}
],
'k5': {'key5.1': {'kv511': 'value5.1'}, 'key5.2': 'value5.2'},
'k6': ('six6.1', False, [0, 1, 2], 77, 'six_{ctx1}_end'),
'k7': 'simple string to close 7',
'k8': ('tuple1', 'tuple2'),
'k9': {'set1', 'set2'},
'k10': (
1,
2,
{'10.1': '10.1v',
'10.2': '{10.2v}',
},
3),
'k11': {
'k11.1': '11.1v',
'k11.2': {
'k11.2.1': '11.2.1v',
'k11.2.2': {
'k11.2.2.1': '11.2.2.1v'
},
},
},
'k12': 'end'
}
)
add_me = {
'k4': [
4.4,
{'key4.3': {
'4.3.1': 'merged value for 4.3.1'
}
}
],
'k5': {
'key5.1': {
'kv522': 'kv522 from merge {k1}'
}},
'k8': ('tuple3', ),
'k9': {'set3', },
'k10': ({
'{k1}': [0,
1,
2,
(
'tuple in list in dict in tuple in dict',
'hello {k2}',
{'k1': '{k1}'}
),
[0, 1, 2, '{k1}', 3, (True, False), ['00', '{k1}']],
4]
},
4),
'k11': {
'k11.2': {
'k11.2.2': {
'add me': '{k1}'
},
},
},
}
context.set_defaults(add_me)
assert context == {
'k1': 'v1',
'k2': 'v2_{ctx1}',
'k3': bytes('v3{ctx1}', encoding='utf-8'),
'k4': [
1,
2,
'3_{ctx4}here',
{'key4.1': 'value4.1',
'{ctx2}_key4.2': 'value_{ctx3}_4.2',
'key4.3': {
'4.3.1': '4.3.1value',
'4.3.2': '4.3.2_{ctx1}_value'}}
],
'k5': {'key5.1': {'kv511': 'value5.1',
'kv522': 'kv522 from merge v1'},
'key5.2': 'value5.2'},
'k6': ('six6.1', False, [0, 1, 2], 77, 'six_{ctx1}_end'),
'k7': 'simple string to close 7',
'k8': ('tuple1', 'tuple2'),
'k9': {'set1', 'set2'},
'k10': (
1,
2,
{'10.1': '10.1v',
'10.2': '{10.2v}',
},
3),
'k11': {
'k11.1': '11.1v',
'k11.2': {
'k11.2.1': '11.2.1v',
'k11.2.2': {
'k11.2.2.1': '11.2.2.1v',
'add me': 'v1'
},
},
},
'k12': 'end'
}
# ------------------- set_defaults -------------------------------------------#
| 32.194112
| 79
| 0.547794
|
794ac2b2d01c183e0439a705180002060790a073
| 1,958
|
py
|
Python
|
verilator/detect2600_verilator/check.py
|
tacertain/Tutorials_MiSTer
|
ffa0c2d01d7c40b2dce893470e9144d39644f321
|
[
"Apache-2.0"
] | 50
|
2019-02-05T22:58:19.000Z
|
2022-03-15T05:15:23.000Z
|
verilator/detect2600_verilator/check.py
|
tacertain/Tutorials_MiSTer
|
ffa0c2d01d7c40b2dce893470e9144d39644f321
|
[
"Apache-2.0"
] | 3
|
2021-05-03T08:18:06.000Z
|
2021-08-09T01:27:48.000Z
|
verilator/detect2600_verilator/check.py
|
tacertain/Tutorials_MiSTer
|
ffa0c2d01d7c40b2dce893470e9144d39644f321
|
[
"Apache-2.0"
] | 15
|
2019-09-03T10:18:43.000Z
|
2022-01-19T06:09:41.000Z
|
import csv
import subprocess
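# Check detect2600's mapper detection: run the Verilated model (tmp/Vtop) on each
# ROM, compare the mapper it reports against the reference in 2600mapper.csv, and
# write missing/match/incorrect report files.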
size_table = { "00008400":"--","00006300":"--","00008000":"--","00010000":"--","00000800": "2K", "00001000" : "4K" }
cart_data = []
md5_name = {}
missing = []
match = []
incorrect = []
with open('romschecklist.chk') as csv_file:
    csv_reader = csv.reader(csv_file, delimiter=',')
    for row in csv_reader:
        md5_name[row[0]] = row[1]
#print(md5_name)
with open('2600mapper.csv') as csv_file:
    csv_reader = csv.reader(csv_file, delimiter=',')
    line_count = 0
    for row in csv_reader:
        if line_count != 0:  # skip the header row
            the_cart = {}
            print(row)
            the_cart['MD5'] = row[3]
            the_cart['MAPPER'] = row[4]
            try:
                the_cart['NAME'] = md5_name[row[3]]
                cart_data.append(the_cart)
            except KeyError:
                print("missing", row[0])
                the_cart['NAME'] = row[0]
                missing.append(the_cart)
        line_count += 1
print(cart_data)
for row in cart_data:
    cart = 'Roms-1/' + row['NAME']
    print(cart)
    # check_output returns bytes (on Python 3), so decode before splitting into lines.
    result = subprocess.check_output(['tmp/Vtop', cart]).decode()
    print(result, row['MAPPER'])
    x = csv.reader(result.split('\n'), delimiter=',')
    mapper = ''
    size = ''
    line_count = 0
    for y in x:
        if line_count == 0:
            # pull the size and superchip flag out of the first line
            size = y[3]
            sc = y[5]
            print(y)
        elif line_count == 1:
            print(y)
            mapper = y[1]
        line_count += 1
    if mapper == "00":
        # unmapped: fall back to a size-based guess
        mapper = size_table[size]
    if sc == '1':
        mapper = mapper + "SC"
    if row['MAPPER'] == mapper:
        print('FOUND')
        match.append(row)
    else:
        print('NOMATCH')
        row['INCORRECT'] = mapper
        incorrect.append(row)
with open('missing.txt', 'w') as f:
for item in missing:
f.write("%s\n" % item)
with open('match.txt', 'w') as f:
for item in match:
f.write("%s\n" % item)
with open('incorrect.txt', 'w') as f:
for item in incorrect:
f.write("%s\n" % item)
| 20.610526
| 116
| 0.55618
|
794ac36bc53f15d59f7ef701d279b3402d7bc92d
| 1,034
|
py
|
Python
|
_/Chapter 7/myshop/orders/views.py
|
paullewallencom/django-978-1-7872-8366-4
|
8677b798412cb28389ecf211b8af9692bb34bfcc
|
[
"Apache-2.0"
] | 26
|
2019-10-04T20:37:43.000Z
|
2021-11-15T19:54:29.000Z
|
_/Chapter 7/myshop/orders/views.py
|
paullewallencom/django-978-1-7872-8366-4
|
8677b798412cb28389ecf211b8af9692bb34bfcc
|
[
"Apache-2.0"
] | 1
|
2022-01-14T11:29:11.000Z
|
2022-01-14T11:29:11.000Z
|
_/Chapter 7/myshop/orders/views.py
|
paullewallencom/django-978-1-7872-8366-4
|
8677b798412cb28389ecf211b8af9692bb34bfcc
|
[
"Apache-2.0"
] | 29
|
2019-05-19T11:43:02.000Z
|
2021-11-16T13:05:30.000Z
|
from django.shortcuts import render
from .models import OrderItem
from .forms import OrderCreateForm
from .tasks import order_created
from cart.cart import Cart
def order_create(request):
cart = Cart(request)
if request.method == 'POST':
form = OrderCreateForm(request.POST)
if form.is_valid():
order = form.save()
for item in cart:
OrderItem.objects.create(order=order,
product=item['product'],
price=item['price'],
quantity=item['quantity'])
# clear the cart
cart.clear()
# launch asynchronous task
order_created.delay(order.id)
return render(request, 'orders/order/created.html', {'order': order})
else:
form = OrderCreateForm()
return render(request, 'orders/order/create.html', {'cart': cart,
'form': form})
| 36.928571
| 81
| 0.526112
|
794ac4488173e551ff7ebec54fe253c853aa3281
| 35,062
|
py
|
Python
|
pyzoo/test/zoo/pipeline/nnframes/test_nn_classifier.py
|
jiaxinying/analytics-zoo
|
c3669b1736088df911c84b38fde3e90a571f51b7
|
[
"Apache-2.0"
] | 4
|
2018-06-19T05:38:30.000Z
|
2020-06-22T14:26:26.000Z
|
pyzoo/test/zoo/pipeline/nnframes/test_nn_classifier.py
|
jiaxinying/analytics-zoo
|
c3669b1736088df911c84b38fde3e90a571f51b7
|
[
"Apache-2.0"
] | 5
|
2021-06-08T23:28:18.000Z
|
2022-02-10T05:31:27.000Z
|
pyzoo/test/zoo/pipeline/nnframes/test_nn_classifier.py
|
jiaxinying/analytics-zoo
|
c3669b1736088df911c84b38fde3e90a571f51b7
|
[
"Apache-2.0"
] | 1
|
2018-09-05T02:16:10.000Z
|
2018-09-05T02:16:10.000Z
|
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import errno
import shutil
import pytest
from bigdl.nn.criterion import *
from bigdl.nn.layer import *
from bigdl.optim.optimizer import *
from numpy.testing import assert_allclose
from pyspark.ml import Pipeline, PipelineModel
from pyspark.ml.feature import MinMaxScaler
from pyspark.ml.tuning import ParamGridBuilder
from pyspark.sql.types import *
from zoo.common.nncontext import *
from zoo.feature.common import *
from zoo.feature.image import *
from zoo.pipeline.api.keras import layers as ZLayer
from zoo.pipeline.api.keras.models import Model as ZModel
from zoo.pipeline.api.keras.optimizers import Adam as KAdam
from zoo.pipeline.nnframes import *
from zoo.util.tf import *
class TestNNClassifier():
def setup_method(self, method):
""" setup any state tied to the execution of the given method in a
class. setup_method is invoked for every test method of a class.
"""
        sparkConf = init_spark_conf().setMaster("local[1]").setAppName("testNNClassifier")
        self.sc = init_nncontext(sparkConf)
        self.sqlContext = SQLContext(self.sc)
        assert(self.sc.appName == "testNNClassifier")
def teardown_method(self, method):
""" teardown any state that was previously setup with a setup_method
call.
"""
self.sc.stop()
def get_estimator_df(self):
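        # A tiny 4-row toy dataset: 2-dim float features and 2-dim float labels.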
data = self.sc.parallelize([
((2.0, 1.0), (1.0, 2.0)),
((1.0, 2.0), (2.0, 1.0)),
((2.0, 1.0), (1.0, 2.0)),
((1.0, 2.0), (2.0, 1.0))])
schema = StructType([
StructField("features", ArrayType(DoubleType(), False), False),
StructField("label", ArrayType(DoubleType(), False), False)])
df = self.sqlContext.createDataFrame(data, schema)
return df
def get_classifier_df(self):
data = self.sc.parallelize([
((2.0, 1.0), 1.0),
((1.0, 2.0), 2.0),
((2.0, 1.0), 1.0),
((1.0, 2.0), 2.0)])
schema = StructType([
StructField("features", ArrayType(DoubleType(), False), False),
StructField("label", DoubleType(), False)])
df = self.sqlContext.createDataFrame(data, schema)
return df
def get_pipeline_df(self):
data = self.sc.parallelize([
((2.0, 1.0), (1.0, 2.0), 1.0),
((1.0, 2.0), (2.0, 1.0), 2.0),
((2.0, 1.0), (1.0, 2.0), 1.0),
((1.0, 2.0), (2.0, 1.0), 2.0)])
schema = StructType([
StructField("features", ArrayType(DoubleType(), False), False),
StructField("label1", ArrayType(DoubleType(), False), False),
StructField("label2", DoubleType(), False)])
df = self.sqlContext.createDataFrame(data, schema)
return df
    def test_nnEstimator_construct_with_different_params(self):
linear_model = Sequential().add(Linear(2, 2))
mse_criterion = MSECriterion()
df = self.get_estimator_df()
for e in [NNEstimator(linear_model, mse_criterion),
NNEstimator(linear_model, mse_criterion, [2], [2]),
NNEstimator(linear_model, mse_criterion, SeqToTensor([2]), SeqToTensor([2]))]:
nnModel = e.setBatchSize(4).setMaxEpoch(1).fit(df)
res = nnModel.transform(df)
assert type(res).__name__ == 'DataFrame'
    def test_nnClassifier_construct_with_different_params(self):
linear_model = Sequential().add(Linear(2, 2))
mse_criterion = MSECriterion()
df = self.get_classifier_df()
for e in [NNClassifier(linear_model, mse_criterion),
NNClassifier(linear_model, mse_criterion, [2]),
NNClassifier(linear_model, mse_criterion, SeqToTensor([2]))]:
nnModel = e.setBatchSize(4).setMaxEpoch(1).fit(df)
res = nnModel.transform(df)
assert type(res).__name__ == 'DataFrame'
    def test_nnModel_construct_with_different_params(self):
linear_model = Sequential().add(Linear(2, 2))
df = self.get_estimator_df()
for e in [NNModel(linear_model),
NNModel(linear_model, [2]),
NNModel(linear_model, SeqToTensor([2]))]:
res = e.transform(df)
assert type(res).__name__ == 'DataFrame'
assert e.getBatchSize() == 4
    def test_nnClassifierModel_construct_with_different_params(self):
linear_model = Sequential().add(Linear(2, 2))
df = self.get_classifier_df()
for e in [NNClassifierModel(linear_model),
NNClassifierModel(linear_model, [2]),
NNClassifierModel(linear_model, SeqToTensor([2]))]:
res = e.transform(df)
assert type(res).__name__ == 'DataFrame'
assert e.getBatchSize() == 4
def test_all_set_get_methods(self):
linear_model = Sequential().add(Linear(2, 2))
mse_criterion = MSECriterion()
estimator = NNEstimator(linear_model, mse_criterion, SeqToTensor([2]), SeqToTensor([2]))
assert estimator.setBatchSize(30).getBatchSize() == 30
assert estimator.setMaxEpoch(40).getMaxEpoch() == 40
assert estimator.setLearningRate(1e-4).getLearningRate() == 1e-4
assert estimator.setFeaturesCol("abcd").getFeaturesCol() == "abcd"
assert estimator.setLabelCol("xyz").getLabelCol() == "xyz"
assert isinstance(estimator.setOptimMethod(Adam()).getOptimMethod(), Adam)
nn_model = NNModel(linear_model, SeqToTensor([2]))
assert nn_model.setBatchSize(20).getBatchSize() == 20
linear_model = Sequential().add(Linear(2, 2))
classNLL_criterion = ClassNLLCriterion()
classifier = NNClassifier(linear_model, classNLL_criterion, SeqToTensor([2]))
assert classifier.setBatchSize(20).getBatchSize() == 20
assert classifier.setMaxEpoch(50).getMaxEpoch() == 50
assert classifier.setLearningRate(1e-5).getLearningRate() == 1e-5
assert classifier.setLearningRateDecay(1e-9).getLearningRateDecay() == 1e-9
assert classifier.setCachingSample(False).isCachingSample() is False
nn_classifier_model = NNClassifierModel(linear_model, SeqToTensor([2]))
assert nn_classifier_model.setBatchSize((20)).getBatchSize() == 20
def test_nnEstimator_fit_nnmodel_transform(self):
model = Sequential().add(Linear(2, 2))
criterion = MSECriterion()
estimator = NNEstimator(model, criterion, SeqToTensor([2]), ArrayToTensor([2]))\
.setBatchSize(4).setLearningRate(0.2).setMaxEpoch(40)
df = self.get_estimator_df()
nnModel = estimator.fit(df)
assert nnModel.getBatchSize() == 4
res = nnModel.transform(df)
assert type(res).__name__ == 'DataFrame'
res.registerTempTable("nnModelDF") # Compatible with spark 1.6
results = self.sqlContext.table("nnModelDF")
count = results.rdd.count()
data = results.rdd.collect()
for i in range(count):
row_label = data[i][1]
row_prediction = data[i][2]
assert_allclose(row_label[0], row_prediction[0], atol=0, rtol=1e-1)
assert_allclose(row_label[1], row_prediction[1], atol=0, rtol=1e-1)
def test_nnEstimator_fit_gradient_clipping(self):
model = Sequential().add(Linear(2, 2))
criterion = MSECriterion()
estimator = NNEstimator(model, criterion, SeqToTensor([2]), ArrayToTensor([2])) \
.setBatchSize(4).setLearningRate(0.2).setMaxEpoch(2)\
.setConstantGradientClipping(0.1, 0.2)
df = self.get_estimator_df()
estimator.fit(df)
estimator.clearGradientClipping()
estimator.fit(df)
estimator.setGradientClippingByL2Norm(1.2)
estimator.fit(df)
def test_nnEstimator_fit_with_Cache_Disk(self):
model = Sequential().add(Linear(2, 2))
criterion = MSECriterion()
estimator = NNEstimator(model, criterion, SeqToTensor([2]), ArrayToTensor([2])) \
.setBatchSize(1).setLearningRate(0.2).setMaxEpoch(2) \
.setDataCacheLevel("DISK_AND_DRAM", 2)
data = self.sc.parallelize([
((2.0, 1.0), (1.0, 2.0)),
((1.0, 2.0), (2.0, 1.0)),
((2.0, 1.0), (1.0, 2.0)),
((1.0, 2.0), (2.0, 1.0)),
((2.0, 1.0), (1.0, 2.0)),
((1.0, 2.0), (2.0, 1.0)),
((2.0, 1.0), (1.0, 2.0)),
((1.0, 2.0), (2.0, 1.0))] * 10)
schema = StructType([
StructField("features", ArrayType(DoubleType(), False), False),
StructField("label", ArrayType(DoubleType(), False), False)])
df = self.sqlContext.createDataFrame(data, schema)
estimator.fit(df)
def test_nnEstimator_fit_with_non_default_featureCol(self):
model = Sequential().add(Linear(2, 2))
criterion = MSECriterion()
estimator = NNEstimator(model, criterion, SeqToTensor([2]), SeqToTensor([2]))\
.setBatchSize(4)\
.setLearningRate(0.01).setMaxEpoch(1) \
.setFeaturesCol("abcd").setLabelCol("xyz").setPredictionCol("tt")
data = self.sc.parallelize([
((2.0, 1.0), (1.0, 2.0)),
((1.0, 2.0), (2.0, 1.0)),
((2.0, 1.0), (1.0, 2.0)),
((1.0, 2.0), (2.0, 1.0))])
schema = StructType([
StructField("abcd", ArrayType(DoubleType(), False), False),
StructField("xyz", ArrayType(DoubleType(), False), False)])
df = self.sqlContext.createDataFrame(data, schema)
nnModel = estimator.fit(df)
res = nnModel.transform(df)
assert type(res).__name__ == 'DataFrame'
assert res.select("abcd", "xyz", "tt").count() == 4
def test_nnEstimator_fit_with_different_OptimMethods(self):
model = Sequential().add(Linear(2, 2))
criterion = MSECriterion()
estimator = NNEstimator(model, criterion, SeqToTensor([2]), SeqToTensor([2]))\
.setBatchSize(4)\
.setLearningRate(0.01).setMaxEpoch(1) \
.setPredictionCol("tt")
df = self.get_estimator_df()
for opt in [SGD(learningrate=1e-3, learningrate_decay=0.0,),
Adam(), LBFGS(), Adagrad(), Adadelta()]:
nnModel = estimator.setOptimMethod(opt).fit(df)
res = nnModel.transform(df)
assert type(res).__name__ == 'DataFrame'
assert res.select("features", "label", "tt").count() == 4
    def test_nnEstimator_fit_with_adam_lr_schedule(self):
model = Sequential().add(Linear(2, 2))
criterion = MSECriterion()
df = self.get_estimator_df()
nnModel = NNEstimator(model, criterion, SeqToTensor([2]), SeqToTensor([2])) \
.setBatchSize(4) \
.setLearningRate(0.01).setMaxEpoch(1) \
.setPredictionCol("tt") \
.setOptimMethod(KAdam(
schedule=Plateau("Loss", factor=0.1, patience=2, mode="min", epsilon=0.01,
cooldown=0, min_lr=1e-15))) \
.fit(df)
res = nnModel.transform(df)
assert type(res).__name__ == 'DataFrame'
def test_nnEstimator_create_with_feature_size(self):
model = Sequential().add(Linear(2, 2))
criterion = MSECriterion()
estimator = NNEstimator(model, criterion, [2], [2])\
.setBatchSize(4).setLearningRate(0.2).setMaxEpoch(1)
df = self.get_estimator_df()
nnModel = estimator.fit(df)
assert nnModel.getBatchSize() == 4
def test_nnEstimator_fit_with_train_val_summary(self):
model = Sequential().add(Linear(2, 2))
criterion = MSECriterion()
data = self.sc.parallelize([
((2.0, 1.0), (1.0, 2.0)),
((1.0, 2.0), (2.0, 1.0)),
((2.0, 1.0), (1.0, 2.0)),
((1.0, 2.0), (2.0, 1.0))])
val_data = self.sc.parallelize([
((2.0, 1.0), (1.0, 2.0)),
((1.0, 2.0), (2.0, 1.0))])
schema = StructType([
StructField("features", ArrayType(DoubleType(), False), False),
StructField("label", ArrayType(DoubleType(), False), False)])
df = self.sqlContext.createDataFrame(data, schema)
val_df = self.sqlContext.createDataFrame(val_data, schema)
tmp_dir = tempfile.mkdtemp()
train_summary = TrainSummary(log_dir=tmp_dir, app_name="estTest")
train_summary.set_summary_trigger("LearningRate", SeveralIteration(1))
val_summary = ValidationSummary(log_dir=tmp_dir, app_name="estTest")
estimator = NNEstimator(model, criterion, SeqToTensor([2]), SeqToTensor([2]))\
.setBatchSize(4) \
.setMaxEpoch(5) \
.setTrainSummary(train_summary)
assert (estimator.getValidation() is None)
estimator.setValidation(EveryEpoch(), val_df, [MAE()], 2) \
.setValidationSummary(val_summary)
assert (estimator.getValidation() is not None)
nnModel = estimator.fit(df)
res = nnModel.transform(df)
lr_result = train_summary.read_scalar("LearningRate")
mae_result = val_summary.read_scalar("MAE")
assert isinstance(estimator.getTrainSummary(), TrainSummary)
assert type(res).__name__ == 'DataFrame'
assert len(lr_result) == 5
assert len(mae_result) == 4
def test_NNEstimator_checkpoint(self):
model = Sequential().add(Linear(2, 2))
criterion = MSECriterion()
df = self.get_estimator_df()
try:
tmp_dir = tempfile.mkdtemp()
estimator = NNEstimator(model, criterion).setMaxEpoch(5)\
.setBatchSize(4)\
.setCheckpoint(tmp_dir, EveryEpoch(), False)
checkpoint_config = estimator.getCheckpoint()
assert checkpoint_config[0] == tmp_dir
assert "EveryEpoch" in str(checkpoint_config)
assert checkpoint_config[2] is False
estimator.fit(df)
assert len(os.listdir(tmp_dir)) > 0
finally:
try:
shutil.rmtree(tmp_dir) # delete directory
except OSError as exc:
if exc.errno != errno.ENOENT: # ENOENT - no such file or directory
raise # re-raise exception
def test_NNEstimator_multi_input(self):
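        # Two-input Keras-style graph; NNEstimator takes a list of feature sizes
        # ([[1], [1]] below) for multi-input models.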
zx1 = ZLayer.Input(shape=(1, ))
zx2 = ZLayer.Input(shape=(1, ))
zz = ZLayer.merge([zx1, zx2], mode="concat")
zy = ZLayer.Dense(2)(zz)
zmodel = ZModel([zx1, zx2], zy)
criterion = MSECriterion()
df = self.get_estimator_df()
estimator = NNEstimator(zmodel, criterion, [[1], [1]]).setMaxEpoch(5) \
.setBatchSize(4)
nnmodel = estimator.fit(df)
nnmodel.transform(df).collect()
def test_NNEstimator_works_with_VectorAssembler_multi_input(self):
if self.sc.version.startswith("2"):
from pyspark.ml.linalg import Vectors
from pyspark.ml.feature import VectorAssembler
from pyspark.sql import SparkSession
spark = SparkSession \
.builder \
.getOrCreate()
df = spark.createDataFrame(
[(1, 35, 109.0, Vectors.dense([2.0, 5.0, 0.5, 0.5]), 1.0),
(2, 58, 2998.0, Vectors.dense([4.0, 10.0, 0.5, 0.5]), 2.0),
(3, 18, 123.0, Vectors.dense([3.0, 15.0, 0.5, 0.5]), 1.0)],
["user", "age", "income", "history", "label"])
assembler = VectorAssembler(
inputCols=["user", "age", "income", "history"],
outputCol="features")
df = assembler.transform(df)
x1 = ZLayer.Input(shape=(1,))
x2 = ZLayer.Input(shape=(2,))
x3 = ZLayer.Input(shape=(2, 2,))
user_embedding = ZLayer.Embedding(5, 10)(x1)
flatten = ZLayer.Flatten()(user_embedding)
dense1 = ZLayer.Dense(2)(x2)
            lstm = ZLayer.LSTM(4, input_shape=(2, 2))(x3)
            merged = ZLayer.merge([flatten, dense1, lstm], mode="concat")
zy = ZLayer.Dense(2)(merged)
zmodel = ZModel([x1, x2, x3], zy)
criterion = ClassNLLCriterion()
classifier = NNClassifier(zmodel, criterion, [[1], [2], [2, 2]]) \
.setOptimMethod(Adam()) \
.setLearningRate(0.1) \
.setBatchSize(2) \
.setMaxEpoch(10)
nnClassifierModel = classifier.fit(df)
print(nnClassifierModel.getBatchSize())
res = nnClassifierModel.transform(df).collect()
def test_NNModel_transform_with_nonDefault_featureCol(self):
model = Sequential().add(Linear(2, 2))
nnModel = NNModel(model, SeqToTensor([2]))\
.setFeaturesCol("abcd").setPredictionCol("dcba")
data = self.sc.parallelize([
((2.0, 1.0), (1.0, 2.0)),
((1.0, 2.0), (2.0, 1.0)),
((2.0, 1.0), (1.0, 2.0)),
((1.0, 2.0), (2.0, 1.0))])
schema = StructType([
StructField("abcd", ArrayType(DoubleType(), False), False),
StructField("xyz", ArrayType(DoubleType(), False), False)])
df = self.sqlContext.createDataFrame(data, schema)
res = nnModel.transform(df)
assert type(res).__name__ == 'DataFrame'
assert res.select("abcd", "dcba").count() == 4
def test_nnModel_set_Preprocessing(self):
model = Sequential().add(Linear(2, 2))
criterion = MSECriterion()
estimator = NNEstimator(model, criterion, [2], [2])\
.setBatchSize(4).setLearningRate(0.2).setMaxEpoch(1)
df = self.get_estimator_df()
nnModel = estimator.fit(df)
newTransformer = ChainedPreprocessing([SeqToTensor([2]), TensorToSample()])
nnModel.setSamplePreprocessing(newTransformer)
res = nnModel.transform(df)
assert type(res).__name__ == 'DataFrame'
assert res.count() == 4
def test_NNModel_save_load_BigDL_model(self):
model = Sequential().add(Linear(2, 2))
criterion = MSECriterion()
estimator = NNEstimator(model, criterion).setMaxEpoch(1).setBatchSize(4)
df = self.get_estimator_df()
nnModel = estimator.fit(df)
try:
tmp_dir = tempfile.mkdtemp()
modelPath = os.path.join(tmp_dir, "model")
nnModel.model.save(modelPath)
loaded_model = Model.load(modelPath)
resultDF = NNModel(loaded_model).transform(df)
assert resultDF.count() == 4
finally:
try:
shutil.rmtree(tmp_dir) # delete directory
except OSError as exc:
if exc.errno != errno.ENOENT: # ENOENT - no such file or directory
raise # re-raise exception
def test_NNModel_save_load(self):
model = Sequential().add(Linear(2, 2))
criterion = MSECriterion()
estimator = NNEstimator(model, criterion).setMaxEpoch(1).setBatchSize(4)
df = self.get_estimator_df()
nnModel = estimator.fit(df)
try:
tmp_dir = tempfile.mkdtemp()
modelPath = os.path.join(tmp_dir, "model")
nnModel.save(modelPath)
loaded_model = NNModel.load(modelPath)
assert loaded_model.transform(df).count() == 4
finally:
try:
shutil.rmtree(tmp_dir) # delete directory
except OSError as exc:
if exc.errno != errno.ENOENT: # ENOENT - no such file or directory
raise # re-raise exception
def test_nnclassifier_fit_nnclassifiermodel_transform(self):
model = Sequential().add(Linear(2, 2))
criterion = ClassNLLCriterion()
classifier = NNClassifier(model, criterion, SeqToTensor([2])) \
.setBatchSize(4) \
.setLearningRate(0.2).setMaxEpoch(40)
df = self.get_classifier_df()
nnClassifierModel = classifier.fit(df)
assert(isinstance(nnClassifierModel, NNClassifierModel))
res = nnClassifierModel.transform(df)
assert type(res).__name__ == 'DataFrame'
res.registerTempTable("nnClassifierModelDF")
results = self.sqlContext.table("nnClassifierModelDF")
count = results.rdd.count()
data = results.rdd.collect()
for i in range(count):
row_label = data[i][1]
row_prediction = data[i][2]
assert row_label == row_prediction
def test_nnclassifier_fit_with_Sigmoid(self):
model = Sequential().add(Linear(2, 1)).add(Sigmoid())
criterion = BCECriterion()
classifier = NNClassifier(model, criterion, SeqToTensor([2])) \
.setBatchSize(4) \
.setLearningRate(0.2).setMaxEpoch(40)
data = self.sc.parallelize([
((2.0, 1.0), 0.0),
((1.0, 2.0), 1.0),
((2.0, 1.0), 0.0),
((1.0, 2.0), 1.0)])
schema = StructType([
StructField("features", ArrayType(DoubleType(), False), False),
StructField("label", DoubleType(), False)])
df = self.sqlContext.createDataFrame(data, schema)
nnClassifierModel = classifier.fit(df)
assert(isinstance(nnClassifierModel, NNClassifierModel))
res = nnClassifierModel.transform(df)
res.registerTempTable("nnClassifierModelDF")
results = self.sqlContext.table("nnClassifierModelDF")
count = results.rdd.count()
data = results.rdd.collect()
for i in range(count):
row_label = data[i][1]
row_prediction = data[i][2]
assert row_label == row_prediction
def test_nnclassifierModel_set_Preprocessing(self):
model = Sequential().add(Linear(2, 2))
criterion = ClassNLLCriterion()
classifier = NNClassifier(model, criterion, SeqToTensor([2])) \
.setBatchSize(4) \
.setLearningRate(0.2).setMaxEpoch(1)
df = self.get_classifier_df()
nnClassifierModel = classifier.fit(df)
newTransformer = ChainedPreprocessing([SeqToTensor([2]), TensorToSample()])
nnClassifierModel.setSamplePreprocessing(newTransformer)
res = nnClassifierModel.transform(df)
assert type(res).__name__ == 'DataFrame'
assert res.count() == 4
def test_nnclassifier_create_with_size_fit_transform(self):
model = Sequential().add(Linear(2, 2))
criterion = ClassNLLCriterion()
classifier = NNClassifier(model, criterion, [2]) \
.setBatchSize(4) \
.setLearningRate(0.2).setMaxEpoch(40)
df = self.get_classifier_df()
nnClassifierModel = classifier.fit(df)
res = nnClassifierModel.transform(df)
assert type(res).__name__ == 'DataFrame'
def test_nnclassifier_fit_different_optimMethods(self):
model = Sequential().add(Linear(2, 2))
criterion = ClassNLLCriterion()
classifier = NNClassifier(model, criterion, SeqToTensor([2]))\
.setBatchSize(4) \
.setLearningRate(0.2).setMaxEpoch(1)
df = self.get_classifier_df()
for opt in [Adam(), SGD(learningrate=1e-2, learningrate_decay=1e-6,),
LBFGS(), Adagrad(), Adadelta()]:
nnClassifierModel = classifier.setOptimMethod(opt).fit(df)
res = nnClassifierModel.transform(df)
res.collect()
assert type(res).__name__ == 'DataFrame'
def test_nnClassifier_fit_with_train_val_summary(self):
model = Sequential().add(Linear(2, 2))
criterion = MSECriterion()
data = self.sc.parallelize([
((2.0, 1.0), 1.0),
((1.0, 2.0), 2.0),
((2.0, 1.0), 1.0),
((1.0, 2.0), 2.0)])
val_data = self.sc.parallelize([
((2.0, 1.0), 1.0),
((1.0, 2.0), 2.0)])
schema = StructType([
StructField("features", ArrayType(DoubleType(), False), False),
StructField("label", DoubleType(), False)])
df = self.sqlContext.createDataFrame(data, schema)
val_df = self.sqlContext.createDataFrame(val_data, schema)
tmp_dir = tempfile.mkdtemp()
train_summary = TrainSummary(log_dir=tmp_dir, app_name="nnTest")
train_summary.set_summary_trigger("LearningRate", SeveralIteration(1))
val_summary = ValidationSummary(log_dir=tmp_dir, app_name="nnTest")
        classifier = NNClassifier(model, criterion, SeqToTensor([2]))\
            .setBatchSize(4) \
            .setTrainSummary(train_summary).setMaxEpoch(5) \
            .setValidation(EveryEpoch(), val_df, [Top1Accuracy()], 2) \
            .setValidationSummary(val_summary)
        nnModel = classifier.fit(df)
        res = nnModel.transform(df)
        lr_result = train_summary.read_scalar("LearningRate")
        top1_result = val_summary.read_scalar("Top1Accuracy")
        assert isinstance(classifier.getTrainSummary(), TrainSummary)
assert type(res).__name__ == 'DataFrame'
assert len(lr_result) == 5
assert len(top1_result) == 4
def test_nnestimator_with_param_maps(self):
model = Sequential().add(Linear(2, 2))
criterion = MSECriterion()
data = self.sc.parallelize([
((2.0, 1.0), 1.0),
((1.0, 2.0), 2.0),
((2.0, 1.0), 1.0),
((1.0, 2.0), 2.0)])
val_data = self.sc.parallelize([
((2.0, 1.0), 1.0),
((1.0, 2.0), 2.0)])
schema = StructType([
StructField("features", ArrayType(DoubleType(), False), False),
StructField("label", DoubleType(), False)])
df = self.sqlContext.createDataFrame(data, schema)
val_df = self.sqlContext.createDataFrame(val_data, schema)
        classifier = NNEstimator(model, criterion, SeqToTensor([2]))\
            .setBatchSize(4).setMaxEpoch(5) \
            .setValidation(EveryEpoch(), val_df, [Top1Accuracy()], 2)
        param = ParamGridBuilder().addGrid(classifier.learningRate, [1e-3, 1.0]).build()
        print(param)
        models = classifier.fit(df, params=param)
# print(models.model.get_weights())
assert len(models) == 2
w1 = models[0].model.get_weights()
w2 = models[1].model.get_weights()
for ww1, ww2 in zip(w1, w2):
diff = np.sum((ww1 - ww2) ** 2)
assert diff > 1e-2
def test_nnclassifier_with_param_maps(self):
model = Sequential().add(Linear(2, 2))
criterion = MSECriterion()
data = self.sc.parallelize([
((2.0, 1.0), 1.0),
((1.0, 2.0), 2.0),
((2.0, 1.0), 1.0),
((1.0, 2.0), 2.0)])
val_data = self.sc.parallelize([
((2.0, 1.0), 1.0),
((1.0, 2.0), 2.0)])
schema = StructType([
StructField("features", ArrayType(DoubleType(), False), False),
StructField("label", DoubleType(), False)])
df = self.sqlContext.createDataFrame(data, schema)
val_df = self.sqlContext.createDataFrame(val_data, schema)
print(model.get_weights())
        classifier = NNClassifier(model, criterion, SeqToTensor([2]))\
            .setBatchSize(4).setMaxEpoch(5) \
            .setValidation(EveryEpoch(), val_df, [Top1Accuracy()], 2)
        param = ParamGridBuilder().addGrid(classifier.learningRate, [1e-3, 1.0]).build()
        models = classifier.fit(df, params=param)
assert len(models) == 2
w1 = models[0].model.get_weights()
w2 = models[1].model.get_weights()
for ww1, ww2 in zip(w1, w2):
diff = np.sum((ww1 - ww2) ** 2)
assert diff > 1e-2
def test_nnclassifier_in_pipeline(self):
if self.sc.version.startswith("1"):
from pyspark.mllib.linalg import Vectors
df = self.sqlContext.createDataFrame(
[(Vectors.dense([2.0, 1.0]), 1.0),
(Vectors.dense([1.0, 2.0]), 2.0),
(Vectors.dense([2.0, 1.0]), 1.0),
(Vectors.dense([1.0, 2.0]), 2.0),
], ["features", "label"])
scaler = MinMaxScaler().setInputCol("features").setOutputCol("scaled")
model = Sequential().add(Linear(2, 2))
criterion = ClassNLLCriterion()
classifier = NNClassifier(model, criterion)\
.setBatchSize(4) \
.setLearningRate(0.01).setMaxEpoch(1).setFeaturesCol("scaled")
pipeline = Pipeline(stages=[scaler, classifier])
pipelineModel = pipeline.fit(df)
res = pipelineModel.transform(df)
assert type(res).__name__ == 'DataFrame'
# TODO: Add test for ML Vector once infra is ready.
def test_NNClassifierModel_save_load_BigDL_model(self):
model = Sequential().add(Linear(2, 2))
criterion = MSECriterion()
classifier = NNClassifier(model, criterion).setMaxEpoch(1).setBatchSize(4)
df = self.get_classifier_df()
nnClassifierModel = classifier.fit(df)
try:
tmp_dir = tempfile.mkdtemp()
modelPath = os.path.join(tmp_dir, "model")
nnClassifierModel.model.save(modelPath)
loaded_model = Model.load(modelPath)
resultDF = NNClassifierModel(loaded_model).transform(df)
assert resultDF.count() == 4
finally:
try:
shutil.rmtree(tmp_dir) # delete directory
except OSError as exc:
if exc.errno != errno.ENOENT: # ENOENT - no such file or directory
raise # re-raise exception
def test_NNClassifierModel_save_load(self):
model = Sequential().add(Linear(2, 2))
criterion = ClassNLLCriterion()
classifier = NNClassifier(model, criterion, [2]).setMaxEpoch(1).setBatchSize(4)
df = self.get_classifier_df()
nnClassifierModel = classifier.fit(df)
try:
tmp_dir = tempfile.mkdtemp()
modelPath = os.path.join(tmp_dir, "model")
nnClassifierModel.save(modelPath)
loaded_model = NNClassifierModel.load(modelPath)
assert (isinstance(loaded_model, NNClassifierModel))
assert loaded_model.transform(df).count() == 4
finally:
try:
shutil.rmtree(tmp_dir) # delete directory
except OSError as exc:
if exc.errno != errno.ENOENT: # ENOENT - no such file or directory
raise # re-raise exception
def test_NNModel_NNClassifier_pipeline_save_load(self):
if self.sc.version.startswith("2.3") or self.sc.version.startswith("2.4"):
from pyspark.ml.feature import MinMaxScaler
from pyspark.ml.linalg import Vectors
df = self.sqlContext.createDataFrame(
[(Vectors.dense([2.0, 1.0]), 1.0),
(Vectors.dense([1.0, 2.0]), 2.0),
(Vectors.dense([2.0, 1.0]), 1.0),
(Vectors.dense([1.0, 2.0]), 2.0),
], ["features", "label"])
scaler = MinMaxScaler().setInputCol("features").setOutputCol("scaled")
model = Sequential().add(Linear(2, 2))
criterion = ClassNLLCriterion()
classifier = NNClassifier(model, criterion)\
.setBatchSize(4) \
.setLearningRate(0.01).setMaxEpoch(1).setFeaturesCol("scaled")
pipeline = Pipeline(stages=[scaler, classifier])
pipeline_model = pipeline.fit(df)
try:
tmp_dir = tempfile.mkdtemp()
modelPath = os.path.join(tmp_dir, "model")
pipeline_model.save(modelPath)
loaded_model = PipelineModel.load(modelPath)
df2 = self.sqlContext.createDataFrame(
[(Vectors.dense([2.0, 1.0]), 1.0),
(Vectors.dense([1.0, 2.0]), 2.0),
(Vectors.dense([2.0, 1.0]), 1.0),
(Vectors.dense([1.0, 2.0]), 2.0),
], ["features", "label"])
assert loaded_model.transform(df2).count() == 4
finally:
try:
shutil.rmtree(tmp_dir) # delete directory
except OSError as exc:
if exc.errno != errno.ENOENT: # ENOENT - no such file or directory
raise # re-raise exception
def test_input_node_of_tfnet_from_session(self):
import tensorflow as tff
input1 = tff.placeholder(dtype=tff.float32, shape=(None, 2))
input2 = tff.placeholder(dtype=tff.float32, shape=(None, 2))
hidden = tff.layers.dense(input1, 4)
output = tff.layers.dense(hidden, 1)
sess = tff.Session()
sess.run(tff.global_variables_initializer())
tmp_dir = tempfile.mkdtemp()
modelPath = os.path.join(tmp_dir, "model")
raised_error = False
try:
export_tf(sess, modelPath, inputs=[input1, input2], outputs=[output])
except ValueError as v:
            assert input2.name[0:-2] in str(v)
raised_error = True
finally:
try:
shutil.rmtree(modelPath) # delete directory
except OSError as exc:
if exc.errno != errno.ENOENT: # ENOENT - no such file or directory
raise # re-raise exception
if not raised_error:
raise ValueError("we do not find this error, test failed")
def test_XGBClassifierModel_predict(self):
from sys import platform
if platform in ("darwin", "win32"):
return
resource_path = os.path.join(os.path.split(__file__)[0], "../../resources")
path = os.path.join(resource_path, "xgbclassifier/")
modelPath = path + "XGBClassifer.bin"
filePath = path + "test.csv"
model = XGBClassifierModel.loadModel(modelPath, 2)
from pyspark.sql import SparkSession
spark = SparkSession \
.builder \
.getOrCreate()
df = spark.read.csv(filePath, sep=",", inferSchema=True, header=True)
model.setFeaturesCol(["age", "gender", "jointime", "star"])
predict = model.transform(df)
predict.count()
if __name__ == "__main__":
pytest.main()
| 40.628042
| 96
| 0.587331
|
794ac455a8adc30cfc215aae1c51c385a0ba1c07
| 8,621
|
py
|
Python
|
jobs/ztb_data_job.py
|
masling/stock
|
f7a0add2b7533ff43d1ed1a238ec14f55b39e488
|
[
"Apache-2.0"
] | null | null | null |
jobs/ztb_data_job.py
|
masling/stock
|
f7a0add2b7533ff43d1ed1a238ec14f55b39e488
|
[
"Apache-2.0"
] | null | null | null |
jobs/ztb_data_job.py
|
masling/stock
|
f7a0add2b7533ff43d1ed1a238ec14f55b39e488
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/local/bin/python
# -*- coding: utf-8 -*-
__author__ = 'Masling'
# Daily limit-up/limit-down board data; sources: JRJ (jrj.com.cn) and Eastmoney (eastmoney.com).
import xlwt
import pandas as pd
import tushare as ts
import datetime
import libs.common as common
import libs.stock_ztb as ztb
ts.set_token('0d60b78ce095601b582c78f71b954e0e87a6e352b738a6a225d71649')
# Field codes returned by the Eastmoney API:
# c code, m market (1 Shanghai / 0 Shenzhen), n name, p latest price, zdp change percent,
# amount turnover, ltsz float market cap, tshare total market cap, hs turnover rate,
# lbc consecutive limit-up count, fbt first seal time, lbt last seal time, fund seal-order
# amount, zbc times the board was broken open, hybk industry sector, zttj limit-up stats,
# zf amplitude, zs rising speed, ztp limit-up price, nh new-high flag, lb volume ratio,
# cc selection reason.
east_data_index = {"c": "代码", "n": "名称", "p": "最新价", "zdp": "涨跌幅", "amount": "成交额", "ltsz": "流通市值",
"tshare": "总市值", "hs": "换手率", "lbc": "连板数", "fbt": "首次封板时间", "lbt": "最后封板时间",
"fund": "封单金额", "zbc": "打开次数", "hybk": "所属行业", "zttj": "涨停统计", "zf": "振幅", "zs": "涨速",
"ztp": "涨停价", "nh": "是否新高"}
jrj_data_index = [u'代码', u'名称', u'最新价', u'涨跌幅', u'封成比', u'封流比', u'封单金额', u'最后封板时间', u'首次封板时间',
u'打开次数', u'振幅', u'涨停强度']
def save_excel(date, data_today, data_sum):
    # data_today and data_sum are pandas DataFrames.
w = xlwt.Workbook(encoding='gbk')
lb_st = w.add_sheet('连板股票')
date_group = data_sum.groupby(['日期']).groups
lb_group = data_sum.groupby(['连板数']).groups
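    # Sheet layout: one column per trading date, one row per consecutive-limit-up count.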
point_y = 0
point_x = 1
style_title = xlwt.XFStyle()
style_title.font.bold = True
style_title.font.height = 20 * 14
lb_list = list(lb_group.keys())[::-1]
lb_dic = {}
for lbs in lb_list:
lb_st.write(point_x, point_y, lbs, style_title)
lb_dic[point_x] = 1
point_x += 1
point_y = 1
point_x = 0
style = xlwt.XFStyle()
style.alignment.wrap = 1
lb_st.col(0).width = 20 * 100
for df_date in date_group.keys():
lb_st.write(point_x, point_y, df_date.strftime('%Y-%m-%d'), style_title)
lb_st.col(point_y).width = 256 * 20
point_i_x = 1
for lbs in lb_list:
df = data_sum[(data_sum['日期'].astype(str) == df_date.strftime('%Y-%m-%d')) & (data_sum['连板数'] == lbs)]
list_stock = df['名称'].values.tolist()
lb_st.write(point_i_x, point_y, "\r\n".join(list_stock), style)
lb_dic[point_i_x] = max(lb_dic[point_i_x], len(list_stock))
point_i_x += 1
point_y += 1
for key, value in lb_dic.items():
lb_st.row(key).height_mismatch = True
        lb_st.row(key).height = 20 * value * 15  # 20 is the base row-height unit
ztb_st = w.add_sheet(date + '涨停板')
excel_filename = date + ".xls"
point_x = 0
point_y = 0
for columns in data_today.columns:
ztb_st.write(point_x, point_y, columns, style_title)
point_y += 1
point_x += 1
ztb_st.col(0).width = 150 * 20
ztb_st.col(7).width = 200 * 20
ztb_st.col(8).width = 200 * 20
ztb_st.col(9).width = 200 * 20
ztb_st.col(10).width = 200 * 20
ztb_st.col(13).width = 200 * 20
ztb_st.col(14).width = 200 * 20
style_title.font.bold = False
data_today['涨停统计'] = data_today['涨停统计'].map(lambda x: eval(x))
for row in data_today.values:
point_y = 0
for col in row:
print(point_x, point_y, col)
if isinstance(col, datetime.date):
ztb_st.write(point_x, point_y, col.strftime('%Y-%m-%d'))
else:
ztb_st.write(point_x, point_y, col)
point_y += 1
point_x += 1
w.save(excel_filename)
def insert_or_update(data, primary_keys, tables='stock_quotations'):
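    # Build a MySQL upsert ("INSERT ... ON DUPLICATE KEY UPDATE"): every column is
    # inserted, and each non-primary-key column is refreshed when the key already
    # exists. E.g. columns (日期, 代码, 名称) with primary_keys=['日期', '代码'] yield:
    #   insert into stock_quotations (日期,代码,名称) values (%s,%s,%s)
    #       on duplicate key UPDATE `名称`=%s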
values = ['?'] * len(data.columns)
update_sql = ""
for column in data.columns:
if column not in primary_keys:
update_sql += "`%s`=?," % column
if len(update_sql) > 0:
update_sql = update_sql.rstrip(",")
insert_sql = "insert into %s (%s) values (%s) on duplicate key UPDATE %s" % (tables,
",".join(data.columns),
",".join(values),
update_sql)
insert_sql = insert_sql.replace("?", "%s")
data_copy = data.drop(primary_keys, axis=1)
with common.conn() as db:
for index, row in data.iterrows():
str_data = [str(x) for x in row.values.tolist()]
str_data2 = [str(x) for x in data_copy.loc[index,].values.tolist()]
params = tuple(str_data + str_data2)
try:
db.execute(insert_sql, params)
except Exception as e:
print("error :", e)
def stat_all(date_time): # tmp_datetime
db = common.engine()
# today='2018-04-16'
    # Backfill historical data:
# x=pd.date_range('20170101','20180312')
# date_list = [ for i in list(pd.date_range('20170401','20171231'))
date = date_time.strftime('%Y%m%d')
if common.is_holiday(date_time):
common.logger.info('Holiday')
return
common.logger.info("start")
obj = ztb.GetZDT()
obj.today = date
    data_jrj = obj.get_jrj_data()  # fetch the limit-up data from the JRJ site
data_jrj = pd.DataFrame(data_jrj, columns=jrj_data_index)
data_jrj['日期'] = date
    # Drop ST stocks, new listings ("N" prefix), delisted stocks ("退" suffix) and STAR Market ("68" prefix)
data_jrj = data_jrj[
~(data_jrj['名称'].str.contains('ST') |
data_jrj['名称'].str.startswith("N") |
data_jrj['名称'].str.endswith("退") |
data_jrj['代码'].str.startswith("68")
)]
data_jrj['封成比'] = data_jrj['封成比'].map(lambda x: round(x * 100, 3))
data_jrj['封流比'] = data_jrj['封流比'].map(lambda x: round(x * 100, 3))
insert_or_update(data_jrj, ['日期', '代码'])
print(data_jrj)
    data_east = obj.get_east_data()  # fetch the limit-up data from the Eastmoney site
df_ztb = pd.DataFrame.from_dict(data_east["ztb"], orient='columns')
    df_ztb = df_ztb.drop(['m'], axis=1)  # drop unneeded columns by name
    df_ztb = df_ztb.rename(columns=east_data_index)  # rename columns to display names
df_ztb['日期'] = date
    # Drop ST stocks, new listings ("N" prefix), delisted stocks ("退" suffix) and STAR Market ("68" prefix)
df_ztb = df_ztb[
~(df_ztb['名称'].str.contains('ST') |
df_ztb['名称'].str.startswith("N") |
df_ztb['名称'].str.endswith("退") |
df_ztb['代码'].str.startswith("68")
)]
df_ztb['涨跌幅'] = df_ztb['涨跌幅'].map(lambda x: round(x, 3))
df_ztb['换手率'] = df_ztb['换手率'].map(lambda x: round(x, 3))
df_ztb['流通市值'] = df_ztb['流通市值'].map(lambda x: round(x, 3))
df_ztb['总市值'] = df_ztb['总市值'].map(lambda x: round(x, 3))
insert_or_update(df_ztb, ['日期', '代码'])
print(df_ztb)
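    # Sync the latest quotes into stockInfo: update rows whose code already exists,
    # then insert codes that are new.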
sql_update_stock = "update stockInfo s INNER JOIN (select `代码`, `名称`,`所属行业`,`成交额`,`流通市值`,`连板数`,`涨停统计` " \
"from stock_quotations where `代码` in (select `代码` from stockInfo) and " \
"`日期` = (select max(`日期`) from stock_quotations)) q on s.`代码`=q.`代码` set " \
"s.`名称`=q.`名称`,s.`所属行业`=q.`所属行业`,s.`成交额`=q.`成交额`,s.`流通市值`=q.`流通市值`," \
"s.`连板数`=q.`连板数`,s.`涨停统计`=q.`涨停统计`"
common.insert(sql_update_stock)
sql_insert_stock = "insert into stockInfo (`代码`, `名称`,`所属行业`,`成交额`,`流通市值`,`连板数`,`涨停统计`) " \
"select `代码`, `名称`,`所属行业`,`成交额`,`流通市值`,`连板数`,`涨停统计` from stock_quotations " \
"where `代码` not in (select `代码` from stockInfo) and " \
"`日期` = (select max(`日期`) from stock_quotations) "
common.insert(sql_insert_stock)
sum_df = pd.read_sql("SELECT `日期`,CONCAT(名称,'[',代码,']') as 名称,连板数 from stock_quotations where `连板数`>1", db)
today = pd.read_sql("select `日期`, `代码`, `名称`, `最新价`, `涨跌幅`, `封成比`, `封流比`, `封单金额`, `成交额`, `流通市值`,"
" `总市值`, `换手率`, `连板数`, `首次封板时间`, `最后封板时间`, `打开次数`, `涨停统计`, `涨停强度`, `振幅`, "
" `所属行业` from stock_quotations where 日期='%s' order by `涨停强度` desc" % date, db)
    today['涨停统计'] = today['涨停统计'].astype(str)  # astype returns a copy; assign it back
today['涨停统计'] = today['涨停统计'].map(lambda x: str("{ct}/{days}".format(**eval(x))))
save_excel(date, today, sum_df)
# pro = ts.pro_api()
# pd.read_sql("select * from stockInfo where ")
# common.select()
# final_data['concept'] = ''
# for code in final_data['ts_code'].values:
# concept = pro.concept_detail(ts_code=code)
# # print(code)
# # print(concept['concept_name'].values)
# concept_str = '/'.join(cept for cept in concept['concept_name'].values)
# # print(concept_str)
# if len(concept_str) == 0:
# concept_str = '暂无概念数据'
# final_data.loc[final_data['ts_code'] == code, 'concept'] += concept_str
# Main entry point
if __name__ == '__main__':
# stat_all('20210409')
    # Pass the job function in; common.run_with_args parses the date arguments.
tmp_datetime = common.run_with_args(stat_all)
| 41.647343
| 114
| 0.554808
|
794ac5320b0439c006ae983c177023eae28cf09d
| 1,001
|
py
|
Python
|
uamobile/data/cidr/ezweb.py
|
SandySalvatore/uamobile
|
51c637effc65b863c8f1897d971a13bb099bdb84
|
[
"MIT"
] | null | null | null |
uamobile/data/cidr/ezweb.py
|
SandySalvatore/uamobile
|
51c637effc65b863c8f1897d971a13bb099bdb84
|
[
"MIT"
] | null | null | null |
uamobile/data/cidr/ezweb.py
|
SandySalvatore/uamobile
|
51c637effc65b863c8f1897d971a13bb099bdb84
|
[
"MIT"
] | null | null | null |
DATA = [ '111.107.116.64/26',
'106.162.214.160/29',
'111.107.116.192/28',
'210.230.128.224/28',
'219.108.158.0/27',
'219.125.146.0/28',
'61.117.2.32/29',
'61.117.2.40/29',
'219.108.158.40/29',
'111.86.142.0/26',
'111.86.141.64/26',
'111.86.141.128/26',
'111.86.141.192/26',
'27.90.136.0/27',
'27.90.136.32/27',
'27.90.136.64/27',
'27.90.136.96/27',
'27.90.136.128/27',
'27.90.136.160/27',
'27.90.136.192/27',
'27.90.137.192/27',
'27.90.137.224/27',
'27.90.136.224/27',
'27.90.137.0/27',
'27.90.137.32/27',
'27.90.137.64/27',
'27.90.137.96/27',
'27.90.137.128/27',
'27.90.137.160/27',
'111.86.143.192/27',
'111.86.143.224/27',
'111.86.147.0/27',
'111.86.142.128/27',
'111.86.142.160/27',
'111.86.142.192/27',
'111.86.142.224/27',
'111.86.143.0/27',
'111.86.143.32/27',
'111.86.147.32/27',
'111.86.147.64/27',
'111.86.147.96/27',
'111.86.147.128/27',
'111.86.147.160/27',
'111.86.147.192/27',
'111.86.147.224/27']
| 21.297872
| 29
| 0.546454
|
794ac57238462f0fc1ff2db1a817f51ccc2dab7b
| 64,766
|
py
|
Python
|
Lib/Plugin.py
|
iCH3F/ToonTime
|
7efeb94f748706df72b766b2ce5dade3dc171fed
|
[
"MIT"
] | 2
|
2020-05-31T23:29:54.000Z
|
2022-01-11T18:11:07.000Z
|
Lib/Plugin.py
|
iCH3F/ToonTime
|
7efeb94f748706df72b766b2ce5dade3dc171fed
|
[
"MIT"
] | null | null | null |
Lib/Plugin.py
|
iCH3F/ToonTime
|
7efeb94f748706df72b766b2ce5dade3dc171fed
|
[
"MIT"
] | 1
|
2020-05-31T23:19:16.000Z
|
2020-05-31T23:19:16.000Z
|
# -*- coding: utf-8 -*-
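# Kodi add-on plugin logic for WatchNixtoons2: top menus, catalogs and episode lists.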
import re
import sys
import requests
from itertools import chain
from base64 import b64decode
from time import time, sleep
from urlparse import parse_qsl
from string import ascii_uppercase
from urllib import quote_plus, urlencode
import xbmc
import xbmcgui
import xbmcaddon
import xbmcplugin
from Lib.Common import *
from Lib.SimpleTrakt import SimpleTrakt
# Disable urllib3's "InsecureRequestWarning: Unverified HTTPS request is being made" warnings
# (requests is already imported above).
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
PLUGIN_ID = int(sys.argv[1])
PLUGIN_URL = sys.argv[0]
BASEURL = 'https://www.thewatchcartoononline.tv'
# Due to a recent bug on the server end, the mobile URL is now only used on 'makeLatestCatalog()'.
BASEURL_MOBILE = 'https://m.wcostream.com' # Mobile version of one of their domains (seems to be the only one).
PROPERTY_CATALOG_PATH = 'wnt2.catalogPath'
PROPERTY_CATALOG = 'wnt2.catalog'
PROPERTY_EPISODE_LIST_URL = 'wnt2.listURL'
PROPERTY_EPISODE_LIST_DATA = 'wnt2.listData'
PROPERTY_LATEST_MOVIES = 'wnt2.latestMovies'
PROPERTY_INFO_ITEMS = 'wnt2.infoItems'
PROPERTY_SESSION_COOKIE = 'wnt2.cookie'
ADDON = xbmcaddon.Addon()
# Show catalog: whether to show the catalog categories or to go straight to the "ALL" section with all items visible.
ADDON_SHOW_CATALOG = ADDON.getSetting('showCatalog') == 'true'
# Use Latest Releases date: whether to sort the Latest Releases items by their date, or with a catalog.
ADDON_LATEST_DATE = ADDON.getSetting('useLatestDate') == 'true'
# Use Latest Releases thumbs: whether to show a little thumbnail available for the Latest Releases items only.
ADDON_LATEST_THUMBS = ADDON.getSetting('showLatestThumbs') == 'true'
# Use poster images for each catalog folder. Makes for a better experience on custom Kodi skins.
ADDON_CATALOG_THUMBS = ADDON.getSetting('showCatalogThumbs') == 'true'
ADDON_ICON = ADDON.getAddonInfo('icon')
ADDON_ICON_DICT = {'icon': ADDON_ICON, 'thumb': ADDON_ICON, 'poster': ADDON_ICON}
ADDON_TRAKT_ICON = 'special://home/addons/plugin.video.watchnixtoons2/resources/traktIcon.png'
# To let the source website know it's this plugin. Also used inside "makeLatestCatalog()" and "actionResolve()".
WNT2_USER_AGENT = 'Mozilla/5.0 (compatible; WatchNixtoons2/0.4.1; ' \
'+https://github.com/doko-desuka/plugin.video.watchnixtoons2)'
MEDIA_HEADERS = None # Initialized in 'actionResolve()'.
# Url paths: paths to parts of the website, to be added to the BASEURL url.
# Also used to tell what kind of catalog is loaded in memory.
# In case they change in the future it'll be easier to modify in here.
URL_PATHS = {
'latest': 'latest', # No path used, 'makeLatestCatalog()' uses the homepage of the mobile website.
    'popular': 'popular', # No path used, 'makePopularCatalog()' uses the homepage of the desktop website.
'dubbed': '/dubbed-anime-list',
'cartoons': '/cartoon-list',
'subbed': '/subbed-anime-list',
'movies': '/movie-list',
'latestmovies': '/anime/movies',
'ova': '/ova-list',
'search': '/search',
'genre': '/search-by-genre'
}
def actionMenu(params):
def _menuItem(title, data, color):
item = xbmcgui.ListItem('[B][COLOR ' + color + ']' + title + '[/COLOR][/B]', label2 = title)
item.setArt(ADDON_ICON_DICT)
item.setInfo('video', {'title': title, 'plot': title})
return (buildURL(data), item, True)
xbmcplugin.addDirectoryItems(
PLUGIN_ID,
(
_menuItem('Latest Releases', {'action': 'actionCatalogMenu', 'path': URL_PATHS['latest']}, 'mediumaquamarine'),
_menuItem( # Make the Latest Movies menu go straight to the item list, no catalog.
'Latest Movies', {'action': 'actionLatestMoviesMenu', 'path': URL_PATHS['latestmovies']}, 'mediumaquamarine'
),
_menuItem('Popular & Ongoing Series', {'action': 'actionCatalogMenu', 'path': URL_PATHS['popular']}, 'mediumaquamarine'),
_menuItem('Dubbed Anime', {'action': 'actionCatalogMenu', 'path': URL_PATHS['dubbed']}, 'lightgreen'),
_menuItem('Cartoons', {'action': 'actionCatalogMenu', 'path': URL_PATHS['cartoons']}, 'lightgreen'),
_menuItem('Subbed Anime', {'action': 'actionCatalogMenu', 'path': URL_PATHS['subbed']}, 'lightgreen'),
_menuItem('Movies', {'action': 'actionCatalogMenu', 'path': URL_PATHS['movies']}, 'lightgreen'),
_menuItem('OVA Series', {'action': 'actionCatalogMenu', 'path': URL_PATHS['ova']}, 'lightgreen'),
_menuItem('Search', {'action': 'actionSearchMenu', 'path': 'search'}, 'lavender'), # Non-web path.
            _menuItem('Settings', {'action': 'actionShowSettings', 'path': 'settings'}, 'lavender') # Non-web path.
)
)
xbmcplugin.endOfDirectory(PLUGIN_ID)
def actionCatalogMenu(params):
xbmcplugin.setContent(PLUGIN_ID, 'tvshows')
catalog = getCatalogProperty(params)
if ADDON_SHOW_CATALOG:
def _catalogMenuItemsMake():
items = [ ]
if ADDON_CATALOG_THUMBS:
# The catalog folders will each get a letter image, taken from the web (this way
# these images don't have to be distributed w/ the add-on, if they're not needed).
# After they're downloaded, the images exist in Kodi's image cache folders.
THUMBS_BASEURL = 'https://doko-desuka.github.io/128h/'
artDict = {'thumb': None}
miscItem = None
for sectionName in sorted(catalog.iterkeys()):
if catalog[sectionName]:
item = xbmcgui.ListItem(sectionName)
# Correct the address for the '#' (miscellaneous, non-letter) category.
artDict['thumb'] = THUMBS_BASEURL + ('0' if sectionName == '#' else sectionName) + '.png'
item.setArt(artDict)
item.setInfo('video', {'plot': sectionName})
items.append(
(
buildURL({'action': 'actionCatalogSection', 'path': params['path'], 'section': sectionName}),
item,
True
)
)
else:
items = [
(
buildURL({'action': 'actionCatalogSection', 'path': params['path'], 'section': sectionName}),
xbmcgui.ListItem(sectionName),
True
)
for sectionName in sorted(catalog.iterkeys()) if len(catalog[sectionName])
]
# See if an "All" folder is necessary (when there's more than one folder in the catalog).
if len(items) > 1:
sectionAll = (
buildURL({'action': 'actionCatalogSection', 'path': params['path'], 'section': 'ALL'}),
xbmcgui.ListItem('All'),
True
)
if ADDON_CATALOG_THUMBS:
artDict['thumb'] = THUMBS_BASEURL + 'ALL.png'
sectionAll[1].setArt(artDict)
sectionAll[1].setInfo('video', {'plot': 'All'})
return [sectionAll] + items
else:
return items
items = _catalogMenuItemsMake()
if items:
if len(items) > 1:
xbmcplugin.addDirectoryItems(PLUGIN_ID, items)
else:
                # Convenience: when a search leads to only one result, show it directly without the catalog screen.
params['section'] = 'ALL'
actionCatalogSection(params)
return
else:
xbmcplugin.addDirectoryItem(PLUGIN_ID, '', xbmcgui.ListItem('(No Results)'), isFolder=False)
xbmcplugin.endOfDirectory(PLUGIN_ID)
setViewMode()
else:
params['section'] = 'ALL'
actionCatalogSection(params)
def actionCatalogSection(params):
catalog = getCatalogProperty(params)
path = params['path']
# Set up a boolean indicating if the catalog items are already playable, instead of being folders
# with more items inside.
# This is true for the OVA, movies, latest-episodes, movie-search and episode-search catalogs.
# Items in these catalogs link to the video player pages already.
isSpecial = (
path in {URL_PATHS['ova'], URL_PATHS['movies'], URL_PATHS['latest']}
or params.get('searchType', 'series') != 'series' # not series = movies or episodes search
)
if isSpecial:
action = 'actionResolve'
isFolder = False
else:
action = 'actionEpisodesMenu'
isFolder = True
thumb = params.get('thumb', ADDON_ICON)
if path != URL_PATHS['latest'] or not ADDON_LATEST_THUMBS:
artDict = {'icon': thumb, 'thumb': thumb, 'poster': thumb} if thumb else None
else:
artDict = {'icon': thumb, 'thumb': 'DefaultVideo.png', 'poster': 'DefaultVideo.png'} if thumb else None
# Persistent property with item metadata, used with the "Show Information" context menu.
infoItems = getWindowProperty(PROPERTY_INFO_ITEMS) or { }
if 'query' not in params and ADDON.getSetting('cleanupEpisodes') == 'true':
listItemFunc = makeListItemClean
else:
listItemFunc = makeListItem
if params['section'] == 'ALL':
sectionItems = chain.from_iterable(catalog[sectionName] for sectionName in sorted(catalog))
else:
sectionItems = catalog[params['section']]
def _sectionItemsGen():
if ADDON_LATEST_THUMBS and path == URL_PATHS['latest']:
# Special-case for the 'Latest Releases' catalog, which has some thumbnails available.
# Each 'entry' is (URL, htmlTitle, thumb).
NO_THUMB = '-120-72.jpg' # As seen on 2019-04-15.
for entry in sectionItems:
entryURL = entry[0]
entryArt = (
                    artDict if entry[2].startswith(NO_THUMB) else {'icon': ADDON_ICON, 'thumb': entry[2], 'poster': entry[2]}
)
# If there's metadata for this entry (requested by the user with "Show Information"), use it.
if entryURL in infoItems:
itemPlot, itemThumb = infoItems[entryURL]
yield (
buildURL({'action': action, 'url': entryURL}),
listItemFunc(entry[1], entryURL, entryArt, itemPlot, isFolder, isSpecial, None),
isFolder
)
else:
yield (
buildURL({'action': action, 'url': entryURL}),
listItemFunc(entry[1], entryURL, entryArt, '', isFolder, isSpecial, params),
isFolder
)
else:
# Normal item listing, each 'entry' is (URL, htmlTitle).
for entry in sectionItems:
entryURL = entry[0]
if entryURL in infoItems:
itemPlot, itemThumb = infoItems[entryURL]
entryArt = {'icon': ADDON_ICON, 'thumb': itemThumb, 'poster': itemThumb}
yield (
buildURL({'action': action, 'url': entryURL}),
listItemFunc(entry[1], entryURL, entryArt, itemPlot, isFolder, isSpecial, None),
isFolder
)
else:
yield (
buildURL({'action': action, 'url': entryURL}),
listItemFunc(entry[1], entryURL, artDict, '', isFolder, isSpecial, params),
isFolder
)
xbmcplugin.addDirectoryItems(PLUGIN_ID, tuple(_sectionItemsGen()))
xbmcplugin.endOfDirectory(PLUGIN_ID)
setViewMode() # Set the skin layout mode, if the option is enabled.
def actionEpisodesMenu(params):
xbmcplugin.setContent(PLUGIN_ID, 'episodes')
# Memory-cache the last episode list, to help when the user goes back and forth while watching
# multiple episodes of the same show. This way only one web request is needed for the same show.
lastListURL = getRawWindowProperty(PROPERTY_EPISODE_LIST_URL)
if lastListURL and lastListURL == params['url']:
listData = getWindowProperty(PROPERTY_EPISODE_LIST_DATA)
else:
# New domain safety replace, in case the user is coming in from an old Kodi favorite item.
url = params['url'].replace('watchcartoononline.io', 'thewatchcartoononline.tv', 1)
r = requestHelper(url if url.startswith('http') else BASEURL + url)
html = r.text
plot, thumb = getPageMetadata(html)
dataStartIndex = html.find('"sidebar_right3"')
if dataStartIndex == -1:
raise Exception('Episode list scrape fail: ' + url)
# Episode list data: a tuple with the thumb, plot and an inner tuple of per-episode data.
listData = (
thumb,
plot,
tuple(
match.groups()
for match in re.finditer(
'''<a href="([^"]+).*?>([^<]+)''', html[dataStartIndex : html.find('"sidebar-all"')]
)
)
)
setRawWindowProperty(PROPERTY_EPISODE_LIST_URL, params['url'])
setWindowProperty(PROPERTY_EPISODE_LIST_DATA, listData)
def _episodeItemsGen():
playlist = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
playlist.clear()
showURL = params['url']
thumb = listData[0]
artDict = {'icon': thumb, 'thumb': thumb, 'poster': thumb} if thumb else None
plot = listData[1]
listItemFunc = makeListItemClean if ADDON.getSetting('cleanupEpisodes') == 'true' else makeListItem
itemParams = {'action': 'actionResolve', 'url': None}
listIter = iter(listData[2]) if ADDON.getSetting('reverseEpisodes') == 'true' else reversed(listData[2])
for URL, title in listIter:
item = listItemFunc(title, URL, artDict, plot, isFolder=False, isSpecial=False, oldParams=None)
itemParams['url'] = URL
itemURL = buildURL(itemParams)
playlist.add(itemURL, item)
yield (itemURL, item, False)
xbmcplugin.addDirectoryItems(PLUGIN_ID, tuple(_episodeItemsGen()))
xbmcplugin.endOfDirectory(PLUGIN_ID)
def actionLatestMoviesMenu(params):
# Returns a list of links from a hidden "/anime/movies" area.
# Since this page is very large (130 KB), we memory cache it after it's been requested.
html = getRawWindowProperty(PROPERTY_LATEST_MOVIES)
if not html:
r = requestHelper(BASEURL + params['path'])
html = r.text
setRawWindowProperty(PROPERTY_LATEST_MOVIES, html)
# Similar scraping logic to 'actionEpisodesMenu()'.
dataStartIndex = html.find('"sidebar_right3"')
if dataStartIndex == -1:
        raise Exception('Latest movies scrape fail: ' + params['path'])
# Persistent property with item metadata.
infoItems = getWindowProperty(PROPERTY_INFO_ITEMS) or { }
def _movieItemsGen():
artDict = {'icon': ADDON_ICON, 'thumb': ADDON_ICON, 'poster': ADDON_ICON}
reIter = re.finditer(
'''<a href="([^"]+).*?>([^<]+)''', html[dataStartIndex : html.find('"sidebar-all"')]
)
        # The page has roughly 6000 items going back to 2010, so we limit output to the latest 200.
for x in range(200):
entryURL, entryTitle = next(reIter).groups()
if entryURL in infoItems:
entryPlot, entryThumb = infoItems[entryURL]
yield (
buildURL({'action': 'actionResolve', 'url': entryURL}),
makeListItem(
unescapeHTMLText(entryTitle),
entryURL,
{'icon': ADDON_ICON, 'thumb': entryThumb, 'poster': entryThumb},
entryPlot,
isFolder = False,
isSpecial = True,
oldParams = None
),
False
)
else:
yield (
buildURL({'action': 'actionResolve', 'url': entryURL}),
makeListItem(
unescapeHTMLText(entryTitle),
entryURL,
artDict,
'',
isFolder = False,
isSpecial = True,
oldParams = params
),
False
)
xbmcplugin.addDirectoryItems(PLUGIN_ID, tuple(_movieItemsGen()))
xbmcplugin.endOfDirectory(PLUGIN_ID)
setViewMode()
# A sub menu, lists search options.
def actionSearchMenu(params):
def _modalKeyboard(heading):
kb = xbmc.Keyboard('', heading)
kb.doModal()
return kb.getText() if kb.isConfirmed() else ''
if 'searchType' in params:
# Support for the 'actionShowInfo()' function reloading this route, sending it an already searched query.
# This also supports external query calls, like from OpenMeta.
if 'query' in params:
query = params['query']
else:
query = _modalKeyboard(params.get('searchTitle', 'Search'))
if query:
historyTypeIDs = {'series':'0', 'movies':'1', 'episodes':'2'}
previousHistory = ADDON.getSetting('searchHistory')
if previousHistory:
                # Limit search history to 40 items, trimming the oldest entry before adding the new one.
                if previousHistory.count('\n') >= 39:
                    previousHistory = previousHistory[:previousHistory.rfind('\n')] # Forget the oldest search result.
ADDON.setSetting('searchHistory', historyTypeIDs[params['searchType']] + query + '\n' + previousHistory)
else:
ADDON.setSetting('searchHistory', historyTypeIDs[params['searchType']] + query)
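            # Hypothetical example of the stored format: searching series "bleach" and then
            # movies "akira" leaves the setting as '1akira\n0bleach' (newest first, each
            # entry prefixed by its search-type digit).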
params['query'] = query
params['section'] = 'ALL' # Force an uncategorized display (results are usually few).
actionCatalogSection(params) # Send the search type and query for the catalog functions to use.
return
xbmcplugin.addDirectoryItems(
PLUGIN_ID,
(
(
buildURL({
'action': 'actionSearchMenu',
'path': URL_PATHS['search'], # A special, non-web path used by 'getCatalogProperty()'.
'searchType': 'series',
'searchTitle': 'Search Cartoon/Anime Name'
}),
xbmcgui.ListItem('[COLOR lavender][B]Search Cartoon/Anime Name[/B][/COLOR]'),
True
),
(
buildURL({
'action': 'actionSearchMenu',
'path': URL_PATHS['search'],
'searchType': 'movies',
'searchTitle': 'Search Movie Name'
}),
xbmcgui.ListItem('[COLOR lavender][B]Search Movie Name[/B][/COLOR]'),
True
),
(
buildURL({
'action': 'actionSearchMenu',
'path': URL_PATHS['search'],
'searchType': 'episodes',
'searchTitle': 'Search Episode Name'
}),
xbmcgui.ListItem('[COLOR lavender][B]Search Episode Name[/B][/COLOR]'),
True
),
(
buildURL({'action': 'actionGenresMenu', 'path': URL_PATHS['genre']}),
xbmcgui.ListItem('[COLOR lavender][B]Search by Genre[/B][/COLOR]'),
True
),
(
buildURL({'action': 'actionTraktMenu', 'path': 'trakt'}),
xbmcgui.ListItem('[COLOR lavender][B]Search by Trakt List[/B][/COLOR]'),
True
),
(
buildURL({'action': 'actionSearchHistory', 'path': 'searchHistory'}),
xbmcgui.ListItem('[COLOR lavender][B]Search History...[/B][/COLOR]'),
True
)
)
)
xbmcplugin.endOfDirectory(PLUGIN_ID)
# A sub menu, lists all previous searches along with their categories.
def actionSearchHistory(params):
history = ADDON.getSetting('searchHistory').split('\n') # Non-UI setting, it's just a big string.
# A blank string split creates a list with a blank string inside, so test if the first item is valid.
if history[0]:
# Use list indexes to map to 'searchType' and a label prefix.
historyTypeNames = ['series', 'movies', 'episodes']
historyPrefixes = ['(Cartoon/Anime)', '(Movie)', '(Episode)']
searchPath = URL_PATHS['search']
historyItems = tuple(
(
buildURL({
'query': itemQuery,
'searchType': historyTypeNames[itemType],
'path': searchPath,
'section': 'ALL',
'action': 'actionCatalogSection'
}),
xbmcgui.ListItem('[B]%s[/B] "%s"' % (historyPrefixes[itemType], itemQuery)),
True
)
for itemType, itemQuery in (
(int(itemString[0]), itemString[1:]) for itemString in history
)
)
clearHistoryItem = (
buildURL({'action': 'actionSearchHistoryClear'}), xbmcgui.ListItem('[B]Clear History...[/B]'), False
)
xbmcplugin.addDirectoryItems(PLUGIN_ID, (clearHistoryItem,) + historyItems)
else:
xbmcplugin.addDirectoryItem(PLUGIN_ID, '', xbmcgui.ListItem('(No History)'), isFolder=False)
xbmcplugin.endOfDirectory(PLUGIN_ID)
def actionSearchHistoryClear(params):
dialog = xbmcgui.Dialog()
if dialog.yesno('Clear Search History', 'Are you sure?'):
ADDON.setSetting('searchHistory', '')
dialog.notification('WatchNixtoons2', 'Search history cleared', xbmcgui.NOTIFICATION_INFO, 3000, False)
# Show the search menu afterwards.
xbmc.executebuiltin('Container.Update(' + PLUGIN_URL + '?action=actionSearchMenu,replace)')
# A sub menu, lists the genre categories in the genre search.
def actionGenresMenu(params):
r = requestHelper(BASEURL + URL_PATHS['genre'])
html = r.text
dataStartIndex = html.find(r'ddmcc">')
if dataStartIndex == -1:
raise Exception('Genres list scrape fail')
xbmcplugin.addDirectoryItems(
PLUGIN_ID,
tuple(
(
buildURL(
{
'action': 'actionCatalogMenu',
'path': '/search-by-genre/' + match.group(1).rsplit('/', 1)[1],
'searchType': 'genres'
}
),
xbmcgui.ListItem(match.group(2)),
True
)
for match in re.finditer('''<a.*?"([^"]+).*?>(.*?)</''', html[dataStartIndex : html.find(r'</div></div>')])
)
)
xbmcplugin.endOfDirectory(PLUGIN_ID)
def actionTraktMenu(params):
instance = SimpleTrakt.getInstance()
if instance.ensureAuthorized(ADDON):
def _traktMenuItemsGen():
traktIconDict = {'icon': ADDON_TRAKT_ICON, 'thumb': ADDON_TRAKT_ICON, 'poster': ADDON_TRAKT_ICON}
for listName, listURL, listDescription in instance.getUserLists(ADDON):
item = xbmcgui.ListItem(listName)
item.setArt(traktIconDict)
item.setInfo('video', {'title': listName, 'plot': listDescription})
yield (
buildURL({'action': 'actionTraktList', 'listURL': listURL}),
item,
True
)
xbmcplugin.addDirectoryItems(PLUGIN_ID, tuple(_traktMenuItemsGen()))
        xbmcplugin.endOfDirectory(PLUGIN_ID) # Only finish the directory if the user is authorized.
def actionTraktList(params):
instance = SimpleTrakt.getInstance()
if instance.ensureAuthorized(ADDON):
def _traktListItemsGen():
traktIconDict = {'icon': ADDON_TRAKT_ICON, 'thumb': ADDON_TRAKT_ICON, 'poster': ADDON_TRAKT_ICON}
for itemName, overview, searchType, query in sorted(instance.getListItems(params['listURL'], ADDON)):
item = xbmcgui.ListItem(itemName)
item.setInfo('video', {'title': itemName, 'plot': overview})
item.setArt(traktIconDict)
yield (
# Trakt items will lead straight to a show name search.
buildURL(
{
'action': 'actionCatalogMenu',
'path': URL_PATHS['search'],
'query': query,
'searchType': searchType,
}
),
item,
True
)
xbmcplugin.addDirectoryItems(PLUGIN_ID, tuple(_traktListItemsGen()))
xbmcplugin.endOfDirectory(PLUGIN_ID)
def actionTraktAbout(params):
xbmcgui.Dialog().ok(
'WatchNixtoons2',
'To search for items in your Trakt lists in WNT2, go to [B]Search > Search by Trakt List[/B] and pair your ' \
'account. Searching for an item this way does a name search, same as if you went and searched for that ' \
'name manually.'
)
def actionClearTrakt(params):
if 'watchnixtoons2' in xbmc.getInfoLabel('Container.PluginName'):
xbmc.executebuiltin('Dialog.Close(all)')
# Kinda buggy behavior.
# Need to wait a bit and recreate the xbmcaddon.Addon() reference, otherwise the settings
# don't seem to be changed.
# See https://forum.kodi.tv/showthread.php?tid=290353&pid=2425543#pid2425543
global ADDON
xbmc.sleep(500)
if SimpleTrakt.clearTokens(ADDON):
xbmcgui.Dialog().notification('WatchNixtoons2', 'Trakt tokens cleared', xbmcgui.NOTIFICATION_INFO, 3500, False)
else:
xbmcgui.Dialog().notification(
'WatchNixtoons2', 'Trakt tokens already cleared', xbmcgui.NOTIFICATION_INFO, 3500, False
)
ADDON = xbmcaddon.Addon()
def actionRestoreDatabase(params):
if not xbmcgui.Dialog().yesno(
'WatchNixtoons2',
'This will update the Kodi database to remember any WatchNixtoons2 episodes that were already watched, ' \
'but forgotten after an add-on update.\nProceed?',
nolabel = 'Cancel',
yeslabel = 'Ok'
):
return
# Action called from the settings dialog.
# This will update all the WatchNixtoons2 'strFilename' columns of table 'files' of Kodi's MyVideos###.db
# with the new BASEURL used by the add-on so that episodes are still considered as watched (playcount >= 1).
import xbmcvfs
try:
import sqlite3
except:
xbmcgui.Dialog().notification(
'WatchNixtoons2', 'sqlite3 not found', xbmcgui.NOTIFICATION_WARNING, 3000, True
)
return
# Find the 'MyVideos###.db' file.
dirs, files = xbmcvfs.listdir('special://database')
for file in files:
if 'MyVideos' in file and file.endswith('.db'):
path = xbmc.translatePath('special://database/' + file)
break
else:
xbmcgui.Dialog().notification(
'WatchNixtoons2', 'MyVideos database file not found', xbmcgui.NOTIFICATION_WARNING, 3000, True
)
return
# Update the database.
OLD_DOMAINS = getOldDomains()
NEW_DOMAIN = BASEURL.replace('https://', '', 1) # Make sure to strip the scheme from the current address.
replaceDomainFunc = lambda original, oldDomain: original.replace(oldDomain, NEW_DOMAIN)
totalUpdates = 0
try:
connection = sqlite3.connect(path)
except Exception as e:
xbmcDebug(e)
xbmcgui.Dialog().notification(
'WatchNixtoons2', 'Unable to connect to MyVideos database', xbmcgui.NOTIFICATION_WARNING, 3000, True
)
return
getCursor = connection.cursor()
setCursor = connection.cursor()
pattern = 'plugin://plugin.video.watchnixtoons2/%actionResolve%'
for idFile, strFilename in getCursor.execute(
"SELECT idFile,strFilename FROM files WHERE strFilename LIKE '%s'" % pattern
):
if any(oldDomain in strFilename for oldDomain in OLD_DOMAINS):
strFilename = reduce(replaceDomainFunc, OLD_DOMAINS, strFilename)
setCursor.execute("UPDATE files SET strFilename=? WHERE idFile=?", (strFilename, idFile))
totalUpdates += 1
try:
if totalUpdates:
connection.commit() # Only commit if needed.
connection.close()
except:
xbmcgui.Dialog().notification(
'WatchNixtoons2',
'Unable to update the database (file permission error?)',
xbmcgui.NOTIFICATION_WARNING,
3000,
True
)
return
    # Show a final dialog with the result.
if totalUpdates:
xbmcgui.Dialog().ok('WatchNixtoons2', 'Database update complete (%i items updated).' % totalUpdates)
else:
xbmcgui.Dialog().ok('WatchNixtoons2', 'Finished. No updates needed (0 items updated).')
def actionUpdateFavourites(params):
if not xbmcgui.Dialog().yesno(
'WatchNixtoons2',
'This will update any of your Kodi Favourites created with older versions of WatchNixtoons2 so they can point ' \
'to the latest web address that the add-on uses.\nProceed?',
nolabel = 'Cancel',
yeslabel = 'Ok'
):
return
# Action called from the settings dialog.
# This will update all the Kodi favourites that use WatchNixtoons2 so that they use the new BASEURL.
import xbmcvfs
FAVOURITES_PATH = 'special://userdata/favourites.xml'
file = xbmcvfs.File(FAVOURITES_PATH)
favoritesText = file.read()
file.close()
originalText = favoritesText[:] # Get a backup copy of the content.
OLD_DOMAINS = getOldDomains()
NEW_DOMAIN = BASEURL.replace('https://', '', 1) # Make sure to strip the scheme.
replaceDomainFunc = lambda original, oldDomain: original.replace(oldDomain, NEW_DOMAIN)
if any(oldDomain in originalText for oldDomain in OLD_DOMAINS):
        favoritesText = reduce(replaceDomainFunc, OLD_DOMAINS, favoritesText)
try:
file = xbmcvfs.File(FAVOURITES_PATH, 'w')
file.write(favoritesText)
file.close()
except:
try:
# Try again, in case this was some weird encoding error and not a write-permission error.
file = xbmcvfs.File(FAVOURITES_PATH, 'w')
file.write(originalText)
file.close()
detail = ' (original was restored)'
except:
detail = ''
xbmcgui.Dialog().notification(
'WatchNixtoons2', 'Error while writing to file' + detail, xbmcgui.NOTIFICATION_WARNING, 3000, True
)
return
if 'watchnixtoons2' in xbmc.getInfoLabel('Container.PluginName'):
xbmc.executebuiltin('Dialog.Close(all)')
xbmcgui.Dialog().ok(
            'WatchNixtoons2', 'One or more items updated successfully. Kodi will now reload the Favourites file...'
)
xbmc.executebuiltin('LoadProfile(%s)' % xbmc.getInfoLabel('System.ProfileName')) # Reloads 'favourites.xml'.
else:
xbmcgui.Dialog().ok('WatchNixtoons2', 'Finished. No old favorites found.')
def actionShowSettings(params):
    # Modal dialog, so the program won't continue from this point until the user closes or confirms it.
ADDON.openSettings()
# So right after it is a good time to update any settings globals.
global ADDON_SHOW_CATALOG
ADDON_SHOW_CATALOG = ADDON.getSetting('showCatalog') == 'true'
global ADDON_LATEST_DATE
# Set the catalog to be reloaded in case the user changed the "Order 'Latest Releases' By Date" setting.
newLatestDate = ADDON.getSetting('useLatestDate') == 'true'
if ADDON_LATEST_DATE != newLatestDate and URL_PATHS['latest'] in getRawWindowProperty(PROPERTY_CATALOG_PATH):
setRawWindowProperty(PROPERTY_CATALOG_PATH, '')
ADDON_LATEST_DATE = newLatestDate
global ADDON_LATEST_THUMBS
ADDON_LATEST_THUMBS = ADDON.getSetting('showLatestThumbs') == 'true'
def getPageMetadata(html):
# If we're on an episode or (old) movie page, see if there's a parent page with the actual metadata.
stringStartIndex = html.find('"header-tag"')
if stringStartIndex != -1:
parentURL = re.search('href="([^"]+)', html[stringStartIndex:]).group(1)
if '/anime/movies' not in parentURL:
r = requestHelper(parentURL if parentURL.startswith('http') else BASEURL + parentURL)
if r.ok:
html = r.text
# Thumbnail scraping.
thumb = ''
stringStartIndex = html.find('og:image" content="')
if stringStartIndex != -1:
thumbPath = html[stringStartIndex+19 : html.find('"', stringStartIndex+19)] # 19 = len('og:image" content="')
if thumbPath:
if thumbPath.startswith('http'):
thumb = thumbPath + getThumbnailHeaders()
elif thumbPath.startswith('/'):
thumb = BASEURL + thumbPath + getThumbnailHeaders()
# (Show) plot scraping.
plot = ''
stringStartIndex = html.find('Info:')
if stringStartIndex != -1:
        match = re.search(r'</h3>\s*<p>(.*?)</p>', html[stringStartIndex:], re.DOTALL)
plot = unescapeHTMLText(match.group(1).strip()) if match else ''
return plot, thumb
def actionShowInfo(params):
xbmcgui.Dialog().notification('WatchNixtoons2', 'Requesting info...', ADDON_ICON, 2000, False)
# Get the desktop page for the item, whatever it is.
url = params['url'].replace('/m.', '/www.', 1) # Make sure the URL points to the desktop site.
r = requestHelper(url if url.startswith('http') else BASEURL + url)
html = r.text
plot, thumb = getPageMetadata(html)
# Use a persistent memory property holding a dictionary, and refresh the directory listing.
if plot or thumb:
infoItems = getWindowProperty(PROPERTY_INFO_ITEMS) or { }
infoItems[url] = (plot, (thumb or 'DefaultVideo.png'))
setWindowProperty(PROPERTY_INFO_ITEMS, infoItems)
        xbmc.executebuiltin('Container.Update(%s,replace)' % (PLUGIN_URL + '?' + params['oldParams']))
else:
xbmcgui.Dialog().notification('WatchNixtoons2', 'No info found', ADDON_ICON, 1500, False)
def unescapeHTMLText(text):
text = text.encode('utf-8') if isinstance(text, unicode) else unicode(text, errors='ignore').encode('utf-8')
# Unescape HTML entities.
if r'&#' in text:
# Strings found by regex-searching on all lists in the source website. It's very likely to only be these.
return text.replace(r'‘', '‘').replace(r'”', '”').replace(r'–', '–').replace(r'&', '&')\
.replace(r'’', '’').replace(r'“', '“').replace(r'…', '…').replace(r' ', ' ')\
.replace(r'&', '&')
else:
return text.replace(r'&', '&')
def getTitleInfo(unescapedTitle):
# We need to interpret the full title of each episode's link's string
# for information like episode number, season and show title.
season = None
episode = None
multiPart = None
showTitle = unescapedTitle
episodeTitle = ''
seasonIndex = unescapedTitle.find('Season ') # 7 characters long.
if seasonIndex != -1:
season = unescapedTitle[seasonIndex+7 : unescapedTitle.find(' ', seasonIndex+7)]
if not season.isdigit():
# Handle inconsistently formatted episode title, with possibly ordinal season before or after
# the word "Season" (case unknown, inconsistent).
if season == 'Episode':
# Find the word to the left of "Season ", separated by spaces (spaces not included in the result).
season = unescapedTitle[unescapedTitle.rfind(' ', 0, seasonIndex-1) + 1 : seasonIndex-1]
showTitle = unescapedTitle[:seasonIndex+7].strip(' -–:') # Include the "nth Season" term in the title.
else:
showTitle = unescapedTitle[:seasonIndex].strip(' -–:')
season = {'second': '2', 'third': '3', 'fourth': '4', 'fifth': '5'}.get(season.lower(), '')
else:
showTitle = unescapedTitle[:seasonIndex].strip(' -–:')
episodeIndex = unescapedTitle.find(' Episode ') # 9 characters long.
if episodeIndex != -1:
spaceIndex = unescapedTitle.find(' ', episodeIndex+9)
if spaceIndex > episodeIndex:
episodeSplit = unescapedTitle[episodeIndex+9 : spaceIndex].split('-') # For multipart episodes, like "42-43".
episode = filter(str.isdigit, episodeSplit[0])
multiPart = filter(str.isdigit, episodeSplit[1]) if len(episodeSplit) > 1 else None
# Get the episode title string (stripped of spaces, hyphens and en-dashes).
englishIndex = unescapedTitle.rfind(' English', spaceIndex)
if englishIndex != -1:
episodeTitle = unescapedTitle[spaceIndex+1 : englishIndex].strip(' -–:')
else:
episodeTitle = unescapedTitle[spaceIndex+1:].strip(' -–:')
    # Safeguard for when season 1 is occasionally omitted in the title.
if not season:
season = '1'
if episode:
return (showTitle[:episodeIndex].strip(' -'), season, episode, multiPart, episodeTitle.strip(' /'))
else:
englishIndex = unescapedTitle.rfind(' English')
if englishIndex != -1:
return (unescapedTitle[:englishIndex].strip(' -'), None, None, None, '')
else:
return (unescapedTitle.strip(' -'), None, None, None, '')
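# Illustrative sketches of the parsing above (hypothetical titles, not taken from the site):
#   getTitleInfo('My Show Season 2 Episode 05 The Big Trip English Subbed')
#       -> ('My Show', '2', '05', None, 'The Big Trip')
#   getTitleInfo('My Show Episode 42-43 Finale English Dubbed')   # multipart episode
#       -> ('My Show', '1', '42', '43', 'Finale')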
def makeListItem(title, url, artDict, plot, isFolder, isSpecial, oldParams):
unescapedTitle = unescapeHTMLText(title)
item = xbmcgui.ListItem(unescapedTitle)
isPlayable = False
if not (isFolder or isSpecial):
title, season, episode, multiPart, episodeTitle = getTitleInfo(unescapedTitle)
# Playable content.
isPlayable = True
itemInfo = {
'mediatype': 'episode' if episode else 'tvshow', 'tvshowtitle': title, 'title': episodeTitle, 'plot': plot
}
if episode and episode.isdigit():
itemInfo['season'] = int(season) if season.isdigit() else -1
itemInfo['episode'] = int(episode)
item.setInfo('video', itemInfo)
elif isSpecial:
isPlayable = True
item.setInfo('video', {'mediatype': 'movie', 'title': unescapedTitle, 'plot': plot})
else:
item.setInfo('video', {'mediatype': 'tvshow', 'title': unescapedTitle, 'plot': plot})
if artDict:
item.setArt(artDict)
# Add the context menu items, if necessary.
contextMenuList = None
if oldParams:
contextMenuList = [
(
'Nixtoons Information',
'RunPlugin('+PLUGIN_URL+'?action=actionShowInfo&url='+quote_plus(url)+'&oldParams='+quote_plus(urlencode(oldParams))+')'
)
]
if isPlayable:
item.setProperty('IsPlayable', 'true') # Allows the checkmark to be placed on watched episodes.
playChaptersItem = (
'Play Chapters',
'PlayMedia('+PLUGIN_URL+'?action=actionResolve&url='+quote_plus(url)+'&playChapters=1)'
)
if contextMenuList:
contextMenuList.append(playChaptersItem)
else:
contextMenuList = [playChaptersItem]
if contextMenuList:
item.addContextMenuItems(contextMenuList)
return item
# Variant of the 'makeListItem()' function that tries to format the item label using the season and episode.
def makeListItemClean(title, url, artDict, plot, isFolder, isSpecial, oldParams):
unescapedTitle = unescapeHTMLText(title)
isPlayable = False
if isFolder or isSpecial:
item = xbmcgui.ListItem(unescapedTitle)
if isSpecial:
isPlayable = True
item.setInfo('video', {'mediatype': 'video', 'title': unescapedTitle})
else:
title, season, episode, multiPart, episodeTitle = getTitleInfo(unescapedTitle)
if episode and episode.isdigit():
# The clean episode label will have this format: "SxEE Episode Name", with S and EE standing for digits.
item = xbmcgui.ListItem(
'[B]' + season + 'x' + episode.zfill(2) + ('-' + multiPart if multiPart else '') + '[/B] '
+ (episodeTitle or title)
)
itemInfo = {
'mediatype': 'episode',
'tvshowtitle': title,
'title': title,
'plot': plot,
'season': int(season) if season.isdigit() else -1,
'episode': int(episode)
}
else:
item = xbmcgui.ListItem(title)
itemInfo = {'mediatype': 'tvshow', 'tvshowtitle': title, 'title': title, 'plot': plot}
item.setInfo('video', itemInfo)
isPlayable = True
if artDict:
item.setArt(artDict)
# Add the context menu items, if necessary.
contextMenuList = None
if oldParams:
contextMenuList = [
(
'Show Information',
'RunPlugin('+PLUGIN_URL+'?action=actionShowInfo&url='+quote_plus(url)+'&oldParams='+quote_plus(urlencode(oldParams))+')'
)
]
if isPlayable:
item.setProperty('IsPlayable', 'true') # Allows the checkmark to be placed on watched episodes.
playChaptersItem = (
'Play Chapters',
'PlayMedia('+PLUGIN_URL+'?action=actionResolve&url='+quote_plus(url)+'&playChapters=1)'
)
if contextMenuList:
contextMenuList.append(playChaptersItem)
else:
contextMenuList = [playChaptersItem]
if contextMenuList:
item.addContextMenuItems(contextMenuList)
return item
'''
(1. The catalog is a dictionary of lists, used to store data between add-on states to make xbmcgui.ListItems:
{
(2. Sections, as in alphabet sections of items, A, B, C, D, E, F etc., each section holds a list of items.)
A: (
        (item, item, item, ...) (3. Items, each item is a pair of <a> properties: (a['href'], a.string).)
)
B: (...)
C: (...)
}
'''
# Manually sorts items from an iterable into an alphabetised catalog.
# Iterable contains (URL, name) pairs that might refer to a series, episode, ova or movie.
def catalogFromIterable(iterable):
catalog = {key: [ ] for key in ascii_uppercase}
miscSection = catalog['#'] = [ ]
for item in iterable:
key = item[1][0].upper()
if key in catalog:
catalog[key].append(item)
else:
miscSection.append(item)
return catalog
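# Usage sketch (hypothetical entries); items are (URL, name) pairs, keyed by the
# first letter of the name, with non-letters going into the '#' section:
#   catalog = catalogFromIterable([('/anime/bleach', 'Bleach'), ('/anime/3x3-eyes', '3x3 Eyes')])
#   catalog['B']  ->  [('/anime/bleach', 'Bleach')]
#   catalog['#']  ->  [('/anime/3x3-eyes', '3x3 Eyes')]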
def makeLatestCatalog(params):
# Returns a list of links from the "Latest 50 Releases" area, but from their mobile site as it has lots of items.
r = requestHelper(BASEURL_MOBILE) # Path unused, data is already on the homepage.
html = r.text
dataStartIndex = html.find('vList')
if dataStartIndex == -1:
raise Exception('(Mobile) Latest catalog scrape fail')
thumbHeaders = getThumbnailHeaders()
if ADDON_LATEST_DATE:
# Make the catalog dict only have a single section, "LATEST", with items listed as they are.
# This way the actionCatalogMenu() function will show this single section directly, with no alphabet categories.
return {
'LATEST': tuple(
(match.group(1), match.group(3), BASEURL_MOBILE + match.group(2) + thumbHeaders)
for match in re.finditer(
'''<a href="([^"]+).*?img src="([^"]+).*?div.*?div>(.*?)</div''', html[dataStartIndex : html.find('/ol')]
)
)
}
else:
return catalogFromIterable(
(match.group(1), match.group(3), BASEURL_MOBILE + match.group(2) + thumbHeaders)
for match in re.finditer(
'''<a href="([^"]+).*?img src="([^"]+).*?div.*?div>(.*?)</div''', html[dataStartIndex : html.find('/ol')]
)
)
def makePopularCatalog(params):
r = requestHelper(BASEURL) # We will scrape from the sidebar content on the homepage.
html = r.text
dataStartIndex = html.find('"sidebar-titles"')
if dataStartIndex == -1:
raise Exception('Popular catalog scrape fail: ' + params['path'])
return catalogFromIterable(
match.groups()
for match in re.finditer(
'''<a href="([^"]+).*?>([^<]+)''', html[dataStartIndex : html.find('</div>', dataStartIndex)]
)
)
def makeSeriesSearchCatalog(params):
r = requestHelper(
BASEURL+'/search', data={'catara': params['query'], 'konuara': 'series'}, extraHeaders={'Referer': BASEURL+'/'})
html = r.text
dataStartIndex = html.find('submit')
if dataStartIndex == -1:
raise Exception('Series search scrape fail: ' + params['query'])
return catalogFromIterable(
match.groups()
for match in re.finditer(
'''<a href="([^"]+)[^>]*>([^<]+)</a''',
html[dataStartIndex : html.find('cizgiyazisi', dataStartIndex)]
)
)
def makeMoviesSearchCatalog(params):
# Try a movie category search (same code as in 'makeGenericCatalog()').
r = requestHelper(BASEURL + URL_PATHS['movies'])
html = r.text
dataStartIndex = html.find('"ddmcc"')
if dataStartIndex == -1:
raise Exception('Movies search scrape fail: ' + params['query'])
lowerQuery = params['query'].lower()
return catalogFromIterable(
match.groups()
for match in re.finditer(
'''<a href="([^"]+).*?>([^<]+)''', html[dataStartIndex : html.find('/ul></ul', dataStartIndex)]
)
if lowerQuery in match.group(2).lower()
)
def makeEpisodesSearchCatalog(params):
r = requestHelper(
BASEURL+'/search', data={'catara': params['query'], 'konuara': 'episodes'}, extraHeaders={'Referer': BASEURL+'/'}
)
html = r.text
dataStartIndex = html.find('submit')
if dataStartIndex == -1:
raise Exception('Episode search scrape fail: ' + params['query'])
return catalogFromIterable(
match.groups()
for match in re.finditer(
'''<a href="([^"]+)[^>]*>([^<]+)</a''',
html[dataStartIndex : html.find('cizgiyazisi', dataStartIndex)],
re.DOTALL
)
)
def makeSearchCatalog(params):
searchType = params.get('searchType', 'series')
if searchType == 'series':
return makeSeriesSearchCatalog(params)
elif searchType == 'movies':
return makeMoviesSearchCatalog(params)
else:
return makeEpisodesSearchCatalog(params)
def makeGenericCatalog(params):
# The movies path is missing some items when scraped from BASEURL_MOBILE, so we use the BASEURL
# (full website) in here.
r = requestHelper(BASEURL + params['path'])
html = r.text
dataStartIndex = html.find('"ddmcc"')
if dataStartIndex == -1:
raise Exception('Generic catalog scrape fail: ' + params['path'])
return catalogFromIterable(
match.groups()
for match in re.finditer(
'''<li><a href="([^"]+).*?>([^<]+)''', html[dataStartIndex : html.find('</div>', dataStartIndex)]
)
)
# Retrieves the catalog from a persistent XBMC window property between different add-on
# directories, or recreates the catalog based on one of the catalog functions.
def getCatalogProperty(params):
path = params['path']
def _rebuildCatalog():
func = CATALOG_FUNCS.get(path, makeGenericCatalog)
catalog = func(params)
setWindowProperty(PROPERTY_CATALOG, catalog)
if 'query' in params:
# For searches, store the query and search type in the catalog path so we can identify
# this particular search attempt.
setRawWindowProperty(PROPERTY_CATALOG_PATH, path + params['query'] + params['searchType'])
else:
setRawWindowProperty(PROPERTY_CATALOG_PATH, path)
setRawWindowProperty(PROPERTY_INFO_ITEMS, '') # Clear any previous info.
return catalog
# If these properties are empty (like when coming in from a favourites menu), or if
# a different catalog (a different URL path) is stored in this property, then reload it.
currentPath = getRawWindowProperty(PROPERTY_CATALOG_PATH)
if (
# "If we're coming in from a search and the search query and type are different, or if we're not
# coming in from a search and the paths are simply different, rebuild the catalog."
('query' in params and (params['query'] not in currentPath or params['searchType'] not in currentPath))
or ('query' not in params and currentPath != path)
):
catalog = _rebuildCatalog()
else:
catalog = getWindowProperty(PROPERTY_CATALOG)
if not catalog:
catalog = _rebuildCatalog()
return catalog
def actionResolve(params):
# Needs to be the BASEURL domain to get multiple video qualities.
url = params['url']
# Sanitize the URL since on some occasions it's a path instead of full address.
url = url if url.startswith('http') else (BASEURL + (url if url.startswith('/') else '/' + url))
r = requestHelper(url.replace('watchcartoononline.io', 'thewatchcartoononline.tv', 1)) # New domain safety replace.
content = r.content
def _decodeSource(subContent):
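        # Obfuscation scheme used by the page: a JS array of base64 strings plus a numeric
        # offset; each string decodes to text whose digits, minus that offset ('spread'),
        # give one character code of the hidden <iframe> HTML.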
chars = subContent[subContent.find('[') : subContent.find(']')]
spread = int(re.search(r' - (\d+)\)\; }', subContent[subContent.find(' - '):]).group(1))
iframe = ''.join(
chr(
int(''.join(c for c in b64decode(char) if c.isdigit())) - spread
)
for char in chars.replace('"', '').split(',')
)
try:
return BASEURL + re.search(r'src="([^"]+)', iframe).group(1)
except:
return None # Probably a temporary block, or change in embedded code.
embedURL = None
# On rare cases an episode might have several "chapters", which are video players on the page.
embedURLPattern = b'onclick="myFunction'
embedURLIndex = content.find(embedURLPattern)
if 'playChapters' in params or ADDON.getSetting('chapterEpisodes') == 'true':
        # Look for a multi-chapter episode (that is, multiple embedURLPattern occurrences)
        # and extract all chapters from the page.
embedURLPatternLen = len(embedURLPattern)
currentPlayerIndex = embedURLIndex
dataIndices = [ ]
while currentPlayerIndex != -1:
dataIndices.append(currentPlayerIndex)
currentPlayerIndex = content.find(embedURLPattern, currentPlayerIndex + embedURLPatternLen)
# If more than one "embedURL" statement found, make a selection dialog and call them "chapters".
if len(dataIndices) > 1:
selectedIndex = xbmcgui.Dialog().select(
'Select Chapter', ['Chapter '+str(n) for n in xrange(1, len(dataIndices)+1)]
)
else:
selectedIndex = 0
if selectedIndex != -1:
embedURL = _decodeSource(content[dataIndices[selectedIndex]:])
else:
return # User cancelled the chapter selection.
else:
# Normal / single-chapter episode.
embedURL = _decodeSource(content[embedURLIndex:])
# User asked to play multiple chapters, but only one chapter/video player found.
if embedURL and 'playChapters' in params:
xbmcgui.Dialog().notification('WatchNixtoons2', 'Only 1 chapter found...', ADDON_ICON, 2000, False)
# Notify a failure in solving the player obfuscation.
if not embedURL:
xbmcgui.Dialog().ok('WatchNixtoons2', 'Unable to find a playable source')
return
# Request the embedded player page.
r2 = requestHelper(unescapeHTMLText(embedURL)) # Sometimes a '&' symbol is present in this URL.
html = r2.text
# Notify about temporary blocks / failures.
if 'high volume of requests' in html:
xbmcgui.Dialog().ok(
'WatchNixtoons2 Fail (Server Response)',
'"We are getting extremely high volume of requests on our video servers so that we temporarily block for free videos for free users. I apologize for the inconvenience."'
)
return
# Find the stream URLs.
if 'getvid?evid' in html:
# Query-style stream getting.
sourceURL = re.search(b'"(/inc/embed/getvidlink[^"]+)', html, re.DOTALL).group(1)
# Inline code similar to 'requestHelper()'.
# The User-Agent for this next request is somehow encoded into the media tokens, so we make sure to use
# the EXACT SAME value later, when playing the media, or else we get a HTTP 404 / 500 error.
r3 = requestHelper(
BASEURL + sourceURL,
data = None,
extraHeaders = {
'User-Agent': WNT2_USER_AGENT, 'Accept': '*/*', 'Referer': embedURL, 'X-Requested-With': 'XMLHttpRequest'
}
)
if not r3.ok:
raise Exception('Sources XMLHttpRequest request failed')
jsonData = r3.json()
# Only two qualities are ever available: 480p ("SD") and 720p ("HD").
sourceURLs = [ ]
sdToken = jsonData.get('enc', '')
hdToken = jsonData.get('hd', '')
sourceBaseURL = jsonData.get('server', '') + '/getvid?evid='
if sdToken:
sourceURLs.append(('480 (SD)', sourceBaseURL + sdToken)) # Order the items as (LABEL, URL).
if hdToken:
sourceURLs.append(('720 (HD)', sourceBaseURL + hdToken))
# Use the same backup stream method as the source: cdn domain + SD stream.
backupURL = jsonData.get('cdn', '') + '/getvid?evid=' + (sdToken or hdToken)
else:
# Alternative video player page, with plain stream links in the JWPlayer javascript.
        sourcesBlock = re.search(r'sources:\s*?\[(.*?)\]', html, re.DOTALL).group(1)
        streamPattern = re.compile(r'\{\s*?file:\s*?"(.*?)"(?:,\s*?label:\s*?"(.*?)")?')
sourceURLs = [
# Order the items as (LABEL (or empty string), URL).
(sourceMatch.group(2), sourceMatch.group(1))
for sourceMatch in streamPattern.finditer(sourcesBlock)
]
# Use the backup link in the 'onError' handler of the 'jw' player.
backupMatch = streamPattern.search(html[html.find(b'jw.onError'):])
backupURL = backupMatch.group(1) if backupMatch else ''
mediaURL = None
if len(sourceURLs) == 1: # Only one quality available.
mediaURL = sourceURLs[0][1]
elif len(sourceURLs) > 0:
        # Playback method setting: '0' asks the user, '1' auto-plays the highest quality,
        # anything else auto-plays the lowest.
playbackMethod = ADDON.getSetting('playbackMethod')
if playbackMethod == '0': # Select quality.
selectedIndex = xbmcgui.Dialog().select(
'Select Quality', [(sourceItem[0] or '?') for sourceItem in sourceURLs]
)
if selectedIndex != -1:
mediaURL = sourceURLs[selectedIndex][1]
else: # Auto-play user choice.
sortedSources = sorted(sourceURLs)
mediaURL = sortedSources[-1][1] if playbackMethod == '1' else sortedSources[0][1]
if mediaURL:
# Kodi headers for playing web streamed media.
global MEDIA_HEADERS
if not MEDIA_HEADERS:
MEDIA_HEADERS = {
'User-Agent': WNT2_USER_AGENT,
'Accept': 'video/webm,video/ogg,video/*;q=0.9,application/ogg;q=0.7,audio/*;q=0.6,*/*;q=0.5',
'Connection': 'keep-alive',
'Referer': BASEURL + '/'
}
# Try to un-redirect the chosen media URL.
# If it fails, try to un-resolve the backup URL. If not even the backup URL is working, abort playing.
mediaHead = solveMediaRedirect(mediaURL, MEDIA_HEADERS)
if not mediaHead:
mediaHead = solveMediaRedirect(backupURL, MEDIA_HEADERS)
if not mediaHead:
return xbmcplugin.setResolvedUrl(PLUGIN_ID, False, xbmcgui.ListItem())
# Need to use the exact same ListItem name & infolabels when playing or else Kodi replaces that item
# in the UI listing.
item = xbmcgui.ListItem(xbmc.getInfoLabel('ListItem.Label'))
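        # Kodi convention: HTTP headers for the stream are appended to the media URL
        # after a '|' separator, as urlencoded key=value pairs.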
item.setPath(mediaHead.url + '|' + '&'.join(key+'='+quote_plus(val) for key, val in MEDIA_HEADERS.iteritems()))
item.setMimeType(mediaHead.headers.get('Content-Type', 'video/mp4')) # Avoids Kodi's MIME request.
# When coming in from a Favourite item, there will be no metadata. Try to get at least a title.
itemTitle = xbmc.getInfoLabel('ListItem.Title')
if not itemTitle:
match = re.search(b'<h1[^>]+>([^<]+)</h1', content)
if match:
                itemTitle = match.group(1).replace(' English Subbed', '', 1).replace(' English Dubbed', '', 1)
else:
itemTitle = ''
episodeString = xbmc.getInfoLabel('ListItem.Episode')
if episodeString != '' and episodeString != '-1':
seasonInfoLabel = xbmc.getInfoLabel('ListItem.Season')
item.setInfo('video',
{
'tvshowtitle': xbmc.getInfoLabel('ListItem.TVShowTitle'),
'title': itemTitle,
'season': int(seasonInfoLabel) if seasonInfoLabel.isdigit() else -1,
'episode': int(episodeString),
'plot': xbmc.getInfoLabel('ListItem.Plot'),
'mediatype': 'episode'
}
)
else:
item.setInfo('video',
{
'title': itemTitle,
'plot': xbmc.getInfoLabel('ListItem.Plot'),
'mediatype': 'movie'
}
)
#xbmc.Player().play(listitem=item) # Alternative play method, lets you extend the Player class with your own.
xbmcplugin.setResolvedUrl(PLUGIN_ID, True, item)
else:
# Failed. No source found, or the user didn't select one from the dialog.
xbmcplugin.setResolvedUrl(PLUGIN_ID, False, xbmcgui.ListItem())
def buildURL(query):
'''
Helper function to build a Kodi xbmcgui.ListItem URL.
:param query: Dictionary of url parameters to put in the URL.
:returns: A formatted and urlencoded URL string.
'''
return (PLUGIN_URL + '?' + urlencode({k: v.encode('utf-8') if isinstance(v, unicode)
else unicode(v, errors='ignore').encode('utf-8')
for k, v in query.iteritems()}))
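# Illustrative example, assuming PLUGIN_URL is the add-on's plugin:// base URL
# (query parameter order may vary):
#   buildURL({'action': 'actionEpisodesMenu', 'url': '/anime/bleach'})
#       -> PLUGIN_URL + '?action=actionEpisodesMenu&url=%2Fanime%2Fbleach'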
def setViewMode():
if ADDON.getSetting('useViewMode') == 'true':
viewModeID = ADDON.getSetting('viewModeID')
if viewModeID.isdigit():
xbmc.executebuiltin('Container.SetViewMode(' + viewModeID + ')')
def xbmcDebug(*args):
xbmc.log('WATCHNIXTOONS2 > ' + ' '.join((val if isinstance(val, str) else repr(val)) for val in args), xbmc.LOGWARNING)
def simpleRequest(url, requestFunc, headers):
return requestFunc(url, headers=headers, verify=False, timeout=10)
# Thumbnail HTTP headers for Kodi to use when grabbing thumbnail images.
def getThumbnailHeaders():
# Original code:
#return (
# '|User-Agent='+quote_plus(WNT2_USER_AGENT)
# + '&Accept='+quote_plus('image/webp,*/*')
# + '&Referer='+quote_plus(BASEURL+'/')
#)
cookieProperty = getRawWindowProperty(PROPERTY_SESSION_COOKIE)
cookies = ('&Cookie=' + quote_plus(cookieProperty)) if cookieProperty else ''
# Since it's a constant value, it can be precomputed.
return '|User-Agent=Mozilla%2F5.0+%28compatible%3B+WatchNixtoons2%2F0.4.1%3B' \
'+%2Bhttps%3A%2F%2Fgithub.com%2Fdoko-desuka%2Fplugin.video.watchnixtoons2%29' \
'&Accept=image%2Fwebp%2C%2A%2F%2A&Referer=https%3A%2F%2Fwww.thewatchcartoononline.tv%2F' + cookies
def getOldDomains():
# Old possible domains, in the order of likeliness.
return (
'www.wcostream.com', 'm.wcostream.com', 'www.watchcartoononline.io', 'm.watchcartoononline.io'
)
def solveMediaRedirect(url, headers):
    # Use HEAD requests to follow possible 302 redirections.
    # Returns the final stream HEAD response.
while True:
try:
mediaHead = simpleRequest(url, requests.head, headers)
if 'Location' in mediaHead.headers:
url = mediaHead.headers['Location'] # Change the URL to the redirected location.
else:
mediaHead.raise_for_status()
return mediaHead # Return the response.
except:
return None # Return nothing on failure.
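# Usage sketch (hypothetical URL): follow any redirect chain down to the final host.
#   head = solveMediaRedirect('https://example.com/getvid?evid=TOKEN', MEDIA_HEADERS)
#   if head:
#       finalURL = head.url  # safe to hand to the player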
def requestHelper(url, data=None, extraHeaders=None):
myHeaders = {
'User-Agent': WNT2_USER_AGENT,
'Accept': 'text/html,application/xhtml+xml,application/xml,application/json;q=0.9,image/webp,*/*;q=0.8',
'Accept-Language': 'en-US,en;q=0.5',
'Cache-Control': 'no-cache',
'Pragma': 'no-cache',
'DNT': '1'
}
if extraHeaders:
myHeaders.update(extraHeaders)
# At the moment it's a single response cookie, "__cfduid". Other cookies are set w/ Javascript by ads.
cookieProperty = getRawWindowProperty(PROPERTY_SESSION_COOKIE)
if cookieProperty:
        cookieDict = dict(pair.split('=', 1) for pair in cookieProperty.split('; '))
else:
cookieDict = None
startTime = time()
if data:
response = requests.post(url, data=data, headers=myHeaders, verify=False, cookies=cookieDict, timeout=10)
else:
response = requests.get(url, headers=myHeaders, verify=False, cookies=cookieDict, timeout=10)
# Store the session cookie(s), if any.
if not cookieProperty and response.cookies:
setRawWindowProperty(
PROPERTY_SESSION_COOKIE, '; '.join(pair[0]+'='+pair[1] for pair in response.cookies.get_dict().iteritems())
)
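    # Throttle: pad each request so that at least 1.5 seconds elapse between calls,
    # to go easy on the host.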
elapsed = time() - startTime
if elapsed < 1.5:
sleep(1.5 - elapsed)
return response
#def getRandomUserAgent():
# # Random user-agent logic. Thanks to http://edmundmartin.com/random-user-agent-requests-python/
# from random import choice
# desktop_agents = (
# 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36',
# 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36',
# 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36',
# 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_1) AppleWebKit/602.2.14 (KHTML, like Gecko) Version/10.0.1 Safari/602.2.14',
# 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.71 Safari/537.36',
# 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.98 Safari/537.36',
# 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.98 Safari/537.36',
# 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.71 Safari/537.36',
# 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36',
# 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:50.0) Gecko/20100101 Firefox/50.0'
# )
# return choice(desktop_agents)
# Defined after all the functions exist.
CATALOG_FUNCS = {
URL_PATHS['latest']: makeLatestCatalog,
URL_PATHS['popular']: makePopularCatalog,
URL_PATHS['search']: makeSearchCatalog
}
def main():
'''
Main add-on routing function, calls a certain action (function).
The 'action' parameter is the direct name of the function.
'''
params = dict(parse_qsl(sys.argv[2][1:], keep_blank_values=True))
globals()[params.get('action', 'actionMenu')](params) # Defaults to 'actionMenu()'.
# ==== File: database/schemas/user.py | repo: DiegoLing33/prestij.xyz-api | license: MIT ====
# ██╗░░░░░██╗███╗░░██╗░██████╗░░░░██████╗░██╗░░░░░░█████╗░░█████╗░██╗░░██╗
# ██║░░░░░██║████╗░██║██╔════╝░░░░██╔══██╗██║░░░░░██╔══██╗██╔══██╗██║░██╔╝
# ██║░░░░░██║██╔██╗██║██║░░██╗░░░░██████╦╝██║░░░░░███████║██║░░╚═╝█████═╝░
# ██║░░░░░██║██║╚████║██║░░╚██╗░░░██╔══██╗██║░░░░░██╔══██║██║░░██╗██╔═██╗░
# ███████╗██║██║░╚███║╚██████╔╝░░░██████╦╝███████╗██║░░██║╚█████╔╝██║░╚██╗
# ╚══════╝╚═╝╚═╝░░╚══╝░╚═════╝░░░░╚═════╝░╚══════╝╚═╝░░╚═╝░╚════╝░╚═╝░░╚═╝
#
# Developed by Yakov V. Panov (C) Ling • Black 2020
# @site http://ling.black
from datetime import datetime
from typing import Optional, List
from pydantic import BaseModel
from .user_group import UserGroup
from .user_meta import UserMeta
from ..core.schemas import CoreSchema
class UserBase(BaseModel):
"""
The base schema class
"""
login: str
group_id: int
class UserCreate(BaseModel):
"""
The create schema class
"""
login: str
password: str
class User(UserBase, CoreSchema):
id: int
created: datetime
group: UserGroup
meta: List[UserMeta]
class Config:
orm_mode = True
arbitrary_types_allowed = True
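# Minimal usage sketch (hypothetical ORM row; assumes pydantic v1 semantics, where
# `orm_mode = True` enables reading attributes straight off an ORM object):
#   user = User.from_orm(db_user)   # validates and maps the row's attributes
#   data = user.dict()              # plain dict, e.g. for a JSON response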
# ==== File: custom/world_vision/__init__.py | repo: bglar/commcare-hq | license: BSD-3-Clause ====
from custom.world_vision.reports.child_report import ChildTTCReport
from custom.world_vision.reports.mixed_report import MixedTTCReport
from custom.world_vision.reports.mother_report import MotherTTCReport
from collections import OrderedDict
DEFAULT_REPORT_CLASS = MixedTTCReport
WORLD_VISION_DOMAINS = ('wvindia2', )
CUSTOM_REPORTS = (
('TTC App Reports', (
MixedTTCReport,
MotherTTCReport,
ChildTTCReport
)),
)
REASON_FOR_CLOSURE_MAPPING = OrderedDict((
('change_of_location', 'Migration'),
('end_of_pregnancy', 'End of care (Postpartum Completed)'),
('not_pregnant', 'Not Pregnant (mostly incorrect registrations)'),
('abortion', 'Abortion'),
('death', 'Death'),
('unknown', 'Unknown')
))
CLOSED_CHILD_CASES_BREAKDOWN = {
'death': 'Death',
'change_of_location': 'Migration',
'end_of_care': 'End of care'
}
MOTHER_DEATH_MAPPING = {
'seizure': 'Seizure or fits',
'high_bp': 'High blood pressure',
'bleeding_postpartum': 'Excessive bleeding post-delivery',
'fever_or_infection_post_delivery': 'Fever or infection post-delivery',
'during_caeserian_surgery': 'During Caeserian Surgery',
'other': 'Other reason',
}
CHILD_DEATH_TYPE = {
'newborn_death': 'Newborn deaths (< 1 month)',
'infant_death': 'Infant deaths (< 1 year)',
'child_death': 'Child deaths (> 1yr)'
}
CHILD_CAUSE_OF_DEATH = OrderedDict((
('ari', 'ARI'),
('fever', 'Fever'),
('dysentery', 'Dysentery or diarrhea'),
('injury', 'Injury or accident'),
('malnutrition', 'Malnutrition'),
('cholera', 'Cholera'),
('measles', 'Measles'),
('meningitis', 'Meningitis'),
('other', 'Other'),
('', 'Unknown')
))
FAMILY_PLANNING_METHODS = {
'condom': 'Condom',
'iud': 'IUD',
'ocp': 'Contraceptive Pill',
'injection': 'Depo-provera injection or implant',
'permanent': 'Vasectomy or ligation',
'natural': 'Natural methods',
'other': 'Others',
'not_wish_to_disclose': 'Does not wish to disclose'
}
MOTHER_INDICATOR_TOOLTIPS = {
"mother_registration_details": {
"total": "Includes cases that were opened or closed within the date range, or remained open throughout "
"the period",
"total": "Total cases (both open and closed) irrespective of any date filters. Location filters "
"still apply.",
"no_date_opened": "Total open cases irrespective of any date filters. Location filters still apply.",
"no_date_closed": "Total closed cases irrespective of any date filters. Location filters still apply.",
"new_registrations": "Cases open between today and 30 days from today"
},
"ante_natal_care_service_details": {
"no_anc": "Pregnant mothers who didn't get a single ANC checkup",
"anc_1": "Pregnant mothers who completed ANC1",
"anc_2": "Pregnant mothers who completed ANC1 and ANC2",
"anc_3": "Pregnant mothers who completed ANC1, ANC2 and ANC3",
"anc_4": "Pregnant mothers who completed ANC1, ANC2, ANC3 and ANC4",
"tt_1": "Pregnant mothers who got Tetanus 1 shot",
"tt_2": "Pregnant mothers who got Tetanus 1 and Tetanus 2 shots",
"tt_booster": "Pregnant mothers who got Tetanus Booster shot",
"tt_completed": "Pregnant mothers who got Tetanus 2 or Tetanus Booster",
"ifa_tablets": "Pregnant mothers who reported consuming IFA tablets currently",
"100_tablets": "Mothers who completed 100 IFA tablets",
"clinically_anemic": "Pregnant mothers who are currently identified as anemic by the Front Line Worker",
"danger_signs": "Pregnant mothers who reported experiencing danger signs currently, "
"hence referred to health center",
"knows_closest_facility": "Pregnant mothers who reported they know their nearest health facility",
"no_anc_eligible": "Mothers more than 2.75 months pregnant (end of 1st Trimester)",
"anc_1_eligible": "Mothers more than 2.75 months pregnant (end of 1st Trimester)",
"anc_2_eligible": "Mothers currently more than 5.5 months pregnant (2nd Trimester) and completed ANC1",
"anc_3_eligible": "Mothers currently more than 7.3 months pregnant (3rd Trimester) "
"and completed ANC1 and ANC2",
"anc_4_eligible": "Mothers currently more than 8 months pregnant (end of 3rd Trimester) "
"and completed ANC1, ANC2 and ANC3",
"tt_1_eligible": "Pregnant women who did not get 2 tetanus shots in the last 5 years",
"tt_2_eligible": "Pregnant women who got Tetatnus 1 shots",
"tt_booster_eligible": "Pregnant women who got 2 tetanus shots during previous pregnancy "
"in the last 5 years",
"tt_completed_eligible": "Pregnant women eligible to get Tetanus 2 shot or Tetanus Booster shot",
"ifa_tablets_eligible": "Women currently pregnant",
"100_tablets_eligible": "Women who have delivered in the selected date range",
"clinically_anemic_eligible": "Currently pregnant women",
"danger_signs_eligible": "Currently pregnant women",
"knows_closest_facility_eligible": "Currently pregnant women"
},
"pregnant_women_breakdown_by_trimester": {
"total_pregnant": "Currently pregnant women",
"trimester_1": "Women less than 2.75 months pregnant",
"trimester_2": "Women more than 2.75 months and less than 6.4 months pregnant",
"trimester_3": "Women more than 6.4 months pregnant"
},
"delivery_details": {
"total_delivery": "Includes live births and still births",
"trained_traditional_birth_attendant": "Deliveries at health center or done by trained birth attendant "
"elsewhere",
"institutional_deliveries": "Deliveries at health center or hospital",
"home_deliveries": "Deliveries at home or on route",
"abortions": "Number of reported abortions"
},
"postnatal_care_details": {
"pnc_1": "Mothers visited by Front Line Worker within 48 hours of delivery",
"pnc_2": "Mothers visited by Front Line Worker within 2-4 days of delivery",
"pnc_3": "Mothers visited by Front Line Worker within 5-7 days of delivery",
"pnc_4": "Mothers visited by Front Line Worker within 21-42 days of delivery",
"pnc_1_eligible": "Mothers who have delivered",
"pnc_2_eligible": "Mothers who have delivered 2 or more days ago",
"pnc_3_eligible": "Mothers who have delivered 5 or more days ago",
"pnc_4_eligible": "Mothers who have delivered 21 or more days ago"
}
}
CHILD_INDICATOR_TOOLTIPS = {
"child_registration_details": {
"total": "Includes cases that were opened or closed within the date range, or remained open "
"throughout the period",
"total": "Total cases (both open and closed) irrespective of any date filters. Location filters "
"still apply.",
"no_date_opened": "Total open cases irrespective of any date filters. Location filters still apply.",
"no_date_closed": "Total closed cases irrespective of any date filters. Location filters still apply.",
"new_registration": "Cases open between today and 30 days from today"
},
"immunization_details": {
"bcg_eligible": "All children in date range",
"opv0_eligible": "All children in date range",
"hep0_eligible": "All children in date range",
"opv1_eligible": "Children more than 1.3 months old",
"hep1_eligible": "Children more than 1.3 months old",
"dpt1_eligible": "Children more than 1.3 months old",
"opv2_eligible": "Children more than 2.5 months old",
"hep2_eligible": "Children more than 2.5 months old",
"dpt2_eligible": "Children more than 2.5 months old",
"opv3_eligible": "Children more than 3.5 months old",
"hep3_eligible": "Children more than 3.5 months old",
"dpt3_eligible": "Children more than 3.5 months old",
"measles_eligible": "Children more than 9 months old",
"vita1_eligible": "Children more than 9 months old",
"vita2_eligible": "Children more than 18 months old",
"dpt_opv_booster_eligible": "Children more than 18 months old",
"vita3_eligible": "Children more than 23 months old",
"fully_immunized": "Children who received all vaccines from BCG to Measles",
"fully_immunized_eligible": "Children more than 9 months old"
},
"nutrition_details": {
"colostrum_feeding": "Children who had colostrum milk within 1 hour of birth",
"exclusive_breastfeeding": "Children currently less than 6 months old and exclusively breastfed",
"complementary_feeding": "Children between 6-24 months old who are receiving complementary feeding",
"supplementary_feeding": "Children currently less than 6 months old who are receiving supplementary "
"feeding in addition to breast milk",
"colostrum_feeding_total_eligible": "Children who reported about colostrum feeding (both yes and no)",
"exclusive_breastfeeding_total_eligible": "Children currently less than 6 months old",
"complementary_feeding_total_eligible": "Children currently between 6-24 months old",
"supplementary_feeding_total_eligible": "Children currently less than 6 months old"
},
"ebf_stopping_details": {
"stopped_0_1": "Children currently less than 6 months old who stopped EBF when they were less than "
"1 month old",
"stopped_1_3": "Children currently less than 6 months old who stopped EBF when they were between "
"1-3 months old",
"stopped_3_5": "Children currently less than 6 months old who stopped EBF when they were between "
"3-5 months old",
"stopped_5_6": "Children currently less than 6 months old who stopped EBF when they were between "
"5-6 months old"
},
"child_health_indicators": {
"ari_cases": "Children who reported Penumonia between the last two visits by Front Line Worker",
"diarrhea_cases": "Children who reported Diarrhoea between the last two visits by Front Line Worker",
"ors": "Children who reported having ORS when they had Diarrhoea the last time",
"zinc": "Children who reported having Zinc when they had Diarrhoea the last time",
"deworming": "Children who got deworming does in the last 6 months",
"deworming_total_eligible": "Children more than 1 year old"
}
}
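# Illustrative lookup (not part of the original module): the tooltip tables are
# plain nested dicts, so report code can resolve help text by section key and
# indicator slug, e.g.:
#     CHILD_INDICATOR_TOOLTIPS["immunization_details"]["bcg_eligible"]
#     -> "All children in date range"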
| 53.118812
| 112
| 0.67055
|
794ac918c0f31aced8b153bf76a35063d319b331
| 2,448
|
py
|
Python
|
maro/rl/models/torch/mlp_representation.py
|
zhawan/maro
|
d8c98deea4296cdcb90efd1fb59bc571cec3a2ef
|
[
"MIT"
] | null | null | null |
maro/rl/models/torch/mlp_representation.py
|
zhawan/maro
|
d8c98deea4296cdcb90efd1fb59bc571cec3a2ef
|
[
"MIT"
] | null | null | null |
maro/rl/models/torch/mlp_representation.py
|
zhawan/maro
|
d8c98deea4296cdcb90efd1fb59bc571cec3a2ef
|
[
"MIT"
] | null | null | null |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import torch
import torch.nn as nn
class MLPRepresentation(nn.Module):
"""
Deep Q network.
    Uses a multi-layer fully connected architecture with dropout as the basic network structure.
"""
def __init__(self, name: str, input_dim: int, hidden_dims: [int], output_dim: int, dropout_p: float):
"""
Init deep Q network.
Args:
name (str): Network name.
input_dim (int): Network input dimension.
            hidden_dims ([int]): Network hidden layer dimensions. The length of `hidden_dims` gives the
                number of hidden layers; an empty list yields a single linear layer.
output_dim (int): Network output dimension.
dropout_p (float): Dropout parameter.
"""
super().__init__()
self._name = name
self._dropout_p = dropout_p
self._input_dim = input_dim
self._hidden_dims = hidden_dims if hidden_dims is not None else []
self._output_dim = output_dim
self._layers = self._build_layers([input_dim] + self._hidden_dims)
if len(self._hidden_dims) == 0:
self._head = nn.Linear(self._input_dim, self._output_dim)
else:
self._head = nn.Linear(hidden_dims[-1], self._output_dim)
self._device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self._net = nn.Sequential(*self._layers, self._head).to(self._device)
def forward(self, x):
return self._net(x.to(self._device)).double()
@property
def input_dim(self):
return self._input_dim
@property
def name(self):
return self._name
@property
def output_dim(self):
return self._output_dim
def _build_basic_layer(self, input_dim, output_dim):
"""
Build basic layer.
        Linear -> LeakyReLU -> Dropout
"""
return nn.Sequential(nn.Linear(input_dim, output_dim),
nn.LeakyReLU(),
nn.Dropout(p=self._dropout_p))
def _build_layers(self, layer_dims: []):
"""
Build multi basic layer.
BasicLayer1 -> BasicLayer2 -> ...
"""
layers = []
for input_dim, output_dim in zip(layer_dims, layer_dims[1:]):
layers.append(self._build_basic_layer(input_dim, output_dim))
return layers
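# --- Hedged usage sketch (illustrative; the dimensions and names below are
# assumptions, not part of the original module). It shows the constructor
# signature and the float64 output produced by forward()'s .double() call. ---
if __name__ == "__main__":
    demo = MLPRepresentation(name="demo", input_dim=8, hidden_dims=[32, 16],
                             output_dim=4, dropout_p=0.1)
    batch = torch.randn(2, 8)      # two fake 8-dimensional observations
    q_values = demo(batch)         # shape (2, 4), dtype torch.float64
    print(q_values.shape, q_values.dtype)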
| 33.534247
| 105
| 0.602124
|
794ac919c3ec01a8602699480bddb8e6e313c533
| 5,482
|
py
|
Python
|
model/cpn/ade.cpn.R50_v1c.v37/train.py
|
akinoriosamura/TorchSeg-mirror
|
34033fe85fc24015bcef7a92aad39d2a25a001a5
|
[
"MIT"
] | null | null | null |
model/cpn/ade.cpn.R50_v1c.v37/train.py
|
akinoriosamura/TorchSeg-mirror
|
34033fe85fc24015bcef7a92aad39d2a25a001a5
|
[
"MIT"
] | 1
|
2021-06-08T20:36:43.000Z
|
2021-06-08T20:36:43.000Z
|
model/cpn/ade.cpn.R50_v1c.v37/train.py
|
akinoriosamura/TorchSeg-mirror
|
34033fe85fc24015bcef7a92aad39d2a25a001a5
|
[
"MIT"
] | null | null | null |
from __future__ import division
import os.path as osp
import sys
import argparse
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributed as dist
import torch.backends.cudnn as cudnn
from config import config
from dataloader import get_train_loader
from network import CPNet
from datasets import ADE
from utils.init_func import init_weight, group_weight
from engine.lr_policy import PolyLR
from engine.logger import get_logger
from engine.engine import Engine
# from seg_opr.sync_bn import DataParallelModel, Reduce, BatchNorm2d
from seg_opr.loss_opr import AutoOhemCrossEntropy2d
try:
from apex.parallel import SyncBatchNorm, DistributedDataParallel
except ImportError:
raise ImportError(
"Please install apex from https://www.github.com/nvidia/apex .")
logger = get_logger()
torch.manual_seed(config.seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(config.seed)
parser = argparse.ArgumentParser()
with Engine(custom_parser=parser) as engine:
args = parser.parse_args()
cudnn.benchmark = True
if engine.distributed:
torch.cuda.set_device(engine.local_rank)
# data loader
train_loader, train_sampler = get_train_loader(engine, ADE)
# config network and criterion
criterion = AutoOhemCrossEntropy2d(reduction='mean',
ignore_label=-1, drop_ratio=0.3)
if engine.distributed:
logger.info('Use the Multi-Process-SyncBatchNorm')
BatchNorm2d = SyncBatchNorm
    else:
        # Fall back to the standard BatchNorm2d in the non-distributed case,
        # so the name is defined before CPNet is constructed below.
        BatchNorm2d = nn.BatchNorm2d
model = CPNet(config.num_classes, criterion=criterion,
pretrained_model=config.pretrained_model,
norm_layer=BatchNorm2d)
init_weight(model.business_layer, nn.init.kaiming_normal_,
BatchNorm2d, config.bn_eps, config.bn_momentum,
mode='fan_in', nonlinearity='relu')
# group weight and config optimizer
base_lr = config.lr
params_list = []
params_list = group_weight(params_list, model.backbone,
BatchNorm2d, base_lr)
for module in model.business_layer:
params_list = group_weight(params_list, module, BatchNorm2d,
base_lr * 10)
# config lr policy
total_iteration = config.nepochs * config.niters_per_epoch
lr_policy = PolyLR(base_lr, config.lr_power, total_iteration)
optimizer = torch.optim.SGD(params_list,
lr=base_lr,
momentum=config.momentum,
weight_decay=config.weight_decay)
if engine.distributed:
if torch.cuda.is_available():
model.cuda()
model = DistributedDataParallel(model)
else:
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# model = DataParallelModel(model, engine.devices)
model.to(device)
engine.register_state(dataloader=train_loader, model=model,
optimizer=optimizer)
if engine.continue_state_object:
engine.restore_checkpoint()
optimizer.zero_grad()
model.train()
for epoch in range(engine.state.epoch, config.nepochs):
if engine.distributed:
train_sampler.set_epoch(epoch)
bar_format = '{desc}[{elapsed}<{remaining},{rate_fmt}]'
pbar = tqdm(range(config.niters_per_epoch), file=sys.stdout,
bar_format=bar_format)
dataloader = iter(train_loader)
for idx in pbar:
engine.update_iteration(epoch, idx)
            minibatch = next(dataloader)
imgs = minibatch['data']
gts = minibatch['label']
imgs = imgs.cuda(non_blocking=True)
gts = gts.cuda(non_blocking=True)
loss = model(imgs, gts)
# reduce the whole loss over multi-gpu
if engine.distributed:
dist.all_reduce(loss, dist.ReduceOp.SUM)
loss = loss / engine.world_size
# else:
# loss = Reduce.apply(*loss) / len(loss)
optimizer.zero_grad()
loss.backward()
optimizer.step()
current_idx = epoch * config.niters_per_epoch + idx
lr = lr_policy.get_lr(current_idx)
optimizer.param_groups[0]['lr'] = lr
optimizer.param_groups[1]['lr'] = lr
for i in range(2, len(optimizer.param_groups)):
optimizer.param_groups[i]['lr'] = lr * 10
print_str = 'Epoch{}/{}'.format(epoch, config.nepochs) \
+ ' Iter{}/{}:'.format(idx + 1, config.niters_per_epoch) \
+ ' lr=%.2e' % lr \
+ ' loss=%.2f' % loss.item()
pbar.set_description(print_str, refresh=False)
if (epoch >= config.nepochs - 20) or (
epoch % config.snapshot_iter == 0):
if engine.distributed and (engine.local_rank == 0):
engine.save_and_link_checkpoint(config.snapshot_dir,
config.log_dir,
config.log_dir_link)
elif not engine.distributed:
engine.save_and_link_checkpoint(config.snapshot_dir,
config.log_dir,
config.log_dir_link)
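# --- Hedged sketch (assumption): PolyLR is assumed to implement the common
# polynomial decay lr = base_lr * (1 - iter / total_iteration) ** power, which
# the loop above queries once per iteration via lr_policy.get_lr(current_idx). ---
def _poly_lr_reference(base_lr, lr_power, total_iteration, current_idx):
    # Reference-only helper mirroring the assumed schedule; the real
    # implementation is engine.lr_policy.PolyLR.
    return base_lr * (1.0 - current_idx / total_iteration) ** lr_power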
| 35.597403
| 82
| 0.609814
|
794ac9b5d77445bad9f987d710f17899a6976849
| 28,804
|
py
|
Python
|
patches/mario.py
|
unhold/game-and-watch-patch
|
dc33f2228d7c791a746502aef27a5331c0076503
|
[
"BSD-3-Clause"
] | 48
|
2021-08-19T19:34:51.000Z
|
2022-03-29T02:02:35.000Z
|
patches/mario.py
|
unhold/game-and-watch-patch
|
dc33f2228d7c791a746502aef27a5331c0076503
|
[
"BSD-3-Clause"
] | 5
|
2021-09-04T12:15:46.000Z
|
2022-01-21T07:47:06.000Z
|
patches/mario.py
|
unhold/game-and-watch-patch
|
dc33f2228d7c791a746502aef27a5331c0076503
|
[
"BSD-3-Clause"
] | 11
|
2021-10-15T23:36:08.000Z
|
2022-03-05T12:38:23.000Z
|
from pathlib import Path
from PIL import Image
import patches
from .compression import lzma_compress
from .exception import BadImageError, InvalidStockRomError
from .firmware import Device, ExtFirmware, Firmware, IntFirmware
from .tileset import bytes_to_tilemap, decode_backdrop, tilemap_to_bytes
from .utils import (
fds_remove_crc_gaps,
printd,
printe,
printi,
round_down_word,
round_up_page,
seconds_to_frames,
)
build_dir = Path("build") # TODO: expose this properly or put in better location
class MarioGnW(Device, name="mario"):
class Int(IntFirmware):
STOCK_ROM_SHA1_HASH = "efa04c387ad7b40549e15799b471a6e1cd234c76"
# Note: this isn't the ACTUAL Stock ROM end, this is actually
# pointing to where some rwdata is, but this data will be relocated
        # and compressed. This variable is used in the linker scripts to
        # decide where to start placing novel code.
STOCK_ROM_END = 0x18100
KEY_OFFSET = 0x106F4
NONCE_OFFSET = 0x106E4
RWDATA_OFFSET = 0x180A4
RWDATA_LEN = 36
RWDATA_ITCM_IDX = 0
RWDATA_DTCM_IDX = 1
class Ext(ExtFirmware):
STOCK_ROM_SHA1_HASH = "eea70bb171afece163fb4b293c5364ddb90637ae"
ENC_END = 0xF_E000
def _verify(self):
h = self.hash(self[:-8192])
if h != self.STOCK_ROM_SHA1_HASH:
raise InvalidStockRomError
class FreeMemory(Firmware):
FLASH_BASE = 0x240F2124
FLASH_LEN = 0x24100000 - FLASH_BASE
def argparse(self, parser):
group = parser.add_argument_group("Timeout patches")
mgroup = group.add_mutually_exclusive_group()
mgroup.add_argument(
"--disable-sleep", action="store_true", help="Disables sleep timer"
)
mgroup.add_argument(
"--sleep-time",
type=float,
default=None,
help="Go to sleep after this many seconds of inactivity.. "
"Valid range: [1, 1092]",
)
group.add_argument(
"--hard-reset-time",
type=float,
default=None,
help="Hold power button for this many seconds to perform hard reset.",
)
group.add_argument(
"--mario-song-time",
type=float,
default=None,
help="Hold the A button for this many seconds on the time "
"screen to launch the mario drawing song easter egg.",
)
group = parser.add_argument_group("ROM Hacks and Graphical Mods")
group.add_argument(
"--smb1",
type=Path,
default="build/smb1.nes",
help="Override SMB1 ROM with your own file.",
)
mgroup = group.add_mutually_exclusive_group()
mgroup.add_argument(
"--smb1-graphics",
nargs="*",
default=[],
type=Path,
help="ROM hacks where just the graphical assets will be used.",
)
mgroup.add_argument(
"--smb1-graphics-glob",
action="store_true",
help='Add all IPS files from the "ips/" folder',
)
mgroup = group.add_mutually_exclusive_group()
mgroup.add_argument(
"--clock-tileset",
type=Path,
default=None,
help="Override the clock tileset",
)
mgroup.add_argument(
"--clock-tileset-index",
type=Path,
default=None,
help="Override the clock tileset",
)
# group.add_argument(
# "--iconset",
# type=Path,
# default=Path("build/iconset.png"),
# help="Override the iconset",
# )
group = parser.add_argument_group("Low level flash savings flags")
group.add_argument(
"--no-save",
action="store_true",
help="Don't use up 2 pages (8192 bytes) of extflash for non-volatile saves. "
"High scores and brightness/volume configurations will NOT survive homebrew launches.",
)
group.add_argument("--no-smb2", action="store_true", help="Remove SMB2 rom.")
group.add_argument(
"--no-mario-song",
action="store_true",
help="Remove the mario song easter egg.",
)
group.add_argument(
"--no-sleep-images",
action="store_true",
help="Remove the 5 sleeping images.",
)
group = parser.add_argument_group("High level flash savings flags")
group.add_argument(
"--slim",
action="store_true",
help="Remove mario song and sleeping images from extflash.",
)
group.add_argument(
"--clock-only",
action="store_true",
help="Everything in --slim plus remove SMB2.",
)
group.add_argument(
"--internal-only",
action="store_true",
help="Configuration so no external flash is used.",
)
self.args = parser.parse_args()
############
# Validate #
############
if self.args.sleep_time and (
self.args.sleep_time < 1 or self.args.sleep_time > 1092
):
parser.error("--sleep-time must be in range [1, 1092]")
if self.args.mario_song_time and (
self.args.mario_song_time < 1 or self.args.mario_song_time > 1092
):
parser.error("--mario_song-time must be in range [1, 1092]")
if len(self.args.smb1_graphics) > 8:
parser.error("A maximum of 8 SMB1 graphics mods can be specified.")
if self.args.smb1_graphics_glob:
ips_folder = Path("ips")
self.args.smb1_graphics = list(ips_folder.glob("*.ips"))
self.args.smb1_graphics.extend(list(ips_folder.glob("*.IPS")))
if self.args.internal_only:
self.args.slim = True
self.args.extended = True
self.args.no_save = True
if self.args.clock_only:
self.args.slim = True
self.args.no_smb2 = True
if self.args.slim:
self.args.no_mario_song = True
self.args.no_sleep_images = True
return self.args
def patch(self):
printi("Invoke custom bootloader prior to calling stock Reset_Handler.")
self.internal.replace(0x4, "bootloader")
printi("Intercept button presses for macros.")
self.internal.bl(0x6B52, "read_buttons")
printi("Mute clock audio on first boot.")
self.internal.asm(0x49E0, "mov.w r1, #0x00000")
if self.args.debug:
# Override fault handlers for easier debugging via gdb.
printi("Overriding handlers for debugging.")
self.internal.replace(0x8, "NMI_Handler")
self.internal.replace(0xC, "HardFault_Handler")
if self.args.hard_reset_time:
hard_reset_time_ms = int(round(self.args.hard_reset_time * 1000))
printi(
f"Hold power button for {hard_reset_time_ms} milliseconds to perform hard reset."
)
self.internal.asm(0x9CEE, f"movw r1, #{hard_reset_time_ms}")
if self.args.sleep_time:
printi(f"Setting sleep time to {self.args.sleep_time} seconds.")
sleep_time_frames = seconds_to_frames(self.args.sleep_time)
self.internal.asm(0x6C3C, f"movw r2, #{sleep_time_frames}")
if self.args.disable_sleep:
printi("Disable sleep timer")
self.internal.replace(0x6C40, 0x91, size=1)
if self.args.mario_song_time:
printi(f"Setting Mario Song time to {self.args.mario_song_time} seconds.")
mario_song_frames = seconds_to_frames(self.args.mario_song_time)
self.internal.asm(0x6FC4, f"cmp.w r0, #{mario_song_frames}")
if not self.args.encrypt:
# Disable OTFDEC
self.internal.nop(0x10688, 2)
self.internal.nop(0x1068E, 1)
# Dump the tileset
tileset_addr, tileset_size = 0x9_8B84, 0x1_0000
palette_addr = 0xB_EC68
palette = self.external[palette_addr : palette_addr + 320]
tileset_bytes = self.external[tileset_addr : tileset_addr + tileset_size]
tileset = bytes_to_tilemap(tileset_bytes, palette=palette)
tileset.save(build_dir / "tileset.png")
tileset_index = bytes_to_tilemap(tileset_bytes)
tileset_index.save(build_dir / "tileset_index.png")
# Override tileset
if self.args.clock_tileset:
with Image.open(self.args.clock_tileset) as tileset:
if tileset.height != 256 or tileset.width != 256:
raise BadImageError(
"Clock tileset image must have height=256, width=256"
)
tileset = tileset.convert("RGB")
if tileset.getpixel((255, 255))[:3] != (95, 115, 255):
raise BadImageError(
"Clock tileset image color is corrupt. Possibly due to some gamma issue."
)
self.external[
tileset_addr : tileset_addr + tileset_size
] = tilemap_to_bytes(tileset, palette)
# Dump the iconset
iconset_addr, iconset_size = 0xAACE4, 0x3F00
palette_addr = 0xB_EC68
palette = self.external[palette_addr : palette_addr + 320]
iconset = bytes_to_tilemap(
self.external[iconset_addr : iconset_addr + iconset_size],
palette=palette,
bpp=4,
)
iconset.save(build_dir / "iconset.png")
# Override iconset
# with Image.open(self.args.iconset) as iconset:
# if iconset.height != 128 or iconset.width !=256:
# raise BadImageError("Iconset image must have height=128, width=256")
# iconset = iconset.convert("RGB")
# if iconset.getpixel((255, 127))[:3] != (95, 115, 255):
# raise BadImageError("Iconset image color is corrupt. Possibly due to some gamma issue.")
# self.external[iconset_addr : iconset_addr + iconset_size] = \
# tilemap_to_bytes(iconset, palette, bpp=4)[:iconset_size]
# Dump BALL logo
# ball_logo_addr, ball_logo_size = 0x1_13CC, 768
# palette_addr = 0xB_EC68
# palette = self.external[palette_addr : palette_addr + 320]
# ball_logo = bytes_to_tilemap(
# self.external[ball_logo_addr : ball_logo_addr + ball_logo_size],
# palette=palette,
# width=128,
# bpp=2,
# )
# ball_logo.save(build_dir / "ball_logo.png")
if self.args.smb1_graphics:
printi("Intercept prepare_clock_rom")
self.internal.bl(0x690E, "prepare_clock_rom")
self.internal.nop(0x1_0EF0, 2)
table = self.internal.address("SMB1_GRAPHIC_MODS", sub_base=True)
for file_path in self.args.smb1_graphics:
if file_path.suffix.lower() == ".nes":
rom = file_path.read_bytes()
if len(rom) == 40976:
# Remove the NES header
rom = rom[16:]
assert len(rom) == 40960
graphics = rom[0x8000:0x9EC0]
graphics_compressed = lzma_compress(graphics)
loc = self.move_to_int(
graphics_compressed, len(graphics_compressed), None
)
loc += self.internal.FLASH_BASE
elif file_path.suffix.lower() == ".ips":
patch = file_path.read_bytes()
patch = patches.ips.strip_header(patch)
loc = self.move_to_int(patch, len(patch), None)
loc += self.internal.FLASH_BASE
else:
raise ValueError(
f"Don't know how to handle extension for {file_path}."
)
# Update the SMB1_GRAPHIC_MODS table
self.internal.replace(table, loc, size=4)
table += 4
printd("Compressing and moving stuff stuff to internal firmware.")
compressed_len = self.external.compress(
0x0, 7772
) # Dst expects only 7772 bytes, not 7776
self.internal.bl(0x665C, "memcpy_inflate")
self.move_ext(0x0, compressed_len, 0x7204)
        # Note: the 4 bytes between 7772 and 7776 are padding.
self.ext_offset -= 7776 - round_down_word(compressed_len)
# SMB1 ROM (plus loading custom ROM)
printd("Compressing and moving SMB1 ROM to compressed_memory.")
smb1_addr, smb1_size = 0x1E60, 40960
# Adding the header for patching convenience.
(build_dir / "smb1.nes").write_bytes(
b"NES\x1a\x02\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00"
+ self.external[smb1_addr : smb1_addr + smb1_size]
)
smb1 = self.args.smb1.read_bytes()
if len(smb1) == 40976:
# Remove the NES header
smb1 = smb1[16:]
if len(smb1) != smb1_size:
raise ValueError(f"Unknown length {len(smb1)} of file {self.args.smb1}")
self.external[smb1_addr : smb1_addr + smb1_size] = smb1
patch_smb1_refr = self.internal.address("SMB1_ROM", sub_base=True)
self.move_to_compressed_memory(
smb1_addr, smb1_size, [0x7368, 0x10954, 0x7218, patch_smb1_refr]
)
# I think these are all scenes for the clock, but not 100% sure.
# The giant lookup table references all these
self.move_to_compressed_memory(0xBE60, 11620, None)
# Starting here are BALL references
self.move_to_compressed_memory(0xEBC4, 528, 0x4154)
self.rwdata_lookup(0xEBC4, 528)
self.move_to_compressed_memory(0xEDD4, 100, 0x4570)
references = {
0xEE38: 0x4514,
0xEE78: 0x4518,
0xEEB8: 0x4520,
0xEEF8: 0x4524,
}
for external, internal in references.items():
self.move_to_compressed_memory(external, 64, internal)
references = [
0x2AC,
0x2B0,
0x2B4,
0x2B8,
0x2BC,
0x2C0,
0x2C4,
0x2C8,
0x2CC,
0x2D0,
]
self.move_to_compressed_memory(0xEF38, 128 * 10, references)
self.move_to_compressed_memory(0xF438, 96, 0x456C)
self.move_to_compressed_memory(0xF498, 180, 0x43F8)
# This is the first thing passed into the drawing engine.
self.move_to_compressed_memory(0xF54C, 1100, 0x43FC)
self.move_to_compressed_memory(0xF998, 180, 0x4400)
self.move_to_compressed_memory(0xFA4C, 1136, 0x4404)
self.move_to_compressed_memory(0xFEBC, 864, 0x450C)
self.move_to_compressed_memory(0x1_021C, 384, 0x4510)
self.move_to_compressed_memory(0x1_039C, 384, 0x451C)
self.move_to_compressed_memory(0x1_051C, 384, 0x4410)
self.move_to_compressed_memory(0x1_069C, 384, 0x44F8)
self.move_to_compressed_memory(0x1_081C, 384, 0x4500)
self.move_to_compressed_memory(0x1_099C, 384, 0x4414)
self.move_to_compressed_memory(0x1_0B1C, 384, 0x44FC)
self.move_to_compressed_memory(0x1_0C9C, 384, 0x4504)
self.move_to_compressed_memory(0x1_0E1C, 384, 0x440C)
self.move_to_compressed_memory(0x1_0F9C, 384, 0x4408)
self.move_to_compressed_memory(0x1_111C, 192, 0x44F4)
self.move_to_compressed_memory(0x1_11DC, 192, 0x4508)
self.move_to_compressed_memory(0x1_129C, 304, 0x458C)
self.move_to_compressed_memory(
0x1_13CC, 768, 0x4584
) # BALL logo tile idx tight
self.move_to_compressed_memory(0x1_16CC, 1144, 0x4588)
self.move_to_compressed_memory(0x1_1B44, 768, 0x4534)
self.move_to_compressed_memory(0x1_1E44, 32, 0x455C)
self.move_to_compressed_memory(0x1_1E64, 32, 0x4558)
self.move_to_compressed_memory(0x1_1E84, 32, 0x4554)
self.move_to_compressed_memory(0x1_1EA4, 32, 0x4560)
self.move_to_compressed_memory(0x1_1EC4, 32, 0x4564)
self.move_to_compressed_memory(0x1_1EE4, 64, 0x453C)
self.move_to_compressed_memory(0x1_1F24, 64, 0x4530)
self.move_to_compressed_memory(0x1_1F64, 64, 0x4540)
self.move_to_compressed_memory(0x1_1FA4, 64, 0x4544)
self.move_to_compressed_memory(0x1_1FE4, 64, 0x4548)
self.move_to_compressed_memory(0x1_2024, 64, 0x454C)
self.move_to_compressed_memory(0x1_2064, 64, 0x452C)
self.move_to_compressed_memory(0x1_20A4, 64, 0x4550)
self.move_to_compressed_memory(0x1_20E4, 21 * 96, 0x4574)
self.move_to_compressed_memory(0x1_28C4, 192, 0x4578)
self.move_to_compressed_memory(0x1_2984, 640, 0x457C)
# This is a 320 byte palette used for BALL, but the last 160 bytes are empty
self.move_to_compressed_memory(0x1_2C04, 320, 0x4538)
mario_song_len = 0x85E40 # 548,416 bytes
if self.args.no_mario_song:
            # This isn't really necessary, but we keep it here because it's more explicit.
printe("Erasing Mario Song")
self.external.replace(0x1_2D44, b"\x00" * mario_song_len)
self.rwdata_erase(0x1_2D44, mario_song_len)
self.ext_offset -= mario_song_len
self.internal.asm(0x6FC8, "b 0x1c")
else:
references = [
# Banners
0x11A00,
0x11A00 + 4,
0x11A00 + 8,
0x11A00 + 12,
0x11A00 + 16,
0x11A00 + 20,
0x11A00 + 24,
# Audio
0x1199C,
]
self.move_ext(0x1_2D44, mario_song_len, references)
self.rwdata_lookup(0x1_2D44, mario_song_len)
# Each tile is 16x16 pixels, stored as 256 bytes in row-major form.
# These index into one of the palettes starting at 0xbec68.
printe("Compressing clock graphics")
compressed_len = self.external.compress(0x9_8B84, 0x1_0000)
self.internal.bl(0x678E, "memcpy_inflate")
printe("Moving clock graphics")
self.move_ext(0x9_8B84, compressed_len, 0x7350)
self.ext_offset -= 0x1_0000 - round_down_word(compressed_len)
# Note: the clock uses a different palette; this palette only applies
# to ingame Super Mario Bros 1 & 2
printe("Moving NES emulator palette.")
self.move_to_compressed_memory(0xA_8B84, 192, 0xB720)
        # Note: UNKNOWN* represents a block of data that I haven't decoded
# yet. If you know what the block of data is, please let me know!
self.move_to_compressed_memory(0xA_8C44, 8352, 0xBC44)
printe("Moving iconset.")
# MODIFY THESE IF WE WANT CUSTOM GAME ICONS
self.move_to_compressed_memory(0xA_ACE4, 16128, [0xCEA8, 0xD2F8])
printe("Moving menu stuff (icons? meta?)")
references = [
0x0_D010,
0x0_D004,
0x0_D2D8,
0x0_D2DC,
0x0_D2F4,
0x0_D2F0,
]
self.move_to_compressed_memory(0xA_EBE4, 116, references)
# Dump a playable version of SMB2
smb2_addr, smb2_size = 0xA_EC58, 0x1_0000
smb2_end = smb2_addr + smb2_size
smb2 = self.external[smb2_addr:smb2_end].copy()
smb2 = fds_remove_crc_gaps(smb2)
(build_dir / "smb2.fds").write_bytes(smb2)
if self.args.no_smb2:
printe("Erasing SMB2 ROM")
self.external.replace(
smb2_addr,
b"\x00" * smb2_size,
)
self.ext_offset -= smb2_size
else:
printe("Compressing and moving SMB2 ROM.")
compressed_len = self.external.compress(smb2_addr, smb2_size)
self.internal.bl(0x6A12, "memcpy_inflate")
self.move_to_compressed_memory(smb2_addr, compressed_len, 0x7374)
self.ext_offset -= smb2_size - round_down_word(
compressed_len
) # Move by the space savings.
# Round to nearest page so that the length can be used as an imm
compressed_len = round_up_page(compressed_len)
# Update the length of the compressed data (doesn't matter if its too large)
self.internal.asm(0x6A0A, f"mov.w r2, #{compressed_len}")
self.internal.asm(0x6A1E, f"mov.w r3, #{compressed_len}")
# Not sure what this data is
self.move_to_compressed_memory(0xBEC58, 8 * 2, 0x10964)
printe("Moving Palettes")
# There are 80 colors, each in BGRA format, where A is always 0
# These are referenced by the scene table.
self.move_to_compressed_memory(0xBEC68, 320, None) # Day palette [0600, 1700]
self.move_to_compressed_memory(0xBEDA8, 320, None) # Night palette [1800, 0400)
self.move_to_compressed_memory(
0xBEEE8, 320, None
) # Underwater palette (between 1200 and 2400 at XX:30)
self.move_to_compressed_memory(
0xBF028, 320, None
) # Unknown palette. Maybe bowser castle? need to check...
self.move_to_compressed_memory(0xBF168, 320, None) # Dawn palette [0500, 0600)
# These are scene headers, each containing 2x uint32_t's.
# They are MOSTLY [0x36, 0xF], but there are a few like [0x30, 0xF] and [0x20, 0xF],
# Referenced by the scene table
self.move_to_compressed_memory(0xBF2A8, 45 * 8, None)
# IDK what this is.
self.move_to_compressed_memory(0xBF410, 144, 0x1658C)
# SCENE TABLE
# Goes in chunks of 20 bytes (5 addresses)
# Each scene is represented by 5 pointers:
# 1. Pointer to a 2x uint32_t header (I think it's total tile (w, h) )
# The H is always 15, which would be 240 pixels tall.
# The W is usually 54, which would be 864 pixels (probably the flag pole?)
# 2. RLE something. Usually 32 bytes.
# 3. RLE something
# 4. RLE something
# 5. Palette
#
# The RLE encoded data could be background tilemap, animation routine, etc.
lookup_table_start = 0xB_F4A0
lookup_table_end = 0xB_F838
lookup_table_len = lookup_table_end - lookup_table_start # 46 * 5 * 4 = 920
for addr in range(lookup_table_start, lookup_table_end, 4):
self.external.lookup(addr)
# Now move the table
self.move_to_compressed_memory(lookup_table_start, lookup_table_len, 0xDF88)
# Not sure what this is
references = [
0xE8F8,
0xF4EC,
0xF4F8,
0x10098,
0x105B0,
]
self.move_to_compressed_memory(0xBF838, 280, references)
self.move_to_compressed_memory(0xBF950, 180, [0xE2E4, 0xF4FC])
self.move_to_compressed_memory(0xBFA04, 8, 0x1_6590)
self.move_to_compressed_memory(0xBFA0C, 784, 0x1_0F9C)
# MOVE EXTERNAL FUNCTIONS
new_loc = self.move_ext(0xB_FD1C, 14244, None)
references = [ # internal references to external functions
0x00D330,
0x00D310,
0x00D308,
0x00D338,
0x00D348,
0x00D360,
0x00D368,
0x00D388,
0x00D358,
0x00D320,
0x00D350,
0x00D380,
0x00D378,
0x00D318,
0x00D390,
0x00D370,
0x00D340,
0x00D398,
0x00D328,
]
for reference in references:
self.internal.lookup(reference)
references = [ # external references to external functions
0xC_1174,
0xC_313C,
0xC_049C,
0xC_1178,
0xC_220C,
0xC_3490,
0xC_3498,
]
for reference in references:
reference = reference - 0xB_FD1C + new_loc
try:
self.internal.lookup(reference)
except (IndexError, KeyError):
self.external.lookup(reference)
# BALL sound samples.
self.move_to_compressed_memory(0xC34C0, 6168, 0x43EC)
self.rwdata_lookup(0xC34C0, 6168)
self.move_to_compressed_memory(0xC4CD8, 2984, 0x459C)
self.move_to_compressed_memory(0xC5880, 120, 0x4594)
total_image_length = 193_568
references = [
0x1097C,
0x1097C + 4,
0x1097C + 8,
0x1097C + 12,
0x1097C + 16,
]
for name, index in [
("mario_sleeping", 0xC_58F8),
("mario_juggling", 0xC_D858),
("bowser_sleeping", 0xD_6C78),
("pizza", 0xE_16F8),
("minions_sleeping", 0xE_C318),
]:
img, _ = decode_backdrop(self.external[index:])
img.save(build_dir / f"backdrop_{name}.png")
if self.args.no_sleep_images:
# Images Notes:
# * In-between images are just zeros.
#
# start: 0x900C_58F8 end: 0x900C_D83F mario sleeping
# start: 0x900C_D858 end: 0x900D_6C65 mario juggling
# start: 0x900D_6C78 end: 0x900E_16E2 bowser sleeping
# start: 0x900E_16F8 end: 0x900E_C301 mario and luigi eating pizza
# start: 0x900E_C318 end: 0x900F_4D04 minions sleeping
# zero_padded_end: 0x900f_4d18
# Total Image Length: 193_568 bytes
printe("Deleting sleeping images.")
self.external.replace(0xC58F8, b"\x00" * total_image_length)
for reference in references:
self.internal.replace(reference, b"\x00" * 4) # Erase image references
self.ext_offset -= total_image_length
else:
self.move_ext(0xC58F8, total_image_length, references)
# Definitely at least contains part of the TIME graphic on startup screen.
self.move_to_compressed_memory(0xF4D18, 2880, 0x10960)
# What is this data?
# The memcpy to this address is all zero, so i guess its not used?
        self.external.replace(0xF5858, b"\x00" * 34728)  # referenced at internal 0x7210
self.ext_offset -= 34728
if self.compressed_memory_pos:
# Compress and copy over compressed_memory
self.internal.rwdata.append(
self.compressed_memory[: self.compressed_memory_pos].copy(),
self.compressed_memory.FLASH_BASE,
)
# Compress, insert, and reference the modified rwdata
self.int_pos += self.internal.rwdata.write_table_and_data(
0x17DB4, data_offset=self.int_pos
)
# Shorten the external firmware
# This rounds the negative self.ext_offset towards zero.
self.ext_offset = round_up_page(self.ext_offset)
if self.args.no_save:
# Disable nvram loading
for nop in [0x495E, 0x49A6, 0x49B2]:
self.internal.nop(nop, 2)
# self.internal.b(0x4988, 0x49be) # If you still want the first-startup "Press TIME Button" screen
self.internal.b(0x4988, 0x49C0) # Skips Press TIME Button screen
# Disable nvram saving
# This just skips the body of the nvram_write_bank function
self.internal.b(0x48BE, 0x4912)
self.ext_offset -= 8192
else:
printi("Update NVRAM read addresses")
self.internal.asm(
0x4856,
"ite ne; "
f"movne.w r4, #{hex(0xff000 + self.ext_offset)}; "
f"moveq.w r4, #{hex(0xfe000 + self.ext_offset)}",
)
printi("Update NVRAM write addresses")
self.internal.asm(
0x48C0,
"ite ne; "
f"movne.w r4, #{hex(0xff000 + self.ext_offset)}; "
f"moveq.w r4, #{hex(0xfe000 + self.ext_offset)}",
)
# Finally, shorten the firmware
printi("Updating end of OTFDEC pointer")
self.internal.add(0x1_06EC, self.ext_offset)
self.external.shorten(self.ext_offset)
internal_remaining_free = len(self.internal) - self.int_pos
compressed_memory_free = (
len(self.compressed_memory) - self.compressed_memory_pos
)
return internal_remaining_free, compressed_memory_free
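# --- Hedged reference sketches (assumptions; the real helpers live in
# patches/utils.py and are not shown here). They illustrate the alignment
# arithmetic the patch logic above relies on. ---
def _round_down_word_ref(n):
    # Assumes 4-byte words: clear the low two bits.
    return n & ~0x3
def _round_up_page_ref(n):
    # Assumes 4 KiB flash pages; for negative offsets this rounds toward zero,
    # matching the comment near the ext_offset adjustment above.
    return (n + 0xFFF) & ~0xFFF
def _seconds_to_frames_ref(seconds):
    # Assumes a 60 Hz frame clock; 1092 s * 60 = 65520 frames, which fits the
    # 16-bit immediates (movw) used by the timeout patches above.
    return int(round(seconds * 60))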
| 39.242507
| 111
| 0.594119
|
794ac9f59a5ba7fdc2e4ed869476ced19ffbaf50
| 338
|
py
|
Python
|
import.py
|
muhdzakirahmat/unit6proj
|
5c1e260a2b5146f5e2a33e4e140404df74a80030
|
[
"Apache-2.0"
] | null | null | null |
import.py
|
muhdzakirahmat/unit6proj
|
5c1e260a2b5146f5e2a33e4e140404df74a80030
|
[
"Apache-2.0"
] | null | null | null |
import.py
|
muhdzakirahmat/unit6proj
|
5c1e260a2b5146f5e2a33e4e140404df74a80030
|
[
"Apache-2.0"
] | null | null | null |
"""
python import.py export.zip challenges,teams,both,metadata
"""
from Unit6 import create_app
from Unit6.utils import import_ctf
import sys
app = create_app()
with app.app_context():
if len(sys.argv) == 3:
segments = sys.argv[2].split(',')
else:
segments = None
import_ctf(sys.argv[1], segments=segments)
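# Example invocations, per the module docstring above:
#   python import.py export.zip
#   python import.py export.zip challenges,teams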
| 19.882353
| 58
| 0.677515
|
794aca4f26a8139ea16fbc6032d37ac6f93deced
| 9,117
|
py
|
Python
|
lib/datasets/imdb.py
|
alkymi-io/faster-rcnn.pytorch
|
2613b8d643d90ae16f9593a357ab4ba0de7f82a6
|
[
"MIT"
] | 6
|
2018-11-28T08:09:40.000Z
|
2020-12-06T10:07:29.000Z
|
lib/datasets/imdb.py
|
alkymi-io/faster-rcnn.pytorch
|
2613b8d643d90ae16f9593a357ab4ba0de7f82a6
|
[
"MIT"
] | 1
|
2020-09-24T19:21:24.000Z
|
2020-09-24T19:21:24.000Z
|
lib/datasets/imdb.py
|
alkymi-io/faster-rcnn.pytorch
|
2613b8d643d90ae16f9593a357ab4ba0de7f82a6
|
[
"MIT"
] | 1
|
2019-05-22T01:33:36.000Z
|
2019-05-22T01:33:36.000Z
|
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick and Xinlei Chen
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import os.path as osp
import PIL
from lib.model.utils.cython_bbox import bbox_overlaps
import numpy as np
import scipy.sparse
from lib.model.utils.config import cfg
import pdb
ROOT_DIR = osp.join(osp.dirname(__file__), '..', '..')
class imdb(object):
"""Image database."""
def __init__(self, name, classes=None):
self._name = name
self._num_classes = 0
if not classes:
self._classes = []
else:
self._classes = classes
self._image_index = []
self._obj_proposer = 'gt'
self._roidb = None
self._roidb_handler = self.default_roidb
# Use this dict for storing dataset specific config options
self.config = {}
@property
def name(self):
return self._name
@property
def num_classes(self):
return len(self._classes)
@property
def classes(self):
return self._classes
@property
def image_index(self):
return self._image_index
@property
def roidb_handler(self):
return self._roidb_handler
@roidb_handler.setter
def roidb_handler(self, val):
self._roidb_handler = val
def set_proposal_method(self, method):
method = eval('self.' + method + '_roidb')
self.roidb_handler = method
@property
def roidb(self):
# A roidb is a list of dictionaries, each with the following keys:
# boxes
# gt_overlaps
# gt_classes
# flipped
if self._roidb is not None:
return self._roidb
self._roidb = self.roidb_handler()
return self._roidb
@property
def cache_path(self):
cache_path = osp.abspath(osp.join(cfg.DATA_DIR, 'cache'))
if not os.path.exists(cache_path):
os.makedirs(cache_path)
return cache_path
@property
def num_images(self):
return len(self.image_index)
def image_path_at(self, i):
raise NotImplementedError
def image_id_at(self, i):
raise NotImplementedError
def default_roidb(self):
raise NotImplementedError
def evaluate_detections(self, all_boxes, output_dir=None):
"""
all_boxes is a list of length number-of-classes.
Each list element is a list of length number-of-images.
Each of those list elements is either an empty list []
        or a numpy array of detections.
all_boxes[class][image] = [] or np.array of shape #dets x 5
"""
raise NotImplementedError
def _get_widths(self):
return [PIL.Image.open(self.image_path_at(i)).size[0]
for i in range(self.num_images)]
def append_flipped_images(self):
num_images = self.num_images
widths = self._get_widths()
for i in range(num_images):
boxes = self.roidb[i]['boxes'].copy()
oldx1 = boxes[:, 0].copy()
oldx2 = boxes[:, 2].copy()
boxes[:, 0] = widths[i] - oldx2 - 1
boxes[:, 2] = widths[i] - oldx1 - 1
assert (boxes[:, 2] >= boxes[:, 0]).all()
entry = {'boxes': boxes,
'gt_overlaps': self.roidb[i]['gt_overlaps'],
'gt_classes': self.roidb[i]['gt_classes'],
'flipped': True}
self.roidb.append(entry)
self._image_index = self._image_index * 2
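    # Flip arithmetic check (illustrative): for image width 10 and a box with
    # x1=2, x2=5, the mirrored box is x1 = 10 - 5 - 1 = 4 and
    # x2 = 10 - 2 - 1 = 7, so the box width (x2 - x1) is preserved.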
def evaluate_recall(self, candidate_boxes=None, thresholds=None,
area='all', limit=None):
"""Evaluate detection proposal recall metrics.
Returns:
results: dictionary of results with keys
'ar': average recall
'recalls': vector recalls at each IoU overlap threshold
'thresholds': vector of IoU overlap thresholds
'gt_overlaps': vector of all ground-truth overlaps
"""
# Record max overlap value for each gt box
# Return vector of overlap values
areas = {'all': 0, 'small': 1, 'medium': 2, 'large': 3,
'96-128': 4, '128-256': 5, '256-512': 6, '512-inf': 7}
area_ranges = [[0 ** 2, 1e5 ** 2], # all
[0 ** 2, 32 ** 2], # small
[32 ** 2, 96 ** 2], # medium
[96 ** 2, 1e5 ** 2], # large
[96 ** 2, 128 ** 2], # 96-128
[128 ** 2, 256 ** 2], # 128-256
[256 ** 2, 512 ** 2], # 256-512
[512 ** 2, 1e5 ** 2], # 512-inf
]
assert area in areas, 'unknown area range: {}'.format(area)
area_range = area_ranges[areas[area]]
gt_overlaps = np.zeros(0)
num_pos = 0
for i in range(self.num_images):
# Checking for max_overlaps == 1 avoids including crowd annotations
            # (...pretty hacky :/)
max_gt_overlaps = self.roidb[i]['gt_overlaps'].toarray().max(axis=1)
gt_inds = np.where((self.roidb[i]['gt_classes'] > 0) &
(max_gt_overlaps == 1))[0]
gt_boxes = self.roidb[i]['boxes'][gt_inds, :]
gt_areas = self.roidb[i]['seg_areas'][gt_inds]
valid_gt_inds = np.where((gt_areas >= area_range[0]) &
(gt_areas <= area_range[1]))[0]
gt_boxes = gt_boxes[valid_gt_inds, :]
num_pos += len(valid_gt_inds)
if candidate_boxes is None:
# If candidate_boxes is not supplied, the default is to use the
# non-ground-truth boxes from this roidb
non_gt_inds = np.where(self.roidb[i]['gt_classes'] == 0)[0]
boxes = self.roidb[i]['boxes'][non_gt_inds, :]
else:
boxes = candidate_boxes[i]
if boxes.shape[0] == 0:
continue
if limit is not None and boxes.shape[0] > limit:
boxes = boxes[:limit, :]
            overlaps = bbox_overlaps(boxes.astype(np.float64),
                                     gt_boxes.astype(np.float64))
_gt_overlaps = np.zeros((gt_boxes.shape[0]))
for j in range(gt_boxes.shape[0]):
# find which proposal box maximally covers each gt box
argmax_overlaps = overlaps.argmax(axis=0)
# and get the iou amount of coverage for each gt box
max_overlaps = overlaps.max(axis=0)
# find which gt box is 'best' covered (i.e. 'best' = most iou)
gt_ind = max_overlaps.argmax()
gt_ovr = max_overlaps.max()
assert (gt_ovr >= 0)
# find the proposal box that covers the best covered gt box
box_ind = argmax_overlaps[gt_ind]
# record the iou coverage of this gt box
_gt_overlaps[j] = overlaps[box_ind, gt_ind]
assert (_gt_overlaps[j] == gt_ovr)
# mark the proposal box and the gt box as used
overlaps[box_ind, :] = -1
overlaps[:, gt_ind] = -1
# append recorded iou coverage level
gt_overlaps = np.hstack((gt_overlaps, _gt_overlaps))
gt_overlaps = np.sort(gt_overlaps)
if thresholds is None:
step = 0.05
thresholds = np.arange(0.5, 0.95 + 1e-5, step)
recalls = np.zeros_like(thresholds)
# compute recall for each iou threshold
for i, t in enumerate(thresholds):
recalls[i] = (gt_overlaps >= t).sum() / float(num_pos)
# ar = 2 * np.trapz(recalls, thresholds)
ar = recalls.mean()
return {'ar': ar, 'recalls': recalls, 'thresholds': thresholds,
'gt_overlaps': gt_overlaps}
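    # Recall bookkeeping (illustrative): recalls[i] is the fraction of kept
    # ground-truth boxes whose best greedily-matched proposal reaches IoU
    # threshold t, and 'ar' averages those recalls over the default
    # 0.5:0.05:0.95 threshold grid.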
def create_roidb_from_box_list(self, box_list, gt_roidb):
assert len(box_list) == self.num_images, \
'Number of boxes must match number of ground-truth images'
roidb = []
for i in range(self.num_images):
boxes = box_list[i]
num_boxes = boxes.shape[0]
overlaps = np.zeros((num_boxes, self.num_classes), dtype=np.float32)
if gt_roidb is not None and gt_roidb[i]['boxes'].size > 0:
gt_boxes = gt_roidb[i]['boxes']
gt_classes = gt_roidb[i]['gt_classes']
                gt_overlaps = bbox_overlaps(boxes.astype(np.float64),
                                            gt_boxes.astype(np.float64))
argmaxes = gt_overlaps.argmax(axis=1)
maxes = gt_overlaps.max(axis=1)
I = np.where(maxes > 0)[0]
overlaps[I, gt_classes[argmaxes[I]]] = maxes[I]
overlaps = scipy.sparse.csr_matrix(overlaps)
roidb.append({
'boxes': boxes,
'gt_classes': np.zeros((num_boxes,), dtype=np.int32),
'gt_overlaps': overlaps,
'flipped': False,
'seg_areas': np.zeros((num_boxes,), dtype=np.float32),
})
return roidb
@staticmethod
def merge_roidbs(a, b):
assert len(a) == len(b)
for i in range(len(a)):
a[i]['boxes'] = np.vstack((a[i]['boxes'], b[i]['boxes']))
a[i]['gt_classes'] = np.hstack((a[i]['gt_classes'],
b[i]['gt_classes']))
a[i]['gt_overlaps'] = scipy.sparse.vstack([a[i]['gt_overlaps'],
b[i]['gt_overlaps']])
a[i]['seg_areas'] = np.hstack((a[i]['seg_areas'],
b[i]['seg_areas']))
return a
def competition_mode(self, on):
"""Turn competition mode on or off."""
pass
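# --- Hedged sketch (illustrative; the class and paths below are assumptions,
# not part of the original repo): a concrete dataset subclasses imdb and fills
# in the abstract hooks that raise NotImplementedError above. ---
class _toy_imdb(imdb):
    def __init__(self):
        imdb.__init__(self, '_toy', classes=('__background__', 'object'))
        self._image_index = ['000001', '000002']
    def image_path_at(self, i):
        return '/tmp/%s.jpg' % self._image_index[i]  # hypothetical image path
    def default_roidb(self):
        # A real dataset would load annotations here; this sketch returns an
        # empty roidb entry per image.
        return [{'boxes': np.zeros((0, 4), dtype=np.uint16),
                 'gt_classes': np.zeros((0,), dtype=np.int32),
                 'gt_overlaps': scipy.sparse.csr_matrix((0, self.num_classes)),
                 'flipped': False} for _ in self.image_index]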
| 34.274436
| 74
| 0.595591
|
794acb039946667442f68855501e53ee0b4b9987
| 46,094
|
py
|
Python
|
plenum/test/helper.py
|
cam-parra/indy-plenum
|
a891defac546488c6ec2f4a12d23894742d1427f
|
[
"Apache-2.0"
] | null | null | null |
plenum/test/helper.py
|
cam-parra/indy-plenum
|
a891defac546488c6ec2f4a12d23894742d1427f
|
[
"Apache-2.0"
] | null | null | null |
plenum/test/helper.py
|
cam-parra/indy-plenum
|
a891defac546488c6ec2f4a12d23894742d1427f
|
[
"Apache-2.0"
] | null | null | null |
from datetime import datetime
import itertools
import os
import random
import string
from _signal import SIGINT
from contextlib import contextmanager
from functools import partial
from itertools import permutations, combinations
from shutil import copyfile
from sys import executable
from time import sleep
from typing import Tuple, Iterable, Dict, Optional, List, Any, Sequence, Union
import base58
import pytest
from indy.pool import set_protocol_version
from common.serializers.serialization import invalid_index_serializer
from plenum.common.signer_simple import SimpleSigner
from plenum.common.timer import QueueTimer
from plenum.config import Max3PCBatchWait
from psutil import Popen
import json
import asyncio
from indy.ledger import sign_and_submit_request, sign_request, submit_request, build_node_request, \
build_pool_config_request
from indy.error import ErrorCode, IndyError
from ledger.genesis_txn.genesis_txn_file_util import genesis_txn_file
from plenum.common.constants import DOMAIN_LEDGER_ID, OP_FIELD_NAME, REPLY, REQNACK, REJECT, \
CURRENT_PROTOCOL_VERSION
from plenum.common.exceptions import RequestNackedException, RequestRejectedException, CommonSdkIOException, \
PoolLedgerTimeoutException
from plenum.common.messages.node_messages import Reply, PrePrepare, Prepare, Commit
from plenum.common.txn_util import get_req_id, get_from
from plenum.common.types import f, OPERATION
from plenum.common.util import getNoInstances, get_utc_epoch
from plenum.common.config_helper import PNodeConfigHelper
from plenum.common.request import Request
from plenum.server.node import Node
from plenum.server.replica import Replica
from plenum.test import waits
from plenum.test.msgs import randomMsg
from plenum.test.spy_helpers import getLastClientReqReceivedForNode, getAllArgs, getAllReturnVals, \
getAllMsgReceivedForNode
from plenum.test.test_node import TestNode, TestReplica, \
getPrimaryReplica
from stp_core.common.log import getlogger
from stp_core.loop.eventually import eventuallyAll, eventually
from stp_core.loop.looper import Looper
from stp_core.network.util import checkPortAvailable
logger = getlogger()
# noinspection PyUnresolvedReferences
def ordinal(n):
return "%d%s" % (
n, "tsnrhtdd"[(n / 10 % 10 != 1) * (n % 10 < 4) * n % 10::4])
def random_string(length: int) -> str:
return ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(length))
def send_reqs_batches_and_get_suff_replies(
looper: Looper,
txnPoolNodeSet,
sdk_pool_handle,
sdk_wallet_client,
num_reqs: int,
num_batches=1,
**kwargs):
    # This method assumes that `num_reqs` <= num_batches * (the configured max batch size)
if num_batches == 1:
return sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
sdk_wallet_client, num_reqs)
else:
requests = []
for _ in range(num_batches - 1):
requests.extend(
sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
sdk_wallet_client, num_reqs // num_batches))
        # Remainder after the first num_batches - 1 equal-sized batches.
        rem = num_reqs - (num_batches - 1) * (num_reqs // num_batches)
requests.extend(
sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
sdk_wallet_client, rem))
return requests
# noinspection PyIncorrectDocstring
def checkResponseCorrectnessFromNodes(receivedMsgs: Iterable, reqId: int,
fValue: int) -> bool:
"""
the client must get at least :math:`f+1` responses
"""
msgs = [(msg[f.RESULT.nm][f.REQ_ID.nm], msg[f.RESULT.nm][f.IDENTIFIER.nm])
for msg in getRepliesFromClientInbox(receivedMsgs, reqId)]
groupedMsgs = {}
for tpl in msgs:
groupedMsgs[tpl] = groupedMsgs.get(tpl, 0) + 1
assert max(groupedMsgs.values()) >= fValue + 1
def getRepliesFromClientInbox(inbox, reqId) -> list:
return list({_: msg for msg, _ in inbox if
msg[OP_FIELD_NAME] == REPLY and msg[f.RESULT.nm]
[f.REQ_ID.nm] == reqId}.values())
def checkLastClientReqForNode(node: TestNode, expectedRequest: Request):
recvRequest = getLastClientReqReceivedForNode(node)
assert recvRequest
assert expectedRequest.as_dict == recvRequest.as_dict
# noinspection PyIncorrectDocstring
def assertLength(collection: Iterable[Any], expectedLength: int):
assert len(
collection) == expectedLength, "Observed length was {} but " \
"expected length was {}". \
format(len(collection), expectedLength)
def assertEquality(observed: Any, expected: Any, details=None):
assert observed == expected, "Observed value was {} but expected value " \
"was {}, details: {}".format(observed, expected, details)
def randomOperation():
return {
"type": "buy",
"amount": random.randint(10, 100000)
}
def random_requests(count):
return [randomOperation() for _ in range(count)]
def random_request_objects(count, protocol_version):
req_dicts = random_requests(count)
return [Request(operation=op, protocolVersion=protocol_version) for op in req_dicts]
def buildCompletedTxnFromReply(request, reply: Reply) -> Dict:
txn = request.operation
txn.update(reply)
return txn
async def msgAll(nodes):
# test sending messages from every node to every other node
# TODO split send and check so that the messages can be sent concurrently
for p in permutations(nodes, 2):
await sendMessageAndCheckDelivery(p[0], p[1])
def sendMessage(sender: Node,
reciever: Node,
msg: Optional[Tuple] = None):
"""
Sends message from one node to another
:param nodes:
:param sender: sender
:param reciever: recepient
:param msg: optional message - by default random one generated
:return:
"""
logger.debug("Sending msg from {} to {}".format(sender.name, reciever.name))
msg = msg if msg else randomMsg()
rid = sender.nodestack.getRemote(reciever.name).uid
sender.nodestack.send(msg, rid)
async def sendMessageAndCheckDelivery(sender: Node,
reciever: Node,
msg: Optional[Tuple] = None,
method=None,
customTimeout=None):
"""
Sends message from one node to another and checks that it was delivered
:param sender: sender
    :param reciever: recipient
:param msg: optional message - by default random one generated
:param customTimeout:
:return:
"""
logger.debug("Sending msg from {} to {}".format(sender.name, reciever.name))
msg = msg if msg else randomMsg()
rid = sender.nodestack.getRemote(reciever.name).uid
sender.nodestack.send(msg, rid)
timeout = customTimeout or waits.expectedNodeToNodeMessageDeliveryTime()
await eventually(checkMessageReceived, msg, reciever, method,
retryWait=.1,
timeout=timeout,
ratchetSteps=10)
def sendMessageToAll(nodes,
sender: Node,
msg: Optional[Tuple] = None):
"""
Sends message from one node to all others
:param nodes:
:param sender: sender
:param msg: optional message - by default random one generated
:return:
"""
for node in nodes:
if node != sender:
sendMessage(sender, node, msg)
async def sendMessageAndCheckDeliveryToAll(nodes,
sender: Node,
msg: Optional[Tuple] = None,
method=None,
customTimeout=None):
"""
Sends message from one node to all other and checks that it was delivered
:param nodes:
:param sender: sender
:param msg: optional message - by default random one generated
:param customTimeout:
:return:
"""
customTimeout = customTimeout or waits.expectedNodeToAllNodesMessageDeliveryTime(
len(nodes))
for node in nodes:
if node != sender:
await sendMessageAndCheckDelivery(sender, node, msg, method, customTimeout)
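            # NB: the loop exits after checking the first non-sender node, so
            # delivery is verified for only one peer despite the "ToAll" name.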
break
def checkMessageReceived(msg, receiver, method: str = None):
allMsgs = getAllMsgReceivedForNode(receiver, method)
assert msg in allMsgs
def addNodeBack(node_set,
looper: Looper,
node: Node,
tconf,
tdir) -> TestNode:
config_helper = PNodeConfigHelper(node.name, tconf, chroot=tdir)
restartedNode = TestNode(node.name,
config_helper=config_helper,
config=tconf,
ha=node.nodestack.ha,
cliha=node.clientstack.ha)
for node in node_set:
if node.name != restartedNode.name:
node.nodestack.reconnectRemoteWithName(restartedNode.name)
node_set.append(restartedNode)
looper.add(restartedNode)
return restartedNode
def checkPropagateReqCountOfNode(node: TestNode, digest: str):
assert digest in node.requests
assert node.quorums.propagate.is_reached(
len(node.requests[digest].propagates))
def requestReturnedToNode(node: TestNode, key: str,
instId: int):
params = getAllArgs(node, node.processOrdered)
# Skipping the view no and time from each ordered request
recvdOrderedReqs = [
(p['ordered'].instId, p['ordered'].valid_reqIdr[0]) for p in params]
expected = (instId, key)
return expected in recvdOrderedReqs
def checkRequestReturnedToNode(node: TestNode, key: str,
instId: int):
assert requestReturnedToNode(node, key, instId)
def checkRequestNotReturnedToNode(node: TestNode, key: str,
instId: int):
assert not requestReturnedToNode(node, key, instId)
def check_request_is_not_returned_to_nodes(txnPoolNodeSet, request):
instances = range(getNoInstances(len(txnPoolNodeSet)))
for node, inst_id in itertools.product(txnPoolNodeSet, instances):
checkRequestNotReturnedToNode(node,
request.key,
inst_id)
def checkPrePrepareReqSent(replica: TestReplica, req: Request):
prePreparesSent = getAllArgs(replica, replica.sendPrePrepare)
expectedDigest = TestReplica.batchDigest([req])
assert expectedDigest in [p["ppReq"].digest for p in prePreparesSent]
assert [req.digest, ] in \
[p["ppReq"].reqIdr for p in prePreparesSent]
def checkPrePrepareReqRecvd(replicas: Iterable[TestReplica],
expectedRequest: PrePrepare):
for replica in replicas:
params = getAllArgs(replica, replica._can_process_pre_prepare)
assert expectedRequest.reqIdr in [p['pre_prepare'].reqIdr for p in params]
def checkPrepareReqSent(replica: TestReplica, key: str,
view_no: int):
paramsList = getAllArgs(replica, replica.canPrepare)
rv = getAllReturnVals(replica,
replica.canPrepare)
args = [p["ppReq"].reqIdr for p in paramsList if p["ppReq"].viewNo == view_no]
assert [key] in args
idx = args.index([key])
assert rv[idx]
def checkSufficientPrepareReqRecvd(replica: TestReplica, viewNo: int,
ppSeqNo: int):
key = (viewNo, ppSeqNo)
assert key in replica.prepares
assert len(replica.prepares[key][1]) >= replica.quorums.prepare.value
def checkSufficientCommitReqRecvd(replicas: Iterable[TestReplica], viewNo: int,
ppSeqNo: int):
for replica in replicas:
key = (viewNo, ppSeqNo)
assert key in replica.commits
received = len(replica.commits[key][1])
minimum = replica.quorums.commit.value
        assert received >= minimum
def checkViewNoForNodes(nodes: Iterable[TestNode], expectedViewNo: int = None):
"""
Checks if all the given nodes have the expected view no
:param nodes: The nodes to check for
:param expectedViewNo: the view no that the nodes are expected to have
:return:
"""
viewNos = set()
for node in nodes:
logger.debug("{}'s view no is {}".format(node, node.viewNo))
viewNos.add(node.viewNo)
assert len(viewNos) == 1, 'Expected 1, but got {}. ' \
'ViewNos: {}'.format(len(viewNos), [(n.name, n.viewNo) for n in nodes])
vNo, = viewNos
if expectedViewNo is not None:
assert vNo >= expectedViewNo, \
'Expected at least {}, but got {}'.format(expectedViewNo, vNo)
return vNo
def waitForViewChange(looper, txnPoolNodeSet, expectedViewNo=None,
customTimeout=None):
"""
Waits for nodes to come to same view.
Raises exception when time is out
"""
timeout = customTimeout or waits.expectedPoolElectionTimeout(len(txnPoolNodeSet))
return looper.run(eventually(checkViewNoForNodes,
txnPoolNodeSet,
expectedViewNo,
timeout=timeout))
def getNodeSuspicions(node: TestNode, code: int = None):
params = getAllArgs(node, TestNode.reportSuspiciousNode)
if params and code is not None:
params = [param for param in params
if 'code' in param and param['code'] == code]
return params
def checkDiscardMsg(processors, discardedMsg,
reasonRegexp, *exclude):
if not exclude:
exclude = []
for p in filterNodeSet(processors, exclude):
last = p.spylog.getLastParams(p.discard, required=False)
assert last
assert last['msg'] == discardedMsg
assert reasonRegexp in last['reason']
def countDiscarded(processor, reasonPat):
c = 0
for entry in processor.spylog.getAll(processor.discard):
        if 'reason' in entry.params and (
                (isinstance(entry.params['reason'], str) and
                 reasonPat in entry.params['reason']) or
                reasonPat in str(entry.params['reason'])):
c += 1
return c
def filterNodeSet(nodeSet, exclude: List[Union[str, Node]]):
"""
Return a set of nodes with the nodes in exclude removed.
:param nodeSet: the set of nodes
:param exclude: the list of nodes or node names to exclude
:return: the filtered nodeSet
"""
return [n for n in nodeSet
if n not in
[nodeSet[x] if isinstance(x, str) else x for x in exclude]]
def whitelistNode(toWhitelist: str, frm: Sequence[TestNode], *codes):
for node in frm:
node.whitelistNode(toWhitelist, *codes)
def whitelistClient(toWhitelist: str, frm: Sequence[TestNode], *codes):
for node in frm:
node.whitelistClient(toWhitelist, *codes)
def assertExp(condition):
assert condition
def assertFunc(func):
assert func()
def checkLedgerEquality(ledger1, ledger2):
assertLength(ledger1, ledger2.size)
assertEquality(ledger1.root_hash, ledger2.root_hash)
assertEquality(ledger1.uncommitted_root_hash, ledger2.uncommitted_root_hash)
def checkAllLedgersEqual(*ledgers):
for l1, l2 in combinations(ledgers, 2):
checkLedgerEquality(l1, l2)
def checkStateEquality(state1, state2):
if state1 is None:
return state2 is None
assertEquality(state1.as_dict, state2.as_dict)
assertEquality(state1.committedHeadHash, state2.committedHeadHash)
assertEquality(state1.committedHead, state2.committedHead)
def check_seqno_db_equality(db1, db2):
assert db1.size == db2.size, \
"{} != {}".format(db1.size, db2.size)
assert {bytes(k): bytes(v) for k, v in db1._keyValueStorage.iterator()} == \
{bytes(k): bytes(v) for k, v in db2._keyValueStorage.iterator()}
def check_primaries_equality(node1, node2):
assert node1.primaries == node2.primaries, \
"{} != {}".format(node1.primaries, node2.primaries)
def check_last_ordered_3pc(node1, node2):
master_replica_1 = node1.master_replica
master_replica_2 = node2.master_replica
assert master_replica_1.last_ordered_3pc == master_replica_2.last_ordered_3pc, \
"{} != {}".format(master_replica_1.last_ordered_3pc,
master_replica_2.last_ordered_3pc)
return master_replica_1.last_ordered_3pc
def check_last_ordered_3pc_backup(node1, node2):
assert len(node1.replicas) == len(node2.replicas)
for i in range(1, len(node1.replicas)):
replica1 = node1.replicas[i]
replica2 = node2.replicas[i]
assert replica1.last_ordered_3pc == replica2.last_ordered_3pc, \
"{}: {} != {}: {}".format(replica1, replica1.last_ordered_3pc,
replica2, replica2.last_ordered_3pc)
def check_view_no(node1, node2):
assert node1.viewNo == node2.viewNo, \
"{} != {}".format(node1.viewNo, node2.viewNo)
def check_last_ordered_3pc_on_all_replicas(nodes, last_ordered_3pc):
for n in nodes:
for r in n.replicas.values():
assert r.last_ordered_3pc == last_ordered_3pc, \
"{} != {}".format(r.last_ordered_3pc,
last_ordered_3pc)
def check_last_ordered_3pc_on_master(nodes, last_ordered_3pc):
for n in nodes:
assert n.master_replica.last_ordered_3pc == last_ordered_3pc, \
"{} != {}".format(n.master_replica.last_ordered_3pc,
last_ordered_3pc)
def check_last_ordered_3pc_on_backup(nodes, last_ordered_3pc):
for n in nodes:
for i, r in n.replicas.items():
if i != 0:
assert r.last_ordered_3pc == last_ordered_3pc, \
"{} != {}".format(r.last_ordered_3pc,
last_ordered_3pc)
def randomText(size):
return ''.join(random.choice(string.ascii_letters) for _ in range(size))
def mockGetInstalledDistributions(packages):
ret = []
for pkg in packages:
obj = type('', (), {})()
obj.key = pkg
ret.append(obj)
return ret
def mockImportModule(moduleName):
obj = type(moduleName, (), {})()
obj.send_message = lambda *args: None
return obj
def initDirWithGenesisTxns(
dirName,
tconf,
tdirWithPoolTxns=None,
tdirWithDomainTxns=None,
new_pool_txn_file=None,
new_domain_txn_file=None):
os.makedirs(dirName, exist_ok=True)
if tdirWithPoolTxns:
new_pool_txn_file = new_pool_txn_file or tconf.poolTransactionsFile
copyfile(
os.path.join(
tdirWithPoolTxns, genesis_txn_file(
tconf.poolTransactionsFile)), os.path.join(
dirName, genesis_txn_file(new_pool_txn_file)))
if tdirWithDomainTxns:
new_domain_txn_file = new_domain_txn_file or tconf.domainTransactionsFile
copyfile(
os.path.join(
tdirWithDomainTxns, genesis_txn_file(
tconf.domainTransactionsFile)), os.path.join(
dirName, genesis_txn_file(new_domain_txn_file)))
def stopNodes(nodes: List[TestNode], looper=None, ensurePortsFreedUp=True):
if ensurePortsFreedUp:
assert looper, 'Need a looper to make sure ports are freed up'
for node in nodes:
node.stop()
if ensurePortsFreedUp:
ports = [[n.nodestack.ha[1], n.clientstack.ha[1]] for n in nodes]
waitUntilPortIsAvailable(looper, ports)
def waitUntilPortIsAvailable(looper, ports, timeout=5):
ports = itertools.chain(*ports)
def chk():
for port in ports:
checkPortAvailable(("", port))
looper.run(eventually(chk, retryWait=.5, timeout=timeout))
def run_script(script, *args):
s = os.path.join(os.path.dirname(__file__), '../../scripts/' + script)
command = [executable, s]
command.extend(args)
    with Popen(command) as p:
sleep(4)
p.send_signal(SIGINT)
p.wait(timeout=1)
assert p.poll() == 0, 'script failed'
def viewNoForNodes(nodes):
viewNos = {node.viewNo for node in nodes}
assert 1 == len(viewNos)
return next(iter(viewNos))
def primaryNodeNameForInstance(nodes, instanceId):
primaryNames = {node.replicas[instanceId].primaryName for node in nodes}
assert 1 == len(primaryNames)
primaryReplicaName = next(iter(primaryNames))
return primaryReplicaName[:-2]
def nodeByName(nodes, name):
for node in nodes:
if node.name == name:
return node
raise Exception("Node with the name '{}' has not been found.".format(name))
def send_pre_prepare(view_no, pp_seq_no, nodes,
state_root=None, txn_root=None):
pre_prepare = PrePrepare(
0,
view_no,
pp_seq_no,
get_utc_epoch(),
["requests digest"],
0,
"random digest",
DOMAIN_LEDGER_ID,
state_root or '0' * 44,
txn_root or '0' * 44,
0,
True
)
primary_node = getPrimaryReplica(nodes).node
non_primary_nodes = set(nodes) - {primary_node}
sendMessageToAll(nodes, primary_node, pre_prepare)
for non_primary_node in non_primary_nodes:
sendMessageToAll(nodes, non_primary_node, pre_prepare)
def send_prepare(view_no, pp_seq_no, nodes, state_root=None, txn_root=None):
prepare = Prepare(
0,
view_no,
pp_seq_no,
get_utc_epoch(),
"random digest",
state_root or '0' * 44,
txn_root or '0' * 44
)
primary_node = getPrimaryReplica(nodes).node
sendMessageToAll(nodes, primary_node, prepare)
def send_commit(view_no, pp_seq_no, nodes):
commit = Commit(
0,
view_no,
pp_seq_no)
primary_node = getPrimaryReplica(nodes).node
sendMessageToAll(nodes, primary_node, commit)
def get_key_from_req(req: dict):
return Request(identifier=req[f.IDENTIFIER.nm],
reqId=req[f.REQ_ID.nm],
operation=req[OPERATION],
protocolVersion=req[f.PROTOCOL_VERSION.nm],
signature=req[f.SIG.nm]
if req.__contains__(f.SIG.nm) else None,
).key
def chk_all_funcs(looper, funcs, acceptable_fails=0, retry_wait=None,
timeout=None, override_eventually_timeout=False):
# TODO: Move this logic to eventuallyAll
def chk():
fails = 0
last_ex = None
for func in funcs:
try:
func()
except Exception as ex:
fails += 1
if fails >= acceptable_fails:
logger.debug('Too many fails, the last one: {}'.format(repr(ex)))
last_ex = ex
assert fails <= acceptable_fails, '{} out of {} failed. Last exception:' \
' {}'.format(fails, len(funcs), last_ex)
kwargs = {}
if retry_wait:
kwargs['retryWait'] = retry_wait
if timeout:
kwargs['timeout'] = timeout
if override_eventually_timeout:
kwargs['override_timeout_limit'] = override_eventually_timeout
looper.run(eventually(chk, **kwargs))
def check_request_ordered(node, request: Request):
# it's ok to iterate through all txns since this is a test
for seq_no, txn in node.domainLedger.getAllTxn():
if get_req_id(txn) is None:
continue
if get_from(txn) is None:
continue
if get_req_id(txn) != request.reqId:
continue
if get_from(txn) != request.identifier:
continue
return True
raise ValueError('{} request not ordered by node {}'.format(request, node.name))
def wait_for_requests_ordered(looper, nodes, requests):
node_count = len(nodes)
timeout_per_request = waits.expectedTransactionExecutionTime(node_count)
total_timeout = (1 + len(requests) / 10) * timeout_per_request
coros = [partial(check_request_ordered,
node,
request)
for (node, request) in list(itertools.product(nodes, requests))]
looper.run(eventuallyAll(*coros, retryWait=1, totalTimeout=total_timeout))
def create_new_test_node(test_node_class, node_config_helper_class, name, conf,
tdir, plugin_paths, node_ha=None, client_ha=None):
config_helper = node_config_helper_class(name, conf, chroot=tdir)
return test_node_class(name,
config_helper=config_helper,
config=conf,
pluginPaths=plugin_paths,
ha=node_ha,
cliha=client_ha)
# ####### SDK
def sdk_gen_request(operation, protocol_version=CURRENT_PROTOCOL_VERSION,
identifier=None, **kwargs):
# Question: Why this method is called sdk_gen_request? It does not use
# the indy-sdk
return Request(operation=operation, reqId=random.randint(10, 1000000000),
protocolVersion=protocol_version, identifier=identifier,
**kwargs)
def sdk_gen_pool_request(looper, sdk_wallet_new_steward, node_alias, node_did):
_, new_steward_did = sdk_wallet_new_steward
node_ip = '{}.{}.{}.{}'.format(
random.randint(1, 240),
random.randint(1, 240),
random.randint(1, 240),
random.randint(1, 240))
data = {
'alias': node_alias,
'client_port': 50001,
'node_port': 50002,
'node_ip': node_ip,
'client_ip': node_ip,
'services': []
}
req = looper.loop.run_until_complete(
build_node_request(new_steward_did, node_did, json.dumps(data)))
return Request(**json.loads(req))
def sdk_random_request_objects(count, protocol_version, identifier=None,
**kwargs):
ops = random_requests(count)
return [sdk_gen_request(op, protocol_version=protocol_version,
identifier=identifier, **kwargs) for op in ops]
def sdk_sign_request_objects(looper, sdk_wallet, reqs: Sequence):
wallet_h, did = sdk_wallet
reqs_str = [json.dumps(req.as_dict) for req in reqs]
reqs = [looper.loop.run_until_complete(sign_request(wallet_h, did, req))
for req in reqs_str]
return reqs
def sdk_sign_request_strings(looper, sdk_wallet, reqs: Sequence):
wallet_h, did = sdk_wallet
reqs_str = [json.dumps(req) for req in reqs]
reqs = [looper.loop.run_until_complete(sign_request(wallet_h, did, req))
for req in reqs_str]
return reqs
def sdk_signed_random_requests(looper, sdk_wallet, count):
_, did = sdk_wallet
reqs_obj = sdk_random_request_objects(count, identifier=did,
protocol_version=CURRENT_PROTOCOL_VERSION)
return sdk_sign_request_objects(looper, sdk_wallet, reqs_obj)
def sdk_send_signed_requests(pool_h, signed_reqs: Sequence):
return [(json.loads(req),
asyncio.ensure_future(submit_request(pool_h, req)))
for req in signed_reqs]
def sdk_send_random_requests(looper, pool_h, sdk_wallet, count: int):
reqs = sdk_signed_random_requests(looper, sdk_wallet, count)
return sdk_send_signed_requests(pool_h, reqs)
def sdk_send_random_request(looper, pool_h, sdk_wallet):
rets = sdk_send_random_requests(looper, pool_h, sdk_wallet, 1)
return rets[0]
def sdk_send_random_pool_requests(looper, pool_h, sdk_wallet_new_steward, count: int):
node_alias = random_string(7)
node_did = SimpleSigner(seed=random_string(32).encode()).identifier
reqs = [sdk_gen_pool_request(looper, sdk_wallet_new_steward, node_alias, node_did) for _ in range(count)]
return [sdk_sign_and_submit_req_obj(looper, pool_h, sdk_wallet_new_steward, req) for req in reqs]
def sdk_send_random_pool_and_domain_requests(looper, pool_h, sdk_wallet_new_steward, count: int):
node_alias = random_string(7)
node_did = SimpleSigner(seed=random_string(32).encode()).identifier
req_gens = [
lambda: sdk_gen_request(random_requests(1)[0], identifier=sdk_wallet_new_steward[1]),
lambda: sdk_gen_pool_request(looper, sdk_wallet_new_steward, node_alias, node_did),
]
res = []
for i in range(count):
req = req_gens[i % len(req_gens)]()
res.append(sdk_sign_and_submit_req_obj(looper, pool_h, sdk_wallet_new_steward, req))
looper.runFor(0.1) # Give nodes some time to start ordering, so that requests are really alternating
return res
def sdk_sign_and_submit_req(pool_handle, sdk_wallet, req):
wallet_handle, sender_did = sdk_wallet
return json.loads(req), asyncio.ensure_future(
sign_and_submit_request(pool_handle, wallet_handle, sender_did, req))
def sdk_sign_and_submit_req_obj(looper, pool_handle, sdk_wallet, req_obj):
s_req = sdk_sign_request_objects(looper, sdk_wallet, [req_obj])[0]
return sdk_send_signed_requests(pool_handle, [s_req])[0]
def sdk_sign_and_submit_op(looper, pool_handle, sdk_wallet, op):
_, did = sdk_wallet
req_obj = sdk_gen_request(op, protocol_version=CURRENT_PROTOCOL_VERSION,
identifier=did)
s_req = sdk_sign_request_objects(looper, sdk_wallet, [req_obj])[0]
return sdk_send_signed_requests(pool_handle, [s_req])[0]
def sdk_get_reply(looper, sdk_req_resp, timeout=None):
req_json, resp_task = sdk_req_resp
    # TODO: change the timeout evaluation logic once the sdk allows tuning the timeout from outside
if timeout is None:
timeout = waits.expectedTransactionExecutionTime(7)
try:
resp = looper.run(asyncio.wait_for(resp_task, timeout=timeout))
resp = json.loads(resp)
except IndyError as e:
resp = e.error_code
except TimeoutError as e:
resp = ErrorCode.PoolLedgerTimeout
return req_json, resp
# TODO: Check places where sdk_get_replies is used without sdk_check_reply.
# We need to be sure that the test behaviour doesn't require checking response
# validity in those places.
def sdk_get_replies(looper, sdk_req_resp: Sequence, timeout=None):
resp_tasks = [resp for _, resp in sdk_req_resp]
    # TODO: change the timeout evaluation logic once the sdk allows tuning the timeout from outside
if timeout is None:
timeout = waits.expectedTransactionExecutionTime(7)
def get_res(task, done_list):
if task in done_list:
try:
resp = json.loads(task.result())
except IndyError as e:
resp = e.error_code
else:
resp = ErrorCode.PoolLedgerTimeout
return resp
done, pending = looper.run(asyncio.wait(resp_tasks, timeout=timeout))
if pending:
for task in pending:
task.cancel()
ret = [(req, get_res(resp, done)) for req, resp in sdk_req_resp]
return ret
def sdk_check_reply(req_res):
req, res = req_res
if isinstance(res, ErrorCode):
if res == ErrorCode.PoolLedgerTimeout:
raise PoolLedgerTimeoutException('Got PoolLedgerTimeout for request {}'
.format(req))
else:
raise CommonSdkIOException('Got an error with code {} for request {}'
.format(res, req))
if not isinstance(res, dict):
raise CommonSdkIOException("Unexpected response format {}".format(res))
def _parse_op(res_dict):
if res_dict['op'] == REQNACK:
raise RequestNackedException('ReqNack of id {}. Reason: {}'
.format(req['reqId'], res_dict['reason']))
if res_dict['op'] == REJECT:
raise RequestRejectedException('Reject of id {}. Reason: {}'
.format(req['reqId'], res_dict['reason']))
if 'op' in res:
_parse_op(res)
else:
for resps in res.values():
if isinstance(resps, str):
_parse_op(json.loads(resps))
elif isinstance(resps, dict):
_parse_op(resps)
else:
raise CommonSdkIOException("Unexpected response format {}".format(res))
def sdk_get_and_check_replies(looper, sdk_req_resp: Sequence, timeout=None):
rets = []
for req_res in sdk_get_replies(looper, sdk_req_resp, timeout):
sdk_check_reply(req_res)
rets.append(req_res)
return rets
def sdk_eval_timeout(req_count: int, node_count: int,
customTimeoutPerReq: float = None, add_delay_to_timeout: float = 0):
timeout_per_request = customTimeoutPerReq or waits.expectedTransactionExecutionTime(node_count)
timeout_per_request += add_delay_to_timeout
    # Here we estimate a total timeout for executing N requests:
    # total_timeout should satisfy
    #   timeout_per_request < total_timeout < timeout_per_request * N.
    # We cannot just take (timeout_per_request * N) because it grows too large
    # (for timeout_per_request=5 and N=10, total_timeout would be 50 sec),
    # so let's start with a simple formula:
return (1 + req_count / 10) * timeout_per_request
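# Illustrative numbers for the formula above (hypothetical values): with
# timeout_per_request = 5 and req_count = 10 it yields (1 + 10/10) * 5 = 10 sec,
# which sits between the 5 sec lower bound and the 50 sec upper bound.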
def sdk_send_and_check(signed_reqs, looper, txnPoolNodeSet, pool_h, timeout=None):
if not timeout:
timeout = sdk_eval_timeout(len(signed_reqs), len(txnPoolNodeSet))
results = sdk_send_signed_requests(pool_h, signed_reqs)
sdk_replies = sdk_get_replies(looper, results, timeout=timeout)
for req_res in sdk_replies:
sdk_check_reply(req_res)
return sdk_replies
def sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool, sdk_wallet, count,
customTimeoutPerReq: float = None, add_delay_to_timeout: float = 0,
override_timeout_limit=False, total_timeout=None):
sdk_reqs = sdk_send_random_requests(looper, sdk_pool, sdk_wallet, count)
if not total_timeout:
total_timeout = sdk_eval_timeout(len(sdk_reqs), len(txnPoolNodeSet),
customTimeoutPerReq=customTimeoutPerReq,
add_delay_to_timeout=add_delay_to_timeout)
sdk_replies = sdk_get_replies(looper, sdk_reqs, timeout=total_timeout)
for req_res in sdk_replies:
sdk_check_reply(req_res)
return sdk_replies
def sdk_send_batches_of_random_and_check(looper, txnPoolNodeSet, sdk_pool, sdk_wallet,
num_reqs, num_batches=1, **kwargs):
    # This method assumes at least one request per batch, i.e.
    # num_batches <= num_reqs (and num_reqs <= num_batches * Max3PCBatchSize).
    if num_reqs < num_batches:
        raise BaseException(
            'sdk_send_batches_of_random_and_check assumes num_batches <= num_reqs')
if num_batches == 1:
return sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool, sdk_wallet, num_reqs, **kwargs)
reqs_in_batch = num_reqs // num_batches
reqs_in_last_batch = reqs_in_batch + num_reqs % num_batches
sdk_replies = []
for _ in range(num_batches - 1):
sdk_replies.extend(sdk_send_random_and_check(looper, txnPoolNodeSet,
sdk_pool, sdk_wallet,
reqs_in_batch, **kwargs))
sdk_replies.extend(sdk_send_random_and_check(looper, txnPoolNodeSet,
sdk_pool, sdk_wallet,
reqs_in_last_batch, **kwargs))
return sdk_replies
def sdk_send_batches_of_random(looper, txnPoolNodeSet, sdk_pool, sdk_wallet,
num_reqs, num_batches=1, timeout=Max3PCBatchWait):
if num_reqs < num_batches:
        raise BaseException(
            'sdk_send_batches_of_random assumes num_batches <= num_reqs')
if num_batches == 1:
sdk_reqs = sdk_send_random_requests(looper, sdk_pool, sdk_wallet, num_reqs)
looper.runFor(timeout)
return sdk_reqs
reqs_in_batch = num_reqs // num_batches
reqs_in_last_batch = reqs_in_batch + num_reqs % num_batches
sdk_reqs = []
for _ in range(num_batches - 1):
sdk_reqs.extend(sdk_send_random_requests(looper, sdk_pool, sdk_wallet, reqs_in_batch))
looper.runFor(timeout)
sdk_reqs.extend(sdk_send_random_requests(looper, sdk_pool, sdk_wallet, reqs_in_last_batch))
looper.runFor(timeout)
return sdk_reqs
def sdk_sign_request_from_dict(looper, sdk_wallet, op, reqId=None):
wallet_h, did = sdk_wallet
reqId = reqId or random.randint(10, 100000)
request = Request(operation=op, reqId=reqId,
protocolVersion=CURRENT_PROTOCOL_VERSION, identifier=did)
req_str = json.dumps(request.as_dict)
resp = looper.loop.run_until_complete(sign_request(wallet_h, did, req_str))
return json.loads(resp)
def sdk_check_request_is_not_returned_to_nodes(looper, nodeSet, request):
instances = range(getNoInstances(len(nodeSet)))
coros = []
for node, inst_id in itertools.product(nodeSet, instances):
c = partial(checkRequestNotReturnedToNode,
node=node,
identifier=request['identifier'],
reqId=request['reqId'],
instId=inst_id
)
coros.append(c)
timeout = waits.expectedTransactionExecutionTime(len(nodeSet))
looper.run(eventuallyAll(*coros, retryWait=1, totalTimeout=timeout))
def sdk_json_to_request_object(json_req):
return Request(identifier=json_req['identifier'],
reqId=json_req['reqId'],
operation=json_req['operation'],
signature=json_req['signature'] if 'signature' in json_req else None,
protocolVersion=json_req['protocolVersion'] if 'protocolVersion' in json_req else None)
def sdk_json_couples_to_request_list(json_couples):
req_list = []
for json_couple in json_couples:
req_list.append(sdk_json_to_request_object(json_couple[0]))
return req_list
def sdk_get_bad_response(looper, reqs, exception, message):
with pytest.raises(exception) as e:
sdk_get_and_check_replies(looper, reqs)
assert message in e._excinfo[1].args[0]
def sdk_set_protocol_version(looper, version=CURRENT_PROTOCOL_VERSION):
looper.loop.run_until_complete(set_protocol_version(version))
# Context managers to be used with tconf fixture
@contextmanager
def perf_monitor_disabled(tconf):
old_unsafe = tconf.unsafe.copy()
tconf.unsafe.add("disable_view_change")
yield tconf
tconf.unsafe = old_unsafe
@contextmanager
def view_change_timeout(tconf, vc_timeout, catchup_timeout=None, propose_timeout=None):
old_catchup_timeout = tconf.MIN_TIMEOUT_CATCHUPS_DONE_DURING_VIEW_CHANGE
old_view_change_timeout = tconf.VIEW_CHANGE_TIMEOUT
old_propose_timeout = tconf.INITIAL_PROPOSE_VIEW_CHANGE_TIMEOUT
old_propagate_request_delay = tconf.PROPAGATE_REQUEST_DELAY
tconf.MIN_TIMEOUT_CATCHUPS_DONE_DURING_VIEW_CHANGE = \
0.6 * vc_timeout if catchup_timeout is None else catchup_timeout
tconf.VIEW_CHANGE_TIMEOUT = vc_timeout
tconf.INITIAL_PROPOSE_VIEW_CHANGE_TIMEOUT = vc_timeout if propose_timeout is None else propose_timeout
tconf.PROPAGATE_REQUEST_DELAY = 0
yield tconf
tconf.MIN_TIMEOUT_CATCHUPS_DONE_DURING_VIEW_CHANGE = old_catchup_timeout
tconf.VIEW_CHANGE_TIMEOUT = old_view_change_timeout
tconf.INITIAL_PROPOSE_VIEW_CHANGE_TIMEOUT = old_propose_timeout
tconf.PROPAGATE_REQUEST_DELAY = old_propagate_request_delay
@contextmanager
def max_3pc_batch_limits(tconf, size, wait=10000):
old_size = tconf.Max3PCBatchSize
old_wait = tconf.Max3PCBatchWait
tconf.Max3PCBatchSize = size
tconf.Max3PCBatchWait = wait
yield tconf
tconf.Max3PCBatchSize = old_size
tconf.Max3PCBatchWait = old_wait
@contextmanager
def freshness(tconf, enabled, timeout):
old_update_state = tconf.UPDATE_STATE_FRESHNESS
old_timeout = tconf.STATE_FRESHNESS_UPDATE_INTERVAL
tconf.UPDATE_STATE_FRESHNESS = enabled
tconf.STATE_FRESHNESS_UPDATE_INTERVAL = timeout
yield tconf
tconf.UPDATE_STATE_FRESHNESS = old_update_state
tconf.STATE_FRESHNESS_UPDATE_INTERVAL = old_timeout
@contextmanager
def primary_disconnection_time(tconf, value):
    old_tolerate_disconnection = tconf.ToleratePrimaryDisconnection
    tconf.ToleratePrimaryDisconnection = value
    yield tconf
    tconf.ToleratePrimaryDisconnection = old_tolerate_disconnection
@contextmanager
def acc_monitor(tconf, acc_monitor_enabled=True, acc_monitor_timeout=3, acc_monitor_delta=0):
old_timeout = tconf.ACC_MONITOR_TIMEOUT
old_delta = tconf.ACC_MONITOR_TXN_DELTA_K
old_acc_monitor_enabled = tconf.ACC_MONITOR_ENABLED
tconf.ACC_MONITOR_TIMEOUT = acc_monitor_timeout
tconf.ACC_MONITOR_TXN_DELTA_K = acc_monitor_delta
tconf.ACC_MONITOR_ENABLED = acc_monitor_enabled
yield tconf
tconf.ACC_MONITOR_TIMEOUT = old_timeout
tconf.ACC_MONITOR_TXN_DELTA_K = old_delta
tconf.ACC_MONITOR_ENABLED = old_acc_monitor_enabled
def create_pre_prepare_params(state_root,
ledger_id=DOMAIN_LEDGER_ID,
txn_root=None,
timestamp=None,
bls_multi_sig=None,
view_no=0,
pool_state_root=None,
pp_seq_no=0,
inst_id=0,
audit_txn_root=None,
reqs=None):
digest = Replica.batchDigest(reqs) if reqs is not None else "random digest"
req_idrs = [req.key for req in reqs] if reqs is not None else ["random request"]
params = [inst_id,
view_no,
pp_seq_no,
timestamp or get_utc_epoch(),
req_idrs,
init_discarded(0),
digest,
ledger_id,
state_root,
txn_root or '1' * 32,
0,
True,
pool_state_root or generate_state_root(),
audit_txn_root or generate_state_root()]
if bls_multi_sig:
params.append(bls_multi_sig.as_list())
return params
def create_pre_prepare_no_bls(state_root, view_no=0, pool_state_root=None, pp_seq_no=0, inst_id=0, audit_txn_root=None):
params = create_pre_prepare_params(state_root=state_root,
view_no=view_no,
pool_state_root=pool_state_root,
pp_seq_no=pp_seq_no,
inst_id=inst_id,
audit_txn_root=audit_txn_root)
return PrePrepare(*params)
def create_commit_params(view_no, pp_seq_no, inst_id=0):
return [inst_id, view_no, pp_seq_no]
def create_commit_no_bls_sig(req_key, inst_id=0):
view_no, pp_seq_no = req_key
params = create_commit_params(view_no, pp_seq_no, inst_id=inst_id)
return Commit(*params)
def create_commit_with_bls_sig(req_key, bls_sig):
view_no, pp_seq_no = req_key
params = create_commit_params(view_no, pp_seq_no)
params.append(bls_sig)
return Commit(*params)
def create_commit_bls_sig(bls_bft, req_key, pre_prepare):
view_no, pp_seq_no = req_key
params = create_commit_params(view_no, pp_seq_no)
params = bls_bft.update_commit(params, pre_prepare)
return Commit(*params)
def create_prepare_params(view_no, pp_seq_no, state_root, inst_id=0):
return [inst_id,
view_no,
pp_seq_no,
get_utc_epoch(),
"random digest",
state_root,
'1' * 32]
def create_prepare_from_pre_prepare(pre_prepare):
params = [pre_prepare.instId,
pre_prepare.viewNo,
pre_prepare.ppSeqNo,
pre_prepare.ppTime,
pre_prepare.digest,
pre_prepare.stateRootHash,
pre_prepare.txnRootHash,
pre_prepare.auditTxnRootHash]
return Prepare(*params)
def create_prepare(req_key, state_root, inst_id=0):
view_no, pp_seq_no = req_key
params = create_prepare_params(view_no, pp_seq_no, state_root, inst_id=inst_id)
return Prepare(*params)
def generate_state_root():
return base58.b58encode(os.urandom(32)).decode("utf-8")
def init_discarded(value=None):
"""init discarded field with value and return message like representation"""
discarded = []
if value:
discarded.append(value)
return invalid_index_serializer.serialize(discarded, toBytes=False)
def incoming_3pc_msgs_count(nodes_count: int = 4) -> int:
    pre_prepare = 1  # message from the primary
    prepares = nodes_count - 2  # messages from all nodes except the primary and this node
    commits = nodes_count - 1  # messages from all nodes except this node
    # The primary receives the same total: it gets no pre-prepare,
    # but one more prepare.
return pre_prepare + prepares + commits
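# e.g. for the default 4-node pool: 1 pre-prepare + 2 prepares + 3 commits = 6.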
class MockTimestamp:
    def __init__(self, value=None):
        # Note: a datetime.utcnow() default in the signature would be evaluated
        # only once, at class definition time, so resolve it per instance here.
        self.value = value if value is not None else datetime.utcnow()
def __call__(self):
return self.value
class MockTimer(QueueTimer):
def __init__(self, get_current_time: Optional[MockTimestamp] = None):
self._ts = get_current_time if get_current_time else MockTimestamp(0)
QueueTimer.__init__(self, self._ts)
def advance(self, seconds):
self._ts.value += seconds
self.service()
def update_time(self, value):
self._ts.value = value
self.service()
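# Usage sketch (assuming the schedule(delay, callback) API that QueueTimer
# exposes elsewhere in these tests): schedule a callback, then call
# timer.advance(delay) to fire it deterministically without real waiting.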
| 35.484219
| 120
| 0.657374
|
794acb0d5f6018dbf7359cd8cf1afecb132a88a6
| 8,025
|
py
|
Python
|
lxmls/sequences/bak/forward_backward.py
|
SimonSuster/lxmls-toolkit
|
6a57884f8b7c98da816a60eb88593e0a1585d434
|
[
"MIT"
] | 1
|
2015-09-20T05:16:38.000Z
|
2015-09-20T05:16:38.000Z
|
lxmls/sequences/bak/forward_backward.py
|
daviddao/LxMLS-labs-solution
|
78413c1ee61752ca33988c454e3b2c27326e7063
|
[
"MIT"
] | null | null | null |
lxmls/sequences/bak/forward_backward.py
|
daviddao/LxMLS-labs-solution
|
78413c1ee61752ca33988c454e3b2c27326e7063
|
[
"MIT"
] | null | null | null |
import numpy as np
######
# Computes the forward-backward trellis for a given sequence.
# N - length of the sequence
# H - number of hidden states
# Receives either node/edge potentials:
# Node potentials: (N, H) array
# Edge potentials: (N-1, H, H) array
#
# or probabilities, in one of two layouts:
# Emission probabilities: (length, num_states) array
# Initial probabilities: (num_states) array
# Transition probabilities: (length, num_states+1, num_states) array
#
# OR (the layout used by the functions below)
#
# Transition probabilities: (length-1, num_states, num_states) array
# Final probabilities: (num_states) array
######
def run_forward(initial_scores, transition_scores, final_scores, emission_scores):
length = np.size(emission_scores, 0) # Length of the sequence.
num_states = np.size(initial_scores) # Number of states.
# Forward variables.
forward = np.zeros([length, num_states])
# Initialization.
forward[0,:] = emission_scores[0,:] * initial_scores
# Forward loop.
    for pos in range(1, length):
        for current_state in range(num_states):
forward[pos, current_state] = \
np.sum(forward[pos-1, :] * transition_scores[pos-1, current_state, :])
forward[pos, current_state] *= emission_scores[pos, current_state]
# Termination.
likelihood = sum(forward[length-1,:] * final_scores)
# print 'Likelihood =', likelihood
return likelihood, forward
def run_backward(initial_scores, transition_scores, final_scores, emission_scores):
length = np.size(emission_scores, 0) # Length of the sequence.
num_states = np.size(initial_scores) # Number of states.
# Backward variables.
backward = np.zeros([length, num_states])
# Initialization.
backward[length-1,:] = final_scores
# Backward loop.
    for pos in range(length-2, -1, -1):
        for current_state in range(num_states):
backward[pos, current_state] = \
sum(backward[pos+1, :] *
transition_scores[pos, :, current_state] *
emission_scores[pos+1, :])
# prob = 0.0
# for next_state in xrange(num_states):
# back = backward[pos+1, next_state]
# trans = transition_scores[pos, next_state, current_state];
# observation = emission_scores[pos+1, next_state];
# prob += trans * observation * back;
# backward[pos, current_state] = prob
# backward[0,:] *= initial_scores
#sanity_check_forward_backward(forward,backward)
# Termination.
likelihood = sum(backward[0,:] * initial_scores * emission_scores[0,:])
# print 'Likelihood =', likelihood
return likelihood, backward
def forward_backward(initial_scores, transition_scores, final_scores, emission_scores):
likelihood, forward = run_forward(initial_scores, transition_scores, final_scores, emission_scores)
    print('Likelihood =', likelihood)
    likelihood, backward = run_backward(initial_scores, transition_scores, final_scores, emission_scores)
    print('Likelihood =', likelihood)
# length = np.size(emission_scores, 0) # Length of the sequence.
# num_states = np.size(initial_scores) # Number of states.
#
# forward = np.zeros([length, num_states])
# backward = np.zeros([length, num_states])
#
# forward[0,:] = emission_scores[0,:] * initial_scores
# ## Forward loop.
# for pos in xrange(1,length):
# for current_state in xrange(num_states):
# for prev_state in xrange(num_states):
# forward_v = forward[pos-1, prev_state]
# trans_v = transition_scores[pos-1, current_state, prev_state]
# prob = forward_v*trans_v
# forward[pos, current_state] += prob
# forward[pos, current_state] *= emission_scores[pos, current_state]
## forward[length-1,:] *= final_scores
# print 'Likelihood =', sum(forward[length-1,:] * final_scores)
#
# ## Backward loop.
## backward[length-1,:] = final_scores
# backward[length-1,:] = final_scores #1.0
# for pos in xrange(length-2,-1,-1):
# for current_state in xrange(num_states):
# prob = 0.0
# for next_state in xrange(num_states):
# back = backward[pos+1, next_state]
# trans = transition_scores[pos, next_state, current_state];
# observation = emission_scores[pos+1, next_state];
# prob += trans * observation * back;
# backward[pos, current_state] = prob
## backward[0,:] *= initial_scores
# #sanity_check_forward_backward(forward,backward)
# print 'Likelihood =', sum(backward[0,:] * initial_scores * emission_scores[0,:])
return forward,backward
######
# Computes the forward backward trellis for a given sequence and node and edge potentials
# N - Length of sequence
# H - Number of hidden states
# Receives:
# Node potentials (N,H) vector
# Edge potentials (N-1,H,H)
######
#def forward_backward(node_potentials,edge_potentials):
# H,N = node_potentials.shape
# forward = np.zeros([H,N],dtype=float)
# backward = np.zeros([H,N],dtype=float)
# forward[:,0] = node_potentials[:,0]
# ## Forward loop
# for pos in xrange(1,N):
# for current_state in xrange(H):
# for prev_state in xrange(H):
# forward_v = forward[prev_state,pos-1]
# trans_v = edge_potentials[prev_state,current_state,pos-1]
# prob = forward_v*trans_v
# forward[current_state,pos] += prob
# forward[current_state,pos] *= node_potentials[current_state,pos]
# ## Backward loop
# backward[:,N-1] = 1
# for pos in xrange(N-2,-1,-1):
# for current_state in xrange(H):
# prob = 0
# for next_state in xrange(H):
# back = backward[next_state,pos+1]
# trans = edge_potentials[current_state,next_state,pos];
# observation = node_potentials[next_state,pos+1];
# prob += trans * observation * back;
# backward[current_state,pos] = prob
# #sanity_check_forward_backward(forward,backward)
# return forward,backward
# def forward_backward_trans_probs(node_potentials,transitions_probs):
# H,N = node_potentials.shape
# forward = np.zeros([H,N],dtype=float)
# backward = np.zeros([H,N],dtype=float)
# forward[:,0] = node_potentials[:,0]
# ## Forward loop
# for pos in xrange(1,N):
# for current_state in xrange(H):
# for prev_state in xrange(H):
# forward_v = forward[prev_state,pos-1]
# trans_v = transitions_probs[current_state,prev_state]
# prob = forward_v*trans_v
# forward[current_state,pos] += prob
# forward[current_state,pos] *= node_potentials[current_state,pos]
# ## Backward loop
# backward[:,N-1] = 1
# for pos in xrange(N-2,-1,-1):
# for current_state in xrange(H):
# prob = 0
# for next_state in xrange(H):
# back = backward[next_state,pos+1]
# trans = transition_probs[next_state,current_state];
# observation = node_potentials[next_state,pos+1];
# prob += trans * observation * back;
# backward[current_state,pos] = prob
# #sanity_check_forward_backward(forward,backward)
# return forward,backward
#########
## For every position - pos the sum_states forward(pos,state)*backward(pos,state) = Likelihood
#########
def sanity_check_forward_backward(forward,backward):
N,H = forward.shape
likelihood = np.zeros([N,1])
    for pos in range(N):
        aux = 0
        for current_state in range(H):
            aux += forward[pos, current_state] * backward[pos, current_state]
        likelihood[pos] = aux
        for i in range(pos):
            if abs(aux - likelihood[i]) > 0.001:
                print("Likelihood for pos %i and pos %i mismatch: %f - %f" % (i, pos, likelihood[i], aux))
                return False
    print(likelihood)
return True
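# Minimal agreement check (a sketch with hypothetical shapes/values): the
# forward and backward passes must report the same likelihood for any
# well-formed model. Call it manually, e.g. from an interactive session.
def _demo_forward_backward_agreement(length=4, num_states=3):
    initial = np.full(num_states, 1.0 / num_states)
    final = np.ones(num_states)
    emission = np.random.rand(length, num_states)
    transition = np.random.rand(length - 1, num_states, num_states)
    lik_f, _ = run_forward(initial, transition, final, emission)
    lik_b, _ = run_backward(initial, transition, final, emission)
    assert abs(lik_f - lik_b) < 1e-9, (lik_f, lik_b)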
| 38.214286
| 105
| 0.62866
|
794acc9e2d5260575008284823c3db2dec34914a
| 5,404
|
py
|
Python
|
tmp.py
|
pandezhao/Paperwork
|
7a8a82eb2d85949c467b15ea634806269b0f902c
|
[
"MIT"
] | null | null | null |
tmp.py
|
pandezhao/Paperwork
|
7a8a82eb2d85949c467b15ea634806269b0f902c
|
[
"MIT"
] | null | null | null |
tmp.py
|
pandezhao/Paperwork
|
7a8a82eb2d85949c467b15ea634806269b0f902c
|
[
"MIT"
] | null | null | null |
import torch
from torchvision import datasets, transforms
from torch import nn, optim
from torch.nn import init, functional
from torch.nn.utils import clip_grad_norm
from torch.autograd import Variable
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
from norm import show_data
import torch.nn.functional as F
class rnn(nn.Module):
def __init__(self, input_size, hidden_size, num_layers, bias, batch_first, dropout):
super(rnn, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layers = num_layers
self.bias = bias
self.batch_first = batch_first
self.dropout = dropout
self.LSTM = nn.LSTM(input_size=input_size, hidden_size=hidden_size, num_layers=num_layers,
bias=bias, batch_first=batch_first,dropout=dropout)
# self.linear1 = nn.Linear(in_features=hidden_size, out_features=hidden_size)
self.linear2 = nn.Linear(in_features=hidden_size, out_features=10)
self.tanh = nn.Tanh()
self.relu = nn.ReLU()
self.batch_norm = nn.BatchNorm1d(hidden_size)
def forward(self, input_, hx):
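        # hx arrives as an (h, h) pair of (batch, hidden) tensors; stack them
        # into a (num_layers=2, batch, hidden) tensor and reuse it as both the
        # initial hidden and cell state of the LSTM.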
hx = torch.stack(hx, 0)
hx = [hx, hx]
_, (out, _) = self.LSTM(input_, hx)
output = self.batch_norm(out[-1])
output = self.tanh(output) # activation function can not be relu, must be tanh
output = self.linear2(output)
return output
data_path = "CNN dataset"
save_dir = "CNN saved"
use_gpu = True
epochs = 2
batch_size = 32
hidden_size = 100
def transform_flatten(tensor):
return tensor.view(-1,1).contiguous()
train_set = datasets.MNIST(root=data_path, train=True,
transform=transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))])
, download=True)
test_set = datasets.MNIST(root=data_path, train=False,
transform=transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))])
, download=True)
train_loader = DataLoader(train_set, batch_size = batch_size, shuffle = False)
test_loader = DataLoader(test_set, batch_size = batch_size, shuffle = False)
model = rnn(input_size=28, hidden_size=hidden_size, num_layers=2, bias=True, batch_first=False, dropout=0.0)
Loss = nn.CrossEntropyLoss()
optimizer = optim.RMSprop(params=model.parameters(), lr=1e-3, momentum=0.9)
train_Accuracy = []
train_Loss = []
test_Accuracy = []
test_Loss = []
if use_gpu:
model.cuda()
def compute_loss(data, label):
    hx = torch.Tensor(batch_size, hidden_size).normal_(0, 0.001)  # the "1" refers to the input size: e.g. feeding one pixel at a time gives input size = 1
    if use_gpu:
        hx = hx.cuda()
    hx = (hx, hx)  # so input size = 1, hence the 1
output = model(input_=data,hx=hx)
# output = model(x=data)
loss = Loss(output, label)
accuracy = (output.max(1)[1] == label).float().mean()
return loss, accuracy
for epoch in range(epochs):
count = 0
for data, label in train_loader:
# data = data + torch.FloatTensor(0.0001 * numpy.random.randn(data.size(0),784,1))
model.train(True)
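        # Reshape the MNIST batch (B, 1, 28, 28) for the LSTM: permute to
        # (28, B, 28, 1), then view as (seq_len=28, batch=B, input_size=28),
        # i.e. each image row becomes one time step.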
data = data.permute(2, 0, 3, 1)
data = Variable(data.view(28, batch_size, 28))
# print(data)
# data = Variable(data.reshape(batch_size,1,28,28))
# data = Variable(data)
label = Variable(label)
if use_gpu:
data = data.cuda()
label = label.cuda()
# model.zero_grad()
optimizer.zero_grad()
# loss, accuracy = compute_loss(data=data, label=label)
Train_loss, Train_accuracy = compute_loss(data, label)
# print(output)
# output = model(x=data)
Train_loss.backward()
clip_grad_norm(parameters = model.parameters(), max_norm=1)
optimizer.step()
count += 1
if count % 20 == 0:
train_Accuracy.append(Train_accuracy)
train_Loss.append(Train_loss)
print('Epoch:{},iteration:{},train_loss:{},train_accuracy:{},'.format(epoch, count, Train_loss, Train_accuracy))
if count % 20 == 1:
with torch.no_grad():
model.train(False)
Loss_sum = []
Accuracy_sum = []
count_tmp = 0
for test_data, test_label in test_loader:
test_data = test_data.permute(2, 0, 3, 1)
test_data = Variable(test_data.view(28, batch_size, 28))
test_label = Variable(test_label)
if use_gpu:
test_data = test_data.cuda()
test_label = test_label.cuda()
Tes_Loss, Tes_Accuracy = compute_loss(test_data, test_label)
Loss_sum.append(Tes_Loss)
Accuracy_sum.append(Tes_Accuracy)
count_tmp += 1
if count_tmp == 100:
break
test_Loss.append(sum(Loss_sum)/len(Loss_sum))
test_Accuracy.append(sum(Accuracy_sum)/len(Accuracy_sum))
show_data(train_Accuracy, train_Loss, test_Loss, test_Accuracy, scatter=False)
| 37.79021
| 129
| 0.599556
|
794accbe8738a4937be52abd3771a290cff29729
| 2,165
|
py
|
Python
|
VGG-19/vgg-19/tensornet/layers/linear.py
|
zfgao66/deeplearning-mpo-tensorflow
|
c345b9fea79e16f98f9b50e0b4e0bcaf4ed4c8e6
|
[
"MIT"
] | 24
|
2019-04-30T14:59:43.000Z
|
2021-11-16T03:47:38.000Z
|
VGG-19/vgg-19/tensornet/layers/linear.py
|
HC1022/deeplearning-mpo
|
c345b9fea79e16f98f9b50e0b4e0bcaf4ed4c8e6
|
[
"MIT"
] | null | null | null |
VGG-19/vgg-19/tensornet/layers/linear.py
|
HC1022/deeplearning-mpo
|
c345b9fea79e16f98f9b50e0b4e0bcaf4ed4c8e6
|
[
"MIT"
] | 9
|
2019-08-14T10:50:37.000Z
|
2022-03-15T14:41:52.000Z
|
import tensorflow as tf
from .auxx import get_var_wrap
def linear(inp,
out_size,
weights_initializer=tf.contrib.layers.xavier_initializer(uniform=False),
weights_regularizer=None,
biases_initializer=tf.zeros_initializer,
biases_regularizer=None,
trainable=True,
cpu_variables=False,
scope=None):
""" linear layer
Args:
inp: input tensor, float - [batch_size, inp_size]
out_size: layer units count, int
weights_initializer: weights init function
weights_regularizer: weights regularizer function
biases_initializer: biases init function (if None then no biases will be used)
biases_regularizer: biases regularizer function
trainable: trainable variables flag, bool
cpu_variables: cpu variables flag, bool
scope: layer variable scope name, string
Returns:
out: output tensor, float - [batch_size, out_size]
"""
with tf.variable_scope(scope):
shape = inp.get_shape().as_list()
assert len(shape) == 2, 'Not 2D input tensor'
inp_size = shape[-1]
weights = get_var_wrap('weights',
shape=[inp_size, out_size],
initializer=weights_initializer,
regularizer=weights_regularizer,
trainable=trainable,
cpu_variable=cpu_variables)
if biases_initializer is not None:
biases = get_var_wrap('biases',
shape=[out_size],
initializer=biases_initializer,
regularizer=biases_regularizer,
trainable=trainable,
cpu_variable=cpu_variables)
out = tf.add(tf.matmul(inp, weights, name='matmul'), biases, name='out')
else:
out = tf.matmul(inp, weights, name='out')
return out
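# Usage sketch (hypothetical names; TF1 graph mode as used in this repo):
# x = tf.placeholder(tf.float32, [None, 784])
# logits = linear(x, out_size=10, scope='fc_out')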
| 42.45098
| 94
| 0.53903
|
794acd0e6f327be540f49fbb4c25674a0a16c051
| 3,441
|
py
|
Python
|
experiments/vgg16/VGG16_qDCA.py
|
petrapoklukar/DCA
|
e5b3f3481433306a4b33e712272f8bbf5e9d05ce
|
[
"MIT"
] | 2
|
2022-02-14T15:54:22.000Z
|
2022-02-15T18:43:36.000Z
|
experiments/vgg16/VGG16_qDCA.py
|
petrapoklukar/DCA
|
e5b3f3481433306a4b33e712272f8bbf5e9d05ce
|
[
"MIT"
] | null | null | null |
experiments/vgg16/VGG16_qDCA.py
|
petrapoklukar/DCA
|
e5b3f3481433306a4b33e712272f8bbf5e9d05ce
|
[
"MIT"
] | null | null | null |
import os
import pickle
from dca.DCA import DCA
from dca.schemes import (
DCALoggers,
DelaunayGraphParams,
ExperimentDirs,
GeomCAParams,
HDBSCANParams,
QueryData,
REData,
)
import typer
from VGG16_utils import _analyze_query_point_assignment
app = typer.Typer()
@app.command()
def vgg16_qDCA(version_id: str, run_DCA: int = 1, run_qDCA: int = 1, cleanup: int = 1):
repr_level = "feat_lin1"
experiment_path = "output/vgg16_qDCA/"
experiment_id = version_id
# Set parameters
path_to_dataset = f"representations/vgg16/{version_id}"
path_to_Rfeatures = os.path.join(path_to_dataset, "sampled_Rfeatures.pkl")
if os.path.isfile(path_to_Rfeatures):
with open(path_to_Rfeatures, "rb") as f:
Rdata = pickle.load(f)
else:
raise ValueError(f"Input file {path_to_Rfeatures} not found.")
path_to_Efeatures = os.path.join(path_to_dataset, "sampled_Efeatures.pkl")
if os.path.isfile(path_to_Efeatures):
with open(path_to_Efeatures, "rb") as f:
Edata = pickle.load(f)
else:
raise ValueError(f"Input file {path_to_Efeatures} not found.")
R = Rdata[repr_level]
E = Edata[repr_level]
init_data_config = REData(R=R, E=E)
experiment_config = ExperimentDirs(
experiment_dir=experiment_path,
experiment_id=experiment_id,
)
graph_config = DelaunayGraphParams(
filtered_edges_dir=os.path.join(experiment_id, "logs"),
)
hdbscan_config = HDBSCANParams(
clusterer_dir=os.path.join(experiment_id, "logs"),
)
geomCA_config = GeomCAParams()
exp_loggers = DCALoggers(experiment_config.logs_dir)
output = []
if run_DCA:
dca = DCA(
experiment_config,
graph_config,
hdbscan_config,
geomCA_config,
loggers=exp_loggers,
)
dca_scores = dca.fit(
init_data_config
) # Do not call cleanup, output files are needed for qDCA
output.append(dca_scores)
if run_qDCA:
path_to_Qfeatures = os.path.join(path_to_dataset, "query_features.pkl")
if os.path.isfile(path_to_Qfeatures):
with open(path_to_Qfeatures, "rb") as f:
query_data = pickle.load(f)
else:
raise ValueError(f"Input file {path_to_Qfeatures} not found.")
Q = query_data[repr_level]
query_data_config = QueryData(
Q=Q,
query_input_array_files_dir=os.path.join(experiment_id, "logs"),
            query_input_array_comp_assignment_filename="query_data_comp_assignment.npy",
)
dca = DCA(
experiment_config,
graph_config,
hdbscan_config,
geomCA_config,
exp_loggers,
)
query_points_to_RE_assignment = dca.process_query_points(
init_data_config, query_data_config, assign_to_RE=True
)
output.append(query_points_to_RE_assignment)
accuracy = _analyze_query_point_assignment(
query_data,
Rdata,
Edata,
init_data_config.num_R,
query_points_to_RE_assignment,
experiment_config.DCA_dir,
)
print("Accuracy: %s", accuracy)
output.append(accuracy)
if cleanup:
dca.cleanup()
return output
if __name__ == "__main__":
typer.run(vgg16_qDCA)
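# Example invocation (hypothetical version id; option names follow typer's
# parameter-name conversion):
#   python VGG16_qDCA.py my_version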
| 28.675
| 89
| 0.637315
|
794acd1a438c20fa8cd3bdda0d24a850066f9baf
| 206
|
py
|
Python
|
file_FindString.py
|
karus4226/ulti_code
|
55c10ca4bd4210c7d784efcb6276d288f253bb40
|
[
"MIT"
] | null | null | null |
file_FindString.py
|
karus4226/ulti_code
|
55c10ca4bd4210c7d784efcb6276d288f253bb40
|
[
"MIT"
] | null | null | null |
file_FindString.py
|
karus4226/ulti_code
|
55c10ca4bd4210c7d784efcb6276d288f253bb40
|
[
"MIT"
] | null | null | null |
import mmap
def ListFile(input_str, input_file):
    with open(input_file, 'rb') as f:  # open in binary mode for mmap
        s = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
        if s.find(input_str.encode()) != -1:  # mmap.find() expects bytes, not str
            print('true')
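# Example (hypothetical paths): prints 'true' when the string occurs in the file.
# ListFile('needle', 'haystack.txt')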
| 25.75
| 61
| 0.606796
|
794acf35b66827ee323d19ae7acdda01babc7973
| 64,941
|
py
|
Python
|
nova/tests/unit/objects/test_request_spec.py
|
nfvri/nova
|
2ce5a440c44eb512f07adacd313304e226bb56a0
|
[
"Apache-2.0"
] | 1
|
2021-12-27T00:47:30.000Z
|
2021-12-27T00:47:30.000Z
|
nova/tests/unit/objects/test_request_spec.py
|
nfvri/nova
|
2ce5a440c44eb512f07adacd313304e226bb56a0
|
[
"Apache-2.0"
] | null | null | null |
nova/tests/unit/objects/test_request_spec.py
|
nfvri/nova
|
2ce5a440c44eb512f07adacd313304e226bb56a0
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import mock
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import uuidutils
from oslo_versionedobjects import base as ovo_base
from nova import context
from nova import exception
from nova.network import model as network_model
from nova import objects
from nova.objects import base
from nova.objects import request_spec
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_flavor
from nova.tests.unit import fake_instance
from nova.tests.unit import fake_network_cache_model
from nova.tests.unit import fake_request_spec
from nova.tests.unit.objects import test_objects
class _TestRequestSpecObject(object):
def test_image_meta_from_image_as_object(self):
        # Just isolating the test for the _image_meta_from_image() method
image_meta = objects.ImageMeta(name='foo')
spec = objects.RequestSpec()
spec._image_meta_from_image(image_meta)
self.assertEqual(image_meta, spec.image)
@mock.patch.object(objects.ImageMeta, 'from_dict')
def test_image_meta_from_image_as_dict(self, from_dict):
# Just isolating the test for the from_dict() method
image_meta = objects.ImageMeta(name='foo')
from_dict.return_value = image_meta
spec = objects.RequestSpec()
spec._image_meta_from_image({'name': 'foo'})
self.assertEqual(image_meta, spec.image)
def test_image_meta_from_image_as_none(self):
        # just a dumb check to get full coverage
spec = objects.RequestSpec()
spec._image_meta_from_image(None)
self.assertIsNone(spec.image)
@mock.patch.object(base, 'obj_to_primitive')
def test_to_legacy_image(self, obj_to_primitive):
spec = objects.RequestSpec(image=objects.ImageMeta())
fake_dict = mock.Mock()
obj_to_primitive.return_value = fake_dict
self.assertEqual(fake_dict, spec._to_legacy_image())
obj_to_primitive.assert_called_once_with(spec.image)
@mock.patch.object(base, 'obj_to_primitive')
def test_to_legacy_image_with_none(self, obj_to_primitive):
spec = objects.RequestSpec(image=None)
self.assertEqual({}, spec._to_legacy_image())
self.assertFalse(obj_to_primitive.called)
def test_from_instance_as_object(self):
instance = objects.Instance()
instance.uuid = uuidutils.generate_uuid()
instance.numa_topology = None
instance.pci_requests = None
instance.project_id = fakes.FAKE_PROJECT_ID
instance.user_id = fakes.FAKE_USER_ID
instance.availability_zone = 'nova'
spec = objects.RequestSpec()
spec._from_instance(instance)
instance_fields = ['numa_topology', 'pci_requests', 'uuid',
'project_id', 'user_id', 'availability_zone']
for field in instance_fields:
if field == 'uuid':
self.assertEqual(getattr(instance, field),
getattr(spec, 'instance_uuid'))
else:
self.assertEqual(getattr(instance, field),
getattr(spec, field))
def test_from_instance_as_dict(self):
instance = dict(uuid=uuidutils.generate_uuid(),
numa_topology=None,
pci_requests=None,
project_id=fakes.FAKE_PROJECT_ID,
user_id=fakes.FAKE_USER_ID,
availability_zone='nova')
spec = objects.RequestSpec()
spec._from_instance(instance)
instance_fields = ['numa_topology', 'pci_requests', 'uuid',
'project_id', 'user_id', 'availability_zone']
for field in instance_fields:
if field == 'uuid':
self.assertEqual(instance.get(field),
getattr(spec, 'instance_uuid'))
else:
self.assertEqual(instance.get(field), getattr(spec, field))
@mock.patch.object(objects.InstancePCIRequests,
'from_request_spec_instance_props')
def test_from_instance_with_pci_requests(self, pci_from_spec):
fake_pci_requests = objects.InstancePCIRequests()
pci_from_spec.return_value = fake_pci_requests
instance = dict(
uuid=uuidutils.generate_uuid(),
root_gb=10,
ephemeral_gb=0,
memory_mb=10,
vcpus=1,
numa_topology=None,
project_id=fakes.FAKE_PROJECT_ID,
user_id=fakes.FAKE_USER_ID,
availability_zone='nova',
pci_requests={
'instance_uuid': 'fakeid',
'requests': [{'count': 1, 'spec': [{'vendor_id': '8086'}]}]})
spec = objects.RequestSpec()
spec._from_instance(instance)
pci_from_spec.assert_called_once_with(instance['pci_requests'])
self.assertEqual(fake_pci_requests, spec.pci_requests)
def test_from_instance_with_numa_stuff(self):
instance = dict(
uuid=uuidutils.generate_uuid(),
root_gb=10,
ephemeral_gb=0,
memory_mb=10,
vcpus=1,
project_id=fakes.FAKE_PROJECT_ID,
user_id=fakes.FAKE_USER_ID,
availability_zone='nova',
pci_requests=None,
numa_topology={'cells': [{'id': 1, 'cpuset': ['1'], 'memory': 8192,
'pagesize': None, 'cpu_topology': None,
'cpu_pinning_raw': None}]})
spec = objects.RequestSpec()
spec._from_instance(instance)
self.assertIsInstance(spec.numa_topology, objects.InstanceNUMATopology)
cells = spec.numa_topology.cells
self.assertEqual(1, len(cells))
self.assertIsInstance(cells[0], objects.InstanceNUMACell)
def test_from_flavor_as_object(self):
flavor = objects.Flavor()
spec = objects.RequestSpec()
spec._from_flavor(flavor)
self.assertEqual(flavor, spec.flavor)
def test_from_flavor_as_dict(self):
flavor_dict = dict(id=1)
ctxt = context.RequestContext('fake', 'fake')
spec = objects.RequestSpec(ctxt)
spec._from_flavor(flavor_dict)
self.assertIsInstance(spec.flavor, objects.Flavor)
self.assertEqual({'id': 1}, spec.flavor.obj_get_changes())
def test_to_legacy_instance(self):
spec = objects.RequestSpec()
spec.flavor = objects.Flavor(root_gb=10,
ephemeral_gb=0,
memory_mb=10,
vcpus=1)
spec.numa_topology = None
spec.pci_requests = None
spec.project_id = fakes.FAKE_PROJECT_ID
spec.user_id = fakes.FAKE_USER_ID
spec.availability_zone = 'nova'
instance = spec._to_legacy_instance()
self.assertEqual({'root_gb': 10,
'ephemeral_gb': 0,
'memory_mb': 10,
'vcpus': 1,
'numa_topology': None,
'pci_requests': None,
'project_id': fakes.FAKE_PROJECT_ID,
'user_id': fakes.FAKE_USER_ID,
'availability_zone': 'nova'}, instance)
def test_to_legacy_instance_with_unset_values(self):
spec = objects.RequestSpec()
self.assertEqual({}, spec._to_legacy_instance())
def test_from_retry(self):
retry_dict = {'num_attempts': 1,
'hosts': [['fake1', 'node1']]}
ctxt = context.RequestContext('fake', 'fake')
spec = objects.RequestSpec(ctxt)
spec._from_retry(retry_dict)
self.assertIsInstance(spec.retry, objects.SchedulerRetries)
self.assertEqual(1, spec.retry.num_attempts)
self.assertIsInstance(spec.retry.hosts, objects.ComputeNodeList)
self.assertEqual(1, len(spec.retry.hosts))
self.assertEqual('fake1', spec.retry.hosts[0].host)
self.assertEqual('node1', spec.retry.hosts[0].hypervisor_hostname)
def test_from_retry_missing_values(self):
retry_dict = {}
ctxt = context.RequestContext('fake', 'fake')
spec = objects.RequestSpec(ctxt)
spec._from_retry(retry_dict)
self.assertIsNone(spec.retry)
def test_populate_group_info(self):
filt_props = {}
filt_props['group_updated'] = True
filt_props['group_policies'] = set(['affinity'])
filt_props['group_hosts'] = set(['fake1'])
filt_props['group_members'] = set(['fake-instance1'])
# Make sure it can handle group uuid not being present.
for group_uuid in (None, uuids.group_uuid):
if group_uuid:
filt_props['group_uuid'] = group_uuid
spec = objects.RequestSpec()
spec._populate_group_info(filt_props)
self.assertIsInstance(spec.instance_group, objects.InstanceGroup)
self.assertEqual('affinity', spec.instance_group.policy)
self.assertEqual(['fake1'], spec.instance_group.hosts)
self.assertEqual(['fake-instance1'], spec.instance_group.members)
if group_uuid:
self.assertEqual(uuids.group_uuid, spec.instance_group.uuid)
def test_populate_group_info_missing_values(self):
filt_props = {}
spec = objects.RequestSpec()
spec._populate_group_info(filt_props)
self.assertIsNone(spec.instance_group)
def test_from_limits(self):
limits_dict = {'numa_topology': None,
'vcpu': 1.0,
'disk_gb': 1.0,
'memory_mb': 1.0}
spec = objects.RequestSpec()
spec._from_limits(limits_dict)
self.assertIsInstance(spec.limits, objects.SchedulerLimits)
self.assertIsNone(spec.limits.numa_topology)
self.assertEqual(1, spec.limits.vcpu)
self.assertEqual(1, spec.limits.disk_gb)
self.assertEqual(1, spec.limits.memory_mb)
def test_from_limits_missing_values(self):
limits_dict = {}
spec = objects.RequestSpec()
spec._from_limits(limits_dict)
self.assertIsInstance(spec.limits, objects.SchedulerLimits)
self.assertIsNone(spec.limits.numa_topology)
self.assertIsNone(spec.limits.vcpu)
self.assertIsNone(spec.limits.disk_gb)
self.assertIsNone(spec.limits.memory_mb)
def test_from_hints(self):
hints_dict = {'foo_str': '1',
'bar_list': ['2']}
spec = objects.RequestSpec()
spec._from_hints(hints_dict)
expected = {'foo_str': ['1'],
'bar_list': ['2']}
self.assertEqual(expected, spec.scheduler_hints)
def test_from_hints_with_no_hints(self):
spec = objects.RequestSpec()
spec._from_hints(None)
self.assertIsNone(spec.scheduler_hints)
@mock.patch.object(objects.SchedulerLimits, 'from_dict')
def test_from_primitives(self, mock_limits):
spec_dict = {'instance_type': objects.Flavor(),
'instance_properties': objects.Instance(
uuid=uuidutils.generate_uuid(),
numa_topology=None,
pci_requests=None,
project_id=1,
user_id=2,
availability_zone='nova')}
filt_props = {}
        # We seriously don't care about the return values; we just want to
        # make sure that all the fields are set
mock_limits.return_value = None
ctxt = context.RequestContext('fake', 'fake')
spec = objects.RequestSpec.from_primitives(ctxt, spec_dict, filt_props)
mock_limits.assert_called_once_with({})
# Make sure that all fields are set using that helper method
skip = ['id', 'security_groups', 'network_metadata', 'is_bfv']
for field in [f for f in spec.obj_fields if f not in skip]:
self.assertTrue(spec.obj_attr_is_set(field),
'Field: %s is not set' % field)
# just making sure that the context is set by the method
self.assertEqual(ctxt, spec._context)
def test_from_primitives_with_requested_destination(self):
destination = objects.Destination(host='foo')
spec_dict = {}
filt_props = {'requested_destination': destination}
ctxt = context.RequestContext('fake', 'fake')
spec = objects.RequestSpec.from_primitives(ctxt, spec_dict, filt_props)
self.assertEqual(destination, spec.requested_destination)
def test_from_components(self):
ctxt = context.RequestContext('fake-user', 'fake-project')
destination = objects.Destination(host='foo')
instance = fake_instance.fake_instance_obj(ctxt)
image = {'id': uuids.image_id, 'properties': {'mappings': []},
'status': 'fake-status', 'location': 'far-away'}
flavor = fake_flavor.fake_flavor_obj(ctxt)
filter_properties = {'requested_destination': destination}
instance_group = None
spec = objects.RequestSpec.from_components(ctxt, instance.uuid, image,
flavor, instance.numa_topology, instance.pci_requests,
filter_properties, instance_group, instance.availability_zone,
objects.SecurityGroupList())
# Make sure that all fields are set using that helper method
skip = ['id', 'network_metadata', 'is_bfv']
for field in [f for f in spec.obj_fields if f not in skip]:
self.assertTrue(spec.obj_attr_is_set(field),
'Field: %s is not set' % field)
# just making sure that the context is set by the method
self.assertEqual(ctxt, spec._context)
self.assertEqual(destination, spec.requested_destination)
@mock.patch('nova.objects.RequestSpec._populate_group_info')
def test_from_components_with_instance_group(self, mock_pgi):
# This test makes sure that we don't overwrite instance group passed
# to from_components
ctxt = context.RequestContext('fake-user', 'fake-project')
instance = fake_instance.fake_instance_obj(ctxt)
image = {'id': uuids.image_id, 'properties': {'mappings': []},
'status': 'fake-status', 'location': 'far-away'}
flavor = fake_flavor.fake_flavor_obj(ctxt)
filter_properties = {'fake': 'property'}
instance_group = objects.InstanceGroup()
objects.RequestSpec.from_components(ctxt, instance.uuid, image,
flavor, instance.numa_topology, instance.pci_requests,
filter_properties, instance_group, instance.availability_zone)
self.assertFalse(mock_pgi.called)
@mock.patch('nova.objects.RequestSpec._populate_group_info')
def test_from_components_without_instance_group(self, mock_pgi):
# This test makes sure that we populate instance group if not
# present
ctxt = context.RequestContext(fakes.FAKE_USER_ID,
fakes.FAKE_PROJECT_ID)
instance = fake_instance.fake_instance_obj(ctxt)
image = {'id': uuids.image_id, 'properties': {'mappings': []},
'status': 'fake-status', 'location': 'far-away'}
flavor = fake_flavor.fake_flavor_obj(ctxt)
filter_properties = {'fake': 'property'}
objects.RequestSpec.from_components(ctxt, instance.uuid, image,
flavor, instance.numa_topology, instance.pci_requests,
filter_properties, None, instance.availability_zone)
mock_pgi.assert_called_once_with(filter_properties)
@mock.patch('nova.objects.RequestSpec._populate_group_info')
def test_from_components_without_security_groups(self, mock_pgi):
# This test makes sure that we populate instance group if not
# present
ctxt = context.RequestContext(fakes.FAKE_USER_ID,
fakes.FAKE_PROJECT_ID)
instance = fake_instance.fake_instance_obj(ctxt)
image = {'id': uuids.image_id, 'properties': {'mappings': []},
'status': 'fake-status', 'location': 'far-away'}
flavor = fake_flavor.fake_flavor_obj(ctxt)
filter_properties = {'fake': 'property'}
spec = objects.RequestSpec.from_components(ctxt, instance.uuid, image,
flavor, instance.numa_topology, instance.pci_requests,
filter_properties, None, instance.availability_zone)
self.assertNotIn('security_groups', spec)
    def test_from_components_with_port_resource_request(self):
ctxt = context.RequestContext(fakes.FAKE_USER_ID,
fakes.FAKE_PROJECT_ID)
instance = fake_instance.fake_instance_obj(ctxt)
image = {'id': uuids.image_id, 'properties': {'mappings': []},
'status': 'fake-status', 'location': 'far-away'}
flavor = fake_flavor.fake_flavor_obj(ctxt)
filter_properties = {'fake': 'property'}
rg = request_spec.RequestGroup()
spec = objects.RequestSpec.from_components(ctxt, instance.uuid, image,
flavor, instance.numa_topology, instance.pci_requests,
filter_properties, None, instance.availability_zone,
port_resource_requests=[rg])
self.assertListEqual([rg], spec.requested_resources)
def test_get_scheduler_hint(self):
spec_obj = objects.RequestSpec(scheduler_hints={'foo_single': ['1'],
'foo_mul': ['1', '2']})
self.assertEqual('1', spec_obj.get_scheduler_hint('foo_single'))
self.assertEqual(['1', '2'], spec_obj.get_scheduler_hint('foo_mul'))
self.assertIsNone(spec_obj.get_scheduler_hint('oops'))
self.assertEqual('bar', spec_obj.get_scheduler_hint('oops',
default='bar'))
def test_get_scheduler_hint_with_no_hints(self):
spec_obj = objects.RequestSpec()
self.assertEqual('bar', spec_obj.get_scheduler_hint('oops',
default='bar'))
@mock.patch.object(objects.RequestSpec, '_to_legacy_instance')
@mock.patch.object(base, 'obj_to_primitive')
def test_to_legacy_request_spec_dict(self, image_to_primitive,
spec_to_legacy_instance):
fake_image_dict = mock.Mock()
image_to_primitive.return_value = fake_image_dict
fake_instance = {'root_gb': 1.0,
'ephemeral_gb': 1.0,
'memory_mb': 1.0,
'vcpus': 1,
'numa_topology': None,
'pci_requests': None,
'project_id': fakes.FAKE_PROJECT_ID,
'availability_zone': 'nova',
'uuid': '1'}
spec_to_legacy_instance.return_value = fake_instance
fake_flavor = objects.Flavor(root_gb=10,
ephemeral_gb=0,
memory_mb=512,
vcpus=1)
spec = objects.RequestSpec(num_instances=1,
image=objects.ImageMeta(),
# instance properties
numa_topology=None,
pci_requests=None,
project_id=1,
availability_zone='nova',
instance_uuid=uuids.instance,
flavor=fake_flavor)
spec_dict = spec.to_legacy_request_spec_dict()
expected = {'num_instances': 1,
'image': fake_image_dict,
'instance_properties': fake_instance,
'instance_type': fake_flavor}
self.assertEqual(expected, spec_dict)
def test_to_legacy_request_spec_dict_with_unset_values(self):
spec = objects.RequestSpec()
self.assertEqual({'num_instances': 1,
'image': {},
'instance_properties': {},
'instance_type': {}},
spec.to_legacy_request_spec_dict())
def test_to_legacy_filter_properties_dict(self):
fake_numa_limits = objects.NUMATopologyLimits()
fake_computes_obj = objects.ComputeNodeList(
objects=[objects.ComputeNode(host='fake1',
hypervisor_hostname='node1')])
fake_dest = objects.Destination(host='fakehost')
spec = objects.RequestSpec(
ignore_hosts=['ignoredhost'],
force_hosts=['fakehost'],
force_nodes=['fakenode'],
retry=objects.SchedulerRetries(num_attempts=1,
hosts=fake_computes_obj),
limits=objects.SchedulerLimits(numa_topology=fake_numa_limits,
vcpu=1.0,
disk_gb=10.0,
memory_mb=8192.0),
instance_group=objects.InstanceGroup(hosts=['fake1'],
policy='affinity',
members=['inst1', 'inst2'],
uuid=uuids.group_uuid),
scheduler_hints={'foo': ['bar']},
requested_destination=fake_dest)
expected = {'ignore_hosts': ['ignoredhost'],
'force_hosts': ['fakehost'],
'force_nodes': ['fakenode'],
'retry': {'num_attempts': 1,
'hosts': [['fake1', 'node1']]},
'limits': {'numa_topology': fake_numa_limits,
'vcpu': 1.0,
'disk_gb': 10.0,
'memory_mb': 8192.0},
'group_updated': True,
'group_hosts': set(['fake1']),
'group_policies': set(['affinity']),
'group_members': set(['inst1', 'inst2']),
'group_uuid': uuids.group_uuid,
'scheduler_hints': {'foo': 'bar'},
'requested_destination': fake_dest}
self.assertEqual(expected, spec.to_legacy_filter_properties_dict())
def test_to_legacy_filter_properties_dict_with_nullable_values(self):
spec = objects.RequestSpec(force_hosts=None,
force_nodes=None,
retry=None,
limits=None,
instance_group=None,
scheduler_hints=None)
self.assertEqual({}, spec.to_legacy_filter_properties_dict())
def test_to_legacy_filter_properties_dict_with_unset_values(self):
spec = objects.RequestSpec()
self.assertEqual({}, spec.to_legacy_filter_properties_dict())
def test_ensure_network_metadata(self):
network_a = fake_network_cache_model.new_network({
'physical_network': 'foo', 'tunneled': False})
vif_a = fake_network_cache_model.new_vif({'network': network_a})
network_b = fake_network_cache_model.new_network({
'physical_network': 'foo', 'tunneled': False})
vif_b = fake_network_cache_model.new_vif({'network': network_b})
network_c = fake_network_cache_model.new_network({
'physical_network': 'bar', 'tunneled': False})
vif_c = fake_network_cache_model.new_vif({'network': network_c})
network_d = fake_network_cache_model.new_network({
'physical_network': None, 'tunneled': True})
vif_d = fake_network_cache_model.new_vif({'network': network_d})
nw_info = network_model.NetworkInfo([vif_a, vif_b, vif_c, vif_d])
info_cache = objects.InstanceInfoCache(network_info=nw_info,
instance_uuid=uuids.instance)
instance = objects.Instance(id=3, uuid=uuids.instance,
info_cache=info_cache)
spec = objects.RequestSpec()
self.assertNotIn('network_metadata', spec)
spec.ensure_network_metadata(instance)
self.assertIn('network_metadata', spec)
self.assertIsInstance(spec.network_metadata, objects.NetworkMetadata)
self.assertEqual(spec.network_metadata.physnets, set(['foo', 'bar']))
self.assertTrue(spec.network_metadata.tunneled)
def test_ensure_network_metadata_missing(self):
nw_info = network_model.NetworkInfo([])
info_cache = objects.InstanceInfoCache(network_info=nw_info,
instance_uuid=uuids.instance)
instance = objects.Instance(id=3, uuid=uuids.instance,
info_cache=info_cache)
spec = objects.RequestSpec()
self.assertNotIn('network_metadata', spec)
spec.ensure_network_metadata(instance)
self.assertNotIn('network_metadata', spec)
@mock.patch.object(request_spec.RequestSpec,
'_get_by_instance_uuid_from_db')
@mock.patch('nova.objects.InstanceGroup.get_by_uuid')
def test_get_by_instance_uuid(self, mock_get_ig, get_by_uuid):
fake_spec = fake_request_spec.fake_db_spec()
get_by_uuid.return_value = fake_spec
mock_get_ig.return_value = objects.InstanceGroup(name='fresh')
req_obj = request_spec.RequestSpec.get_by_instance_uuid(self.context,
fake_spec['instance_uuid'])
self.assertEqual(1, req_obj.num_instances)
# ignore_hosts is not persisted
self.assertIsNone(req_obj.ignore_hosts)
self.assertEqual('fake', req_obj.project_id)
self.assertEqual({'hint': ['over-there']}, req_obj.scheduler_hints)
self.assertEqual(['host1', 'host3'], req_obj.force_hosts)
self.assertIsNone(req_obj.availability_zone)
self.assertEqual(['node1', 'node2'], req_obj.force_nodes)
self.assertIsInstance(req_obj.image, objects.ImageMeta)
self.assertIsInstance(req_obj.numa_topology,
objects.InstanceNUMATopology)
self.assertIsInstance(req_obj.pci_requests,
objects.InstancePCIRequests)
self.assertIsInstance(req_obj.flavor, objects.Flavor)
# The 'retry' field is not persistent.
self.assertIsNone(req_obj.retry)
self.assertIsInstance(req_obj.limits, objects.SchedulerLimits)
self.assertIsInstance(req_obj.instance_group, objects.InstanceGroup)
self.assertEqual('fresh', req_obj.instance_group.name)
def _check_update_primitive(self, req_obj, changes):
self.assertEqual(req_obj.instance_uuid, changes['instance_uuid'])
serialized_obj = objects.RequestSpec.obj_from_primitive(
jsonutils.loads(changes['spec']))
# primitive fields
for field in ['instance_uuid', 'num_instances',
'project_id', 'scheduler_hints', 'force_hosts',
'availability_zone', 'force_nodes']:
self.assertEqual(getattr(req_obj, field),
getattr(serialized_obj, field))
# object fields
for field in ['image', 'numa_topology', 'pci_requests', 'flavor',
'limits', 'network_metadata']:
self.assertEqual(
getattr(req_obj, field).obj_to_primitive(),
getattr(serialized_obj, field).obj_to_primitive())
self.assertIsNone(serialized_obj.instance_group.members)
self.assertIsNone(serialized_obj.instance_group.hosts)
self.assertIsNone(serialized_obj.retry)
self.assertIsNone(serialized_obj.requested_destination)
self.assertIsNone(serialized_obj.ignore_hosts)
def test_create(self):
req_obj = fake_request_spec.fake_spec_obj(remove_id=True)
def _test_create_args(self2, context, changes):
self._check_update_primitive(req_obj, changes)
# DB creation would have set an id
changes['id'] = 42
return changes
with mock.patch.object(request_spec.RequestSpec, '_create_in_db',
_test_create_args):
req_obj.create()
def test_create_id_set(self):
req_obj = request_spec.RequestSpec(self.context)
req_obj.id = 3
self.assertRaises(exception.ObjectActionError, req_obj.create)
def test_create_does_not_persist_requested_fields(self):
req_obj = fake_request_spec.fake_spec_obj(remove_id=True)
expected_network_metadata = objects.NetworkMetadata(
physnets=set(['foo', 'bar']), tunneled=True)
req_obj.network_metadata = expected_network_metadata
expected_destination = request_spec.Destination(host='sample-host')
req_obj.requested_destination = expected_destination
rg = request_spec.RequestGroup(resources={'fake-rc': 13})
req_obj.requested_resources = [rg]
expected_retry = objects.SchedulerRetries(
num_attempts=2,
hosts=objects.ComputeNodeList(objects=[
objects.ComputeNode(host='host1', hypervisor_hostname='node1'),
objects.ComputeNode(host='host2', hypervisor_hostname='node2'),
]))
req_obj.retry = expected_retry
orig_create_in_db = request_spec.RequestSpec._create_in_db
with mock.patch.object(request_spec.RequestSpec, '_create_in_db') \
as mock_create_in_db:
mock_create_in_db.side_effect = orig_create_in_db
req_obj.create()
mock_create_in_db.assert_called_once()
updates = mock_create_in_db.mock_calls[0][1][1]
# assert that the following fields are not stored in the db
# 1. network_metadata
# 2. requested_destination
# 3. requested_resources
# 4. retry
data = jsonutils.loads(updates['spec'])['nova_object.data']
self.assertNotIn('network_metadata', data)
self.assertIsNone(data['requested_destination'])
self.assertIsNone(data['requested_resources'])
self.assertIsNone(data['retry'])
self.assertIsNotNone(data['instance_uuid'])
# also we expect that the following fields are not reset after create
# 1. network_metadata
# 2. requested_destination
# 3. requested_resources
# 4. retry
self.assertIsNotNone(req_obj.network_metadata)
self.assertJsonEqual(expected_network_metadata.obj_to_primitive(),
req_obj.network_metadata.obj_to_primitive())
self.assertIsNotNone(req_obj.requested_destination)
self.assertJsonEqual(expected_destination.obj_to_primitive(),
req_obj.requested_destination.obj_to_primitive())
self.assertIsNotNone(req_obj.requested_resources)
self.assertEqual(
13, req_obj.requested_resources[0].resources['fake-rc'])
self.assertIsNotNone(req_obj.retry)
self.assertJsonEqual(expected_retry.obj_to_primitive(),
req_obj.retry.obj_to_primitive())
def test_save_does_not_persist_requested_fields(self):
req_obj = fake_request_spec.fake_spec_obj(remove_id=True)
req_obj.create()
# change something to make sure _save_in_db is called
expected_network_metadata = objects.NetworkMetadata(
physnets=set(['foo', 'bar']), tunneled=True)
req_obj.network_metadata = expected_network_metadata
expected_destination = request_spec.Destination(host='sample-host')
req_obj.requested_destination = expected_destination
rg = request_spec.RequestGroup(resources={'fake-rc': 13})
req_obj.requested_resources = [rg]
expected_retry = objects.SchedulerRetries(
num_attempts=2,
hosts=objects.ComputeNodeList(objects=[
objects.ComputeNode(host='host1', hypervisor_hostname='node1'),
objects.ComputeNode(host='host2', hypervisor_hostname='node2'),
]))
req_obj.retry = expected_retry
req_obj.num_instances = 2
req_obj.ignore_hosts = [uuids.ignored_host]
orig_save_in_db = request_spec.RequestSpec._save_in_db
with mock.patch.object(request_spec.RequestSpec, '_save_in_db') \
as mock_save_in_db:
mock_save_in_db.side_effect = orig_save_in_db
req_obj.save()
mock_save_in_db.assert_called_once()
updates = mock_save_in_db.mock_calls[0][1][2]
# assert that the following fields are not stored in the db
# 1. network_metadata
# 2. requested_destination
# 3. requested_resources
# 4. retry
# 5. ignore_hosts
data = jsonutils.loads(updates['spec'])['nova_object.data']
self.assertNotIn('network_metadata', data)
self.assertIsNone(data['requested_destination'])
self.assertIsNone(data['requested_resources'])
self.assertIsNone(data['retry'])
self.assertIsNone(data['ignore_hosts'])
self.assertIsNotNone(data['instance_uuid'])
# also we expect that the following fields are not reset after save
# 1. network_metadata
# 2. requested_destination
# 3. requested_resources
# 4. retry
# 5. ignore_hosts
self.assertIsNotNone(req_obj.network_metadata)
self.assertJsonEqual(expected_network_metadata.obj_to_primitive(),
req_obj.network_metadata.obj_to_primitive())
self.assertIsNotNone(req_obj.requested_destination)
self.assertJsonEqual(expected_destination.obj_to_primitive(),
req_obj.requested_destination.obj_to_primitive())
self.assertIsNotNone(req_obj.requested_resources)
self.assertEqual(13, req_obj.requested_resources[0].resources
['fake-rc'])
self.assertIsNotNone(req_obj.retry)
self.assertJsonEqual(expected_retry.obj_to_primitive(),
req_obj.retry.obj_to_primitive())
self.assertIsNotNone(req_obj.ignore_hosts)
self.assertEqual([uuids.ignored_host], req_obj.ignore_hosts)
def test_save(self):
req_obj = fake_request_spec.fake_spec_obj()
# Make sure the requested_destination is not persisted since it is
# only valid per request/operation.
req_obj.requested_destination = objects.Destination(host='fake')
def _test_save_args(self2, context, instance_uuid, changes):
self._check_update_primitive(req_obj, changes)
# DB creation would have set an id
changes['id'] = 42
return changes
with mock.patch.object(request_spec.RequestSpec, '_save_in_db',
_test_save_args):
req_obj.save()
@mock.patch.object(request_spec.RequestSpec, '_destroy_in_db')
def test_destroy(self, destroy_in_db):
req_obj = fake_request_spec.fake_spec_obj()
req_obj.destroy()
destroy_in_db.assert_called_once_with(req_obj._context,
req_obj.instance_uuid)
@mock.patch.object(request_spec.RequestSpec, '_destroy_bulk_in_db')
def test_destroy_bulk(self, destroy_bulk_in_db):
uuids_to_be_deleted = []
for i in range(0, 5):
uuid = uuidutils.generate_uuid()
uuids_to_be_deleted.append(uuid)
destroy_bulk_in_db.return_value = 5
result = objects.RequestSpec.destroy_bulk(self.context,
uuids_to_be_deleted)
destroy_bulk_in_db.assert_called_once_with(self.context,
uuids_to_be_deleted)
self.assertEqual(5, result)
def test_reset_forced_destinations(self):
req_obj = fake_request_spec.fake_spec_obj()
# Making sure the fake object has forced hosts and nodes
self.assertIsNotNone(req_obj.force_hosts)
self.assertIsNotNone(req_obj.force_nodes)
with mock.patch.object(req_obj, 'obj_reset_changes') as mock_reset:
req_obj.reset_forced_destinations()
self.assertIsNone(req_obj.force_hosts)
self.assertIsNone(req_obj.force_nodes)
mock_reset.assert_called_once_with(['force_hosts', 'force_nodes'])
def test_compat_requested_destination(self):
req_obj = objects.RequestSpec(
requested_destination=objects.Destination())
versions = ovo_base.obj_tree_get_versions('RequestSpec')
primitive = req_obj.obj_to_primitive(target_version='1.5',
version_manifest=versions)
self.assertNotIn(
'requested_destination', primitive['nova_object.data'])
primitive = req_obj.obj_to_primitive(target_version='1.6',
version_manifest=versions)
self.assertIn('requested_destination', primitive['nova_object.data'])
def test_compat_security_groups(self):
sgl = objects.SecurityGroupList(objects=[])
req_obj = objects.RequestSpec(security_groups=sgl)
versions = ovo_base.obj_tree_get_versions('RequestSpec')
primitive = req_obj.obj_to_primitive(target_version='1.7',
version_manifest=versions)
self.assertNotIn('security_groups', primitive['nova_object.data'])
primitive = req_obj.obj_to_primitive(target_version='1.8',
version_manifest=versions)
self.assertIn('security_groups', primitive['nova_object.data'])
def test_compat_user_id(self):
req_obj = objects.RequestSpec(project_id=fakes.FAKE_PROJECT_ID,
user_id=fakes.FAKE_USER_ID)
versions = ovo_base.obj_tree_get_versions('RequestSpec')
primitive = req_obj.obj_to_primitive(target_version='1.8',
version_manifest=versions)
primitive = primitive['nova_object.data']
self.assertNotIn('user_id', primitive)
self.assertIn('project_id', primitive)
def test_compat_network_metadata(self):
network_metadata = objects.NetworkMetadata(physnets=set(),
tunneled=False)
req_obj = objects.RequestSpec(network_metadata=network_metadata,
user_id=fakes.FAKE_USER_ID)
versions = ovo_base.obj_tree_get_versions('RequestSpec')
primitive = req_obj.obj_to_primitive(target_version='1.9',
version_manifest=versions)
primitive = primitive['nova_object.data']
self.assertNotIn('network_metadata', primitive)
self.assertIn('user_id', primitive)
def test_compat_requested_resources(self):
req_obj = objects.RequestSpec(requested_resources=[],
instance_uuid=uuids.instance)
versions = ovo_base.obj_tree_get_versions('RequestSpec')
primitive = req_obj.obj_to_primitive(target_version='1.11',
version_manifest=versions)
primitive = primitive['nova_object.data']
self.assertNotIn('requested_resources', primitive)
self.assertIn('instance_uuid', primitive)
def test_default_requested_destination(self):
req_obj = objects.RequestSpec()
self.assertIsNone(req_obj.requested_destination)
def test_security_groups_load(self):
req_obj = objects.RequestSpec()
self.assertNotIn('security_groups', req_obj)
self.assertIsInstance(req_obj.security_groups,
objects.SecurityGroupList)
self.assertIn('security_groups', req_obj)
def test_network_requests_load(self):
req_obj = objects.RequestSpec()
self.assertNotIn('network_metadata', req_obj)
self.assertIsInstance(req_obj.network_metadata,
objects.NetworkMetadata)
self.assertIn('network_metadata', req_obj)
def test_destination_aggregates_default(self):
destination = objects.Destination()
self.assertIsNone(destination.aggregates)
def test_destination_require_aggregates(self):
destination = objects.Destination()
destination.require_aggregates(['foo', 'bar'])
destination.require_aggregates(['baz'])
self.assertEqual(['foo,bar', 'baz'], destination.aggregates)
def test_destination_1dotoh(self):
destination = objects.Destination(aggregates=['foo'])
primitive = destination.obj_to_primitive(target_version='1.0')
self.assertNotIn('aggregates', primitive['nova_object.data'])
def test_create_raises_on_unchanged_object(self):
ctxt = context.RequestContext(uuids.user_id, uuids.project_id)
req_obj = request_spec.RequestSpec(context=ctxt)
self.assertRaises(exception.ObjectActionError, req_obj.create)
def test_save_can_be_called_on_unchanged_object(self):
req_obj = fake_request_spec.fake_spec_obj(remove_id=True)
req_obj.create()
req_obj.save()
class TestRequestSpecObject(test_objects._LocalTest,
_TestRequestSpecObject):
pass
class TestRemoteRequestSpecObject(test_objects._RemoteTest,
_TestRequestSpecObject):
pass
class TestRequestGroupObject(test.TestCase):
def setUp(self):
super(TestRequestGroupObject, self).setUp()
self.user_id = uuids.user_id
self.project_id = uuids.project_id
self.context = context.RequestContext(uuids.user_id, uuids.project_id)
def test_fields_defaulted_at_create(self):
rg = request_spec.RequestGroup(self.context)
self.assertTrue(rg.use_same_provider)
self.assertEqual({}, rg.resources)
self.assertEqual(set(), rg.required_traits)
self.assertEqual(set(), rg.forbidden_traits)
self.assertEqual([], rg.aggregates)
self.assertIsNone(rg.requester_id)
self.assertEqual([], rg.provider_uuids)
self.assertIsNone(rg.in_tree)
def test_from_port_request(self):
port_resource_request = {
"resources": {
"NET_BW_IGR_KILOBIT_PER_SEC": 1000,
"NET_BW_EGR_KILOBIT_PER_SEC": 1000},
"required": ["CUSTOM_PHYSNET_2",
"CUSTOM_VNIC_TYPE_NORMAL"]
}
rg = request_spec.RequestGroup.from_port_request(
self.context, uuids.port_id, port_resource_request)
self.assertTrue(rg.use_same_provider)
self.assertEqual(
{"NET_BW_IGR_KILOBIT_PER_SEC": 1000,
"NET_BW_EGR_KILOBIT_PER_SEC": 1000},
rg.resources)
self.assertEqual({"CUSTOM_PHYSNET_2", "CUSTOM_VNIC_TYPE_NORMAL"},
rg.required_traits)
self.assertEqual(uuids.port_id, rg.requester_id)
# and the rest is defaulted
self.assertEqual(set(), rg.forbidden_traits)
self.assertEqual([], rg.aggregates)
self.assertEqual([], rg.provider_uuids)
def test_from_port_request_without_traits(self):
port_resource_request = {
"resources": {
"NET_BW_IGR_KILOBIT_PER_SEC": 1000,
"NET_BW_EGR_KILOBIT_PER_SEC": 1000}}
rg = request_spec.RequestGroup.from_port_request(
self.context, uuids.port_id, port_resource_request)
self.assertTrue(rg.use_same_provider)
self.assertEqual(
{"NET_BW_IGR_KILOBIT_PER_SEC": 1000,
"NET_BW_EGR_KILOBIT_PER_SEC": 1000},
rg.resources)
self.assertEqual(uuids.port_id, rg.requester_id)
# and the rest is defaulted
self.assertEqual(set(), rg.required_traits)
self.assertEqual(set(), rg.forbidden_traits)
self.assertEqual([], rg.aggregates)
self.assertEqual([], rg.provider_uuids)
def test_compat_requester_and_provider(self):
req_obj = objects.RequestGroup(
requester_id=uuids.requester, provider_uuids=[uuids.rp1],
required_traits=set(['CUSTOM_PHYSNET_2']))
versions = ovo_base.obj_tree_get_versions('RequestGroup')
primitive = req_obj.obj_to_primitive(
target_version='1.2',
version_manifest=versions)['nova_object.data']
self.assertIn('in_tree', primitive)
self.assertIn('requester_id', primitive)
self.assertIn('provider_uuids', primitive)
self.assertIn('required_traits', primitive)
primitive = req_obj.obj_to_primitive(
target_version='1.1',
version_manifest=versions)['nova_object.data']
self.assertNotIn('in_tree', primitive)
self.assertIn('requester_id', primitive)
self.assertIn('provider_uuids', primitive)
self.assertIn('required_traits', primitive)
primitive = req_obj.obj_to_primitive(
target_version='1.0',
version_manifest=versions)['nova_object.data']
self.assertNotIn('in_tree', primitive)
self.assertNotIn('requester_id', primitive)
self.assertNotIn('provider_uuids', primitive)
self.assertIn('required_traits', primitive)
class TestMappingRequestGroupsToProviders(test.NoDBTestCase):
def setUp(self):
super(TestMappingRequestGroupsToProviders, self).setUp()
self.spec = request_spec.RequestSpec()
def test_no_groups(self):
allocations = None
provider_traits = {}
self.spec.map_requested_resources_to_providers(
allocations, provider_traits)
        # we cannot assert much; at least we see that the above call doesn't
        # blow up
self.assertIsNone(self.spec.requested_resources)
def test_unnumbered_group_not_supported(self):
allocations = {}
provider_traits = {}
group1 = request_spec.RequestGroup(
use_same_provider=False)
self.spec.requested_resources = [group1]
self.assertRaises(
NotImplementedError,
self.spec.map_requested_resources_to_providers, allocations,
provider_traits)
def test_forbidden_traits_not_supported(self):
allocations = {}
provider_traits = {}
group1 = request_spec.RequestGroup(
forbidden_traits={'STORAGE_DISK_HDD'})
self.spec.requested_resources = [group1]
self.assertRaises(
NotImplementedError,
self.spec.map_requested_resources_to_providers, allocations,
provider_traits)
def test_aggregates_not_supported(self):
allocations = {}
provider_traits = {}
group1 = request_spec.RequestGroup(
aggregates=[[uuids.agg1]])
self.spec.requested_resources = [group1]
self.assertRaises(
NotImplementedError,
self.spec.map_requested_resources_to_providers, allocations,
provider_traits)
def test_one_group(self):
allocations = {
uuids.compute1_rp: {
"resources": {
'VCPU': 1
}
},
uuids.net_dev1_rp: {
"resources": {
'NET_BW_IGR_KILOBIT_PER_SEC': 1,
'NET_BW_EGR_KILOBIT_PER_SEC': 1,
}
}
}
provider_traits = {
uuids.compute1_rp: [],
uuids.net_dev1_rp: [
'CUSTOM_PHYSNET_PHYSNET0',
'CUSTOM_VNIC_TYPE_NORMAL'
],
}
group1 = request_spec.RequestGroup(
resources={
"NET_BW_IGR_KILOBIT_PER_SEC": 1,
"NET_BW_EGR_KILOBIT_PER_SEC": 1,
},
required_traits={
"CUSTOM_PHYSNET_PHYSNET0",
"CUSTOM_VNIC_TYPE_NORMAL",
})
self.spec.requested_resources = [group1]
self.spec.map_requested_resources_to_providers(
allocations, provider_traits)
self.assertEqual([uuids.net_dev1_rp], group1.provider_uuids)
def test_one_group_no_matching_allocation(self):
        # NOTE(gibi): This negative test scenario should not happen in a real
        # end-to-end test, as we assume that placement only returns candidates
        # that are valid. Still, we want to cover the error case in our
        # implementation.
allocations = {
uuids.compute1_rp: {
"resources": {
'VCPU': 1
}
},
uuids.net_dev1_rp: {
"resources": {
'NET_BW_IGR_KILOBIT_PER_SEC': 1,
}
}
}
provider_traits = {
uuids.compute1_rp: [],
uuids.net_dev1_rp: [
'CUSTOM_PHYSNET_PHYSNET0', 'CUSTOM_VNIC_TYPE_NORMAL'
],
}
group1 = request_spec.RequestGroup(
resources={
"NET_BW_IGR_KILOBIT_PER_SEC": 1,
"NET_BW_EGR_KILOBIT_PER_SEC": 1,
},
required_traits={
"CUSTOM_PHYSNET_PHYSNET0",
"CUSTOM_VNIC_TYPE_NORMAL",
})
self.spec.requested_resources = [group1]
self.assertRaises(
ValueError, self.spec.map_requested_resources_to_providers,
allocations, provider_traits)
def test_one_group_no_matching_trait(self):
        # NOTE(gibi): This negative test scenario should not happen in a real
        # end-to-end test, as we assume that placement only returns candidates
        # that are valid. Still, we want to cover the error case in our
        # implementation.
allocations = {
uuids.compute1_rp: {
"resources": {
'VCPU': 1
}
},
uuids.net_dev1_rp: {
"resources": {
'NET_BW_IGR_KILOBIT_PER_SEC': 1,
'NET_BW_EGR_KILOBIT_PER_SEC': 1,
}
}
}
provider_traits = {
uuids.compute1_rp: [],
uuids.net_dev1_rp: [
'CUSTOM_PHYSNET_PHYSNET1',
'CUSTOM_VNIC_TYPE_NORMAL'
],
}
group1 = request_spec.RequestGroup(
resources={
"NET_BW_IGR_KILOBIT_PER_SEC": 1,
"NET_BW_EGR_KILOBIT_PER_SEC": 1,
},
required_traits={
"CUSTOM_PHYSNET_PHYSNET0",
"CUSTOM_VNIC_TYPE_NORMAL",
})
self.spec.requested_resources = [group1]
self.assertRaises(
ValueError, self.spec.map_requested_resources_to_providers,
allocations, provider_traits)
def test_two_groups_same_provider(self):
allocations = {
uuids.compute1_rp: {
"resources": {
'VCPU': 1
}
},
uuids.net_dev1_rp: {
"resources": {
'NET_BW_IGR_KILOBIT_PER_SEC': 3,
'NET_BW_EGR_KILOBIT_PER_SEC': 3,
}
}
}
provider_traits = {
uuids.compute1_rp: [],
uuids.net_dev1_rp: [
'CUSTOM_PHYSNET_PHYSNET0',
'CUSTOM_VNIC_TYPE_NORMAL'
],
}
group1 = request_spec.RequestGroup(
resources={
"NET_BW_IGR_KILOBIT_PER_SEC": 1,
"NET_BW_EGR_KILOBIT_PER_SEC": 1,
},
required_traits={
"CUSTOM_PHYSNET_PHYSNET0",
"CUSTOM_VNIC_TYPE_NORMAL",
})
group2 = request_spec.RequestGroup(
resources={
"NET_BW_IGR_KILOBIT_PER_SEC": 2,
"NET_BW_EGR_KILOBIT_PER_SEC": 2,
},
required_traits={
"CUSTOM_PHYSNET_PHYSNET0",
"CUSTOM_VNIC_TYPE_NORMAL",
})
self.spec.requested_resources = [group1, group2]
self.spec.map_requested_resources_to_providers(
allocations, provider_traits)
self.assertEqual([uuids.net_dev1_rp], group1.provider_uuids)
self.assertEqual([uuids.net_dev1_rp], group2.provider_uuids)
def test_two_groups_different_providers(self):
# NOTE(gibi): we use OrderedDict here to make the test deterministic
allocations = collections.OrderedDict()
allocations[uuids.compute1_rp] = {
"resources": {
'VCPU': 1
}
}
allocations[uuids.net_dev1_rp] = {
"resources": {
'NET_BW_IGR_KILOBIT_PER_SEC': 2,
'NET_BW_EGR_KILOBIT_PER_SEC': 2,
}
}
allocations[uuids.net_dev2_rp] = {
"resources": {
'NET_BW_IGR_KILOBIT_PER_SEC': 1,
'NET_BW_EGR_KILOBIT_PER_SEC': 1,
}
}
provider_traits = {
uuids.compute1_rp: [],
uuids.net_dev1_rp: [
'CUSTOM_PHYSNET_PHYSNET0',
'CUSTOM_VNIC_TYPE_NORMAL'
],
uuids.net_dev2_rp: [
'CUSTOM_PHYSNET_PHYSNET0',
'CUSTOM_VNIC_TYPE_NORMAL'
],
}
group1 = request_spec.RequestGroup(
resources={
"NET_BW_IGR_KILOBIT_PER_SEC": 1,
"NET_BW_EGR_KILOBIT_PER_SEC": 1,
},
required_traits={
"CUSTOM_PHYSNET_PHYSNET0",
"CUSTOM_VNIC_TYPE_NORMAL",
})
group2 = request_spec.RequestGroup(
resources={
"NET_BW_IGR_KILOBIT_PER_SEC": 2,
"NET_BW_EGR_KILOBIT_PER_SEC": 2,
},
required_traits={
"CUSTOM_PHYSNET_PHYSNET0",
"CUSTOM_VNIC_TYPE_NORMAL",
})
self.spec.requested_resources = [group1, group2]
self.spec.map_requested_resources_to_providers(
allocations, provider_traits)
self.assertEqual([uuids.net_dev2_rp], group1.provider_uuids)
self.assertEqual([uuids.net_dev1_rp], group2.provider_uuids)
def test_two_groups_different_providers_reverse(self):
"""Similar as test_two_groups_different_providers but reorder the
groups to exercises another code path
"""
# NOTE(gibi): we use OrderedDict here to make the test deterministic
allocations = collections.OrderedDict()
allocations[uuids.compute1_rp] = {
"resources": {
'VCPU': 1
}
}
allocations[uuids.net_dev1_rp] = {
"resources": {
'NET_BW_IGR_KILOBIT_PER_SEC': 2,
'NET_BW_EGR_KILOBIT_PER_SEC': 2,
}
}
allocations[uuids.net_dev2_rp] = {
"resources": {
'NET_BW_IGR_KILOBIT_PER_SEC': 1,
'NET_BW_EGR_KILOBIT_PER_SEC': 1,
}
}
provider_traits = {
uuids.compute1_rp: [],
uuids.net_dev1_rp: [
'CUSTOM_PHYSNET_PHYSNET0', 'CUSTOM_VNIC_TYPE_NORMAL'
],
uuids.net_dev2_rp: [
'CUSTOM_PHYSNET_PHYSNET0', 'CUSTOM_VNIC_TYPE_NORMAL'
],
}
group1 = request_spec.RequestGroup(
resources={
"NET_BW_IGR_KILOBIT_PER_SEC": 2,
"NET_BW_EGR_KILOBIT_PER_SEC": 2,
},
required_traits={
"CUSTOM_PHYSNET_PHYSNET0",
"CUSTOM_VNIC_TYPE_NORMAL",
})
group2 = request_spec.RequestGroup(
resources={
"NET_BW_IGR_KILOBIT_PER_SEC": 1,
"NET_BW_EGR_KILOBIT_PER_SEC": 1,
},
required_traits={
"CUSTOM_PHYSNET_PHYSNET0",
"CUSTOM_VNIC_TYPE_NORMAL",
})
self.spec.requested_resources = [group1, group2]
self.spec.map_requested_resources_to_providers(
allocations, provider_traits)
self.assertEqual([uuids.net_dev1_rp], group1.provider_uuids)
self.assertEqual([uuids.net_dev2_rp], group2.provider_uuids)
def test_two_groups_different_providers_different_traits(self):
allocations = collections.OrderedDict()
allocations[uuids.compute1_rp] = {
"resources": {
'VCPU': 1
}
}
allocations[uuids.net_dev1_physnet1_rp] = {
"resources": {
'NET_BW_IGR_KILOBIT_PER_SEC': 1,
'NET_BW_EGR_KILOBIT_PER_SEC': 1,
}
}
allocations[uuids.net_dev2_physnet0_rp] = {
"resources": {
'NET_BW_IGR_KILOBIT_PER_SEC': 1,
'NET_BW_EGR_KILOBIT_PER_SEC': 1,
}
}
provider_traits = {
uuids.compute1_rp: [],
uuids.net_dev1_physnet1_rp: [
'CUSTOM_PHYSNET_PHYSNET1', 'CUSTOM_VNIC_TYPE_NORMAL'
],
uuids.net_dev2_physnet0_rp: [
'CUSTOM_PHYSNET_PHYSNET0', 'CUSTOM_VNIC_TYPE_NORMAL'
],
}
group1 = request_spec.RequestGroup(
resources={
"NET_BW_IGR_KILOBIT_PER_SEC": 1,
"NET_BW_EGR_KILOBIT_PER_SEC": 1,
},
required_traits={
"CUSTOM_PHYSNET_PHYSNET0",
"CUSTOM_VNIC_TYPE_NORMAL",
})
group2 = request_spec.RequestGroup(
resources={
"NET_BW_IGR_KILOBIT_PER_SEC": 1,
"NET_BW_EGR_KILOBIT_PER_SEC": 1,
},
required_traits={
"CUSTOM_PHYSNET_PHYSNET1",
"CUSTOM_VNIC_TYPE_NORMAL",
})
self.spec.requested_resources = [group1, group2]
self.spec.map_requested_resources_to_providers(
allocations, provider_traits)
self.assertEqual([uuids.net_dev2_physnet0_rp], group1.provider_uuids)
self.assertEqual([uuids.net_dev1_physnet1_rp], group2.provider_uuids)
def test_three_groups(self):
"""A complex example where a lot of mappings are tried before the
solution is found.
"""
allocations = collections.OrderedDict()
allocations[uuids.compute1_rp] = {
"resources": {
'VCPU': 1
}
}
allocations[uuids.net_dev1_rp] = {
"resources": {
'NET_BW_IGR_KILOBIT_PER_SEC': 3,
'NET_BW_EGR_KILOBIT_PER_SEC': 3,
}
}
allocations[uuids.net_dev2_rp] = {
"resources": {
'NET_BW_IGR_KILOBIT_PER_SEC': 2,
'NET_BW_EGR_KILOBIT_PER_SEC': 2,
}
}
allocations[uuids.net_dev3_rp] = {
"resources": {
'NET_BW_IGR_KILOBIT_PER_SEC': 1,
'NET_BW_EGR_KILOBIT_PER_SEC': 3,
}
}
provider_traits = {
uuids.compute1_rp: [],
uuids.net_dev1_rp: [
'CUSTOM_PHYSNET_PHYSNET0', 'CUSTOM_VNIC_TYPE_NORMAL'
],
uuids.net_dev2_rp: [
'CUSTOM_PHYSNET_PHYSNET0', 'CUSTOM_VNIC_TYPE_NORMAL'
],
uuids.net_dev3_rp: [
'CUSTOM_PHYSNET_PHYSNET0', 'CUSTOM_VNIC_TYPE_NORMAL'
],
}
        # this fits on 2 RPs
group1 = request_spec.RequestGroup(
resources={
"NET_BW_IGR_KILOBIT_PER_SEC": 1,
"NET_BW_EGR_KILOBIT_PER_SEC": 3,
},
required_traits={
"CUSTOM_PHYSNET_PHYSNET0",
"CUSTOM_VNIC_TYPE_NORMAL",
})
        # this fits on 2 RPs
group2 = request_spec.RequestGroup(
resources={
"NET_BW_IGR_KILOBIT_PER_SEC": 2,
"NET_BW_EGR_KILOBIT_PER_SEC": 2,
},
required_traits={
"CUSTOM_PHYSNET_PHYSNET0",
"CUSTOM_VNIC_TYPE_NORMAL",
})
        # this fits on only one RP
group3 = request_spec.RequestGroup(
resources={
"NET_BW_IGR_KILOBIT_PER_SEC": 3,
"NET_BW_EGR_KILOBIT_PER_SEC": 3,
},
required_traits={
"CUSTOM_PHYSNET_PHYSNET0",
"CUSTOM_VNIC_TYPE_NORMAL",
})
self.spec.requested_resources = [group1, group2, group3]
orig_validator = self.spec._is_valid_group_rp_mapping
with mock.patch.object(
self.spec, '_is_valid_group_rp_mapping',
side_effect=orig_validator
) as mock_validator:
self.spec.map_requested_resources_to_providers(
allocations, provider_traits)
self.assertEqual([uuids.net_dev3_rp], group1.provider_uuids)
self.assertEqual([uuids.net_dev2_rp], group2.provider_uuids)
self.assertEqual([uuids.net_dev1_rp], group3.provider_uuids)
            # the algorithm tried out many possible mappings before it found
            # the solution
self.assertEqual(58, mock_validator.call_count)
@mock.patch.object(request_spec.LOG, 'debug')
def test_two_groups_matches_but_allocation_leftover(self, mock_debug):
        # NOTE(gibi): This negative test scenario should not happen in a real
        # end-to-end test, as we assume that placement only returns valid
        # candidates, and this candidate is not valid: it provides more
        # resources than the ports are requesting. Still, we want to cover the
        # error case in our implementation.
allocations = collections.OrderedDict()
allocations[uuids.compute1_rp] = {
"resources": {
'VCPU': 1
}
}
allocations[uuids.net_dev1_physnet0_rp] = {
"resources": {
'NET_BW_IGR_KILOBIT_PER_SEC': 2,
'NET_BW_EGR_KILOBIT_PER_SEC': 2,
}
}
allocations[uuids.net_dev2_physnet0_rp] = {
"resources": {
'NET_BW_IGR_KILOBIT_PER_SEC': 1,
'NET_BW_EGR_KILOBIT_PER_SEC': 1,
}
}
provider_traits = {
uuids.compute1_rp: [],
uuids.net_dev1_physnet0_rp: [
'CUSTOM_PHYSNET_PHYSNET0', 'CUSTOM_VNIC_TYPE_NORMAL'
],
uuids.net_dev2_physnet0_rp: [
'CUSTOM_PHYSNET_PHYSNET0', 'CUSTOM_VNIC_TYPE_NORMAL'
],
}
group1 = request_spec.RequestGroup(
resources={
"NET_BW_IGR_KILOBIT_PER_SEC": 1,
"NET_BW_EGR_KILOBIT_PER_SEC": 1,
},
required_traits={
"CUSTOM_PHYSNET_PHYSNET0",
"CUSTOM_VNIC_TYPE_NORMAL",
})
group2 = request_spec.RequestGroup(
resources={
"NET_BW_IGR_KILOBIT_PER_SEC": 1,
"NET_BW_EGR_KILOBIT_PER_SEC": 1,
},
required_traits={
"CUSTOM_PHYSNET_PHYSNET0",
"CUSTOM_VNIC_TYPE_NORMAL",
})
self.spec.requested_resources = [group1, group2]
self.assertRaises(
ValueError, self.spec.map_requested_resources_to_providers,
allocations, provider_traits)
self.assertIn('allocations leftover', mock_debug.mock_calls[3][1][0])
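# Illustrative, standalone sketch (not nova code) of the backtracking search
# that map_requested_resources_to_providers performs in the tests above: each
# request group is assigned to a provider that carries the required traits and
# still has enough allocation left, and a partial assignment is undone when it
# cannot be extended. All names and data shapes below are hypothetical
# simplifications of the real objects.
def _sketch_map_groups_to_providers(groups, allocations, provider_traits):
    def fits(group, rp, remaining):
        # A provider fits if it exposes all required traits and still has
        # enough of every requested resource class.
        return (group['required_traits'] <= set(provider_traits[rp]) and
                all(remaining[rp].get(rc, 0) >= amount
                    for rc, amount in group['resources'].items()))
    def backtrack(index, remaining, mapping):
        if index == len(groups):
            return mapping
        group = groups[index]
        for rp in remaining:
            if fits(group, rp, remaining):
                for rc, amount in group['resources'].items():
                    remaining[rp][rc] -= amount
                solution = backtrack(index + 1, remaining, mapping + [rp])
                if solution is not None:
                    return solution
                # Undo the tentative assignment and try the next provider.
                for rc, amount in group['resources'].items():
                    remaining[rp][rc] += amount
        return None
    remaining = {rp: dict(alloc['resources'])
                 for rp, alloc in allocations.items()}
    return backtrack(0, remaining, [])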
| 41.870406
| 79
| 0.601962
|
794acf808a7856e2c4b9f9889650b521c10f2b89
| 28,233
|
py
|
Python
|
torch/testing/_internal/jit_utils.py
|
vulcantron/pytorch
|
bbbf00a8a83879e687fb7e96e31a619388b6b54e
|
[
"Intel"
] | null | null | null |
torch/testing/_internal/jit_utils.py
|
vulcantron/pytorch
|
bbbf00a8a83879e687fb7e96e31a619388b6b54e
|
[
"Intel"
] | null | null | null |
torch/testing/_internal/jit_utils.py
|
vulcantron/pytorch
|
bbbf00a8a83879e687fb7e96e31a619388b6b54e
|
[
"Intel"
] | null | null | null |
# Torch
from torch.autograd import Variable
from torch.autograd.function import _nested_map
from torch.jit.annotations import BroadcastingList2, BroadcastingList3 # noqa: F401
from torch.onnx import OperatorExportTypes
import torch
import torch.cuda
import torch.jit
import torch.jit._logging
import torch.jit.frontend
import torch.jit.quantized
import zipfile
import functools
# Testing utils
from torch.testing import FileCheck
from torch.testing._internal.common_utils import IS_WINDOWS, \
freeze_rng_state, enable_profiling_mode_for_profiling_tests, ProfilingMode, TEST_BAILOUTS
from torch.testing._internal.common_jit import JitCommonTestCase
from torch.testing._internal.common_utils import enable_profiling_mode # noqa: F401
# Standard library
from contextlib import contextmanager
from functools import reduce
from io import StringIO
from collections import defaultdict
import importlib.util
import inspect
import io
import math
import os
import pickle
import sys
import tempfile
import textwrap
from importlib.abc import Loader
from typing import Any, Dict, List
RUN_CUDA = torch.cuda.is_available()
RUN_CUDA_MULTI_GPU = RUN_CUDA and torch.cuda.device_count() > 1
RUN_CUDA_HALF = RUN_CUDA
# HIP supports half, no version check necessary
if torch.cuda.is_available() and not torch.version.hip:
CUDA_VERSION = torch._C._cuda_getCompiledVersion()
for d in range(torch.cuda.device_count()):
major = torch.cuda.get_device_capability(d)[0]
        if major < 6:
RUN_CUDA_HALF = False
def execWrapper(code, glob, loc):
exec(code, glob, loc)
def do_input_map(fn, input):
return _nested_map(lambda t: isinstance(t, torch.Tensor), fn)(input)
def clear_class_registry():
torch._C._jit_clear_class_registry()
torch.jit._recursive.concrete_type_store = torch.jit._recursive.ConcreteTypeStore()
torch.jit._state._clear_class_state()
def get_execution_plan(graph_executor_state):
execution_plans = list(graph_executor_state.execution_plans.values())
num_plans = len(execution_plans)
if num_plans != 1:
raise RuntimeError('This test assumes this GraphExecutor should '
'only have one execution plan, got: {}'.format(num_plans))
return execution_plans[0]
class _AssertRaisesRegexWithHighlightContext(object):
"""
A context manager that is useful for checking that error messages highlight
the correct part of the source code.
"""
def __init__(self, test_case, exception, regex, highlight):
self.test_case = test_case
self.exception_type = exception
self.regex = regex
self.highlight = highlight
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
with self.test_case.assertRaisesRegex(self.exception_type, self.regex):
if type:
raise value
if self.highlight:
FileCheck().check_source_highlighted(self.highlight).run(str(value))
return True
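# Minimal runnable sketch of the context manager above; the test class and the
# error text are made-up examples. With highlight=None only the exception type
# and regex are checked; a non-None highlight is additionally matched against
# the source ranges highlighted in the error message via FileCheck.
import unittest
class _HighlightContextSketch(unittest.TestCase):
    def test_regex_only(self):
        with _AssertRaisesRegexWithHighlightContext(
                self, ValueError, "bad input", highlight=None):
            raise ValueError("bad input: 42")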
FUSION_GROUP = "prim::TensorExprGroup"
class JitTestCase(JitCommonTestCase):
_do_cuda_memory_leak_check = True
_restored_warnings = False
class capture_stdout(list):
"""
Replace sys.stdout with a temporary StringIO
"""
def __enter__(self):
self.sys_stdout = sys.stdout
self.stringio = StringIO()
sys.stdout = self.stringio
return self
def __exit__(self, *args):
self.append(str(self.stringio.getvalue()))
del self.stringio
sys.stdout = self.sys_stdout
class capture_stderr(list):
"""
Replace sys.stderr with a temporary StringIO
"""
def __enter__(self):
self.sys_stderr = sys.stderr
self.stringio = StringIO()
sys.stderr = self.stringio
return self
def __exit__(self, *args):
self.append(str(self.stringio.getvalue()))
del self.stringio
sys.stderr = self.sys_stderr
def setHooks(self):
torch._C._jit_set_emit_hooks(self.emitModuleHook, self.emitFunctionHook)
def clearHooks(self):
torch._C._jit_set_emit_hooks(None, None)
def setUp(self):
super().setUp()
# unittest overrides all warning filters and forces all of them to show up
# after we install our own to silence those coming from inside PyTorch.
# This will ensure that our filter still takes precedence.
if not JitTestCase._restored_warnings:
torch.jit.TracerWarning.ignore_lib_warnings()
JitTestCase._restored_warnings = True
self.setHooks()
def tearDown(self):
super().tearDown()
# needs to be cleared because python might be unloaded before
        # the callback gets destructed
self.clearHooks()
clear_class_registry()
def assertAllFused(self, graph, except_for=()):
# note this helper collects nodes on 'fast path' only
# i.e. the true blocks of specialized checks
def get_nodes_and_parents_recursively(block, kind, acc):
for node in block.nodes():
if node.kind() == kind:
acc[block].append(node)
elif node.kind() == 'prim::DifferentiableGraph':
get_nodes_and_parents_recursively(node.g('Subgraph'), kind, acc)
elif node.kind() == 'prim::If' and (node.inputs().__next__().node().kind() == 'aten::all' or
node.inputs().__next__().node().kind() == 'prim::TypeCheck' or
node.inputs().__next__().node().kind() == 'prim::RequiresGradCheck'):
get_nodes_and_parents_recursively(node.blocks().__next__(), kind, acc)
else:
for inner_block in node.blocks():
get_nodes_and_parents_recursively(inner_block, kind, acc)
allowed_nodes = {'prim::Constant', FUSION_GROUP, 'prim::BailoutTemplate',
'prim::TupleConstruct', 'prim::If', 'prim::TypeCheck', 'prim::RequiresGradCheck'} | set(except_for)
fusion_groups : Dict[torch._C.Block, List[torch._C.Node]] = defaultdict(list)
get_nodes_and_parents_recursively(graph, FUSION_GROUP, fusion_groups)
self.assertTrue(len(fusion_groups) == 1, 'got {}'.format(graph))
(graph, fusion_nodes) = list(fusion_groups.items())[0]
# the block contains one FUSION_GROUP and the rest of nodes are `allowed_nodes`
self.assertTrue(len(fusion_nodes) == 1, 'got {}'.format(graph))
self.assertTrue(all(node.kind() in allowed_nodes for node in graph.nodes()),
'got {}'.format(graph))
def _isHookExceptionOk(self, e):
se = str(e)
allowed = ("Could not export Python function",
"closures are not exportable")
for a in allowed:
if a in se:
return True
return False
def _compared_saved_loaded(self, m):
def extract_files(buffer):
# crack open the zip format to get at the main module code
archive = zipfile.ZipFile(buffer)
# check that we have no duplicate names
self.assertEqual(len(set(archive.namelist())), len(archive.namelist()))
files = list(filter(lambda x: x.startswith('archive/code/'), archive.namelist()))
# unwrap all the code files into strings
code_files_str = filter(lambda x: x.endswith('.py'), files)
code_files_stream = (archive.open(f) for f in code_files_str)
code_files = ("".join([line.decode() for line in file]) for file in code_files_stream)
            # unpickle all the debug files
debug_files_str = filter(lambda f: f.endswith('.debug_pkl'), files)
debug_files_stream = (archive.open(f) for f in debug_files_str)
debug_files = (pickle.load(f) for f in debug_files_stream)
return code_files, debug_files
# disable the hook while we parse code, otherwise we will re-enter the hook
with torch._jit_internal._disable_emit_hooks():
try:
# short-circuit if this is an empty function or module
if len(m.code) == 0:
return
if isinstance(m, torch._C.ScriptModule):
if len(m._method_names()) == 0:
return
# save the module to a buffer
buffer = io.BytesIO()
torch.jit.save(m, buffer)
# copy the data in the buffer so we can restore it later. This
# is because py2 and py3 have different semantics with zipfile
# and it's easier to just work with a fresh copy each time.
buffer_copy = buffer.getvalue()
code_files, debug_files = extract_files(buffer)
except RuntimeError as e:
if not self._isHookExceptionOk(e):
raise
else:
return
        # import the model again (from the copy we made of the original)
buffer2 = io.BytesIO(buffer_copy)
imported = torch.jit.load(buffer2)
# save it again
saved_module_buffer_2 = io.BytesIO()
torch.jit.save(imported, saved_module_buffer_2)
saved_module_buffer_2.seek(0)
code_files_2, debug_files_2 = extract_files(saved_module_buffer_2)
for a, b in zip(code_files, code_files_2):
self.assertMultiLineEqual(a, b)
if isinstance(m, torch._C.ScriptModule):
self.assertTrue(torch._C._ivalue_tags_match(m, imported._c))
def emitFunctionHook(self, func):
# func has invalid names for export, skip the jitter check
if func.name == "<lambda>" or "aten::" in func.name:
return
self._compared_saved_loaded(func)
def emitModuleHook(self, module):
self._compared_saved_loaded(module)
def assertGraphContains(self, graph, kind):
self.assertTrue(any(n.kind() == kind for n in graph.nodes()))
def assertGraphContainsExactly(self, graph, kind, num_kind_nodes, consider_subgraphs=False):
def perform_assert(graph, kind, actual, expected, consider_subgraphs):
if actual == expected:
return
subgraph = 'including' if consider_subgraphs else 'excluding'
raise AssertionError(
'{}\nError: graph contains {} {} nodes ({} subgraphs) but expected {}'.format(
graph, actual, kind, subgraph, expected))
if consider_subgraphs:
strgraph = str(graph)
count = strgraph.count(kind) - strgraph.count('with {}'.format(kind))
perform_assert(graph, kind, count, num_kind_nodes,
consider_subgraphs)
return
def nodes(block):
out = []
for node in block.nodes():
if node.kind() == kind:
out.append(node)
for block in node.blocks():
out += nodes(block)
return out
out_nodes = nodes(graph)
perform_assert(graph, kind, len(out_nodes), num_kind_nodes,
consider_subgraphs)
def assertExpectedONNXGraph(self, g, *args, **kwargs):
g = torch.onnx._optimize_trace(g, operator_export_type=OperatorExportTypes.ONNX)
self.assertExpectedGraph(g, *args, **kwargs)
def assertExpectedGraph(self, trace, *args, **kwargs):
if isinstance(trace, torch._C.Graph):
graph = trace
else:
graph = trace.graph()
torch._C._jit_pass_lint(graph)
torch._C._jit_pass_dce(graph)
torch._C._jit_pass_lint(graph)
graph = torch._C._jit_pass_canonicalize(graph)
torch._C._jit_pass_lint(graph)
self.assertExpected(str(graph), *args, **kwargs)
def run_pass(self, name, trace):
if isinstance(trace, torch._C.Graph):
graph = trace
set_graph = False
else:
set_graph = True
graph = trace.graph()
torch._C._jit_pass_lint(graph)
result = getattr(torch._C, '_jit_pass_' + name)(graph)
if result is not None and not isinstance(result, bool):
graph = result
torch._C._jit_pass_lint(graph)
if set_graph:
trace.set_graph(graph)
return graph
def get_frame_vars(self, frames_up):
frame = inspect.currentframe()
if not frame:
raise RuntimeError("failed to inspect frame")
i = 0
while i < frames_up + 1:
frame = frame.f_back
if not frame:
raise RuntimeError("failed to get frame")
i += 1
defined_vars: Dict[str, Any] = {}
defined_vars.update(frame.f_locals)
defined_vars.update(frame.f_globals)
return defined_vars
def assertRaisesRegexWithHighlight(self, exception, regex, highlight):
return _AssertRaisesRegexWithHighlightContext(self, exception, regex, highlight)
def checkScriptRaisesRegex(self, script, inputs, exception, regex,
name=None, outputs=None, capture_output=False,
frames_up=1, profiling=ProfilingMode.PROFILING):
"""
        Checks that a given function will throw the correct exception
        when executed with normal Python, the string frontend, and the
        AST frontend. Logic taken from `checkScript` (see comments there
        for details).
"""
with enable_profiling_mode_for_profiling_tests():
# Normal Python
with self.assertRaisesRegex(exception, regex):
if isinstance(script, str):
frame = self.get_frame_vars(frames_up)
the_locals: Dict[str, Any] = {}
execWrapper(script, glob=frame, loc=the_locals)
frame.update(the_locals)
python_fn = frame[name]
else:
python_fn = script
python_fn(*inputs)
# String frontend
with self.assertRaisesRegex(exception, regex):
if isinstance(script, str):
cu = torch.jit.CompilationUnit(script, _frames_up=frames_up)
string_frontend = getattr(cu, name)
else:
source = textwrap.dedent(inspect.getsource(script))
cu = torch.jit.CompilationUnit(source, _frames_up=frames_up)
string_frontend = getattr(cu, script.__name__)
with self.assertRaisesRegex(exception, regex):
string_frontend(*inputs)
# optimized run
string_frontend(*inputs)
# Python AST frontend
if not isinstance(script, str):
with self.assertRaisesRegex(exception, regex):
ge = torch.jit.script(python_fn)
# profiling run
with self.assertRaisesRegex(exception, regex):
ge(*inputs)
# optimized run
ge(*inputs)
def checkBailouts(self, model, inputs, expected):
state = model.get_debug_state()
plan = get_execution_plan(state)
num_bailouts = plan.code.num_bailouts()
for i in range(0, num_bailouts):
plan.code.request_bailout(i)
bailout_outputs = model(*inputs)
self.assertEqual(bailout_outputs, expected)
def checkScript(self,
script,
inputs,
name='func',
optimize=True,
inputs_requires_grad=False,
capture_output=False,
frames_up=1,
profiling=ProfilingMode.PROFILING,
atol=None,
rtol=None):
"""
Checks that a given script generates the same output as the Python
version using the given inputs.
"""
with torch.jit.optimized_execution(optimize):
with enable_profiling_mode_for_profiling_tests():
extra_profile_runs = any(isinstance(x, torch.Tensor) and x.requires_grad for x in inputs)
if isinstance(script, str):
# Compile the string to a Script function
# with enable_profiling_mode():
cu = torch.jit.CompilationUnit(script, _frames_up=frames_up)
# Execute the Python function so we can run it later and get its
# outputs
frame = self.get_frame_vars(frames_up)
the_locals: Dict[str, Any] = {}
execWrapper(script, glob=frame, loc=the_locals)
frame.update(the_locals)
python_fn = frame[name]
scripted_fn = getattr(cu, name)
else:
# Check the string frontend first
source = textwrap.dedent(inspect.getsource(script))
self.checkScript(
source,
inputs,
script.__name__,
optimize=optimize,
inputs_requires_grad=inputs_requires_grad,
capture_output=capture_output,
profiling=profiling,
frames_up=2)
# Continue checking the Python frontend
scripted_fn = torch.jit.script(script, _frames_up=1)
python_fn = script
if inputs_requires_grad:
recording_inputs = do_input_map(lambda t: t.detach().requires_grad_(), inputs)
else:
recording_inputs = inputs
if capture_output:
with self.capture_stdout() as script_stdout:
script_outputs = scripted_fn(*recording_inputs)
with self.capture_stdout() as opt_script_stdout:
opt_script_outputs = scripted_fn(*recording_inputs)
with self.capture_stdout() as _python_stdout:
python_outputs = python_fn(*inputs)
if not IS_WINDOWS:
self.assertExpected(script_stdout[0], subname='stdout')
self.assertEqual(python_outputs, opt_script_outputs, atol=atol, rtol=rtol)
else:
# profiling run
script_outputs = scripted_fn(*recording_inputs)
if inputs_requires_grad or extra_profile_runs:
opt_script_outputs = scripted_fn(*recording_inputs)
# optimized run
opt_script_outputs = scripted_fn(*recording_inputs)
if TEST_BAILOUTS:
self.checkBailouts(scripted_fn, inputs, opt_script_outputs)
python_outputs = python_fn(*inputs)
self.assertEqual(python_outputs, script_outputs, atol=atol, rtol=rtol)
self.assertEqual(script_outputs, opt_script_outputs, atol=atol, rtol=rtol)
return scripted_fn
def checkTrace(self, func, reference_tensors, input_tensors=None,
drop=None, allow_unused=False, verbose=False,
inputs_require_grads=True, check_tolerance=1e-5, export_import=True,
_force_outplace=False):
# TODO: check gradients for parameters, not just inputs
def allSum(vs):
# drop allows us to remove some values from ever being used
# to test unused outputs
if drop is not None:
vs = vs[:-drop]
            # we don't want all the grads for all the outputs to be the same,
            # so we multiply each by a constant
return sum(math.log(i + 2) * v.sum() for i, v in enumerate(vs) if v is not None)
if input_tensors is None:
input_tensors = reference_tensors
def flatten_inputs(inputs):
def input_reduce(input, fn, acc):
if isinstance(input, torch.Tensor):
fn(input, acc)
elif isinstance(input, dict):
reduce(lambda acc, key: input_reduce(input[key], fn, acc), input, acc)
else:
reduce(lambda acc, val: input_reduce(val, fn, acc), input, acc)
return acc
return tuple(input_reduce(recording_inputs, lambda t, acc: acc.append(t), []))
nograd_inputs = reference_tensors
if inputs_require_grads:
recording_inputs = do_input_map(lambda t: t.clone().requires_grad_(), reference_tensors)
flattened_recording_inputs = flatten_inputs(recording_inputs)
else:
recording_inputs = reference_tensors
        # `check_trace` is set to False because check_trace is run with
        # @no_grad, and `checkTrace` already does all the checks against the
        # Python function anyway
ge = torch.jit.trace(func, input_tensors, check_tolerance=check_tolerance,
_force_outplace=_force_outplace, check_trace=False)
if export_import:
ge = self.getExportImportCopy(ge)
if verbose:
print(ge.graph)
# test no gradients case
outputs = func(*nograd_inputs)
outputs_ge = ge(*nograd_inputs)
self.assertEqual(outputs, outputs_ge)
# test gradients case
outputs = func(*recording_inputs)
if inputs_require_grads:
grads = torch.autograd.grad(allSum(outputs), flattened_recording_inputs,
allow_unused=allow_unused)
outputs_ge = ge(*recording_inputs)
if inputs_require_grads:
grads_ge = torch.autograd.grad(allSum(outputs_ge), flattened_recording_inputs,
allow_unused=allow_unused)
self.assertEqual(outputs, outputs_ge)
if inputs_require_grads:
self.assertEqual(grads, grads_ge)
# test the grad grad case
outputs = func(*recording_inputs)
l1 = allSum(outputs)
if inputs_require_grads:
grads = torch.autograd.grad(l1, flattened_recording_inputs, create_graph=True,
allow_unused=allow_unused)
if inputs_require_grads:
l2 = (allSum(grads) * l1)
grads2 = torch.autograd.grad(l2, flattened_recording_inputs, allow_unused=allow_unused)
if inputs_require_grads:
recording_inputs = do_input_map(lambda t: Variable(t, requires_grad=True), reference_tensors)
flattened_recording_inputs = flatten_inputs(recording_inputs)
outputs_ge = ge(*recording_inputs)
l1_ge = allSum(outputs_ge)
if inputs_require_grads:
grads_ge = torch.autograd.grad(
l1_ge, flattened_recording_inputs, create_graph=True, allow_unused=allow_unused)
if inputs_require_grads:
l2_ge = (allSum(grads_ge) * l1_ge)
grads2_ge = torch.autograd.grad(l2_ge, flattened_recording_inputs, allow_unused=allow_unused)
self.assertEqual(outputs, outputs_ge)
if inputs_require_grads:
self.assertEqual(grads, grads_ge)
for g2, g2_ge in zip(grads2, grads2_ge):
if g2 is None and g2_ge is None:
continue
self.assertEqual(g2, g2_ge, atol=8e-4, rtol=8e-4)
return ge
def checkModule(self, nn_module, args):
"""
        Check that an nn.Module's results in script mode match eager mode and
        that it can be exported
"""
sm = torch.jit.script(nn_module)
with freeze_rng_state():
eager_out = nn_module(*args)
with freeze_rng_state():
script_out = sm(*args)
self.assertEqual(eager_out, script_out)
self.assertExportImportModule(sm, args)
return sm
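# Minimal sketch, assuming only a toy module, of what checkModule above
# verifies: eager and scripted outputs agree, and the scripted module survives
# a save/load round trip. Illustrative only, not part of the test API.
def _check_module_sketch():
    class _Toy(torch.nn.Module):
        def forward(self, x):
            return x * 2 + 1
    m = _Toy()
    sm = torch.jit.script(m)
    x = torch.randn(3)
    assert torch.allclose(m(x), sm(x))  # eager and scripted agree
    buffer = io.BytesIO()
    torch.jit.save(sm, buffer)  # and the scripted module round-trips
    buffer.seek(0)
    return torch.jit.load(buffer)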
@contextmanager
def inline_everything_mode(should_inline):
old = torch._C._jit_get_inline_everything_mode()
torch._C._jit_set_inline_everything_mode(should_inline)
try:
yield
finally:
torch._C._jit_set_inline_everything_mode(old)
@contextmanager
def set_fusion_group_inlining(inlining):
old = torch._C._debug_get_fusion_group_inlining()
torch._C._debug_set_fusion_group_inlining(inlining)
try:
yield
finally:
torch._C._debug_set_fusion_group_inlining(old)
# note: not re-entrant, use unnested only
@contextmanager
def disable_autodiff_subgraph_inlining(enabled=True):
torch._C._debug_set_autodiff_subgraph_inlining(not enabled)
try:
yield
finally:
torch._C._debug_set_autodiff_subgraph_inlining(True)
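# Hypothetical usage sketch of the three context managers above: keep autodiff
# subgraphs and fusion groups un-inlined while running a scripted function so
# they remain visible when inspecting the optimized graph. `fn` and `inputs`
# are placeholders supplied by the caller.
def _graph_inspection_sketch(fn, inputs):
    with disable_autodiff_subgraph_inlining():
        with set_fusion_group_inlining(False):
            scripted = torch.jit.script(fn)
            scripted(*inputs)  # warm-up run so the executor can optimize
            return scripted.graph_for(*inputs)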
def _inline_everything(fn):
@functools.wraps(fn)
def wrapper(*args, **kwargs):
with inline_everything_mode(True):
fn(*args, **kwargs)
return wrapper
# this exists for forward compatibility reasons temporarily.
# TODO(suo) remove
def _tmp_donotuse_dont_inline_everything(fn):
@functools.wraps(fn)
def wrapper(*args, **kwargs):
with inline_everything_mode(False):
fn(*args, **kwargs)
return wrapper
# make it easy to quickly define/trace a function for these tests
def _trace(*args, **kwargs):
def wrapper(func):
return torch.jit.trace(func, args, **kwargs)
return wrapper
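# Hypothetical usage of the _trace decorator above: its positional arguments
# become the example inputs passed to torch.jit.trace.
def _trace_decorator_sketch():
    @_trace(torch.rand(3, 4))
    def doubler(x):
        return x * 2
    return doubler  # the traced version of doubler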
def enable_cpu_fuser(fn):
def wrapper(*args, **kwargs):
torch._C._jit_override_can_fuse_on_cpu_legacy(True)
torch._C._jit_override_can_fuse_on_cpu(True)
torch._C._jit_set_te_must_use_llvm_cpu(False)
try:
fn(*args, **kwargs)
finally:
torch._C._jit_override_can_fuse_on_cpu_legacy(False)
torch._C._jit_override_can_fuse_on_cpu(False)
torch._C._jit_set_te_must_use_llvm_cpu(True)
return wrapper
def enable_cpu_fuser_if(cond):
if cond:
return enable_cpu_fuser
else:
def noop_fuser(fn):
def wrapper(*args, **kwargs):
return fn(*args, **kwargs)
return wrapper
return noop_fuser
def get_forward(c):
return c._get_method('forward')
def get_forward_graph(c):
return c._get_method('forward').graph
def get_module_method(m, module, method):
return m._c.getattr(module)._get_method(method)
def attrs_with_prefix(module, prefix):
return [x for x, _ in module._modules._c.items()
if x.startswith(prefix)]
def warmup_backward(f, *args):
profiling_count = 3
results = []
for i in range(profiling_count):
if len(args) > 0:
r = torch.autograd.grad(f, *args)
results.append(r)
else:
f.backward(retain_graph=True)
return results
# TODO: Remove me once https://bugs.python.org/issue42666 is resolved
def make_global(*args):
for arg in args:
setattr(sys.modules[arg.__module__], arg.__name__, arg)
# Helper function to eval Python3 code without causing a syntax error for
# this file under py2
def _get_py3_code(code, fn_name):
with tempfile.TemporaryDirectory() as tmp_dir:
script_path = os.path.join(tmp_dir, 'script.py')
with open(script_path, 'w') as f:
f.write(code)
spec = importlib.util.spec_from_file_location(fn_name, script_path)
module = importlib.util.module_from_spec(spec)
loader = spec.loader
        assert isinstance(loader, Loader)  # Assert type to meet MyPy requirement
loader.exec_module(module)
fn = getattr(module, fn_name)
return fn
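# Hypothetical usage sketch of _get_py3_code above: compile a snippet from
# source text and call the function it defines.
def _get_py3_code_sketch():
    code = "def add(a, b):\n    return a + b\n"
    add = _get_py3_code(code, 'add')
    assert add(2, 3) == 5
    return add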
| 38.675342
| 124
| 0.609145
|
794acfb4facf81e0d36a23bcb12e938b97d763c8
| 3,285
|
py
|
Python
|
nodemcu/usocketio/client.py
|
adrianalin/bme680_nodemcu_socketio
|
1d2264b593bf70c5248c3db8fbbaa34d8f15f50e
|
[
"MIT"
] | 1
|
2021-06-16T21:38:13.000Z
|
2021-06-16T21:38:13.000Z
|
nodemcu/usocketio/client.py
|
juergs/bme680_nodemcu_socketio
|
1d2264b593bf70c5248c3db8fbbaa34d8f15f50e
|
[
"MIT"
] | null | null | null |
nodemcu/usocketio/client.py
|
juergs/bme680_nodemcu_socketio
|
1d2264b593bf70c5248c3db8fbbaa34d8f15f50e
|
[
"MIT"
] | 2
|
2019-09-08T08:35:58.000Z
|
2022-03-29T07:22:06.000Z
|
"""
Micropython Socket.IO client.
"""
import ure as re
import ujson as json
import usocket as socket
from ucollections import namedtuple
from .protocol import *
from .transport import SocketIO
URL_RE = re.compile(r'http://([A-Za-z0-9\-\.]+)(?:\:([0-9]+))?(/.+)?')
URI = namedtuple('URI', ('hostname', 'port', 'path'))
def urlparse(uri):
    """Parse http:// URLs"""
    match = URL_RE.match(uri)
    if match:
        # the port group in URL_RE is optional; default to 80 when missing
        # so int() is not called on None
        port = int(match.group(2)) if match.group(2) else 80
        return URI(match.group(1), port, match.group(3))
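# Example (with a made-up address) of the parser above; URL_RE makes the port
# optional, which is why urlparse defaults it when missing.
def _urlparse_example():
    uri = urlparse('http://192.168.1.10:8080/socket.io/')
    assert uri.hostname == '192.168.1.10'
    assert uri.port == 8080
    assert uri.path == '/socket.io/'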
def _connect_http(hostname, port, path):
"""Stage 1 do the HTTP connection to get our SID"""
try:
sock = socket.socket()
addr = socket.getaddrinfo(hostname, port)
sock.connect(addr[0][4])
def send_header(header, *args):
if __debug__:
print(str(header), *args)
sock.write(header % args + '\r\n')
send_header(b'GET %s HTTP/1.1', path)
send_header(b'Host: %s:%s', hostname, port)
send_header(b'')
header = sock.readline()[:-2]
assert header == b'HTTP/1.1 200 OK', header
length = None
while header:
header = sock.readline()[:-2]
if not header:
break
header, value = header.split(b': ')
header = header.lower()
if header == b'content-type':
assert value == b'application/octet-stream'
elif header == b'content-length':
length = int(value)
assert length
data = sock.read(length)
return decode_payload(data)
finally:
sock.close()
def connect(uri):
"""Connect to a socket IO server."""
uri = urlparse(uri)
assert uri
    path = uri.path or '/socket.io/?EIO=3'
    # Start an HTTP connection, which will give us an SID to use to upgrade
    # to the websocket connection
packets = _connect_http(uri.hostname, uri.port, path)
# The first packet should open the connection,
# following packets might be initialisation messages for us
packet_type, params = next(packets)
assert packet_type == PACKET_OPEN
params = json.loads(params)
print("Websocket parameters = {}".format(params))
assert 'websocket' in params['upgrades']
sid = params['sid']
path += '&sid={}'.format(sid)
if __debug__:
print("Connecting to websocket SID {}".format(sid))
# Start a websocket and send a probe on it
ws_uri = 'ws://{hostname}:{port}{path}&transport=websocket'.format(
hostname=uri.hostname,
port=uri.port,
path=path)
socketio = SocketIO(ws_uri, **params)
    # handle the rest of the packets once we're in the main loop
@socketio.on('connection')
def on_connect(data):
for packet_type, data in packets:
socketio._handle_packet(packet_type, data)
socketio._send_packet(PACKET_PING, 'probe')
# Send a follow-up poll
# _connect_http(uri.hostname, uri.port, path + '&transport=polling')
# We should receive an answer to our probe
packet = socketio._recv()
assert packet == (PACKET_PONG, 'probe')
# Upgrade the connection
socketio._send_packet(PACKET_UPGRADE)
packet = socketio._recv()
assert packet == (PACKET_NOOP, '')
return socketio
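# Hypothetical usage sketch of connect() above; the URL and event name are
# made-up, and run_forever() is assumed to be the receive loop provided by the
# SocketIO transport class (see .transport):
#
#   socketio = connect('http://192.168.1.10:3000/socket.io/?EIO=3')
#
#   @socketio.on('message')
#   def on_message(data):
#       print('got', data)
#
#   socketio.run_forever()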
| 26.071429
| 74
| 0.606697
|
794acfbb01d70027e24fcc220ee04f9d32d4c84b
| 1,638
|
py
|
Python
|
symphony/cli/pyinventory/graphql/users_query.py
|
marosmars/magma
|
51177a6ad7e66216184693a7b3d1dc58f901cd0e
|
[
"BSD-3-Clause"
] | 2
|
2020-11-05T18:58:26.000Z
|
2021-02-09T06:42:49.000Z
|
symphony/cli/pyinventory/graphql/users_query.py
|
marosmars/magma
|
51177a6ad7e66216184693a7b3d1dc58f901cd0e
|
[
"BSD-3-Clause"
] | 2
|
2021-03-31T19:41:55.000Z
|
2021-12-13T20:39:15.000Z
|
symphony/cli/pyinventory/graphql/users_query.py
|
marosmars/magma
|
51177a6ad7e66216184693a7b3d1dc58f901cd0e
|
[
"BSD-3-Clause"
] | 1
|
2021-04-16T02:19:25.000Z
|
2021-04-16T02:19:25.000Z
|
#!/usr/bin/env python3
# @generated AUTOGENERATED file. Do not Change!
from dataclasses import dataclass
from datetime import datetime
from gql.gql.datetime_utils import DATETIME_FIELD
from gql.gql.graphql_client import GraphqlClient
from functools import partial
from numbers import Number
from typing import Any, Callable, List, Mapping, Optional
from dataclasses_json import DataClassJsonMixin
from gql.gql.enum_utils import enum_field
from .user_role_enum import UserRole
from .user_status_enum import UserStatus
QUERY: List[str] = ["""
query UsersQuery {
users {
edges {
node {
id
authID
email
status
role
}
}
}
}
"""]
@dataclass
class UsersQuery(DataClassJsonMixin):
@dataclass
class UsersQueryData(DataClassJsonMixin):
@dataclass
class UserConnection(DataClassJsonMixin):
@dataclass
class UserEdge(DataClassJsonMixin):
@dataclass
class User(DataClassJsonMixin):
id: str
authID: str
email: str
status: UserStatus = enum_field(UserStatus)
role: UserRole = enum_field(UserRole)
node: Optional[User]
edges: List[UserEdge]
users: Optional[UserConnection]
data: UsersQueryData
@classmethod
# fmt: off
def execute(cls, client: GraphqlClient) -> UsersQueryData:
# fmt: off
variables = {}
response_text = client.call(''.join(set(QUERY)), variables=variables)
return cls.from_json(response_text).data
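# Illustrative call (assumes an already-configured GraphqlClient instance;
# not part of the generated code):
#   result = UsersQuery.execute(client)
#   emails = [edge.node.email for edge in result.users.edges if edge.node]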
| 24.447761
| 77
| 0.637973
|
794ad26db5e0be67bb9b35936c8558a8df02c219
| 4,752
|
py
|
Python
|
sandbox/optimal_args_hashbits.py
|
wltrimbl/khmer
|
ff95776eabee96420f1ae43d0eff562682cbb17b
|
[
"CNRI-Python"
] | null | null | null |
sandbox/optimal_args_hashbits.py
|
wltrimbl/khmer
|
ff95776eabee96420f1ae43d0eff562682cbb17b
|
[
"CNRI-Python"
] | null | null | null |
sandbox/optimal_args_hashbits.py
|
wltrimbl/khmer
|
ff95776eabee96420f1ae43d0eff562682cbb17b
|
[
"CNRI-Python"
] | null | null | null |
#! /usr/bin/env python
# This file is part of khmer, https://github.com/dib-lab/khmer/, and is
# Copyright (C) 2015, Michigan State University.
# Copyright (C) 2015, The Regents of the University of California.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of the Michigan State University nor the names
# of its contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Contact: khmer-project@idyll.org
# pylint: disable=invalid-name,missing-docstring
"""
Estimate optimal arguments using nodegraph counting.
% python sandbox/optimal_args_nodegraph.py <data1> [ <data2> <...> ]
Use '-h' for parameter help.
"""
from __future__ import print_function
import sys
import math
import threading
import khmer
from khmer.khmer_args import (report_on_config, info, add_threading_args,
build_nodegraph_args)
from khmer.kfile import check_input_files, check_space
from khmer.khmer_args import graphsize_args_report
def get_parser():
parser = build_nodegraph_args(descr="Load sequences into the compressible "
"graph format plus optional tagset.")
add_threading_args(parser)
parser.add_argument('input_filenames', metavar='input_sequence_filename',
nargs='+', help='input FAST[AQ] sequence filename')
return parser
def main():
info('optimal_args_nodegraph.py', ['graph', 'SeqAn'])
args = get_parser().parse_args()
report_on_config(args, graphtype='nodegraph')
filenames = args.input_filenames
base = filenames[0]
for _ in args.input_filenames:
check_input_files(_, False)
check_space(args.input_filenames, False)
print('Counting kmers from sequences in %s' % repr(filenames),
file=sys.stderr)
htable = khmer.new_nodegraph(args.ksize, args.max_tablesize, args.n_tables)
target_method = htable.consume_seqfile_with_reads_parser
for _, filename in enumerate(filenames):
rparser = khmer.ReadParser(filename)
threads = []
print('consuming input', filename, file=sys.stderr)
        for num in range(args.threads):
cur_thread = threading.Thread(
target=target_method, args=(rparser,))
threads.append(cur_thread)
cur_thread.start()
for thread in threads:
thread.join()
unique_kmers = htable.n_unique_kmers()
print('Total number of unique k-mers: {0}'.format(unique_kmers),
file=sys.stderr)
info_optimal = open(base + '.optimal_args', 'w')
fp_rate = khmer.calc_expected_collisions(htable)
print('fp rate estimated to be %1.3f' % fp_rate, file=sys.stderr)
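    # The nodegraph acts like a partitioned Bloom filter: with n unique k-mers
    # spread over N tables of roughly m entries each, the false-positive rate
    # is approximately (n/m)**N, which is what calc_expected_collisions()
    # estimated above.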
    if fp_rate > 0.15:          # 0.18 is ACTUAL MAX. Do not change.
        print("**", file=sys.stderr)
        print("** ERROR: the graph structure is too small for this data set. "
              "Increase table size/# tables.", file=sys.stderr)
        print("**", file=sys.stderr)
        sys.exit(1)
to_print = graphsize_args_report(unique_kmers, fp_rate)
print(to_print, file=info_optimal)
print('optimal arguments were written to', base + '.optimal_args',
file=sys.stderr)
if __name__ == '__main__':
main()
# vim: set filetype=python tabstop=4 softtabstop=4 shiftwidth=4 expandtab:
# vim: set textwidth=79:
| 37.417323
| 79
| 0.700547
|
794ad46945130273d4a03ed2144fbd3d9d8d6cac
| 355
|
py
|
Python
|
app/src/resources/remove_files.py
|
gerardovitale/covid-project
|
b4e28e8ee095070f2a2433f61725fd8c0374365e
|
[
"MIT"
] | null | null | null |
app/src/resources/remove_files.py
|
gerardovitale/covid-project
|
b4e28e8ee095070f2a2433f61725fd8c0374365e
|
[
"MIT"
] | null | null | null |
app/src/resources/remove_files.py
|
gerardovitale/covid-project
|
b4e28e8ee095070f2a2433f61725fd8c0374365e
|
[
"MIT"
] | null | null | null |
import os
from shutil import rmtree
from typing import List
def remove_files(files: List[str]) -> None:
for file in files:
if os.path.isfile(file):
os.remove(file)
print(f'[INFO] {file} has been removed')
elif os.path.isdir(file):
rmtree(file)
print(f'[INFO] {file} has been removed')
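# Example (paths are illustrative): files go through os.remove, directories
# through rmtree, and paths that do not exist are silently skipped.
#   remove_files(['data/raw.csv', 'data/tmp_cache'])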
| 25.357143
| 52
| 0.591549
|
794ad46e1921906f5f20516f312868538270e0f5
| 2,525
|
py
|
Python
|
heppy/Response.py
|
bladeroot/heppy
|
b597916ff80890ca057b17cdd156e90bbbd9a87a
|
[
"BSD-3-Clause"
] | null | null | null |
heppy/Response.py
|
bladeroot/heppy
|
b597916ff80890ca057b17cdd156e90bbbd9a87a
|
[
"BSD-3-Clause"
] | null | null | null |
heppy/Response.py
|
bladeroot/heppy
|
b597916ff80890ca057b17cdd156e90bbbd9a87a
|
[
"BSD-3-Clause"
] | null | null | null |
import xml.etree.ElementTree as ET
from Doc import Doc
class Response(Doc):
def __init__(self, root):
self.data = {}
self.root = root
self.parse(self.root[0])
def find(self, tag, name):
return tag.find(name, namespaces=self.nsmap)
def findall(self, tag, name):
return tag.findall(name, self.nsmap)
def find_text(self, parent, name):
tag = self.find(parent, name)
if tag is not None:
return tag.text.strip()
def _put_attr(self, data, tag, attr):
attr_value = tag.attrib.get(attr)
if attr_value:
data[attr] = attr_value
def put_tag_data(self, dest, root, tag_name, attrs=[]):
if '@' in tag_name:
tag_name, key = tag_name.split('@')
elif ':' in tag_name:
key = tag_name.split(':')[1]
else:
key = tag_name
tag = self.find(root, tag_name)
if tag is None:
return
dest[key] = tag.text.strip()
for attr in attrs:
self._put_attr(dest, tag, attr)
def put_extension_block(self, response, command, root_tag, tags_data):
data = dict()
data['command'] = command
module_name = command.split(':')[0]
        for tag_name, attrs in tags_data.items():
response.put_tag_data(data, root_tag, module_name + ':' + tag_name, attrs)
response.put_to_list('extensions', data)
def put_to_dict(self, name, values):
if name not in self.data:
self.data[name] = {}
        for k, v in values.items():
self.data[name][k] = v
def put_to_list(self, name, value=[]):
if name not in self.data:
self.data[name] = []
if type(value) in [list, tuple]:
self.data[name].extend(value)
else:
self.data[name].append(value)
def parse(self, tag):
ns = tag.tag.split('}')[0][1:]
name = tag.tag.split('}')[1]
module = self.get_module(ns)
if module is None:
return
if name in module.opmap:
name = module.opmap[name]
method = 'parse_' + name
if not hasattr(module, method):
raise Exception('unknown tag', ns + ':' + name)
getattr(module, method)(self, tag)
@staticmethod
def parsexml(xml):
root = ET.fromstring(xml)
return Response(root)
@staticmethod
def build(name, start):
        response_cls = globals()[name]
        return response_cls(start)
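# Illustrative usage sketch (the XML payload and the namespace-to-module
# wiring handled by get_module() are application-specific and omitted):
#   resp = Response.parsexml(xml_string)
#   print(resp.data)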
| 29.360465
| 86
| 0.556436
|
794ad4a65ab33a25f44fbb339c7380a6d133a15f
| 461
|
py
|
Python
|
pythonnest/tests/__init__.py
|
d9pouces/PythonNest
|
53ad0c53f5c1b411a2af630099869e55a3549d22
|
[
"CECILL-B"
] | 1
|
2017-05-01T20:00:14.000Z
|
2017-05-01T20:00:14.000Z
|
pythonnest/tests/__init__.py
|
d9pouces/PythonNest
|
53ad0c53f5c1b411a2af630099869e55a3549d22
|
[
"CECILL-B"
] | null | null | null |
pythonnest/tests/__init__.py
|
d9pouces/PythonNest
|
53ad0c53f5c1b411a2af630099869e55a3549d22
|
[
"CECILL-B"
] | 2
|
2015-07-30T18:14:50.000Z
|
2019-11-02T10:06:59.000Z
|
"""
Package gathering all unitary tests for pythonnest.
Module names must start with `test_` to be taken into account.
You should consider installing :mod:`Distribute` to run all tests with::
$ python setup.py test
"""
import unittest
__author__ = 'Matthieu Gallet'
# __copyright__ = "Copyright 2013, 19pouces.net"
# __credits__ = "flanker"
# __maintainer__ = "flanker"
# __email__ = "flanker@19pouces.net"
if __name__ == '__main__':
unittest.main()
| 25.611111
| 72
| 0.73102
|
794ad50f5cb8e2917881acdf35f43b5e911cec4d
| 224
|
py
|
Python
|
sw/2.0/ostur_frontend/src/profile-flask.py
|
alvarop/ostur
|
6e56a3f53ac3ca09c12586cd35d0b829dd3d6e78
|
[
"MIT"
] | 7
|
2017-11-30T20:22:02.000Z
|
2021-01-03T02:22:52.000Z
|
sw/2.0/ostur_frontend/src/profile-flask.py
|
alvarop/ostur
|
6e56a3f53ac3ca09c12586cd35d0b829dd3d6e78
|
[
"MIT"
] | null | null | null |
sw/2.0/ostur_frontend/src/profile-flask.py
|
alvarop/ostur
|
6e56a3f53ac3ca09c12586cd35d0b829dd3d6e78
|
[
"MIT"
] | 1
|
2018-02-26T02:53:14.000Z
|
2018-02-26T02:53:14.000Z
|
#!flask/bin/python
from werkzeug.contrib.profiler import ProfilerMiddleware
from ostur_frontend import app
app.config["PROFILE"] = True
app.wsgi_app = ProfilerMiddleware(app.wsgi_app, restrictions=[30])
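# restrictions=[30] is forwarded to pstats' print_stats, limiting the printed
# profile to the 30 most significant entries.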
app.run(debug=True)
| 28
| 66
| 0.808036
|
794ad53a08efe2d55a8b2cb4c1848400a2c212dc
| 6,126
|
py
|
Python
|
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/djangoapps/program_enrollments/management/commands/tests/test_migrate_saml_uids.py
|
osoco/better-ways-of-thinking-about-software
|
83e70d23c873509e22362a09a10d3510e10f6992
|
[
"MIT"
] | 3
|
2021-12-15T04:58:18.000Z
|
2022-02-06T12:15:37.000Z
|
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/djangoapps/program_enrollments/management/commands/tests/test_migrate_saml_uids.py
|
osoco/better-ways-of-thinking-about-software
|
83e70d23c873509e22362a09a10d3510e10f6992
|
[
"MIT"
] | null | null | null |
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/djangoapps/program_enrollments/management/commands/tests/test_migrate_saml_uids.py
|
osoco/better-ways-of-thinking-about-software
|
83e70d23c873509e22362a09a10d3510e10f6992
|
[
"MIT"
] | 1
|
2019-01-02T14:38:50.000Z
|
2019-01-02T14:38:50.000Z
|
"""
Tests for the migrate_saml_uids management command.
"""
from unittest.mock import mock_open, patch
from django.core.management import call_command
from django.test import TestCase
from social_django.models import UserSocialAuth
from common.djangoapps.student.tests.factories import UserFactory
from lms.djangoapps.program_enrollments.management.commands import migrate_saml_uids
from lms.djangoapps.program_enrollments.management.commands.tests.utils import UserSocialAuthFactory
_COMMAND_PATH = 'lms.djangoapps.program_enrollments.management.commands.migrate_saml_uids'
class TestMigrateSamlUids(TestCase):
"""
Test migrate_saml_uids command.
"""
provider_slug = 'gatech'
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.command = migrate_saml_uids.Command()
def _format_email_uid_pair(self, email, uid):
return f'{{"email":"{email}","student_key":"{uid}"}}'
def _format_single_email_uid_pair_json(self, email, uid):
return '[{obj}]'.format(
obj=self._format_email_uid_pair(email, uid)
)
def _call_command(self, data):
"""
Call management command with `data` as contents of input file.
"""
with patch(
_COMMAND_PATH + '.py3_open',
mock_open(read_data=data)
) as _:
call_command(
self.command,
uid_mapping='./foo.json',
saml_provider_slug=self.provider_slug
)
def _format_slug_urn_pair(self, slug, urn):
return f'{slug}:{urn}'
def test_single_mapping(self):
new_urn = '9001'
auth = UserSocialAuthFactory.create(slug=self.provider_slug)
email = auth.user.email
old_uid = auth.uid
self._call_command(self._format_single_email_uid_pair_json(email, new_urn))
auth.refresh_from_db()
assert auth.uid == self._format_slug_urn_pair(self.provider_slug, new_urn)
assert not auth.uid == old_uid
def test_post_save_occurs(self):
"""
Test the signals downstream of this update are called with appropriate arguments
"""
auth = UserSocialAuthFactory.create(slug=self.provider_slug)
new_urn = '9001'
email = auth.user.email
with patch('lms.djangoapps.program_enrollments.signals.matriculate_learner') as signal_handler_mock:
self._call_command(self._format_single_email_uid_pair_json(email, new_urn))
assert signal_handler_mock.called
# first positional arg matches the user whose auth was updated
assert signal_handler_mock.call_args[0][0].id == auth.user.id
# second positional arg matches the urn we changed
assert signal_handler_mock.call_args[0][1] == self._format_slug_urn_pair(self.provider_slug, new_urn)
def test_multiple_social_auth_records(self):
"""
Test we only alter one UserSocialAuth record if a learner has two
"""
auth1 = UserSocialAuthFactory.create(slug=self.provider_slug)
auth2 = UserSocialAuthFactory.create(
slug=self.provider_slug,
user=auth1.user
)
new_urn = '9001'
email = auth1.user.email
assert email == auth2.user.email
self._call_command(self._format_single_email_uid_pair_json(email, new_urn))
auths = UserSocialAuth.objects.filter(
user__email=email,
uid=self._format_slug_urn_pair(self.provider_slug, new_urn)
)
assert auths.count() == 1
@patch(_COMMAND_PATH + '.log')
def test_learner_without_social_auth_records(self, mock_log):
user = UserFactory()
email = user.email
new_urn = '9001'
mock_info = mock_log.info
self._call_command(self._format_single_email_uid_pair_json(email, new_urn))
mock_info.assert_any_call(
'Number of users identified in the mapping file without'
' {slug} UserSocialAuth records: 1'.format(
slug=self.provider_slug
)
)
@patch(_COMMAND_PATH + '.log')
def test_learner_missed_by_mapping_file(self, mock_log):
auth = UserSocialAuthFactory()
# pylint disable required b/c this lint rule is confused about subfactories
email = auth.user.email
new_urn = '9001'
mock_info = mock_log.info
self._call_command(self._format_single_email_uid_pair_json('different' + email, new_urn))
mock_info.assert_any_call(
'Number of users with {slug} UserSocialAuth records '
'for which there was no mapping in the provided file: 1'.format(
slug=self.provider_slug
)
)
@patch(_COMMAND_PATH + '.log')
def test_several_learners(self, mock_log):
auths = [UserSocialAuthFactory() for _ in range(5)]
new_urn = '9001'
mock_info = mock_log.info
self._call_command('[{}]'.format(
','.join(
[
self._format_email_uid_pair(
auth.user.email,
new_urn + str(ind)
)
for ind, auth
in enumerate(auths)
]
)
))
for ind, auth in enumerate(auths):
auth.refresh_from_db()
assert auth.uid == self._format_slug_urn_pair(self.provider_slug, new_urn + str(ind))
mock_info.assert_any_call('Number of mappings in the mapping file updated: 5')
@patch(_COMMAND_PATH + '.log')
def test_learner_duplicated_in_mapping(self, mock_log):
auth = UserSocialAuthFactory()
email = auth.user.email
new_urn = '9001'
mock_info = mock_log.info
self._call_command('[{}]'.format(
','.join([self._format_email_uid_pair(email, new_urn) for _ in range(5)])
))
mock_info.assert_any_call('Number of mappings in the mapping file where the '
'identified user has already been processed: 4')
| 35.410405
| 113
| 0.635162
|
794ad6a233ff93f8c755e160c3aaf3ff416cd4e1
| 39,853
|
py
|
Python
|
examples/benchmark/utils/bert_modeling.py
|
Ezra-H/autodist
|
b5ab28d0d867c22742daa3c1d324fe20c1852bd7
|
[
"Apache-2.0"
] | 127
|
2020-07-16T16:33:10.000Z
|
2022-03-25T09:58:50.000Z
|
examples/benchmark/utils/bert_modeling.py
|
Ezra-H/autodist
|
b5ab28d0d867c22742daa3c1d324fe20c1852bd7
|
[
"Apache-2.0"
] | 17
|
2020-07-16T20:03:44.000Z
|
2021-02-24T19:53:12.000Z
|
examples/benchmark/utils/bert_modeling.py
|
Ezra-H/autodist
|
b5ab28d0d867c22742daa3c1d324fe20c1852bd7
|
[
"Apache-2.0"
] | 26
|
2020-07-21T01:23:55.000Z
|
2022-02-24T03:43:08.000Z
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The main BERT model and related functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import json
import math
import six
import tensorflow as tf
from utils import tf_utils
class BertConfig(object):
"""Configuration for `BertModel`."""
def __init__(self,
vocab_size,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
initializer_range=0.02,
backward_compatible=True):
"""Constructs BertConfig.
Args:
vocab_size: Vocabulary size of `inputs_ids` in `BertModel`.
hidden_size: Size of the encoder layers and the pooler layer.
num_hidden_layers: Number of hidden layers in the Transformer encoder.
num_attention_heads: Number of attention heads for each attention layer in
the Transformer encoder.
intermediate_size: The size of the "intermediate" (i.e., feed-forward)
layer in the Transformer encoder.
hidden_act: The non-linear activation function (function or string) in the
encoder and pooler.
hidden_dropout_prob: The dropout probability for all fully connected
layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob: The dropout ratio for the attention
probabilities.
max_position_embeddings: The maximum sequence length that this model might
ever be used with. Typically set this to something large just in case
(e.g., 512 or 1024 or 2048).
type_vocab_size: The vocabulary size of the `token_type_ids` passed into
`BertModel`.
initializer_range: The stdev of the truncated_normal_initializer for
initializing all weight matrices.
backward_compatible: Boolean, whether the variables shape are compatible
with checkpoints converted from TF 1.x BERT.
"""
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.backward_compatible = backward_compatible
@classmethod
def from_dict(cls, json_object):
"""Constructs a `BertConfig` from a Python dictionary of parameters."""
config = BertConfig(vocab_size=None)
for (key, value) in six.iteritems(json_object):
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
"""Constructs a `BertConfig` from a json file of parameters."""
with tf.io.gfile.GFile(json_file, "r") as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
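# A quick round-trip sketch (the hyperparameter values below are arbitrary,
# chosen small for fast experiments):
#   tiny = BertConfig(vocab_size=30522, hidden_size=128, num_hidden_layers=2,
#                     num_attention_heads=2, intermediate_size=512)
#   assert BertConfig.from_dict(tiny.to_dict()).hidden_size == 128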
def get_bert_model(input_word_ids,
input_mask,
input_type_ids,
config=None,
name=None,
float_type=tf.float32):
"""Wraps the core BERT model as a keras.Model."""
bert_model_layer = BertModel(
config=config,
float_type=float_type,
name=name)
pooled_output, sequence_output = bert_model_layer(
input_word_ids, input_mask, input_type_ids)
bert_model = tf.keras.Model(
inputs=[input_word_ids, input_mask, input_type_ids],
outputs=[pooled_output, sequence_output])
return bert_model
class BertModel(tf.keras.layers.Layer):
"""BERT model ("Bidirectional Encoder Representations from Transformers").
Example usage:
```python
# Already been converted into WordPiece token ids
input_word_ids = tf.constant([[31, 51, 99], [15, 5, 0]])
input_mask = tf.constant([[1, 1, 1], [1, 1, 0]])
input_type_ids = tf.constant([[0, 0, 1], [0, 2, 0]])
config = modeling.BertConfig(vocab_size=32000, hidden_size=512,
      num_hidden_layers=8, num_attention_heads=8, intermediate_size=1024)
pooled_output, sequence_output = modeling.BertModel(config=config)(
input_word_ids=input_word_ids,
input_mask=input_mask,
input_type_ids=input_type_ids)
...
```
"""
def __init__(self, config, float_type=tf.float32, **kwargs):
super(BertModel, self).__init__(**kwargs)
self.config = (
BertConfig.from_dict(config)
if isinstance(config, dict) else copy.deepcopy(config))
self.float_type = float_type
def build(self, unused_input_shapes):
"""Implements build() for the layer."""
self.embedding_lookup = EmbeddingLookup(
vocab_size=self.config.vocab_size,
embedding_size=self.config.hidden_size,
initializer_range=self.config.initializer_range,
dtype=tf.float32,
name="word_embeddings")
self.embedding_postprocessor = EmbeddingPostprocessor(
use_type_embeddings=True,
token_type_vocab_size=self.config.type_vocab_size,
use_position_embeddings=True,
max_position_embeddings=self.config.max_position_embeddings,
dropout_prob=self.config.hidden_dropout_prob,
initializer_range=self.config.initializer_range,
dtype=tf.float32,
name="embedding_postprocessor")
self.encoder = Transformer(
num_hidden_layers=self.config.num_hidden_layers,
hidden_size=self.config.hidden_size,
num_attention_heads=self.config.num_attention_heads,
intermediate_size=self.config.intermediate_size,
intermediate_activation=self.config.hidden_act,
hidden_dropout_prob=self.config.hidden_dropout_prob,
attention_probs_dropout_prob=self.config.attention_probs_dropout_prob,
initializer_range=self.config.initializer_range,
backward_compatible=self.config.backward_compatible,
float_type=self.float_type,
name="encoder")
self.pooler_transform = tf.keras.layers.Dense(
units=self.config.hidden_size,
activation="tanh",
kernel_initializer=get_initializer(self.config.initializer_range),
name="pooler_transform")
super(BertModel, self).build(unused_input_shapes)
def __call__(self,
input_word_ids,
input_mask=None,
input_type_ids=None,
**kwargs):
inputs = tf_utils.pack_inputs(
[input_word_ids, input_mask, input_type_ids])
return super(BertModel, self).__call__(inputs, **kwargs)
def call(self, inputs, mode="bert"):
"""Implements call() for the layer.
Args:
inputs: packed input tensors.
mode: string, `bert` or `encoder`.
Returns:
Output tensor of the last layer for BERT training (mode=`bert`) which
is a float Tensor of shape [batch_size, seq_length, hidden_size] or
a list of output tensors for encoder usage (mode=`encoder`).
"""
unpacked_inputs = tf_utils.unpack_inputs(inputs)
input_word_ids = unpacked_inputs[0]
input_mask = unpacked_inputs[1]
input_type_ids = unpacked_inputs[2]
word_embeddings = self.embedding_lookup(input_word_ids)
embedding_tensor = self.embedding_postprocessor(
word_embeddings=word_embeddings, token_type_ids=input_type_ids)
if self.float_type == tf.float16:
embedding_tensor = tf.cast(embedding_tensor, tf.float16)
attention_mask = None
if input_mask is not None:
attention_mask = create_attention_mask_from_input_mask(
input_word_ids, input_mask)
if mode == "encoder":
return self.encoder(
embedding_tensor, attention_mask, return_all_layers=True)
sequence_output = self.encoder(embedding_tensor, attention_mask)
first_token_tensor = tf.squeeze(sequence_output[:, 0:1, :], axis=1)
pooled_output = self.pooler_transform(first_token_tensor)
return (pooled_output, sequence_output)
def get_config(self):
config = {"config": self.config.to_dict()}
base_config = super(BertModel, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class EmbeddingLookup(tf.keras.layers.Layer):
"""Looks up words embeddings for id tensor."""
def __init__(self,
vocab_size,
embedding_size=768,
initializer_range=0.02,
**kwargs):
super(EmbeddingLookup, self).__init__(**kwargs)
self.vocab_size = vocab_size
self.embedding_size = embedding_size
self.initializer_range = initializer_range
def build(self, unused_input_shapes):
"""Implements build() for the layer."""
self.embeddings = self.add_weight(
"embeddings",
shape=[self.vocab_size, self.embedding_size],
initializer=get_initializer(self.initializer_range),
dtype=self.dtype)
super(EmbeddingLookup, self).build(unused_input_shapes)
def call(self, inputs):
"""Implements call() for the layer."""
input_shape = tf_utils.get_shape_list(inputs)
flat_input = tf.reshape(inputs, [-1])
output = tf.gather(self.embeddings, flat_input)
output = tf.reshape(output, input_shape + [self.embedding_size])
return output
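# Shape sketch for EmbeddingLookup: an integer id tensor of shape
# [batch_size, seq_length] comes out as a float tensor of shape
# [batch_size, seq_length, embedding_size] after the gather above.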
class EmbeddingPostprocessor(tf.keras.layers.Layer):
"""Performs various post-processing on a word embedding tensor."""
def __init__(self,
use_type_embeddings=False,
token_type_vocab_size=None,
use_position_embeddings=True,
max_position_embeddings=512,
dropout_prob=0.0,
initializer_range=0.02,
initializer=None,
**kwargs):
super(EmbeddingPostprocessor, self).__init__(**kwargs)
self.use_type_embeddings = use_type_embeddings
self.token_type_vocab_size = token_type_vocab_size
self.use_position_embeddings = use_position_embeddings
self.max_position_embeddings = max_position_embeddings
self.dropout_prob = dropout_prob
self.initializer_range = initializer_range
if not initializer:
self.initializer = get_initializer(self.initializer_range)
else:
self.initializer = initializer
if self.use_type_embeddings and not self.token_type_vocab_size:
raise ValueError("If `use_type_embeddings` is True, then "
"`token_type_vocab_size` must be specified.")
def build(self, input_shapes):
"""Implements build() for the layer."""
(word_embeddings_shape, _) = input_shapes
width = word_embeddings_shape.as_list()[-1]
self.type_embeddings = None
if self.use_type_embeddings:
self.type_embeddings = self.add_weight(
"type_embeddings",
shape=[self.token_type_vocab_size, width],
initializer=get_initializer(self.initializer_range),
dtype=self.dtype)
self.position_embeddings = None
if self.use_position_embeddings:
self.position_embeddings = self.add_weight(
"position_embeddings",
shape=[self.max_position_embeddings, width],
initializer=get_initializer(self.initializer_range),
dtype=self.dtype)
self.output_layer_norm = tf.keras.layers.LayerNormalization(
name="layer_norm", axis=-1, epsilon=1e-12, dtype=tf.float32)
self.output_dropout = tf.keras.layers.Dropout(rate=self.dropout_prob,
dtype=tf.float32)
super(EmbeddingPostprocessor, self).build(input_shapes)
def __call__(self, word_embeddings, token_type_ids=None, **kwargs):
inputs = tf_utils.pack_inputs([word_embeddings, token_type_ids])
return super(EmbeddingPostprocessor, self).__call__(inputs, **kwargs)
def call(self, inputs):
"""Implements call() for the layer."""
unpacked_inputs = tf_utils.unpack_inputs(inputs)
word_embeddings = unpacked_inputs[0]
token_type_ids = unpacked_inputs[1]
input_shape = tf_utils.get_shape_list(word_embeddings, expected_rank=3)
batch_size = input_shape[0]
seq_length = input_shape[1]
width = input_shape[2]
output = word_embeddings
if self.use_type_embeddings:
flat_token_type_ids = tf.reshape(token_type_ids, [-1])
token_type_embeddings = tf.gather(self.type_embeddings,
flat_token_type_ids)
token_type_embeddings = tf.reshape(token_type_embeddings,
[batch_size, seq_length, width])
output += token_type_embeddings
if self.use_position_embeddings:
position_embeddings = tf.expand_dims(
tf.slice(
self.position_embeddings, [
0, 0], [
seq_length, width]), axis=0)
output += position_embeddings
output = self.output_layer_norm(output)
output = self.output_dropout(output)
return output
class Attention(tf.keras.layers.Layer):
"""Performs multi-headed attention from `from_tensor` to `to_tensor`.
This is an implementation of multi-headed attention based on "Attention
is all you Need". If `from_tensor` and `to_tensor` are the same, then
this is self-attention. Each timestep in `from_tensor` attends to the
    corresponding sequence in `to_tensor`, and returns a fixed-width vector.
This function first projects `from_tensor` into a "query" tensor and
`to_tensor` into "key" and "value" tensors. These are (effectively) a list
of tensors of length `num_attention_heads`, where each tensor is of shape
[batch_size, seq_length, size_per_head].
Then, the query and key tensors are dot-producted and scaled. These are
softmaxed to obtain attention probabilities. The value tensors are then
interpolated by these probabilities, then concatenated back to a single
tensor and returned.
In practice, the multi-headed attention are done with tf.einsum as follows:
Input_tensor: [BFD]
Wq, Wk, Wv: [DNH]
Q:[BFNH] = einsum('BFD,DNH->BFNH', Input_tensor, Wq)
K:[BTNH] = einsum('BTD,DNH->BTNH', Input_tensor, Wk)
V:[BTNH] = einsum('BTD,DNH->BTNH', Input_tensor, Wv)
attention_scores:[BNFT] = einsum('BTNH,BFNH->BNFT', K, Q) / sqrt(H)
attention_probs:[BNFT] = softmax(attention_scores)
context_layer:[BFNH] = einsum('BNFT,BTNH->BFNH', attention_probs, V)
Wout:[DNH]
    Output:[BFD] = einsum('BFNH,DNH->BFD', context_layer, Wout)
"""
def __init__(self,
num_attention_heads=12,
size_per_head=64,
attention_probs_dropout_prob=0.0,
initializer_range=0.02,
backward_compatible=False,
**kwargs):
super(Attention, self).__init__(**kwargs)
self.num_attention_heads = num_attention_heads
self.size_per_head = size_per_head
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.backward_compatible = backward_compatible
def build(self, unused_input_shapes):
"""Implements build() for the layer."""
self.query_dense = self._projection_dense_layer("query")
self.key_dense = self._projection_dense_layer("key")
self.value_dense = self._projection_dense_layer("value")
self.attention_probs_dropout = tf.keras.layers.Dropout(
rate=self.attention_probs_dropout_prob)
super(Attention, self).build(unused_input_shapes)
def reshape_to_matrix(self, input_tensor):
"""Reshape N > 2 rank tensor to rank 2 tensor for performance."""
ndims = input_tensor.shape.ndims
if ndims < 2:
raise ValueError("Input tensor must have at least rank 2."
"Shape = %s" % (input_tensor.shape))
if ndims == 2:
return input_tensor
width = input_tensor.shape[-1]
output_tensor = tf.reshape(input_tensor, [-1, width])
return output_tensor
def __call__(self, from_tensor, to_tensor, attention_mask=None, **kwargs):
inputs = tf_utils.pack_inputs([from_tensor, to_tensor, attention_mask])
return super(Attention, self).__call__(inputs, **kwargs)
def call(self, inputs):
"""Implements call() for the layer."""
(from_tensor, to_tensor, attention_mask) = tf_utils.unpack_inputs(inputs)
# Scalar dimensions referenced here:
# B = batch size (number of sequences)
# F = `from_tensor` sequence length
# T = `to_tensor` sequence length
# N = `num_attention_heads`
# H = `size_per_head`
# `query_tensor` = [B, F, N ,H]
query_tensor = self.query_dense(from_tensor)
# `key_tensor` = [B, T, N, H]
key_tensor = self.key_dense(to_tensor)
# `value_tensor` = [B, T, N, H]
value_tensor = self.value_dense(to_tensor)
# Take the dot product between "query" and "key" to get the raw
# attention scores.
attention_scores = tf.einsum(
"BTNH,BFNH->BNFT", key_tensor, query_tensor)
attention_scores = tf.multiply(
attention_scores,
1.0 /
math.sqrt(
float(
self.size_per_head)))
if attention_mask is not None:
# `attention_mask` = [B, 1, F, T]
attention_mask = tf.expand_dims(attention_mask, axis=[1])
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
adder = (1.0 - tf.cast(attention_mask,
attention_scores.dtype)) * -10000.0
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
attention_scores += adder
# Normalize the attention scores to probabilities.
# `attention_probs` = [B, N, F, T]
attention_probs = tf.nn.softmax(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.attention_probs_dropout(attention_probs)
# `context_layer` = [B, F, N, H]
context_tensor = tf.einsum(
"BNFT,BTNH->BFNH",
attention_probs,
value_tensor)
return context_tensor
def _projection_dense_layer(self, name):
"""A helper to define a projection layer."""
return Dense3D(
num_attention_heads=self.num_attention_heads,
size_per_head=self.size_per_head,
kernel_initializer=get_initializer(self.initializer_range),
output_projection=False,
backward_compatible=self.backward_compatible,
name=name)
class Dense3D(tf.keras.layers.Layer):
"""A Dense Layer using 3D kernel with tf.einsum implementation.
Attributes:
num_attention_heads: An integer, number of attention heads for each
multihead attention layer.
size_per_head: An integer, hidden size per attention head.
hidden_size: An integer, dimension of the hidden layer.
kernel_initializer: An initializer for the kernel weight.
bias_initializer: An initializer for the bias.
activation: An activation function to use. If nothing is specified, no
activation is applied.
use_bias: A bool, whether the layer uses a bias.
output_projection: A bool, whether the Dense3D layer is used for output
linear projection.
backward_compatible: A bool, whether the variables shape are compatible
with checkpoints converted from TF 1.x.
"""
def __init__(self,
num_attention_heads=12,
size_per_head=72,
kernel_initializer=None,
bias_initializer="zeros",
activation=None,
use_bias=True,
output_projection=False,
backward_compatible=False,
**kwargs):
"""Inits Dense3D."""
super(Dense3D, self).__init__(**kwargs)
self.num_attention_heads = num_attention_heads
self.size_per_head = size_per_head
self.hidden_size = num_attention_heads * size_per_head
self.kernel_initializer = kernel_initializer
self.bias_initializer = bias_initializer
self.activation = activation
self.use_bias = use_bias
self.output_projection = output_projection
self.backward_compatible = backward_compatible
@property
def compatible_kernel_shape(self):
if self.output_projection:
return [self.hidden_size, self.hidden_size]
return [self.last_dim, self.hidden_size]
@property
def compatible_bias_shape(self):
return [self.hidden_size]
@property
def kernel_shape(self):
if self.output_projection:
return [
self.num_attention_heads,
self.size_per_head,
self.hidden_size]
return [self.last_dim, self.num_attention_heads, self.size_per_head]
@property
def bias_shape(self):
if self.output_projection:
return [self.hidden_size]
return [self.num_attention_heads, self.size_per_head]
def build(self, input_shape):
"""Implements build() for the layer."""
dtype = tf.as_dtype(self.dtype or tf.keras.backend.floatx())
if not (dtype.is_floating or dtype.is_complex):
raise TypeError(
"Unable to build `Dense3D` layer with non-floating "
"point (and non-complex) dtype %s" %
(dtype,))
input_shape = tf.TensorShape(input_shape)
if tf.compat.dimension_value(input_shape[-1]) is None:
raise ValueError("The last dimension of the inputs to `Dense3D` "
"should be defined. Found `None`.")
self.last_dim = tf.compat.dimension_value(input_shape[-1])
self.input_spec = tf.keras.layers.InputSpec(
min_ndim=3, axes={-1: self.last_dim})
# Determines variable shapes.
if self.backward_compatible:
kernel_shape = self.compatible_kernel_shape
bias_shape = self.compatible_bias_shape
else:
kernel_shape = self.kernel_shape
bias_shape = self.bias_shape
self.kernel = self.add_weight(
"kernel",
shape=kernel_shape,
initializer=self.kernel_initializer,
dtype=self.dtype,
trainable=True)
if self.use_bias:
self.bias = self.add_weight(
"bias",
shape=bias_shape,
initializer=self.bias_initializer,
dtype=self.dtype,
trainable=True)
else:
self.bias = None
super(Dense3D, self).build(input_shape)
def call(self, inputs):
"""Implements ``call()`` for Dense3D.
Args:
inputs: A float tensor of shape [batch_size, sequence_length, hidden_size]
when output_projection is False, otherwise a float tensor of shape
[batch_size, sequence_length, num_heads, dim_per_head].
Returns:
The projected tensor with shape [batch_size, sequence_length, num_heads,
dim_per_head] when output_projection is False, otherwise [batch_size,
sequence_length, hidden_size].
"""
if self.backward_compatible:
kernel = tf.keras.backend.reshape(self.kernel, self.kernel_shape)
bias = (tf.keras.backend.reshape(self.bias, self.bias_shape)
if self.use_bias else None)
else:
kernel = self.kernel
bias = self.bias
if self.output_projection:
ret = tf.einsum("abcd,cde->abe", inputs, kernel)
else:
ret = tf.einsum("abc,cde->abde", inputs, kernel)
if self.use_bias:
ret += bias
if self.activation is not None:
return self.activation(ret)
return ret
class Dense2DProjection(tf.keras.layers.Layer):
"""A 2D projection layer with tf.einsum implementation."""
def __init__(self,
output_size,
kernel_initializer=None,
bias_initializer="zeros",
activation=None,
fp32_activation=False,
**kwargs):
super(Dense2DProjection, self).__init__(**kwargs)
self.output_size = output_size
self.kernel_initializer = kernel_initializer
self.bias_initializer = bias_initializer
self.activation = activation
self.fp32_activation = fp32_activation
def build(self, input_shape):
"""Implements build() for the layer."""
dtype = tf.as_dtype(self.dtype or tf.keras.backend.floatx())
if not (dtype.is_floating or dtype.is_complex):
raise TypeError("Unable to build `Dense2DProjection` layer with "
"non-floating point (and non-complex) "
"dtype %s" % (dtype,))
input_shape = tf.TensorShape(input_shape)
if tf.compat.dimension_value(input_shape[-1]) is None:
raise ValueError("The last dimension of the inputs to "
"`Dense2DProjection` should be defined. "
"Found `None`.")
last_dim = tf.compat.dimension_value(input_shape[-1])
self.input_spec = tf.keras.layers.InputSpec(
min_ndim=3, axes={-1: last_dim})
self.kernel = self.add_weight(
"kernel",
shape=[last_dim, self.output_size],
initializer=self.kernel_initializer,
dtype=self.dtype,
trainable=True)
self.bias = self.add_weight(
"bias",
shape=[self.output_size],
initializer=self.bias_initializer,
dtype=self.dtype,
trainable=True)
super(Dense2DProjection, self).build(input_shape)
def call(self, inputs):
"""Implements call() for Dense2DProjection.
Args:
inputs: float Tensor of shape [batch, from_seq_length,
num_attention_heads, size_per_head].
Returns:
A 3D Tensor.
"""
ret = tf.einsum("abc,cd->abd", inputs, self.kernel)
ret += self.bias
if self.activation is not None:
if self.dtype == tf.float16 and self.fp32_activation:
ret = tf.cast(ret, tf.float32)
return self.activation(ret)
return ret
class TransformerBlock(tf.keras.layers.Layer):
"""Single transformer layer.
It has two sub-layers. The first is a multi-head self-attention mechanism, and
the second is a positionwise fully connected feed-forward network.
"""
def __init__(self,
hidden_size=768,
num_attention_heads=12,
intermediate_size=3072,
intermediate_activation="gelu",
hidden_dropout_prob=0.0,
attention_probs_dropout_prob=0.0,
initializer_range=0.02,
backward_compatible=False,
float_type=tf.float32,
**kwargs):
super(TransformerBlock, self).__init__(**kwargs)
self.hidden_size = hidden_size
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.intermediate_activation = tf_utils.get_activation(
intermediate_activation)
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.backward_compatible = backward_compatible
self.float_type = float_type
if self.hidden_size % self.num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" %
(self.hidden_size, self.num_attention_heads))
self.attention_head_size = int(
self.hidden_size / self.num_attention_heads)
def build(self, unused_input_shapes):
"""Implements build() for the layer."""
self.attention_layer = Attention(
num_attention_heads=self.num_attention_heads,
size_per_head=self.attention_head_size,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
initializer_range=self.initializer_range,
backward_compatible=self.backward_compatible,
name="self_attention")
self.attention_output_dense = Dense3D(
num_attention_heads=self.num_attention_heads,
size_per_head=int(self.hidden_size / self.num_attention_heads),
kernel_initializer=get_initializer(self.initializer_range),
output_projection=True,
backward_compatible=self.backward_compatible,
name="self_attention_output")
self.attention_dropout = tf.keras.layers.Dropout(
rate=self.hidden_dropout_prob)
self.attention_layer_norm = (
tf.keras.layers.LayerNormalization(
name="self_attention_layer_norm", axis=-1, epsilon=1e-12,
# We do layer norm in float32 for numeric stability.
dtype=tf.float32))
self.intermediate_dense = Dense2DProjection(
output_size=self.intermediate_size,
kernel_initializer=get_initializer(self.initializer_range),
activation=self.intermediate_activation,
# Uses float32 so that gelu activation is done in float32.
fp32_activation=True,
name="intermediate")
self.output_dense = Dense2DProjection(
output_size=self.hidden_size,
kernel_initializer=get_initializer(self.initializer_range),
name="output")
self.output_dropout = tf.keras.layers.Dropout(
rate=self.hidden_dropout_prob)
self.output_layer_norm = tf.keras.layers.LayerNormalization(
name="output_layer_norm", axis=-1, epsilon=1e-12, dtype=tf.float32)
super(TransformerBlock, self).build(unused_input_shapes)
def common_layers(self):
"""Explicitly gets all layer objects inside a Transformer encoder block."""
return [
self.attention_layer, self.attention_output_dense,
self.attention_dropout, self.attention_layer_norm,
self.intermediate_dense, self.output_dense, self.output_dropout,
self.output_layer_norm
]
def __call__(self, input_tensor, attention_mask=None, **kwargs):
inputs = tf_utils.pack_inputs([input_tensor, attention_mask])
return super(TransformerBlock, self).__call__(inputs, **kwargs)
def call(self, inputs):
"""Implements call() for the layer."""
(input_tensor, attention_mask) = tf_utils.unpack_inputs(inputs)
attention_output = self.attention_layer(
from_tensor=input_tensor,
to_tensor=input_tensor,
attention_mask=attention_mask)
attention_output = self.attention_output_dense(attention_output)
attention_output = self.attention_dropout(attention_output)
# Use float32 in keras layer norm and the gelu activation in the
# intermediate dense layer for numeric stability
attention_output = self.attention_layer_norm(input_tensor +
attention_output)
if self.float_type == tf.float16:
attention_output = tf.cast(attention_output, tf.float16)
intermediate_output = self.intermediate_dense(attention_output)
if self.float_type == tf.float16:
intermediate_output = tf.cast(intermediate_output, tf.float16)
layer_output = self.output_dense(intermediate_output)
layer_output = self.output_dropout(layer_output)
# Use float32 in keras layer norm for numeric stability
layer_output = self.output_layer_norm(layer_output + attention_output)
if self.float_type == tf.float16:
layer_output = tf.cast(layer_output, tf.float16)
return layer_output
class Transformer(tf.keras.layers.Layer):
"""Multi-headed, multi-layer Transformer from "Attention is All You Need".
This is almost an exact implementation of the original Transformer encoder.
See the original paper:
https://arxiv.org/abs/1706.03762
Also see:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/models/transformer.py
"""
def __init__(self,
num_hidden_layers=12,
hidden_size=768,
num_attention_heads=12,
intermediate_size=3072,
intermediate_activation="gelu",
hidden_dropout_prob=0.0,
attention_probs_dropout_prob=0.0,
initializer_range=0.02,
backward_compatible=False,
float_type=tf.float32,
**kwargs):
super(Transformer, self).__init__(**kwargs)
self.num_hidden_layers = num_hidden_layers
self.hidden_size = hidden_size
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.intermediate_activation = tf_utils.get_activation(
intermediate_activation)
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.backward_compatible = backward_compatible
self.float_type = float_type
def build(self, unused_input_shapes):
"""Implements build() for the layer."""
self.layers = []
for i in range(self.num_hidden_layers):
self.layers.append(
TransformerBlock(
hidden_size=self.hidden_size,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
intermediate_activation=self.intermediate_activation,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
initializer_range=self.initializer_range,
backward_compatible=self.backward_compatible,
float_type=self.float_type,
name=(
"layer_%d" %
i)))
super(Transformer, self).build(unused_input_shapes)
def __call__(self, input_tensor, attention_mask=None, **kwargs):
inputs = tf_utils.pack_inputs([input_tensor, attention_mask])
return super(Transformer, self).__call__(inputs=inputs, **kwargs)
def call(self, inputs, return_all_layers=False):
"""Implements call() for the layer.
Args:
inputs: packed inputs.
return_all_layers: bool, whether to return outputs of all layers inside
encoders.
Returns:
Output tensor of the last layer or a list of output tensors.
"""
unpacked_inputs = tf_utils.unpack_inputs(inputs)
input_tensor = unpacked_inputs[0]
attention_mask = unpacked_inputs[1]
output_tensor = input_tensor
all_layer_outputs = []
for layer in self.layers:
output_tensor = layer(output_tensor, attention_mask)
all_layer_outputs.append(output_tensor)
if return_all_layers:
return all_layer_outputs
return all_layer_outputs[-1]
def get_initializer(initializer_range=0.02):
"""Creates a `tf.initializers.truncated_normal` with the given range.
Args:
initializer_range: float, initializer range for stddev.
Returns:
TruncatedNormal initializer with stddev = `initializer_range`.
"""
return tf.keras.initializers.TruncatedNormal(stddev=initializer_range)
def create_attention_mask_from_input_mask(from_tensor, to_mask):
"""Create 3D attention mask from a 2D tensor mask.
Args:
from_tensor: 2D or 3D Tensor of shape [batch_size, from_seq_length, ...].
to_mask: int32 Tensor of shape [batch_size, to_seq_length].
Returns:
float Tensor of shape [batch_size, from_seq_length, to_seq_length].
"""
from_shape = tf_utils.get_shape_list(from_tensor, expected_rank=[2, 3])
batch_size = from_shape[0]
from_seq_length = from_shape[1]
to_shape = tf_utils.get_shape_list(to_mask, expected_rank=2)
to_seq_length = to_shape[1]
to_mask = tf.cast(
tf.reshape(to_mask, [batch_size, 1, to_seq_length]),
dtype=from_tensor.dtype)
# We don't assume that `from_tensor` is a mask (although it could be). We
# don't actually care if we attend *from* padding tokens (only *to* padding)
# tokens so we create a tensor of all ones.
#
# `broadcast_ones` = [batch_size, from_seq_length, 1]
broadcast_ones = tf.ones(
shape=[batch_size, from_seq_length, 1], dtype=from_tensor.dtype)
# Here we broadcast along two dimensions to create the mask.
mask = broadcast_ones * to_mask
return mask
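# Shape sketch (illustrative values): with `from_tensor` of shape [2, 3, ...]
# and to_mask = [[1, 1, 0], [1, 0, 0]], the returned mask has shape [2, 3, 3];
# row b broadcasts to_mask[b] across the from_seq_length axis, so every query
# position attends to the same set of unmasked key positions.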
| 41.341286
| 95
| 0.63935
|
794ad7811fc6d0e26a30cf119ba413cf8840674c
| 23,057
|
py
|
Python
|
python_scripts/02_basic_preprocessing.py
|
lucyleeow/euroscipy-2019-scikit-learn-tutorial
|
81ec6483c5529af4655bf64ba0513f3f28cf565e
|
[
"CC0-1.0"
] | 27
|
2019-07-24T15:14:23.000Z
|
2021-12-02T10:13:47.000Z
|
python_scripts/02_basic_preprocessing.py
|
lucyleeow/euroscipy-2019-scikit-learn-tutorial
|
81ec6483c5529af4655bf64ba0513f3f28cf565e
|
[
"CC0-1.0"
] | 6
|
2019-08-07T13:07:10.000Z
|
2019-11-27T14:57:57.000Z
|
python_scripts/02_basic_preprocessing.py
|
lucyleeow/euroscipy-2019-scikit-learn-tutorial
|
81ec6483c5529af4655bf64ba0513f3f28cf565e
|
[
"CC0-1.0"
] | 16
|
2019-07-24T09:13:08.000Z
|
2021-11-04T19:24:42.000Z
|
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# formats: notebooks//ipynb,python_scripts//py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.2'
# jupytext_version: 1.2.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [markdown]
# # Introduction to scikit-learn
#
# ## Basic preprocessing and model fitting
#
# In this notebook, we present how to build predictive models on tabular
# datasets.
#
# In particular we will highlight:
# * the difference between numerical and categorical variables;
# * the importance of scaling numerical variables;
# * typical ways to deal with categorical variables;
# * train predictive models on different kinds of data;
# * evaluate the performance of a model via cross-validation.
#
# ## Introducing the dataset
#
# To this aim, we will use data from the 1994 Census bureau database. The goal
# with this data is to predict income level from heterogeneous data such as
# age, employment, education, family information, etc.
#
# Let's first load the data located in the `datasets` folder.
# %%
import pandas as pd
df = pd.read_csv("https://www.openml.org/data/get_csv/1595261/adult-census.csv")
# Or use the local copy:
# df = pd.read_csv('../datasets/adult-census.csv')
# %% [markdown]
# Let's have a look at the first records of this data frame:
# %%
df.head()
# %% [markdown]
# The target variable in our study will be the "class" column while we will use
# the other columns as input variables for our model. This target column divides
# the samples (also known as records) into two groups: high income (>50K) vs low
# income (<=50K). The resulting prediction problem is therefore a binary
# classification problem.
#
# For simplicity, we will ignore the "fnlwgt" (final weight) column that was
# crafted by the creators of the dataset when sampling the dataset to be
# representative of the full census database.
# %%
target_name = "class"
target = df[target_name].to_numpy()
target
# %%
data = df.drop(columns=[target_name, "fnlwgt"])
data.head()
# %% [markdown]
# We can check the number of samples and the number of features available in
# the dataset:
# %%
print(
f"The dataset contains {data.shape[0]} samples and {data.shape[1]} "
"features"
)
# %% [markdown]
# ## Working with numerical data
#
# The numerical data is the most natural type of data used in machine learning
# and can (almost) directly be fed to predictive models. We can quickly have a
# look at such data by selecting the subset of columns from the original data.
#
# We will use this subset of data to fit a linear classification model to
# predict the income class.
# %%
data.columns
# %%
data.dtypes
# %%
numerical_columns = [c for c in data.columns
if data[c].dtype.kind in ["i", "f"]]
numerical_columns
# %%
data_numeric = data[numerical_columns]
data_numeric.head()
# %% [markdown]
# When building a machine learning model, it is important to leave out a
# subset of the data which we can use later to evaluate the trained model.
# The data used to fit a model are called training data, while the data used
# to assess a model are called testing data.
#
# Scikit-learn provides a helper function `train_test_split` which will
# split the dataset into a training and a testing set. It will ensure that
# the data are shuffled randomly before splitting.
# %%
from sklearn.model_selection import train_test_split
data_train, data_test, target_train, target_test = train_test_split(
data_numeric, target, random_state=42
)
print(
f"The training dataset contains {data_train.shape[0]} samples and "
f"{data_train.shape[1]} features"
)
print(
f"The testing dataset contains {data_test.shape[0]} samples and "
f"{data_test.shape[1]} features"
)
# %% [markdown]
# We will build a linear classification model called "Logistic Regression". The
# `fit` method is called to train the model from the input and target data. Only
# the training data should be given for this purpose.
#
# In addition, we will check the time required to train the model and the
# number of iterations performed internally by the solver to find a solution.
# %%
from sklearn.linear_model import LogisticRegression
import time
model = LogisticRegression(solver='lbfgs')
start = time.time()
model.fit(data_train, target_train)
elapsed_time = time.time() - start
print(
f"The model {model.__class__.__name__} was trained in "
f"{elapsed_time:.3f} seconds for {model.n_iter_} iterations"
)
# %% [markdown]
# Let's ignore the convergence warning for now and instead try
# to use our model to make some predictions on the first five records
# of the held-out test set:
# %%
target_predicted = model.predict(data_test)
target_predicted[:5]
# %%
target_test[:5]
# %%
predictions = data_test.copy()
predictions['predicted-class'] = target_predicted
predictions['expected-class'] = target_test
predictions['correct'] = target_predicted == target_test
predictions.head()
# %% [markdown]
# To quantitatively evaluate our model, we can use the method `score`. It will
# compute the classification accuracy when dealing with a classification
# problem.
# %%
print(
f"The test accuracy using a {model.__class__.__name__} is "
f"{model.score(data_test, target_test):.3f}"
)
# %% [markdown]
# This is mathematically equivalent to computing the fraction of times
# the model makes a correct prediction on the test set:
# %%
(target_test == target_predicted).mean()
# %% [markdown]
# ## Exercise 1
#
# - What would be the score of a model that always predicts `' >50K'`?
# - What would be the score of a model that always predicts `' <=50K'`?
# - Is 81% or 82% accuracy a good score for this problem?
#
# Hint: You can use a [DummyClassifier](https://scikit-learn.org/stable/modules/model_evaluation.html#dummy-estimators) and compute its cross-validated score to assess the performance of such baselines, as sketched below.
#
# Use the dedicated notebook to do this exercise.
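# %% [markdown]
# A possible baseline sketch for the hint above (only one way to approach the
# exercise, not the official solution; `strategy="most_frequent"` and
# `strategy="constant"` are standard `DummyClassifier` options):
# %%
from sklearn.dummy import DummyClassifier
from sklearn.model_selection import cross_val_score
dummy = DummyClassifier(strategy="most_frequent")
print(cross_val_score(dummy, data_numeric, target, cv=5).mean())
# A model always predicting the other class would instead use
# DummyClassifier(strategy="constant", constant=' >50K').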
# %% [markdown]
# Let's now consider the `ConvergenceWarning` message that was raised previously
# when calling the `fit` method to train our model. This warning informs us that
# our model stopped learning because it reached the maximum number of
# iterations allowed by the user. This could potentially be detrimental to the
# model accuracy. We can follow the (bad) advice given in the warning message
# and increase the maximum number of iterations allowed.
# %%
model = LogisticRegression(solver='lbfgs', max_iter=50000)
start = time.time()
model.fit(data_train, target_train)
elapsed_time = time.time() - start
# %%
print(
f"The accuracy using a {model.__class__.__name__} is "
f"{model.score(data_test, target_test):.3f} with a fitting time of "
f"{elapsed_time:.3f} seconds in {model.n_iter_} iterations"
)
# %% [markdown]
# We can now observe a longer training time but no significant improvement in
# the predictive performance. Instead of increasing the number of iterations, we
# can try to help the model converge faster by scaling the data first. A range of
# preprocessing algorithms in scikit-learn allows us to transform the input data
# before training a model. We can easily combine these sequential operations with
# a scikit-learn `Pipeline`, which will chain the operations and can be used as
# any other classifier or regressor. The helper function `make_pipeline` will
# create a `Pipeline` by giving it the successive transformations to perform.
#
# In our case, we will standardize the data and then train a new logistic
# regression model on that new version of the dataset.
# %%
data_train.describe()
# %%
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
data_train_scaled = scaler.fit_transform(data_train)
data_train_scaled
# %%
data_train_scaled = pd.DataFrame(data_train_scaled, columns=data_train.columns)
data_train_scaled.describe()
# %%
from sklearn.pipeline import make_pipeline
model = make_pipeline(StandardScaler(), LogisticRegression(solver='lbfgs'))
start = time.time()
model.fit(data_train, target_train)
elapsed_time = time.time() - start
# %%
print(
f"The accuracy using a {model.__class__.__name__} is "
f"{model.score(data_test, target_test):.3f} with a fitting time of "
f"{elapsed_time:.3f} seconds in {model[-1].n_iter_} iterations"
)
# %% [markdown]
# We can see that the training time and the number of iterations are much shorter
# while the predictive performance (accuracy) stays the same.
#
# In the previous example, we split the original data into a training set and a
# testing set. This strategy has several issues: in the setting where the amount
# of data is limited, the subset of data used to train or test will be small;
# and the splitting was done in a random manner and we have no information
# regarding the confidence of the results obtained.
#
# Instead, we can use cross-validation. Cross-validation consists of repeating
# this random splitting into training and testing sets and aggregating the
# model performance. By repeating the experiment, one can get an estimate of
# the variability of the model performance.
#
# The function `cross_val_score` allows for such experimental protocol by giving
# the model, the data and the target. Since there exist several
# cross-validation strategies, `cross_val_score` takes a parameter `cv` which
# defines the splitting strategy.
# %%
from sklearn.model_selection import cross_val_score
scores = cross_val_score(model, data_numeric, target, cv=5)
print(f"The different scores obtained are: \n{scores}")
# %%
print(f"The mean cross-validation accuracy is: "
f"{scores.mean():.3f} +/- {scores.std():.3f}")
# %% [markdown]
# Note that by computing the standard-deviation of the cross-validation scores
# we can get an idea of the uncertainty of our estimation of the predictive
# performance of the model: in the above results, only the first 2 decimals seem
# to be trustworthy. Using a single train / test split would not allow us to
# know anything about the level of uncertainty of the accuracy of the model.
#
# Setting `cv=5` created 5 distinct splits to get 5 variations for the training
# and testing sets. Each training set is used to fit one model which is then
# scored on the matching test set. This strategy is called K-fold
# cross-validation where `K` corresponds to the number of splits.
#
# The following matplotlib code helps visualize how the dataset is partitioned
# between train and test samples at each iteration of the cross-validation
# procedure:
# %%
# %matplotlib inline
from sklearn.model_selection import KFold
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.patches import Patch
cmap_cv = plt.cm.coolwarm
def plot_cv_indices(cv, X, y, ax, lw=20):
"""Create a sample plot for indices of a cross-validation object."""
splits = list(cv.split(X=X, y=y))
n_splits = len(splits)
# Generate the training/testing visualizations for each CV split
for ii, (train, test) in enumerate(splits):
# Fill in indices with the training/test groups
indices = np.zeros(shape=X.shape[0], dtype=np.int32)
indices[train] = 1
# Visualize the results
ax.scatter(range(len(indices)), [ii + .5] * len(indices),
c=indices, marker='_', lw=lw, cmap=cmap_cv,
vmin=-.2, vmax=1.2)
# Formatting
yticklabels = list(range(n_splits))
ax.set(yticks=np.arange(n_splits) + .5, yticklabels=yticklabels,
xlabel='Sample index', ylabel="CV iteration",
ylim=[n_splits + .2, -.2], xlim=[0, 100])
ax.set_title('{}'.format(type(cv).__name__), fontsize=15)
return ax
# %%
# Some random data points
n_points = 100
X = np.random.randn(n_points, 10)
y = np.random.randn(n_points)
fig, ax = plt.subplots(figsize=(10, 6))
cv = KFold(5)
plot_cv_indices(cv, X, y, ax);
# %% [markdown]
# ## Working with categorical variables
#
# As we have seen in the previous section, a numerical variable is a continuous
# quantity represented by a real or integer number. Those variables can be
# naturally handled by machine learning algorithms that are typically composed of
# a sequence of arithmetic instructions such as additions and multiplications.
#
# In contrast, categorical variables have discrete values typically represented
# by string labels taken in a finite list of possible choices. For instance, the
# variable `native-country` in our dataset is a categorical variable because it
# encodes the data using a finite list of possible countries (along with the `?`
# marker when this information is missing):
# %%
data["native-country"].value_counts()
# %% [markdown]
# In the remainder of this section, we will present different strategies to
# encode categorical data into numerical data which can be used by a
# machine-learning algorithm.
# %%
data.dtypes
# %%
categorical_columns = [c for c in data.columns
if data[c].dtype.kind not in ["i", "f"]]
categorical_columns
# %%
data_categorical = data[categorical_columns]
data_categorical.head()
# %%
print(f"The datasets is composed of {data_categorical.shape[1]} features")
# %% [markdown]
# ### Encoding ordinal categories
#
# The most intuitive strategy is to encode each category with a number.
# The `OrdinalEncoder` will transform the data in such manner.
# %%
from sklearn.preprocessing import OrdinalEncoder
encoder = OrdinalEncoder()
data_encoded = encoder.fit_transform(data_categorical)
print(f"The dataset encoded contains {data_encoded.shape[1]} features")
data_encoded[:5]
# %% [markdown]
# We can see that all categories have been encoded for each feature
# independently. We can also notice that the number of features before and after
# the encoding is the same.
#
# However, one has to be careful when using this encoding strategy. Using this
# integer representation can lead the downstream models to make the assumption
# that the categories are ordered: 0 is smaller than 1 which is smaller than 2,
# etc.
#
# By default, `OrdinalEncoder` uses a lexicographical strategy to map string
# category labels to integers. This strategy is completely arbitrary and often
# meaningless. For instance, suppose the dataset has a categorical variable named
# "size" with categories such as "S", "M", "L", "XL". We would like the integer
# representation to respect the meaning of the sizes by mapping them to increasing
# integers such as 0, 1, 2, 3. However, the lexicographical strategy used by
# default would map the labels "S", "M", "L", "XL" to 2, 1, 0, 3.
#
# The `OrdinalEncoder` class accepts a "categories" constructor argument to pass
# the correct ordering explicitly, as sketched below.
#
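# Using a hypothetical "size" column (not part of the census dataset used in
# this notebook), a minimal sketch of passing such an explicit ordering:
# %%
import pandas as pd
from sklearn.preprocessing import OrdinalEncoder
# Hypothetical data, only to illustrate the `categories` argument.
sizes = pd.DataFrame({"size": ["S", "M", "L", "XL", "M"]})
size_encoder = OrdinalEncoder(categories=[["S", "M", "L", "XL"]])
size_encoder.fit_transform(sizes)  # maps S, M, L, XL to 0, 1, 2, 3
# %% [markdown]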
# If a categorical variable does not carry any meaningful order information then
# this encoding might be misleading to downstream statistical models and you might
# consider using one-hot encoding instead (see below).
#
# Note however that the impact of a violation of this ordering assumption really
# depends on the downstream models (for instance, linear models are much more
# sensitive than models built from an ensemble of decision trees).
#
# ### Encoding nominal categories (without assuming any order)
#
# `OneHotEncoder` is an alternative encoder that prevents the downstream
# models from making a false assumption about the ordering of categories. For a
# given feature, it will create as many new columns as there are possible
# categories. For a given sample, the value of the column corresponding to the
# category will be set to `1` while all the columns of the other categories will
# be set to `0`.
# %%
print(f"The dataset is composed of {data_categorical.shape[1]} features")
data_categorical.head()
# %%
from sklearn.preprocessing import OneHotEncoder
encoder = OneHotEncoder(sparse=False)
data_encoded = encoder.fit_transform(data_categorical)
print(f"The dataset encoded contains {data_encoded.shape[1]} features")
data_encoded
# %% [markdown]
# Let's wrap this numpy array in a dataframe with informative column names as provided by the encoder object:
# %%
columns_encoded = encoder.get_feature_names(data_categorical.columns)
pd.DataFrame(data_encoded, columns=columns_encoded).head()
# %% [markdown]
# Look at how the workclass variable of the first 3 records has been encoded and compare this to the original string representation.
#
# The number of features after the encoding is more than 10 times larger than in the
# original data because some variables such as `occupation` and `native-country`
# have many possible categories.
#
# We can now integrate this encoder inside a machine learning pipeline as in the
# case with numerical data: let's train a linear classifier on
# the encoded data and check the performance of this machine learning pipeline
# using cross-validation.
# %%
model = make_pipeline(
OneHotEncoder(handle_unknown='ignore'),
LogisticRegression(solver='lbfgs', max_iter=1000)
)
scores = cross_val_score(model, data_categorical, target)
print(f"The different scores obtained are: \n{scores}")
# %%
print(f"The accuracy is: {scores.mean():.3f} +/- {scores.std():.3f}")
# %% [markdown]
# As you can see, this representation of the categorical variables of the data is slightly more predictive of the revenue than the numerical variables that we used previously.
# %% [markdown]
# ## Exercise 2:
#
# - Try to fit a logistic regression model on categorical data transformed by
# the OrdinalEncoder instead. What do you observe?
#
# Use the dedicated notebook to do this exercise.
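#
# Minimal scaffolding for this exercise (assumption: we pass the full list of
# categories to `OrdinalEncoder` so that cross-validation folds do not hit
# unseen categories); interpreting the resulting score is left to you:
# %%
ordinal_encoder = OrdinalEncoder(
    categories=[data_categorical[c].unique() for c in data_categorical.columns]
)
ordinal_model = make_pipeline(
    ordinal_encoder, LogisticRegression(solver='lbfgs', max_iter=1000)
)
ordinal_scores = cross_val_score(ordinal_model, data_categorical, target)
print(f"The accuracy is: {ordinal_scores.mean():.3f} +/- {ordinal_scores.std():.3f}")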
# %% [markdown]
# ## Using numerical and categorical variables together
#
# In the previous sections, we saw that we need to treat data specifically
# depending on their nature (i.e. numerical or categorical).
#
# Scikit-learn provides a `ColumnTransformer` class which will dispatch some
# specific columns to a specific transformer making it easy to fit a single
# predictive model on a dataset that combines both kinds of variables together
# (heterogeneously typed tabular data).
#
# We can first define the columns depending on their data type:
# * **binary encoding** will be applied to categorical columns with only two
# possible values (e.g. sex=male or sex=female in this example). Each binary
# categorical column will be mapped to one numerical column with 0 or 1
# values.
# * **one-hot encoding** will be applied to categorical columns with more than
# two possible categories. This encoding will create one additional column for
# each possible categorical value.
# * **numerical scaling** will be applied to the numerical features, which will be standardized.
# %%
binary_encoding_columns = ['sex']
one_hot_encoding_columns = ['workclass', 'education', 'marital-status',
'occupation', 'relationship',
'race', 'native-country']
scaling_columns = ['age', 'education-num', 'hours-per-week',
'capital-gain', 'capital-loss']
# %% [markdown]
# We can now create our `ColumnTransformer` by specifying a list of triplets
# (preprocessor name, transformer, columns). Finally, we can define a pipeline
# to stack this "preprocessor" with our classifier (logistic regression).
# %%
from sklearn.compose import ColumnTransformer
preprocessor = ColumnTransformer([
('binary-encoder', OrdinalEncoder(), binary_encoding_columns),
('one-hot-encoder', OneHotEncoder(handle_unknown='ignore'),
one_hot_encoding_columns),
('standard-scaler', StandardScaler(), scaling_columns)
])
model = make_pipeline(
preprocessor,
LogisticRegression(solver='lbfgs', max_iter=1000)
)
# %% [markdown]
# The final model is more complex than the previous models but still follows the
# same API:
# - the `fit` method is called to preprocess the data then train the classifier;
# - the `predict` method can make predictions on new data;
# - the `score` method is used to predict on the test data and compare the
# predictions to the expected test labels to compute the accuracy.
# %%
data_train, data_test, target_train, target_test = train_test_split(
data, target, random_state=42
)
model.fit(data_train, target_train)
model.predict(data_test)[:5]
# %%
target_test[:5]
# %%
data_test.head()
# %%
model.score(data_test, target_test)
# %% [markdown]
# This model can also be cross-validated as usual (instead of using a single
# train-test split):
# %%
scores = cross_val_score(model, data, target, cv=5)
print(f"The different scores obtained are: \n{scores}")
# %%
print(f"The accuracy is: {scores.mean():.3f} +- {scores.std():.3f}")
# %% [markdown]
# The compound model has a higher predictive accuracy than the
# two models that used numerical and categorical variables in
# isolation.
# %% [markdown]
# # Fitting a more powerful model
#
# Linear models are very nice because they are usually very cheap to train,
# small to deploy, fast to predict and give a good baseline.
#
# However it is often useful to check whether more complex models such as
# ensemble of decision trees can lead to higher predictive performance.
#
# In the following we try a scalable implementation of the Gradient Boosting
# Machine algorithm. For this class of models, we know that contrary to linear
# models, it is useless to scale the numerical features and furthermore it is
# both safe and significantly more computationally efficient to use an arbitrary
# integer encoding for the categorical variables, even if the ordering is
# arbitrary. Therefore we adapt the preprocessing pipeline as follows:
# %%
from sklearn.experimental import enable_hist_gradient_boosting
from sklearn.ensemble import HistGradientBoostingClassifier
# For each categorical column, extract the list of all possible categories
# in some arbitrary order.
categories = [data[column].unique() for column in data[categorical_columns]]
preprocessor = ColumnTransformer([
('categorical', OrdinalEncoder(categories=categories), categorical_columns),
], remainder="passthrough")
model = make_pipeline(preprocessor, HistGradientBoostingClassifier())
model.fit(data_train, target_train)
print(model.score(data_test, target_test))
# %% [markdown]
# We can observe that we get significantly higher accuracies with the Gradient
# Boosting model. This is often what we observe whenever the dataset has a large
# number of samples and a limited number of informative features (e.g. less than
# 1000) with a mix of numerical and categorical variables.
#
# This explains why Gradient Boosted Machines are very popular among data science
# practitioners who work with tabular data.
# %% [markdown]
# ## Exercise 3:
#
# - Check that scaling the numerical features does not impact the speed or
# accuracy of HistGradientBoostingClassifier
# - Check that one-hot encoding the categorical variable does not improve the
# accuracy of HistGradientBoostingClassifier but slows down the training.
#
# Use the dedicated notebook to do this exercise.
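#
# A minimal sketch of the first check, assuming the unscaled pipeline above as
# the baseline (a sketch under those assumptions, not a full solution):
# %%
# Same ordinal encoding as before, but standardize the remaining numerical
# columns instead of passing them through.
scaled_preprocessor = ColumnTransformer([
    ('categorical', OrdinalEncoder(categories=categories), categorical_columns),
], remainder=StandardScaler())
scaled_model = make_pipeline(scaled_preprocessor, HistGradientBoostingClassifier())
scaled_model.fit(data_train, target_train)
print(scaled_model.score(data_test, target_test))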
| 33.127874
| 181
| 0.742421
|
794ad91e6b4b902022a35fa59f5d0d92a0decdfd
| 802
|
py
|
Python
|
python/batch-compute-with-step-functions/workshop/stack/cicdpipeline_stack.py
|
hy714335634/aws-cdk-examples
|
c66198642985e68e0541a4777d2a8e9acd222b7d
|
[
"Apache-2.0"
] | null | null | null |
python/batch-compute-with-step-functions/workshop/stack/cicdpipeline_stack.py
|
hy714335634/aws-cdk-examples
|
c66198642985e68e0541a4777d2a8e9acd222b7d
|
[
"Apache-2.0"
] | null | null | null |
python/batch-compute-with-step-functions/workshop/stack/cicdpipeline_stack.py
|
hy714335634/aws-cdk-examples
|
c66198642985e68e0541a4777d2a8e9acd222b7d
|
[
"Apache-2.0"
] | null | null | null |
from aws_cdk import (
core
)
from construct.cicdpipeline.cicd_batch import CICDBatch
from construct.cicdpipeline.cicd_web import CICDWeb
class CICDPipelineStack(core.Stack):
def __init__(self, scope: core.Construct, id: str,
UserName="default",
EmailAddress="default",
BatchRepo="default",
WebRepo="default",
WebService="default",
**kwargs
) -> None:
super().__init__(scope, id, **kwargs)
self.My_CICDBatch = CICDBatch(self,
"CICDBatch-" + UserName,
UserName=UserName,
Repo=BatchRepo
)
self.My_CICDWeb = CICDWeb(self,
"CICDWeb-" + UserName,
UserName=UserName,
Repo=WebRepo,
WebService=WebService
)
| 25.870968
| 55
| 0.5798
|
794ad95513d571a04f14ed62d1a3d09cc27d8657
| 1,813
|
py
|
Python
|
python-the-hard-way/21-functions-can-return-something.py
|
Valka7a/python-playground
|
f08d4374f2cec2e8b1afec3753854b1ec10ff480
|
[
"MIT"
] | null | null | null |
python-the-hard-way/21-functions-can-return-something.py
|
Valka7a/python-playground
|
f08d4374f2cec2e8b1afec3753854b1ec10ff480
|
[
"MIT"
] | null | null | null |
python-the-hard-way/21-functions-can-return-something.py
|
Valka7a/python-playground
|
f08d4374f2cec2e8b1afec3753854b1ec10ff480
|
[
"MIT"
] | null | null | null |
# Exercise 21: Functions Can Return Something
def add(a, b):
print "ADDING %d + %d" % (a, b)
return a + b
def subtract(a, b):
print "SUBTRACTING %d - %d" % (a, b)
return a - b
def multiply(a, b):
print "MULTIPLYING %d * %d" % (a, b)
return a * b
def divide(a, b):
print "DIVIDING %d / %d" % (a, b)
return a / b
print "Let's do some math with just functions!"
# Drill 3
age = add(40, 15)
height = subtract(68, 7)
weight = multiply(94, 5)
iq = divide(101, 3)
print "Age: %d, Height: %d, Weight: %d, IQ: %d" % (age, height, weight, iq)
# A puzzle for the extra credit, type it in anyway.
print "Here is a puzzle."
what = add(age, subtract(height, multiply(weight, divide(iq, 2))))
print "That becomes: ", what, "Can you do it by hand?"
# Study Drills
# 1.If you aren't really sure what 'return' does, try writing a
# few of your own functions and have them return some values.
# You can return anything that you can put to the right of an '='.
# 2. At the end of the script is a puzzle. I'm taking the return
# value of one function and using it as the argument of another
# function. I'm doing this in a chain so that I'm kind of
# creating a formula using the functions. It looks really weird,
# but if you run the script you can see the results. What you
# should do is try to figure out the normal formula that would
# recreate this same set of operations.
# 3. Once you have the formula worked out for the puzzle, get
# in there and see what happens when you modify the parts of
# the functions. Try to change it on purpose to make another
# value.
# 4. Do the inverse. Write a simple formula and use the
# functions in the same way to calculate it.
# Drill 4
next = multiply(age, add(height, subtract(weight, divide(iq, 3))))
print "This becomes from mine formula: ", next, "Can you do it?"
| 31.807018
| 75
| 0.688913
|
794ad96dcb9388fb4d6e61885714b7abe06bf22d
| 2,347
|
py
|
Python
|
imageapp/models.py
|
Edwin-Karanu-Muiruri/django-gallery-karanu
|
06f456189ce1a3a164df1d6ce0971b684159ccfb
|
[
"MIT"
] | null | null | null |
imageapp/models.py
|
Edwin-Karanu-Muiruri/django-gallery-karanu
|
06f456189ce1a3a164df1d6ce0971b684159ccfb
|
[
"MIT"
] | 8
|
2020-05-26T08:37:17.000Z
|
2022-01-13T02:46:44.000Z
|
imageapp/models.py
|
Edwin-Karanu-Muiruri/django-gallery-karanu
|
06f456189ce1a3a164df1d6ce0971b684159ccfb
|
[
"MIT"
] | null | null | null |
from django.db import models
from cloudinary.models import CloudinaryField
# Create your models here.
class Category(models.Model):
name = models.CharField(max_length = 50)
def __str__(self):
return self.name
def save_category(self):
self.save()
def delete_category(self):
self.delete()
@classmethod
def update_category(cls,id,value):
cls.objects.filter(id = id).update(name = value)
class Location(models.Model):
name = models.CharField(max_length = 50)
def __str__(self):
return self.name
def save_location(self):
self.save()
def delete_location(self):
self.delete()
@classmethod
def update_location(cls,id,value):
cls.objects.filter(id = id).update(name = value)
@classmethod
def display_all_locations(cls):
return cls.objects.all()
class Image(models.Model):
image_name = models.CharField(max_length = 50)
image = CloudinaryField('image')
description = models.TextField()
category = models.ForeignKey(Category , on_delete = models.CASCADE,default = 'category')
location = models.ForeignKey(Location, on_delete = models.CASCADE, default = 'location')
def __str__(self):
return self.image_name
def save_image(self):
self.save()
def delete_image(self):
self.delete()
@classmethod
def get_image_by_id(cls,id):
image = cls.objects.get(id = id)
return image
@classmethod
def search_image(cls,category_search):
try:
searched_category = Category.objects.get(name__icontains = category_search)
images = Image.objects.filter(category = searched_category.id)
return images
except Exception:
return "No images matched that category. Please try another eg. Family, Friends or Places"
@classmethod
def filter_by_location(cls,location_search):
searched_location = Location.objects.get(name = location_search)
images = Image.objects.filter(location = searched_location.id)
return images
@classmethod
def display_all_images(cls):
return cls.objects.all()
@classmethod
def update_image_description(cls,id,value):
cls.objects.filter(id = id).update(description = value)
| 27.940476
| 102
| 0.659139
|
794ad97eb8176cc86f403a372f2d42b4b097b84b
| 1,021
|
py
|
Python
|
molecool/tests/test_measure.py
|
ywang40/molecool
|
4ec6af6d894c3152b1cd9d616e44521b3c6a5a46
|
[
"BSD-3-Clause"
] | null | null | null |
molecool/tests/test_measure.py
|
ywang40/molecool
|
4ec6af6d894c3152b1cd9d616e44521b3c6a5a46
|
[
"BSD-3-Clause"
] | 1
|
2020-12-18T19:24:59.000Z
|
2020-12-18T19:24:59.000Z
|
molecool/tests/test_measure.py
|
ywang40/molecool
|
4ec6af6d894c3152b1cd9d616e44521b3c6a5a46
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Tests for the measure module.
"""
import molecool
import numpy as np
import pytest
def test_calculate_distance():
r1 = np.array([0, 0, 0])
r2 = np.array([0, 1, 0])
expected_distance = 1
calculated_distance = molecool.calculate_distance(r1, r2)
assert expected_distance == calculated_distance
def test_calculate_angle():
r1 = np.array([0, 0, -1])
r2 = np.array([0, 0, 0])
r3 = np.array([1, 0, 0])
expect_angle = 90
calculated_angle = molecool.calculate_angle(r1, r2, r3, degrees=True)
assert expect_angle == calculated_angle
@pytest.mark.parametrize("p1, p2, p3, expected_angle", [
(np.array([np.sqrt(2)/2, np.sqrt(2)/2, 0]), np.array([0, 0, 0]), np.array([1, 0, 0]), 45),
(np.array([0, 0, -1]), np.array([0, 1, 0]), np.array([1, 0, 0]), 60),
])
def test_calculate_angle_many(p1, p2, p3, expected_angle):
calculated_angle = molecool.calculate_angle(p1, p2, p3, degrees=True)
assert expected_angle == pytest.approx(calculated_angle)
| 24.902439
| 94
| 0.644466
|
794ada23c90178a7c63699908b299622fc11437c
| 8,977
|
py
|
Python
|
libqtile/dgroups.py
|
luanfagu/pyBox
|
a91308a4131dbe244555521f3dd248ff53d20588
|
[
"MIT"
] | null | null | null |
libqtile/dgroups.py
|
luanfagu/pyBox
|
a91308a4131dbe244555521f3dd248ff53d20588
|
[
"MIT"
] | null | null | null |
libqtile/dgroups.py
|
luanfagu/pyBox
|
a91308a4131dbe244555521f3dd248ff53d20588
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2011-2012 Florian Mounier
# Copyright (c) 2012-2014 roger
# Copyright (c) 2012 Craig Barnes
# Copyright (c) 2012-2014 Tycho Andersen
# Copyright (c) 2013 Tao Sauvage
# Copyright (c) 2014 ramnes
# Copyright (c) 2014 Sebastian Kricner
# Copyright (c) 2014 Sean Vig
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import collections
import libqtile.hook
from libqtile.backend.base import Static
from libqtile.command import lazy
from libqtile.config import Group, Key, Match, Rule
from libqtile.log_utils import logger
def simple_key_binder(mod, keynames=None):
"""Bind keys to mod+group position or to the keys specified as second argument"""
def func(dgroup):
# unbind all
for key in dgroup.keys[:]:
dgroup.qtile.ungrab_key(key)
dgroup.keys.remove(key)
if keynames:
keys = keynames
else:
# keys 1 to 9 and 0
keys = list(map(str, list(range(1, 10)) + [0]))
# bind all keys
for keyname, group in zip(keys, dgroup.qtile.groups):
name = group.name
key = Key([mod], keyname, lazy.group[name].toscreen())
key_s = Key([mod, "shift"], keyname, lazy.window.togroup(name))
key_c = Key(
[mod, "control"],
keyname,
lazy.group.switch_groups(name)
)
dgroup.keys.append(key)
dgroup.keys.append(key_s)
dgroup.keys.append(key_c)
dgroup.qtile.grab_key(key)
dgroup.qtile.grab_key(key_s)
dgroup.qtile.grab_key(key_c)
return func
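# Example usage (hypothetical): in a user's config.py one would typically set
#   dgroups_key_binder = simple_key_binder("mod4")
# to bind mod4+<position> / mod4+shift+<position> keys for each group.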
class DGroups:
"""Dynamic Groups"""
def __init__(self, qtile, dgroups, key_binder=None, delay=1):
self.qtile = qtile
self.groups = dgroups
self.groups_map = {}
self.rules = []
self.rules_map = {}
self.last_rule_id = 0
for rule in getattr(qtile.config, 'dgroups_app_rules', []):
self.add_rule(rule)
self.keys = []
self.key_binder = key_binder
self._setup_hooks()
self._setup_groups()
self.delay = delay
self.timeout = {}
def add_rule(self, rule, last=True):
rule_id = self.last_rule_id
self.rules_map[rule_id] = rule
if last:
self.rules.append(rule)
else:
self.rules.insert(0, rule)
self.last_rule_id += 1
return rule_id
def remove_rule(self, rule_id):
rule = self.rules_map.get(rule_id)
if rule:
self.rules.remove(rule)
del self.rules_map[rule_id]
else:
logger.warning('Rule "%s" not found', rule_id)
def add_dgroup(self, group, start=False):
self.groups_map[group.name] = group
rule = Rule(group.matches, group=group.name)
self.rules.append(rule)
if start:
self.qtile.add_group(group.name, group.layout, group.layouts, group.label)
def _setup_groups(self):
for group in self.groups:
self.add_dgroup(group, group.init)
if group.spawn and not self.qtile.no_spawn:
if isinstance(group.spawn, str):
spawns = [group.spawn]
else:
spawns = group.spawn
for spawn in spawns:
pid = self.qtile.cmd_spawn(spawn)
self.add_rule(Rule(Match(net_wm_pid=pid), group.name))
def _setup_hooks(self):
libqtile.hook.subscribe.addgroup(self._addgroup)
libqtile.hook.subscribe.client_new(self._add)
libqtile.hook.subscribe.client_killed(self._del)
if self.key_binder:
libqtile.hook.subscribe.setgroup(
lambda: self.key_binder(self)
)
libqtile.hook.subscribe.changegroup(
lambda: self.key_binder(self)
)
def _addgroup(self, group_name):
if group_name not in self.groups_map:
self.add_dgroup(Group(group_name, persist=False))
def _add(self, client):
if client in self.timeout:
logger.info('Remove dgroup source')
self.timeout.pop(client).cancel()
# ignore static windows
if isinstance(client, Static):
return
# ignore windows whose groups is already set (e.g. from another hook or
# when it was set on state restore)
if client.group is not None:
return
group_set = False
intrusive = False
for rule in self.rules:
# Matching Rules
if rule.matches(client):
if rule.group:
if rule.group in self.groups_map:
layout = self.groups_map[rule.group].layout
layouts = self.groups_map[rule.group].layouts
label = self.groups_map[rule.group].label
else:
layout = None
layouts = None
label = None
group_added = self.qtile.add_group(rule.group, layout, layouts, label)
client.togroup(rule.group)
group_set = True
group_obj = self.qtile.groups_map[rule.group]
group = self.groups_map.get(rule.group)
if group and group_added:
for k, v in list(group.layout_opts.items()):
if isinstance(v, collections.abc.Callable):
v(group_obj.layout)
else:
setattr(group_obj.layout, k, v)
affinity = group.screen_affinity
if affinity and len(self.qtile.screens) > affinity:
self.qtile.screens[affinity].set_group(group_obj)
if rule.float:
client.enablefloating()
if rule.intrusive:
intrusive = rule.intrusive
if rule.break_on_match:
break
# If app doesn't have a group
if not group_set:
current_group = self.qtile.current_group.name
if current_group in self.groups_map and \
self.groups_map[current_group].exclusive and \
not intrusive:
wm_class = client.get_wm_class()
if wm_class:
if len(wm_class) > 1:
wm_class = wm_class[1]
else:
wm_class = wm_class[0]
group_name = wm_class
else:
group_name = client.name or 'Unnamed'
self.add_dgroup(Group(group_name, persist=False), start=True)
client.togroup(group_name)
self.sort_groups()
def sort_groups(self):
grps = self.qtile.groups
sorted_grps = sorted(grps, key=lambda g: self.groups_map[g.name].position)
if grps != sorted_grps:
self.qtile.groups = sorted_grps
libqtile.hook.fire("changegroup")
def _del(self, client):
# ignore static windows
if isinstance(client, Static):
return
group = client.group
def delete_client():
# Delete group if empty and don't persist
if group and group.name in self.groups_map and \
not self.groups_map[group.name].persist and \
len(group.windows) <= 0:
self.qtile.delete_group(group.name)
self.sort_groups()
del self.timeout[client]
# Wait the delay until really delete the group
logger.info('Add dgroup timer with delay {}s'.format(self.delay))
self.timeout[client] = self.qtile.call_later(
self.delay, delete_client
)
| 34.929961
| 90
| 0.574134
|
794adae09b3013480d9b5a52fdd962a0bc51d495
| 4,982
|
py
|
Python
|
TimeSformer/timesformer/utils/c2_model_loading.py
|
balaganeshmohan/Emotion-recognition
|
ad4816226598155c273d99fa4a4ca80953adcaa1
|
[
"MIT"
] | null | null | null |
TimeSformer/timesformer/utils/c2_model_loading.py
|
balaganeshmohan/Emotion-recognition
|
ad4816226598155c273d99fa4a4ca80953adcaa1
|
[
"MIT"
] | null | null | null |
TimeSformer/timesformer/utils/c2_model_loading.py
|
balaganeshmohan/Emotion-recognition
|
ad4816226598155c273d99fa4a4ca80953adcaa1
|
[
"MIT"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""Caffe2 to PyTorch checkpoint name converting utility."""
import re
def get_name_convert_func():
"""
Get the function to convert Caffe2 layer names to PyTorch layer names.
Returns:
(func): function to convert parameter name from Caffe2 format to PyTorch
format.
"""
pairs = [
# ------------------------------------------------------------
# 'nonlocal_conv3_1_theta_w' -> 's3.pathway0_nonlocal3.conv_g.weight'
[
r"^nonlocal_conv([0-9]+)_([0-9]+)_(.*)",
r"s\1.pathway0_nonlocal\2_\3",
],
# 'theta' -> 'conv_theta'
[r"^(.*)_nonlocal([0-9]+)_(theta)(.*)", r"\1_nonlocal\2.conv_\3\4"],
# 'g' -> 'conv_g'
[r"^(.*)_nonlocal([0-9]+)_(g)(.*)", r"\1_nonlocal\2.conv_\3\4"],
# 'phi' -> 'conv_phi'
[r"^(.*)_nonlocal([0-9]+)_(phi)(.*)", r"\1_nonlocal\2.conv_\3\4"],
# 'out' -> 'conv_out'
[r"^(.*)_nonlocal([0-9]+)_(out)(.*)", r"\1_nonlocal\2.conv_\3\4"],
# 'nonlocal_conv4_5_bn_s' -> 's4.pathway0_nonlocal3.bn.weight'
[r"^(.*)_nonlocal([0-9]+)_(bn)_(.*)", r"\1_nonlocal\2.\3.\4"],
# ------------------------------------------------------------
# 't_pool1_subsample_bn' -> 's1_fuse.conv_f2s.bn.running_mean'
[r"^t_pool1_subsample_bn_(.*)", r"s1_fuse.bn.\1"],
# 't_pool1_subsample' -> 's1_fuse.conv_f2s'
[r"^t_pool1_subsample_(.*)", r"s1_fuse.conv_f2s.\1"],
# 't_res4_5_branch2c_bn_subsample_bn_rm' -> 's4_fuse.conv_f2s.bias'
[
r"^t_res([0-9]+)_([0-9]+)_branch2c_bn_subsample_bn_(.*)",
r"s\1_fuse.bn.\3",
],
# 't_pool1_subsample' -> 's1_fuse.conv_f2s'
[
r"^t_res([0-9]+)_([0-9]+)_branch2c_bn_subsample_(.*)",
r"s\1_fuse.conv_f2s.\3",
],
# ------------------------------------------------------------
# 'res4_4_branch_2c_bn_b' -> 's4.pathway0_res4.branch2.c_bn_b'
[
r"^res([0-9]+)_([0-9]+)_branch([0-9]+)([a-z])_(.*)",
r"s\1.pathway0_res\2.branch\3.\4_\5",
],
# 'res_conv1_bn_' -> 's1.pathway0_stem.bn.'
[r"^res_conv1_bn_(.*)", r"s1.pathway0_stem.bn.\1"],
# 'conv1_xy_w_momentum' -> 's1.pathway0_stem.conv_xy.'
[r"^conv1_xy(.*)", r"s1.pathway0_stem.conv_xy\1"],
# 'conv1_w_momentum' -> 's1.pathway0_stem.conv.'
[r"^conv1_(.*)", r"s1.pathway0_stem.conv.\1"],
# 'res4_0_branch1_w' -> 'S4.pathway0_res0.branch1.weight'
[
r"^res([0-9]+)_([0-9]+)_branch([0-9]+)_(.*)",
r"s\1.pathway0_res\2.branch\3_\4",
],
# 'res_conv1_' -> 's1.pathway0_stem.conv.'
[r"^res_conv1_(.*)", r"s1.pathway0_stem.conv.\1"],
# ------------------------------------------------------------
# 'res4_4_branch_2c_bn_b' -> 's4.pathway0_res4.branch2.c_bn_b'
[
r"^t_res([0-9]+)_([0-9]+)_branch([0-9]+)([a-z])_(.*)",
r"s\1.pathway1_res\2.branch\3.\4_\5",
],
# 'res_conv1_bn_' -> 's1.pathway0_stem.bn.'
[r"^t_res_conv1_bn_(.*)", r"s1.pathway1_stem.bn.\1"],
# 'conv1_w_momentum' -> 's1.pathway0_stem.conv.'
[r"^t_conv1_(.*)", r"s1.pathway1_stem.conv.\1"],
# 'res4_0_branch1_w' -> 'S4.pathway0_res0.branch1.weight'
[
r"^t_res([0-9]+)_([0-9]+)_branch([0-9]+)_(.*)",
r"s\1.pathway1_res\2.branch\3_\4",
],
# 'res_conv1_' -> 's1.pathway0_stem.conv.'
[r"^t_res_conv1_(.*)", r"s1.pathway1_stem.conv.\1"],
# ------------------------------------------------------------
# pred_ -> head.projection.
[r"pred_(.*)", r"head.projection.\1"],
# '.b_bn_fc' -> '.se.fc'
[r"(.*)b_bn_fc(.*)", r"\1se.fc\2"],
# conv_5 -> head.conv_5.
[r"conv_5(.*)", r"head.conv_5\1"],
# conv_5 -> head.conv_5.
[r"lin_5(.*)", r"head.lin_5\1"],
# '.bn_b' -> '.weight'
[r"(.*)bn.b\Z", r"\1bn.bias"],
# '.bn_s' -> '.weight'
[r"(.*)bn.s\Z", r"\1bn.weight"],
# '_bn_rm' -> '.running_mean'
[r"(.*)bn.rm\Z", r"\1bn.running_mean"],
# '_bn_riv' -> '.running_var'
[r"(.*)bn.riv\Z", r"\1bn.running_var"],
# '_b' -> '.bias'
[r"(.*)[\._]b\Z", r"\1.bias"],
# '_w' -> '.weight'
[r"(.*)[\._]w\Z", r"\1.weight"],
]
def convert_caffe2_name_to_pytorch(caffe2_layer_name):
"""
Convert the caffe2_layer_name to pytorch format by apply the list of
regular expressions.
Args:
caffe2_layer_name (str): caffe2 layer name.
Returns:
(str): pytorch layer name.
"""
for source, dest in pairs:
caffe2_layer_name = re.sub(source, dest, caffe2_layer_name)
return caffe2_layer_name
return convert_caffe2_name_to_pytorch
| 41.516667
| 80
| 0.483541
|
794adae9d3325381cb36e9c7ba403c9ad78226a0
| 4,017
|
py
|
Python
|
venv/Lib/site-packages/ldap3/extend/microsoft/dirSync.py
|
pileofscraps/wordcloud_backend
|
2b2feff5e58de2bbdb0393d78d703f21ee1cf3ba
|
[
"MIT"
] | 4
|
2021-01-31T20:30:40.000Z
|
2022-02-19T08:56:28.000Z
|
venv/Lib/site-packages/ldap3/extend/microsoft/dirSync.py
|
pileofscraps/wordcloud_backend
|
2b2feff5e58de2bbdb0393d78d703f21ee1cf3ba
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/ldap3/extend/microsoft/dirSync.py
|
pileofscraps/wordcloud_backend
|
2b2feff5e58de2bbdb0393d78d703f21ee1cf3ba
|
[
"MIT"
] | 6
|
2021-08-24T19:28:52.000Z
|
2022-02-20T18:21:34.000Z
|
"""
"""
# Created on 2015.10.21
#
# Author: Giovanni Cannata
#
# Copyright 2015 - 2018 Giovanni Cannata
#
# This file is part of ldap3.
#
# ldap3 is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ldap3 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with ldap3 in the COPYING and COPYING.LESSER files.
# If not, see <http://www.gnu.org/licenses/>.
from ...core.exceptions import LDAPExtensionError
from ...protocol.microsoft import dir_sync_control, extended_dn_control, show_deleted_control
from ... import SUBTREE, DEREF_NEVER
from ...utils.dn import safe_dn
class DirSync(object):
def __init__(self,
connection,
sync_base,
sync_filter,
attributes,
cookie,
object_security,
ancestors_first,
public_data_only,
incremental_values,
max_length,
hex_guid
):
self.connection = connection
if self.connection.check_names and sync_base:
self.base = safe_dn(sync_base)
else:
self.base = sync_base
self.filter = sync_filter
self.attributes = attributes
self.cookie = cookie
self.object_security = object_security
self.ancestors_first = ancestors_first
self.public_data_only = public_data_only
self.incremental_values = incremental_values
self.max_length = max_length
self.hex_guid = hex_guid
self.more_results = True
def loop(self):
result = self.connection.search(search_base=self.base,
search_filter=self.filter,
search_scope=SUBTREE,
attributes=self.attributes,
dereference_aliases=DEREF_NEVER,
controls=[dir_sync_control(criticality=True,
object_security=self.object_security,
ancestors_first=self.ancestors_first,
public_data_only=self.public_data_only,
incremental_values=self.incremental_values,
max_length=self.max_length, cookie=self.cookie),
extended_dn_control(criticality=False, hex_format=self.hex_guid),
show_deleted_control(criticality=False)]
)
if not self.connection.strategy.sync:
response, result = self.connection.get_response(result)
else:
response = self.connection.response
result = self.connection.result
if result['description'] == 'success' and 'controls' in result and '1.2.840.113556.1.4.841' in result['controls']:
self.more_results = result['controls']['1.2.840.113556.1.4.841']['value']['more_results']
self.cookie = result['controls']['1.2.840.113556.1.4.841']['value']['cookie']
return response
elif 'controls' in result:
raise LDAPExtensionError('Missing DirSync control in response from server')
else:
raise LDAPExtensionError('error %r in DirSync' % result)
| 43.663043
| 122
| 0.564849
|
794adbf916a38d41b39eefd1c1009715f429f0a1
| 3,522
|
py
|
Python
|
saleor/graphql/checkout/tests/deprecated/test_checkout_line_delete.py
|
SlashKing/saleor
|
bdd78044d542ef5650af7f5c0fd177001661c5b2
|
[
"CC-BY-4.0"
] | 1
|
2022-02-21T07:17:08.000Z
|
2022-02-21T07:17:08.000Z
|
saleor/graphql/checkout/tests/deprecated/test_checkout_line_delete.py
|
SlashKing/saleor
|
bdd78044d542ef5650af7f5c0fd177001661c5b2
|
[
"CC-BY-4.0"
] | 81
|
2021-10-11T04:26:07.000Z
|
2022-03-28T04:46:43.000Z
|
saleor/graphql/checkout/tests/deprecated/test_checkout_line_delete.py
|
SlashKing/saleor
|
bdd78044d542ef5650af7f5c0fd177001661c5b2
|
[
"CC-BY-4.0"
] | 1
|
2022-02-16T22:00:59.000Z
|
2022-02-16T22:00:59.000Z
|
from unittest import mock
import graphene
from .....checkout.error_codes import CheckoutErrorCode
from .....checkout.fetch import fetch_checkout_info, fetch_checkout_lines
from .....checkout.utils import calculate_checkout_quantity
from .....plugins.manager import get_plugins_manager
from ....tests.utils import get_graphql_content
from ...mutations import update_checkout_shipping_method_if_invalid
MUTATION_CHECKOUT_LINES_DELETE = """
mutation checkoutLineDelete($checkoutId: ID, $token: UUID, $lineId: ID!) {
checkoutLineDelete(checkoutId: $checkoutId, token: $token, lineId: $lineId) {
checkout {
token
lines {
quantity
variant {
id
}
}
}
errors {
field
message
code
}
}
}
"""
@mock.patch(
"saleor.graphql.checkout.mutations.update_checkout_shipping_method_if_invalid",
wraps=update_checkout_shipping_method_if_invalid,
)
def test_checkout_line_delete_by_id(
mocked_update_shipping_method, user_api_client, checkout_with_item
):
checkout = checkout_with_item
lines, _ = fetch_checkout_lines(checkout)
assert calculate_checkout_quantity(lines) == 3
assert checkout.lines.count() == 1
line = checkout.lines.first()
assert line.quantity == 3
checkout_id = graphene.Node.to_global_id("Checkout", checkout.pk)
line_id = graphene.Node.to_global_id("CheckoutLine", line.pk)
variables = {"checkoutId": checkout_id, "lineId": line_id}
response = user_api_client.post_graphql(MUTATION_CHECKOUT_LINES_DELETE, variables)
content = get_graphql_content(response)
data = content["data"]["checkoutLineDelete"]
assert not data["errors"]
checkout.refresh_from_db()
lines, _ = fetch_checkout_lines(checkout)
assert checkout.lines.count() == 0
assert calculate_checkout_quantity(lines) == 0
manager = get_plugins_manager()
checkout_info = fetch_checkout_info(checkout, lines, [], manager)
mocked_update_shipping_method.assert_called_once_with(checkout_info, lines)
def test_checkout_line_delete_neither_token_and_id_given(
user_api_client, checkout_with_item
):
checkout = checkout_with_item
line = checkout.lines.first()
line_id = graphene.Node.to_global_id("CheckoutLine", line.pk)
variables = {"lineId": line_id}
response = user_api_client.post_graphql(MUTATION_CHECKOUT_LINES_DELETE, variables)
content = get_graphql_content(response)
data = content["data"]["checkoutLineDelete"]
assert len(data["errors"]) == 1
assert not data["checkout"]
assert data["errors"][0]["code"] == CheckoutErrorCode.GRAPHQL_ERROR.name
def test_checkout_line_delete_both_token_and_id_given(
user_api_client, checkout_with_item
):
checkout = checkout_with_item
line = checkout.lines.first()
checkout_id = graphene.Node.to_global_id("Checkout", checkout.pk)
line_id = graphene.Node.to_global_id("CheckoutLine", line.pk)
variables = {"checkoutId": checkout_id, "token": checkout.token, "lineId": line_id}
response = user_api_client.post_graphql(MUTATION_CHECKOUT_LINES_DELETE, variables)
content = get_graphql_content(response)
data = content["data"]["checkoutLineDelete"]
assert len(data["errors"]) == 1
assert not data["checkout"]
assert data["errors"][0]["code"] == CheckoutErrorCode.GRAPHQL_ERROR.name
| 34.871287
| 87
| 0.703861
|
794adc05261b372410d8a043aa6918e595e6bfe2
| 631
|
py
|
Python
|
setup.py
|
frcl/gitdir
|
65d85ca01adf506374a75a6b56d7b985c0d7bd0b
|
[
"MIT"
] | null | null | null |
setup.py
|
frcl/gitdir
|
65d85ca01adf506374a75a6b56d7b985c0d7bd0b
|
[
"MIT"
] | null | null | null |
setup.py
|
frcl/gitdir
|
65d85ca01adf506374a75a6b56d7b985c0d7bd0b
|
[
"MIT"
] | 1
|
2021-04-03T13:41:58.000Z
|
2021-04-03T13:41:58.000Z
|
import setuptools
with open('README.md', 'r') as fh:
long_description = fh.read()
setuptools.setup(
name='gitdir',
version='1.2.4',
author='Siddharth Dushantha',
author_email='siddharth.dushantha@gmail.com',
description='Download a single directory/folder from a GitHub repo',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/sdushantha/gitdir',
packages=setuptools.find_packages(),
entry_points={
'console_scripts': [
'gitdir = gitdir.gitdir:main',
]
},
install_requires=['colorama~=0.4']
)
| 27.434783
| 72
| 0.671949
|
794adc5e5f593aae0f60d1cb70d8bff5a5df7877
| 3,092
|
py
|
Python
|
pontoon/base/urls.py
|
rhencke/pontoon
|
d530830acd4e03f3e29cae3273a5fede9f246499
|
[
"BSD-3-Clause"
] | null | null | null |
pontoon/base/urls.py
|
rhencke/pontoon
|
d530830acd4e03f3e29cae3273a5fede9f246499
|
[
"BSD-3-Clause"
] | null | null | null |
pontoon/base/urls.py
|
rhencke/pontoon
|
d530830acd4e03f3e29cae3273a5fede9f246499
|
[
"BSD-3-Clause"
] | null | null | null |
from django.conf.urls import url
from django.views.generic import RedirectView, TemplateView
import views
urlpatterns = [
# Home
url(r'^$', views.home, name='pontoon.home'),
# Terms
url(r'^terms/$',
TemplateView.as_view(template_name='terms.html'),
name='pontoon.terms'),
# TRANSLATE URLs
# Legacy: Translate project's page
url(r'^locale/(?P<locale>[A-Za-z0-9\-\@\.]+)/project/(?P<slug>.+)' +
'/page/(?P<page>.+)/$',
RedirectView.as_view(url="/%(locale)s/%(slug)s/%(page)s/", permanent=True)),
# Legacy: Translate project
url(r'^locale/(?P<locale>[A-Za-z0-9\-\@\.]+)/project/(?P<slug>.+)/$',
RedirectView.as_view(url="/%(locale)s/%(slug)s/", permanent=True)),
# AJAX: Get locale details
url(r'^teams/(?P<locale>[A-Za-z0-9\-\@\.]+)/projects/$',
views.locale_projects,
name='pontoon.locale.projects'),
# AJAX: Get locale stats used in All Resources part
url(r'^teams/(?P<locale>[A-Za-z0-9\-\@\.]+)/stats/$',
views.locale_stats,
name='pontoon.locale.stats'),
# AJAX: Get locale-project pages/paths with stats
url(r'^(?P<locale>[A-Za-z0-9\-\@\.]+)/(?P<slug>[\w-]+)/parts/$',
views.locale_project_parts,
name='pontoon.locale.project.parts'),
# AJAX: Get authors and time range data
url(r'^(?P<locale>[A-Za-z0-9\-\@\.]+)/(?P<slug>[\w-]+)/(?P<part>.+)/authors-and-time-range/$',
views.authors_and_time_range,
name='pontoon.authors.and.time.range'),
# Locale-agnostic links
url(r'^projects/(?P<slug>[\w-]+)/(?P<part>.+)/$',
views.translate_locale_agnostic,
name='pontoon.translate.locale.agnostic'),
# Translate project
url(r'^(?P<locale>[A-Za-z0-9\-\@\.]+)/(?P<slug>[\w-]+)/(?P<part>.+)/$',
views.translate,
name='pontoon.translate'),
# Download translation memory
url(r'^(?P<locale>[A-Za-z0-9\-\@\.]+)/(?P<slug>[\w-]+)/(?P<filename>.+)\.tmx$',
views.download_translation_memory,
name='pontoon.download_tmx'),
# AJAX
url(r'^get-entities/', views.entities,
name='pontoon.entities'),
url(r'^update/', views.update_translation,
name='pontoon.update'),
url(r'^perform-checks/', views.perform_checks,
name='pontoon.perform.checks'),
url(r'^get-history/', views.get_translation_history,
name='pontoon.get_history'),
url(r'^unapprove-translation/', views.unapprove_translation,
name='pontoon.unapprove_translation'),
url(r'^reject-translation/', views.reject_translation,
name='pontoon.reject_translation'),
url(r'^unreject-translation/', views.unreject_translation,
name='pontoon.unreject_translation'),
url(r'^other-locales/', views.get_translations_from_other_locales,
name='pontoon.other_locales'),
url(r'^download/', views.download,
name='pontoon.download'),
url(r'^upload/', views.upload,
name='pontoon.upload'),
url(r'^update-tour-status/', views.update_tour_status,
name='pontoon.update_tour_status'),
]
| 36.376471
| 98
| 0.611902
|
794adc63730a7a9f443e4fd343d4d02a17fe901b
| 1,159
|
py
|
Python
|
examples/cutensor/elementwise_binary.py
|
Dahlia-Chehata/cupy
|
1005f55075f89aa17e60074aaa6494ff8d033251
|
[
"MIT"
] | null | null | null |
examples/cutensor/elementwise_binary.py
|
Dahlia-Chehata/cupy
|
1005f55075f89aa17e60074aaa6494ff8d033251
|
[
"MIT"
] | null | null | null |
examples/cutensor/elementwise_binary.py
|
Dahlia-Chehata/cupy
|
1005f55075f89aa17e60074aaa6494ff8d033251
|
[
"MIT"
] | null | null | null |
#
# D_{x,y,z} = alpha * A_{z,y,x} + gamma * C_{x,y,z}
#
import numpy
import cupy
from cupy import cutensor
import cupyx.time
dtype = numpy.float32
mode_a = ('z', 'y', 'x')
mode_c = ('x', 'y', 'z')
extent = {'x': 400, 'y': 200, 'z': 300}
a = cupy.random.random([extent[i] for i in mode_a])
c = cupy.random.random([extent[i] for i in mode_c])
a = a.astype(dtype)
c = c.astype(dtype)
desc_a = cutensor.create_tensor_descriptor(a)
desc_c = cutensor.create_tensor_descriptor(c)
mode_a = cutensor.create_mode(*mode_a)
mode_c = cutensor.create_mode(*mode_c)
alpha = numpy.array(1.1, dtype)
gamma = numpy.array(1.3, dtype)
perf = cupyx.time.repeat(
cutensor.elementwise_binary,
(alpha, a, desc_a, mode_a, gamma, c, desc_c, mode_c),
n_warmup=1, n_repeat=5)
itemsize = numpy.dtype(dtype).itemsize
transfer_byte = a.size * itemsize
if alpha.item() != 0.0:
transfer_byte += a.size * itemsize
if gamma.item() != 0.0:
transfer_byte += c.size * itemsize
elapsed = perf.gpu_times.mean()
gbs = transfer_byte / elapsed / 1e9
print('dtype: {}'.format(numpy.dtype(dtype).name))
print(perf)
print('effective memory bandwidth (GB/s): {}'.format(gbs))
| 24.659574
| 58
| 0.679034
|
794adcfb959d52f6b697bbd2b7040e3254c6436c
| 820
|
py
|
Python
|
shop/shop_backend/api/migrations/0010_profile.py
|
CSchool/lksh-web-services
|
2558bd7a07f8cb501634e5eb1f37345a38d34f2e
|
[
"Apache-2.0"
] | null | null | null |
shop/shop_backend/api/migrations/0010_profile.py
|
CSchool/lksh-web-services
|
2558bd7a07f8cb501634e5eb1f37345a38d34f2e
|
[
"Apache-2.0"
] | null | null | null |
shop/shop_backend/api/migrations/0010_profile.py
|
CSchool/lksh-web-services
|
2558bd7a07f8cb501634e5eb1f37345a38d34f2e
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 3.2.6 on 2021-08-05 10:26
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('api', '0009_auto_20210805_1005'),
]
operations = [
migrations.CreateModel(
name='Profile',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('tokens', models.IntegerField(default=0, verbose_name='tokens')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='userprofile', to=settings.AUTH_USER_MODEL)),
],
),
]
| 32.8
| 149
| 0.65122
|
794addabf568faf10fc1c581e5bfbf36d9e7fc3c
| 23,857
|
py
|
Python
|
adbForTest/Utils/adbUtils.py
|
LiuTianen/PackManage
|
4b067954cc223baa14569a6f1517954b9cdb968f
|
[
"MIT"
] | null | null | null |
adbForTest/Utils/adbUtils.py
|
LiuTianen/PackManage
|
4b067954cc223baa14569a6f1517954b9cdb968f
|
[
"MIT"
] | null | null | null |
adbForTest/Utils/adbUtils.py
|
LiuTianen/PackManage
|
4b067954cc223baa14569a6f1517954b9cdb968f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#coding=utf-8
import os
import platform
import re
import time
import subprocess
class AdbTools(object):
def __init__(self, device_id=''):
self.__system = platform.system()
self.__find = ''
self.__command = ''
self.__device_id = device_id
self.__get_find()
self.__check_adb()
self.__connection_devices()
def __get_find(self):
"""
Determine the OS type: use findstr on Windows, grep otherwise.
:return:
"""
if self.__system is "Windows":
self.__find = "findstr"
else:
self.__find = "grep"
def __check_adb(self):
"""
Check adb availability.
Verify that the ANDROID_HOME environment variable is set.
:return:
"""
if "ANDROID_HOME" in os.environ:
if self.__system == "Windows":
path = os.path.join(os.environ["ANDROID_HOME"], "platform-tools", "adb.exe")
if os.path.exists(path):
self.__command = path
else:
raise EnvironmentError(
"Adb not found in $ANDROID_HOME path: %s." % os.environ["ANDROID_HOME"])
else:
path = os.path.join(os.environ["ANDROID_HOME"], "platform-tools", "adb")
if os.path.exists(path):
self.__command = path
else:
raise EnvironmentError(
"Adb not found in $ANDROID_HOME path: %s." % os.environ["ANDROID_HOME"])
else:
raise EnvironmentError(
"Adb not found in $ANDROID_HOME path: %s." % os.environ["ANDROID_HOME"])
def __connection_devices(self):
"""
Connect to the specified device; device_id can be omitted when only a single device is attached.
:return:
"""
if self.__device_id == "":
return
self.__device_id = "-s %s" % self.__device_id
def adb(self, args):
"""
Run an adb command.
:param args: arguments
:return:
"""
cmd = "%s %s %s" % (self.__command, self.__device_id, str(args))
return os.popen(cmd)
def shell(self, args):
"""
Run an adb shell command.
:param args: arguments
:return:
"""
cmd = "%s %s shell %s" % (self.__command, self.__device_id, str(args))
return os.popen(cmd)
def getOnlineDevices(self):
devices = str(self.adb("devices").read())
online = re.findall(r'(.*?)\s+device\s', devices)
return online
def get_current_application(self):
"""
Get information about the currently running application.
:return:
"""
return self.shell('dumpsys window w | %s \/ | %s name=' % (self.__find, self.__find)).read()
def get_current_package(self):
"""
Get the package name of the currently running app.
:return:
"""
reg = re.compile(r'name=(.+?)/')
return re.findall(reg, self.get_current_application())[0]
def get_current_activity(self):
"""
Get the currently running activity.
:return: package/activity
"""
reg = re.compile(r'name=(.+?)\)')
return re.findall(reg, self.get_current_application())[0]
def __get_process(self, package_name):
"""
Get process information.
:param package_name:
:return:
"""
if self.__system is "Windows":
pid_command = self.shell("ps | %s %s$" % (self.__find, package_name)).read()
else:
pid_command = self.shell("ps | %s -w %s" % (self.__find, package_name)).read()
return pid_command
def process_exists(self, package_name):
"""
Return whether the process exists.
:param package_name:
:return:
"""
process = self.__get_process(package_name)
return package_name in process
def get_pid(self, package_name):
"""
Get the pid.
:return:
"""
pid_command = self.__get_process(package_name)
if pid_command == '':
print("The process doesn't exist.")
return pid_command
req = re.compile(r"\d+")
result = str(pid_command).split()
result.remove(result[0])
return req.findall(" ".join(result))[0]
def get_uid(self, pid):
"""
Get the uid.
:param pid:
:return:
"""
result = self.shell("cat /proc/%s/status" % pid).readlines()
for i in result:
if 'uid' in i.lower():
return i.split()[1]
def get_flow_data_tcp(self, uid):
"""
Get the app's TCP traffic.
:return: (received, sent)
"""
tcp_rcv = self.shell("cat proc/uid_stat/%s/tcp_rcv" % uid).read().split()[0]
tcp_snd = self.shell("cat proc/uid_stat/%s/tcp_snd" % uid).read().split()[0]
return tcp_rcv, tcp_snd
def get_flow_data_all(self, uid):
"""
Get all of the app's traffic data.
Includes data for all of the app's processes (tcp, udp, etc.).
(rx_bytes, tx_bytes) >> (received, sent)
:param uid:
:return:list(dict)
"""
all_data = []
d = {}
data = self.shell("cat /proc/net/xt_qtaguid/stats | %s %s" % (self.__find, uid)).readlines()
for i in data:
if not i.startswith('\n'):
item = i.strip().split()
d['idx'] = item[0]
d['iface'] = item[1]
d['acct_tag_hex'] = item[2]
d['uid_tag_int'] = item[3]
d['cnt_set'] = item[4]
d['rx_bytes'] = item[5]
d['rx_packets'] = item[6]
d['tx_bytes'] = item[7]
d['tx_packets'] = item[8]
d['rx_tcp_bytes'] = item[9]
d['rx_tcp_packets'] = item[10]
d['rx_udp_bytes'] = item[11]
d['rx_udp_packets'] = item[12]
d['rx_other_bytes'] = item[13]
d['rx_other_packets'] = item[14]
d['tx_tcp_bytes'] = item[15]
d['tx_tcp_packets'] = item[16]
d['tx_udp_bytes'] = item[17]
d['tx_udp_packets'] = item[18]
d['tx_other_bytes'] = item[19]
d['tx_other_packets'] = item[20]
all_data.append(d)
d = {}
return all_data
def dump_apk_launch(self, path):
"""
Dump the apk file.
:param path: apk path
:return:
"""
# Check whether build-tools has been added to the PATH environment variable;
# the aapt command inside it is needed.
l = os.environ['PATH'].split(';')
build_tools = False
for i in l:
if 'build-tools' in i:
build_tools = True
if not build_tools:
raise EnvironmentError("ANDROID_HOME BUILD-TOOLS COMMAND NOT FOUND.\nPlease set the environment variable.")
cmd = ('aapt dump badging %s' % (path,)) + (' | %s launchable ' %(self.__find))
result = ""
p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE, shell=True)
(output, err) = p.communicate()
output = str(output, encoding='utf8')
if output != "":
result = output.split("'")[1]
return result
@staticmethod
def dump_apk_name(path):
"""
Dump the apk file.
:param path: apk path
:return:
"""
# Check whether build-tools has been added to the PATH environment variable;
# the aapt command inside it is needed.
l = os.environ['PATH'].split(';')
build_tools = False
for i in l:
if 'build-tools' in i:
build_tools = True
if not build_tools:
raise EnvironmentError("ANDROID_HOME BUILD-TOOLS COMMAND NOT FOUND.\nPlease set the environment variable.")
cmd = ('aapt dump badging %s' % (path,)) + " | findstr package:"
result = ""
p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE, shell=True)
(output, err) = p.communicate()
output = str(output, encoding='utf8')
if output != "":
result = output.split("'")[1]
return result
@staticmethod
def dump_xml(path, filename):
"""
Dump an apk xml file.
:return:
"""
return os.popen('aapt dump xmlstrings %s %s' % (path, filename))
def uiautomator_dump(self):
"""
Get the screen's uiautomator xml dump file.
:return:
"""
return self.shell('uiautomator dump').read().split()[-1]
def pull(self, source, target):
"""
Pull a file from the phone to the computer.
:return:
"""
self.adb('pull %s %s' % (source, target))
def push(self, source, target):
"""
Push a file from the computer to the phone.
:param source:
:param target:
:return:
"""
self.adb('push %s %s' % (source, target))
def remove(self, path):
"""
Delete a file from the phone.
:return:
"""
self.shell('rm %s' % (path,))
def clear_app_data(self, package):
"""
Clear the application's data.
:return:
"""
self.shell('pm clear %s' % (package,))
def install(self, path):
"""
Install an apk file.
:return:
"""
# List of common adb install errors
errors = {'INSTALL_FAILED_ALREADY_EXISTS': 'Application already exists',
'INSTALL_DEVICES_NOT_FOUND': 'Device not found',
'INSTALL_FAILED_DEVICE_OFFLINE': 'Device is offline',
'INSTALL_FAILED_INVALID_APK': 'Invalid APK',
'INSTALL_FAILED_INVALID_URI': 'Invalid URI',
'INSTALL_FAILED_INSUFFICIENT_STORAGE': 'Insufficient storage space',
'INSTALL_FAILED_DUPLICATE_PACKAGE': 'A package with the same name already exists',
'INSTALL_FAILED_NO_SHARED_USER': 'The requested shared user does not exist',
'INSTALL_FAILED_UPDATE_INCOMPATIBLE': 'Versions cannot coexist',
'INSTALL_FAILED_SHARED_USER_INCOMPATIBLE': 'The requested shared user has an incorrect signature',
'INSTALL_FAILED_MISSING_SHARED_LIBRARY': 'A required shared library is missing',
'INSTALL_FAILED_REPLACE_COULDNT_DELETE': 'A required shared library is invalid',
'INSTALL_FAILED_DEXOPT': 'dex optimization validation failed',
'INSTALL_FAILED_DEVICE_NOSPACE': 'APK copy failed: not enough storage space on the phone',
'INSTALL_FAILED_DEVICE_COPY_FAILED': 'File copy failed',
'INSTALL_FAILED_OLDER_SDK': 'System version is too old',
'INSTALL_FAILED_CONFLICTING_PROVIDER': 'A content provider with the same name already exists',
'INSTALL_FAILED_NEWER_SDK': 'System version is too new',
'INSTALL_FAILED_TEST_ONLY': 'Test-only app: the caller is not allowed to install it',
'INSTALL_FAILED_CPU_ABI_INCOMPATIBLE': 'Contains incompatible native code',
'CPU_ABIINSTALL_FAILED_MISSING_FEATURE': 'Uses an invalid feature',
'INSTALL_FAILED_CONTAINER_ERROR': 'SD card access failed',
'INSTALL_FAILED_INVALID_INSTALL_LOCATION': 'Invalid install location',
'INSTALL_FAILED_MEDIA_UNAVAILABLE': 'SD card does not exist',
'INSTALL_FAILED_INTERNAL_ERROR': 'Install failed due to a system problem',
'INSTALL_PARSE_FAILED_NO_CERTIFICATES': 'File is not certified >> enable "unknown sources" in settings',
'INSTALL_PARSE_FAILED_INCONSISTENT_CERTIFICATES': 'Inconsistent certificates >> uninstall the old version first',
'INSTALL_FAILED_INVALID_ZIP_FILE': 'Invalid zip file >> uninstall the old version first',
'INSTALL_CANCELED_BY_USER': 'Installation requires user confirmation',
'INSTALL_FAILED_VERIFICATION_FAILURE': 'Verification failed >> try rebooting the phone',
'DEFAULT': 'Unknown error'
}
print('Installing...')
l = self.adb('install -r %s' % (path,)).read()
if 'Success' in l:
print('Install Success')
if 'Failure' in l:
reg = re.compile('\\[(.+?)\\]')
key = re.findall(reg, l)[0]
try:
print('Install Failure >> %s' % errors[key])
except KeyError:
print('Install Failure >> %s' % key)
return l
def uninstall(self, package):
"""
        Uninstall an apk.
        :param package: package name
:return:
"""
print('Uninstalling...')
l = self.adb('uninstall %s' % (package,)).read()
print(l)
def screenshot(self, target_path=''):
"""
        Take a screenshot on the device.
        :param target_path: target path
:return:
"""
format_time = time.strftime('%Y%m%d%H%M%S')
self.shell('screencap -p /sdcard/%s.png' % (format_time,))
time.sleep(1)
if target_path == '':
self.pull('/sdcard/%s.png' % (format_time,), os.path.expanduser('~'))
else:
self.pull('/sdcard/%s.png' % (format_time,), target_path)
self.remove('/sdcard/%s.png' % (format_time,))
def get_cache_logcat(self):
"""
        Export the buffered logcat.
:return:
"""
return self.adb('logcat -v time -d')
    def get_logcat_tag(self, logTag):
        """
        Export logcat output filtered by the given tag.
        :return:
        """
        return self.adb('logcat %s' % (logTag,)).read().strip()
def get_crash_logcat(self):
"""
        Export crash logs.
:return:
"""
return self.adb('logcat -v time -d | %s AndroidRuntime' % (self.__find,))
def clear_cache_logcat(self):
"""
        Clear the logcat buffer.
:return:
"""
self.adb('logcat -c')
def get_top(self):
"""
        Get information about running app processes.
:return:
"""
return self.shell('top -m 10').read().strip()
def get_device_time(self):
"""
        Get the device time.
:return:
"""
return self.shell('date').read().strip()
def ls(self, command):
"""
        Shell ls command.
:return:
"""
return self.shell('ls %s' % (command,)).readlines()
def file_exists(self, target):
"""
        Check whether a file exists at the target path.
:return:
"""
l = self.ls(target)
for i in l:
if i.strip() == target:
return True
return False
def is_install(self, target_app):
"""
        Check whether the target app is installed on the device.
        :param target_app: target app package name
:return: bool
"""
return target_app in self.shell('pm list packages %s' % (target_app,)).read()
def get_device_model(self):
"""
        Get the device model.
:return:
"""
return self.shell('getprop ro.product.model').read().strip()
def get_device_id(self):
"""
        Get the device id.
:return:
"""
return self.adb('get-serialno').read().strip()
def get_device_android_version(self):
"""
        Get the device's Android version.
:return:
"""
return self.shell('getprop ro.build.version.release').read().strip()
def get_device_sdk_version(self):
"""
        Get the device's SDK version.
:return:
"""
return self.shell('getprop ro.build.version.sdk').read().strip()
def get_device_mac_address(self):
"""
        Get the device's MAC address.
:return:
"""
return self.shell('cat /sys/class/net/wlan0/address').read().strip()
def get_device_ip_address(self):
"""
        Get the device's IP address.
        note: works for both Wi-Fi and cellular data
:return:
"""
if not self.get_wifi_state() and not self.get_data_state():
return
l = self.shell('ip addr | %s global' % self.__find).read()
reg = re.compile('\d+\.\d+\.\d+\.\d+')
return re.findall(reg, l)[0]
def get_device_imei(self):
"""
        Get the device IMEI.
:return:
"""
sdk = self.get_device_sdk_version()
        # Method for Android below 5.0 (SDK < 21)
if int(sdk) < 21:
l = self.shell('dumpsys iphonesubinfo').read()
reg = re.compile('[0-9]{15}')
return re.findall(reg, l)[0]
elif self.root():
l = self.shell('service call iphonesubinfo 1').read()
print(l)
print(re.findall(re.compile("'.+?'"), l))
imei = ''
for i in re.findall(re.compile("'.+?'"), l):
imei += i.replace('.', '').replace("'", '').replace(' ', '')
return imei
else:
print('The device not root.')
return ''
def check_sim_card(self):
"""
        Check the device's SIM card.
:return:
"""
return len(self.shell('getprop | %s gsm.operator.alpha]' % self.__find).read().strip().split()[-1]) > 2
def get_device_operators(self):
"""
        Get the mobile network operator.
:return:
"""
return self.shell('getprop | %s gsm.operator.alpha]' % self.__find).read().strip().split()[-1]
def get_device_state(self):
"""
        Get the device state.
:return:
"""
return self.adb('get-state').read().strip()
def get_display_state(self):
"""
        Get the screen state.
        :return: screen on / screen off
"""
l = self.shell('dumpsys power').readlines()
for i in l:
if 'mScreenOn=' in i:
return i.split()[-1] == 'mScreenOn=true'
if 'Display Power' in i:
return 'ON' in i.split('=')[-1].upper()
def get_screen_normal_size(self):
"""
        Get the device screen resolution >> nominal.
:return:
"""
return self.shell('wm size').read().strip().split()[-1].split('x')
def get_screen_reality_size(self):
"""
        Get the device screen resolution >> actual resolution.
:return:
"""
x = 0
y = 0
l = self.shell(r'getevent -p | %s -e "0"' % self.__find).readlines()
for n in l:
if len(n.split()) > 0:
if n.split()[0] == '0035':
x = int(n.split()[7].split(',')[0])
elif n.split()[0] == '0036':
y = int(n.split()[7].split(',')[0])
return x, y
def get_device_interior_sdcard(self):
"""
        Get internal SD card space.
:return: (path,total,used,free,block)
"""
return self.shell('df | %s \/mnt\/shell\/emulated' % self.__find).read().strip().split()
def get_device_external_sdcard(self):
"""
        Get external SD card space.
:return: (path,total,used,free,block)
"""
return self.shell('df | %s \/storage' % self.__find).read().strip().split()
def __fill_rom(self, path, stream, count):
"""
        Fill data onto the device.
        :param path: destination path to fill
        :param stream: block size of the fill stream
        :param count: number of blocks to write
:return:
"""
self.shell('dd if=/dev/zero of=%s bs=%s count=%s' % (path, stream, count)).read().strip()
def fill_interior_sdcard(self, filename, size):
"""
        Fill the internal SD card.
        :param filename: file name
        :param size: fill size, in bytes
:return:
"""
if size > 10485760: # 10m
            self.__fill_rom('sdcard/%s' % filename, 10485760, size // 10485760)
else:
self.__fill_rom('sdcard/%s' % filename, size, 1)
def fill_external_sdcard(self, filename, size):
"""
        Fill the external SD card.
        :param filename: file name
        :param size: fill size, in bytes
:return:
"""
path = self.get_device_external_sdcard()[0]
if size > 10485760: # 10m
            self.__fill_rom('%s/%s' % (path, filename), 10485760, size // 10485760)
else:
self.__fill_rom('%s/%s' % (path, filename), size, 1)
def kill_process(self, pid):
"""
        Kill a process.
        note: usually requires elevated permissions; not recommended
:return:
"""
return self.shell('kill %s' % pid).read().strip()
def quit_app(self, package):
"""
        Force-stop an application.
:return:
"""
return self.shell('am force-stop %s' % package).read().strip()
def reboot(self):
"""
        Reboot the device.
:return:
"""
self.adb('reboot')
def recovery(self):
"""
        Reboot the device into recovery mode.
:return:
"""
self.adb('reboot recovery')
def fastboot(self):
"""
        Reboot the device into fastboot mode.
:return:
"""
self.adb('reboot bootloader')
def root(self):
"""
        Get root status.
:return:
"""
return 'not found' not in self.shell('su -c ls -l /data/').read().strip()
def wifi(self, power):
"""
        Turn Wi-Fi on or off.
        note: requires root permission
:return:
"""
if not self.root():
print('The device not root.')
return
if power:
self.shell('su -c svc wifi enable').read().strip()
else:
self.shell('su -c svc wifi disable').read().strip()
def data(self, power):
"""
        Turn cellular data on or off.
        note: requires root permission
:return:
"""
if not self.root():
print('The device not root.')
return
if power:
self.shell('su -c svc data enable').read().strip()
else:
self.shell('su -c svc data disable').read().strip()
def get_wifi_state(self):
"""
        Get the Wi-Fi connection state.
:return:
"""
return 'enabled' in self.shell('dumpsys wifi | %s ^Wi-Fi' % self.__find).read().strip()
def get_data_state(self):
"""
        Get the mobile data connection state.
:return:
"""
return '2' in self.shell('dumpsys telephony.registry | %s mDataConnectionState' % self.__find).read().strip()
def get_network_state(self):
"""
        Check whether the device is connected to the internet.
:return:
"""
return 'unknown host' not in self.shell('ping -w 1 www.baidu.com').read().strip()
def get_wifi_password_list(self):
"""
        Get the list of saved Wi-Fi passwords.
:return:
"""
if not self.root():
print('The device not root.')
return []
l = re.findall(re.compile('ssid=".+?"\s{3}psk=".+?"'), self.shell('su -c cat /data/misc/wifi/*.conf').read())
return [re.findall(re.compile('".+?"'), i) for i in l]
def call(self, number):
"""
        Make a phone call.
:param number:
:return:
"""
self.shell('am start -a android.intent.action.CALL -d tel:%s' % number)
def open_url(self, url):
"""
        Open a web page.
:return:
"""
self.shell('am start -a android.intent.action.VIEW -d %s' % url)
def start_application(self, component):
"""
        Launch an application.
e.g: com.android.settings/com.android.settings.Settings
"""
self.shell("am start -n %s" % component)
def send_keyevent(self, keycode):
"""
        Send a key event.
https://developer.android.com/reference/android/view/KeyEvent.html
:return:
"""
self.shell('input keyevent %s' % keycode)
def rotation_screen(self, param):
"""
        Rotate the screen.
        :param param: 0 >> portrait, auto-rotate disabled; 1 >> auto-rotate enabled
:return:
"""
self.shell('/system/bin/content insert --uri content://settings/system --bind '
'name:s:accelerometer_rotation --bind value:i:%s' % param)
def instrument(self, command):
"""
        Run an instrumented app.
        :param command: command
:return:
"""
return self.shell('am instrument %s' % command).read()
    def export_apk(self, package, target_path='', timeout=5000):
        """
        Export an installed application from the device.
        :param timeout: maximum number of polling attempts
        :param target_path: directory to store the exported apk
        :param package: package name
        :return:
        """
        if target_path == '':
            target_path = os.path.expanduser('~')
        self.adb('pull /data/app/%s-1/base.apk %s' % (package, target_path))
        num = 0
        while num <= timeout:
            num += 1
            if os.path.exists(os.path.join(target_path, 'base.apk')):
                os.rename(os.path.join(target_path, 'base.apk'),
                          os.path.join(target_path, '%s.apk' % package))
                break
if __name__ == '__main__':
print(AdbTools().getOnlineDevices())
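    # A minimal usage sketch; it assumes at least one device is attached
    # and uses only methods defined above in this class.
    tools = AdbTools()
    print(tools.get_device_model(), tools.get_device_android_version())
    tools.screenshot()  # pulled to the home directory by default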
| 29.636025
| 119
| 0.499476
|
794addb6a7b397814132af4b6a926938a9a2864e
| 10,349
|
py
|
Python
|
configs/common/Options.py
|
dspencer001/usu_gem5
|
ff3150a999cb141908e134304811714cfc2500e4
|
[
"BSD-3-Clause"
] | null | null | null |
configs/common/Options.py
|
dspencer001/usu_gem5
|
ff3150a999cb141908e134304811714cfc2500e4
|
[
"BSD-3-Clause"
] | null | null | null |
configs/common/Options.py
|
dspencer001/usu_gem5
|
ff3150a999cb141908e134304811714cfc2500e4
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2006-2008 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Lisa Hsu
import m5
from m5.defines import buildEnv
from m5.objects import *
from Benchmarks import *
def addCommonOptions(parser):
# system options
parser.add_option("--cpu-type", type="choice", default="atomic",
choices = ["atomic", "timing", "detailed", "inorder",
"arm_detailed"],
help = "type of cpu to run with")
parser.add_option("--checker", action="store_true");
parser.add_option("-n", "--num-cpus", type="int", default=1)
parser.add_option("--caches", action="store_true")
parser.add_option("--l2cache", action="store_true")
parser.add_option("--fastmem", action="store_true")
parser.add_option("--clock", action="store", type="string", default='2GHz')
parser.add_option("--num-dirs", type="int", default=1)
parser.add_option("--num-l2caches", type="int", default=1)
parser.add_option("--num-l3caches", type="int", default=1)
parser.add_option("--l1d_size", type="string", default="64kB")
parser.add_option("--l1i_size", type="string", default="32kB")
parser.add_option("--l2_size", type="string", default="2MB")
parser.add_option("--l3_size", type="string", default="16MB")
parser.add_option("--l1d_assoc", type="int", default=2)
parser.add_option("--l1i_assoc", type="int", default=2)
parser.add_option("--l2_assoc", type="int", default=8)
parser.add_option("--l3_assoc", type="int", default=16)
parser.add_option("--cacheline_size", type="int", default=64)
parser.add_option("--ruby", action="store_true")
parser.add_option("--smt", action="store_true", default=False,
help = """
Only used if multiple programs are specified. If true,
then the number of threads per cpu is same as the
number of programs.""")
# Run duration options
parser.add_option("-m", "--maxtick", type="int", default=m5.MaxTick,
metavar="T", help="Stop after T ticks")
parser.add_option("--maxtime", type="float")
parser.add_option("-I", "--maxinsts", action="store", type="int",
default=None, help="""Total number of instructions to
simulate (default: run forever)""")
parser.add_option("--work-item-id", action="store", type="int",
help="the specific work id for exit & checkpointing")
parser.add_option("--work-begin-cpu-id-exit", action="store", type="int",
help="exit when work starts on the specified cpu")
parser.add_option("--work-end-exit-count", action="store", type="int",
help="exit at specified work end count")
parser.add_option("--work-begin-exit-count", action="store", type="int",
help="exit at specified work begin count")
parser.add_option("--init-param", action="store", type="int", default=0,
help="""Parameter available in simulation with m5
initparam""")
# Checkpointing options
###Note that performing checkpointing via python script files will override
###checkpoint instructions built into binaries.
parser.add_option("--take-checkpoints", action="store", type="string",
help="<M,N> take checkpoints at tick M and every N ticks thereafter")
parser.add_option("--max-checkpoints", action="store", type="int",
help="the maximum number of checkpoints to drop", default=5)
parser.add_option("--checkpoint-dir", action="store", type="string",
help="Place all checkpoints in this absolute directory")
parser.add_option("-r", "--checkpoint-restore", action="store", type="int",
help="restore from checkpoint <N>")
parser.add_option("--checkpoint-at-end", action="store_true",
help="take a checkpoint at end of run")
parser.add_option("--work-begin-checkpoint-count", action="store", type="int",
help="checkpoint at specified work begin count")
parser.add_option("--work-end-checkpoint-count", action="store", type="int",
help="checkpoint at specified work end count")
parser.add_option("--work-cpus-checkpoint-count", action="store", type="int",
help="checkpoint and exit when active cpu count is reached")
parser.add_option("--restore-with-cpu", action="store", type="choice",
default="atomic", choices = ["atomic", "timing",
"detailed", "inorder"],
help = "cpu type for restoring from a checkpoint")
# CPU Switching - default switch model goes from a checkpoint
# to a timing simple CPU with caches to warm up, then to detailed CPU for
# data measurement
parser.add_option("--repeat-switch", action="store", type="int",
default=None,
help="switch back and forth between CPUs with period <N>")
parser.add_option("-s", "--standard-switch", action="store", type="int",
default=None,
help="switch from timing to Detailed CPU after warmup period of <N>")
parser.add_option("-p", "--prog-interval", type="int",
help="CPU Progress Interval")
# Fastforwarding and simpoint related materials
parser.add_option("-W", "--warmup-insts", action="store", type="int",
default=None,
help="Warmup period in total instructions (requires --standard-switch)")
parser.add_option("--bench", action="store", type="string", default=None,
help="base names for --take-checkpoint and --checkpoint-restore")
parser.add_option("-F", "--fast-forward", action="store", type="string",
default=None,
help="Number of instructions to fast forward before switching")
parser.add_option("-S", "--simpoint", action="store_true", default=False,
help="""Use workload simpoints as an instruction offset for
--checkpoint-restore or --take-checkpoint.""")
parser.add_option("--at-instruction", action="store_true", default=False,
help="""Treat value of --checkpoint-restore or --take-checkpoint as a
number of instructions.""")
def addSEOptions(parser):
# Benchmark options
parser.add_option("-c", "--cmd", default="",
help="The binary to run in syscall emulation mode.")
parser.add_option("-o", "--options", default="",
help="""The options to pass to the binary, use " "
around the entire string""")
parser.add_option("-i", "--input", default="",
help="Read stdin from a file.")
parser.add_option("--output", default="",
help="Redirect stdout to a file.")
parser.add_option("--errout", default="",
help="Redirect stderr to a file.")
def addFSOptions(parser):
# Simulation options
parser.add_option("--timesync", action="store_true",
help="Prevent simulated time from getting ahead of real time")
# System options
parser.add_option("--kernel", action="store", type="string")
parser.add_option("--script", action="store", type="string")
parser.add_option("--frame-capture", action="store_true",
help="Stores changed frame buffers from the VNC server to compressed "\
"files in the gem5 output directory")
if buildEnv['TARGET_ISA'] == "arm":
parser.add_option("--bare-metal", action="store_true",
help="Provide the raw system without the linux specific bits")
parser.add_option("--machine-type", action="store", type="choice",
choices=ArmMachineType.map.keys(), default="RealView_PBX")
# Benchmark options
parser.add_option("--dual", action="store_true",
help="Simulate two systems attached with an ethernet link")
parser.add_option("-b", "--benchmark", action="store", type="string",
dest="benchmark",
help="Specify the benchmark to run. Available benchmarks: %s"\
% DefinedBenchmarks)
# Metafile options
parser.add_option("--etherdump", action="store", type="string", dest="etherdump",
help="Specify the filename to dump a pcap capture of the" \
"ethernet traffic")
# Disk Image Options
parser.add_option("--disk-image", action="store", type="string", default=None,
help="Path to the disk image to use.")
# Memory Size Options
parser.add_option("--mem-size", action="store", type="string", default=None,
help="Specify the physical memory size (single memory)")
| 55.047872
| 85
| 0.638516
|
794ade2213d5045c673eb601fdac23fd2e6a8d14
| 300
|
py
|
Python
|
tests/asserts/attribute.py
|
FilippoBoido/py2puml
|
9f25ca7ea0bcb2f9e7b25751bcda680a0e8f7d90
|
[
"MIT"
] | 59
|
2020-06-04T11:32:10.000Z
|
2022-03-22T18:37:28.000Z
|
tests/asserts/attribute.py
|
mayshukla/py2puml
|
9f25ca7ea0bcb2f9e7b25751bcda680a0e8f7d90
|
[
"MIT"
] | 21
|
2020-05-14T14:31:23.000Z
|
2022-03-25T02:44:05.000Z
|
tests/asserts/attribute.py
|
mayshukla/py2puml
|
9f25ca7ea0bcb2f9e7b25751bcda680a0e8f7d90
|
[
"MIT"
] | 11
|
2021-01-07T04:11:47.000Z
|
2022-03-01T21:32:32.000Z
|
from py2puml.domain.umlclass import UmlAttribute
def assert_attribute(attribute: UmlAttribute, expected_name: str, expected_type: str, expected_staticity: bool):
assert attribute.name == expected_name
assert attribute.type == expected_type
assert attribute.static == expected_staticity
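# A minimal self-test sketch; the UmlAttribute constructor signature
# (name, type, static) is an assumption inferred from the asserts above.
def test_assert_attribute():
    attribute = UmlAttribute('speed', 'float', False)
    assert_attribute(attribute, 'speed', 'float', False)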
| 37.5
| 112
| 0.8
|
794adec29f9a6d57ea58fa35c13c4a673cd69d9c
| 3,408
|
py
|
Python
|
galsendev_demo/settings.py
|
PapiHack/galsendev-demo-docker
|
f3563fc7756dc14b88a3710d26f91937de94a2ea
|
[
"MIT"
] | 2
|
2021-06-27T19:35:26.000Z
|
2021-07-03T13:19:28.000Z
|
galsendev_demo/settings.py
|
PapiHack/galsendev-demo-docker
|
f3563fc7756dc14b88a3710d26f91937de94a2ea
|
[
"MIT"
] | null | null | null |
galsendev_demo/settings.py
|
PapiHack/galsendev-demo-docker
|
f3563fc7756dc14b88a3710d26f91937de94a2ea
|
[
"MIT"
] | null | null | null |
"""
Django settings for galsendev_demo project.
Generated by 'django-admin startproject' using Django 3.2.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-bihq%hc(s$)4&j8kltsyj&5etkj02mj&8m9n9p0pbtxvl2rkyn'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'galsendev_demo.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
BASE_DIR / 'templates'
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'galsendev_demo.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'galsendev_demo',
'USER': 'galsendev',
'PASSWORD': 'dev4life',
'HOST': 'db',
'PORT': 5432,
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'fr-fr'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
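# A minimal sketch (an addition, not generated by Django): in a Docker
# setup the database credentials above are usually read from environment
# variables instead of being hard-coded. The POSTGRES_* names mirror the
# official postgres image and are assumptions; the defaults keep the
# values above, so behavior is unchanged when the variables are absent.
import os
DATABASES['default'].update(
    USER=os.environ.get('POSTGRES_USER', 'galsendev'),
    PASSWORD=os.environ.get('POSTGRES_PASSWORD', 'dev4life'),
    HOST=os.environ.get('POSTGRES_HOST', 'db'),
)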
| 25.818182
| 91
| 0.692782
|
794adf08540e2d5096120f2a51c3122fa639a650
| 492
|
py
|
Python
|
plotly/validators/mesh3d/hoverlabel/font/_sizesrc.py
|
gnestor/plotly.py
|
a8ae062795ddbf9867b8578fe6d9e244948c15ff
|
[
"MIT"
] | 12
|
2020-04-18T18:10:22.000Z
|
2021-12-06T10:11:15.000Z
|
plotly/validators/mesh3d/hoverlabel/font/_sizesrc.py
|
Vesauza/plotly.py
|
e53e626d59495d440341751f60aeff73ff365c28
|
[
"MIT"
] | 27
|
2020-04-28T21:23:12.000Z
|
2021-06-25T15:36:38.000Z
|
plotly/validators/mesh3d/hoverlabel/font/_sizesrc.py
|
Vesauza/plotly.py
|
e53e626d59495d440341751f60aeff73ff365c28
|
[
"MIT"
] | 6
|
2020-04-18T23:07:08.000Z
|
2021-11-18T07:53:06.000Z
|
import _plotly_utils.basevalidators
class SizesrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self,
plotly_name='sizesrc',
parent_name='mesh3d.hoverlabel.font',
**kwargs
):
super(SizesrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop('edit_type', 'none'),
role=kwargs.pop('role', 'info'),
**kwargs
)
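# A minimal usage sketch: BaseValidator subclasses expose validate_coerce(),
# so this generated validator can be exercised directly. The column id below
# is a made-up example; src properties are plain string identifiers.
if __name__ == '__main__':
    validator = SizesrcValidator()
    print(validator.validate_coerce('my_grid_column_id'))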
| 25.894737
| 66
| 0.605691
|
794adf2f271a0eca3be9e230b094681670cdb629
| 3,891
|
py
|
Python
|
software/glasgow/platform/ice40.py
|
emilazy/Glasgow
|
4575ad07ccce76b0b92d29a76fc18a3700a68823
|
[
"Apache-2.0",
"0BSD"
] | null | null | null |
software/glasgow/platform/ice40.py
|
emilazy/Glasgow
|
4575ad07ccce76b0b92d29a76fc18a3700a68823
|
[
"Apache-2.0",
"0BSD"
] | null | null | null |
software/glasgow/platform/ice40.py
|
emilazy/Glasgow
|
4575ad07ccce76b0b92d29a76fc18a3700a68823
|
[
"Apache-2.0",
"0BSD"
] | null | null | null |
import asyncio
from nmigen import *
from nmigen.vendor.lattice_ice40 import *
from ..device.hardware import *
from ..gateware import GatewareBuildError
__all__ = ["GlasgowPlatformICE40"]
class GlasgowPlatformICE40(LatticeICE40Platform):
def toolchain_program(self, products, name):
bitstream = products.get("{}.bin".format(name))
loop = asyncio.get_event_loop()
loop.run_until_complete(GlasgowHardwareDevice().download_bitstream(bitstream))
def get_pll(self, pll, simple_feedback=True):
if not 10e6 <= pll.f_in <= 133e6:
pll.logger.error("PLL: f_in (%.3f MHz) must be between 10 and 133 MHz",
pll.f_in / 1e6)
raise GatewareBuildError("PLL f_in out of range")
if not 16e6 <= pll.f_out <= 275e6:
pll.logger.error("PLL: f_out (%.3f MHz) must be between 16 and 275 MHz",
pll.f_out / 1e6)
raise GatewareBuildError("PLL f_out out of range")
# The documentation in the iCE40 PLL Usage Guide incorrectly lists the
# maximum value of DIVF as 63, when it is only limited to 63 when using
        # feedback modes other than SIMPLE.
if simple_feedback:
divf_max = 128
else:
divf_max = 64
variants = []
for divr in range(0, 16):
f_pfd = pll.f_in / (divr + 1)
if not 10e6 <= f_pfd <= 133e6:
continue
for divf in range(0, divf_max):
if simple_feedback:
f_vco = f_pfd * (divf + 1)
if not 533e6 <= f_vco <= 1066e6:
continue
for divq in range(1, 7):
f_out = f_vco * (2 ** -divq)
variants.append((divr, divf, divq, f_pfd, f_out))
else:
for divq in range(1, 7):
f_vco = f_pfd * (divf + 1) * (2 ** divq)
if not 533e6 <= f_vco <= 1066e6:
continue
f_out = f_vco * (2 ** -divq)
variants.append((divr, divf, divq, f_pfd, f_out))
if not variants:
pll.logger.error("PLL: f_in (%.3f MHz) to f_out (%.3f) constraints not satisfiable",
pll.f_in / 1e6, pll.f_out / 1e6)
raise GatewareBuildError("PLL f_in/f_out out of range")
def f_out_diff(variant):
*_, f_out = variant
return abs(f_out - pll.f_out)
divr, divf, divq, f_pfd, f_out = min(variants, key=f_out_diff)
        # f_pfd is in Hz here, so the filter-range thresholds must be in Hz too.
        if f_pfd < 17e6:
            filter_range = 1
        elif f_pfd < 26e6:
            filter_range = 2
        elif f_pfd < 44e6:
            filter_range = 3
        elif f_pfd < 66e6:
            filter_range = 4
        elif f_pfd < 101e6:
            filter_range = 5
        else:
            filter_range = 6
if simple_feedback:
feedback_path = "SIMPLE"
else:
feedback_path = "NON_SIMPLE"
ppm = abs(pll.f_out - f_out) / pll.f_out * 1e6
pll.logger.debug("PLL: f_in=%.3f f_out(req)=%.3f f_out(act)=%.3f [MHz] ppm=%d",
pll.f_in / 1e6, pll.f_out / 1e6, f_out / 1e6, ppm)
pll.logger.trace("iCE40 PLL: feedback_path=%s divr=%d divf=%d divq=%d filter_range=%d",
feedback_path, divr, divf, divq, filter_range)
return Instance("SB_PLL40_CORE",
p_FEEDBACK_PATH=feedback_path,
p_PLLOUT_SELECT="GENCLK",
p_DIVR=divr,
p_DIVF=divf,
p_DIVQ=divq,
p_FILTER_RANGE=filter_range,
i_REFERENCECLK=ClockSignal(pll.idomain),
o_PLLOUTCORE=ClockSignal(pll.odomain),
i_RESETB=~ResetSignal(pll.idomain),
i_BYPASS=Const(0),
)
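# A standalone sketch (not part of Glasgow) of the same divisor search in
# simple-feedback mode, handy for checking achievable output frequencies
# without instantiating the platform:
def _pll_divisors(f_in, f_out):
    best = None
    for divr in range(16):
        f_pfd = f_in / (divr + 1)
        if not 10e6 <= f_pfd <= 133e6:
            continue
        for divf in range(128):
            f_vco = f_pfd * (divf + 1)
            if not 533e6 <= f_vco <= 1066e6:
                continue
            for divq in range(1, 7):
                f_act = f_vco / (2 ** divq)
                if best is None or abs(f_act - f_out) < abs(best[3] - f_out):
                    best = (divr, divf, divq, f_act)
    return best  # e.g. _pll_divisors(48e6, 30e6) == (0, 19, 5, 30000000.0)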
| 35.697248
| 96
| 0.52917
|
794ae01501c75f2cf1e708dd9e72e109f8078f69
| 6,998
|
py
|
Python
|
rev-sim-recommender-veto/VeTo.py
|
schatzopoulos/VeTo-workloads
|
1475c4d1638b9897c9e52c9192d3a6723bb1bdc4
|
[
"Apache-2.0"
] | null | null | null |
rev-sim-recommender-veto/VeTo.py
|
schatzopoulos/VeTo-workloads
|
1475c4d1638b9897c9e52c9192d3a6723bb1bdc4
|
[
"Apache-2.0"
] | null | null | null |
rev-sim-recommender-veto/VeTo.py
|
schatzopoulos/VeTo-workloads
|
1475c4d1638b9897c9e52c9192d3a6723bb1bdc4
|
[
"Apache-2.0"
] | null | null | null |
import sys
import csv
import os
# Define CSV dialect to be used.
csv.register_dialect(
'exp_dialect',
delimiter = '\t'
)
class VeTo:
def score(self, coeff, method, rrf_k, topk_thr, lines_to_read, sim_score):
if method == 'borda':
return coeff * lines_to_read
elif method == 'rrf':
return coeff * (1.0 / (rrf_k + (topk_thr - lines_to_read)))
elif method == 'sum':
return coeff * float(sim_score)
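        # Worked example (made-up numbers): with topk_thr=50, rrf_k=60,
        # lines_to_read=47, sim_score=0.8 and coeff=0.5, this yields
        #   borda: 0.5 * 47 = 23.5
        #   rrf:   0.5 * 1 / (60 + (50 - 47)) ~= 0.00794
        #   sum:   0.5 * 0.8 = 0.4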
def run(self, method, basic_path, kfold, topk_thr, alpha, beta, rrf_k):
experiment_path = basic_path+"experiments/"+str(kfold)+"-fold/"
suggestion_path = experiment_path+"suggestions/"
hin_wp_avg_precision = []
hin_wp_avg_recall = []
test_size_strict = 0
try:
# Run once for each pair of train/test sets.
for fold in range(0,kfold):
if fold == (kfold-1):
fold_path = experiment_path+"folds/fold"+str(fold)+"+/"
else:
fold_path = experiment_path+"folds/fold"+str(fold)+"/"
# Create a dictionary with all items in train set
train_set = dict()
with open(fold_path+"train.csv",'r') as train_file:
train_entries = csv.reader(train_file,dialect='exp_dialect')
for entry in train_entries:
train_set[entry[0]] = 1
train_file.close()
# Create a list with all items in the test set
test_set = []
with open(fold_path+"test.csv",'r') as test_file:
test_entries_csv = csv.reader(test_file,dialect='exp_dialect')
test_size = 0
for entry in test_entries_csv:
test_set.append(entry[0])
test_size += 1
test_file.close()
if fold == 0:
test_size_strict = test_size #to be used in case the size of last partition is larger
hin_wp_avg_precision = [0]*test_size_strict
hin_wp_avg_recall = [0]*test_size_strict
# Get suggestions based on HIN
hin_wp_sugg = dict()
for entry in train_set:
try:
with open(basic_path+"input/author_sim/HIN-APT/"+entry+".csv",'r') as auth_sim1_file:
sim1_authors = csv.reader(auth_sim1_file,dialect='exp_dialect')
lines_to_read = topk_thr
for auth in sim1_authors:
if auth[1] in train_set: #do not consider anyone in the training set
continue
lines_to_read -= 1
if lines_to_read == -1:
break
if auth[1] in hin_wp_sugg:
hin_wp_sugg[auth[1]] += self.score(alpha, method, rrf_k, topk_thr, lines_to_read, auth[2]) #pow(lines_to_read,3) #* float(auth[2]) #get borda-count score
else:
hin_wp_sugg[auth[1]] = self.score(alpha, method, rrf_k, topk_thr, lines_to_read, auth[2]) # #* float(auth[2]) #get borda-count score
auth_sim1_file.close()
except FileNotFoundError:
# print("NOT FOUND: " + basic_path+"input/author_sim/HIN-APT/"+entry+".csv")
pass
try:
with open(basic_path+"input/author_sim/HIN-APV/"+entry+".csv",'r') as auth_sim2_file:
sim2_authors = csv.reader(auth_sim2_file,dialect='exp_dialect')
lines_to_read = topk_thr
for auth in sim2_authors:
if auth[1] in train_set: #do not consider anyone in the training set
continue
lines_to_read -= 1
if lines_to_read == -1:
break
if auth[1] in hin_wp_sugg:
hin_wp_sugg[auth[1]] += self.score(beta, method, rrf_k, topk_thr, lines_to_read, auth[2]) #pow(lines_to_read,3) #* float(auth[2]) #get borda-count score
else:
hin_wp_sugg[auth[1]] = self.score(beta, method, rrf_k, topk_thr, lines_to_read, auth[2]) #pow(lines_to_read,3) #* float(auth[2]) #get borda-count score
auth_sim2_file.close()
except FileNotFoundError:
# print("NOT FOUND: " + basic_path+"input/author_sim/HIN-APV/"+entry+".csv")
pass
hin_wp_sugg_list = sorted(hin_wp_sugg,key=hin_wp_sugg.get, reverse=True) #sort suggestions based on borda count
hin_wp_sugg_list = hin_wp_sugg_list[0:test_size] #keep as many as in the test size
# Calculate top-k precision & recall for different k values
for k in range(1,test_size_strict):
#print("- Calculating precision & recall for fold #"+str(fold)+" at top-"+str(k)+":") #debug
#consider first k elements for each method
hin_wp_sugg_list_topk = hin_wp_sugg_list[0:k]
hin_wp_found = set(test_set).intersection(hin_wp_sugg_list_topk)
hin_wp_found_cnt = len(hin_wp_found)
hin_wp_precision = hin_wp_found_cnt/k
hin_wp_recall = hin_wp_found_cnt/test_size_strict
hin_wp_avg_precision[k] += hin_wp_precision
hin_wp_avg_recall[k] += hin_wp_recall
value = []
hin_wp_f1_measures = [0]*test_size_strict
for k in range(1,test_size_strict):
#
hin_wp_avg_precision[k] = hin_wp_avg_precision[k]/kfold
hin_wp_avg_recall[k] = hin_wp_avg_recall[k]/kfold
if (hin_wp_avg_precision[k]+hin_wp_avg_recall[k]) !=0:
hin_wp_f1_measures[k] = 2*hin_wp_avg_precision[k]*hin_wp_avg_recall[k]/(hin_wp_avg_precision[k]+hin_wp_avg_recall[k])
else:
hin_wp_f1_measures[k] = 0
# print([k,hin_wp_avg_precision[k]])
# print([k,hin_wp_avg_recall[k]])
# print([hin_wp_avg_recall[k],hin_wp_avg_precision[k]])
# print([k,hin_wp_f1_measures[k]])
value.append(hin_wp_f1_measures[k])
            return value
except IOError as e:
print("=> ERROR: Cannot open file...")
print(e)
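if __name__ == '__main__':
    # A minimal invocation sketch; the path and parameter values are made-up
    # examples, but the argument order matches run() above:
    # method, basic_path, kfold, topk_thr, alpha, beta, rrf_k
    f1_at_k = VeTo().run('borda', './data/', 5, 50, 0.5, 0.5, 60)
    if f1_at_k:
        best_k = f1_at_k.index(max(f1_at_k)) + 1
        print('best F1@k: %.4f at k=%d' % (max(f1_at_k), best_k))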
| 48.262069
| 191
| 0.508574
|
794ae06e34a11f05cd8176aae3b10696070f3e89
| 187,839
|
py
|
Python
|
locations/spiders/carrefour.py
|
cmecklenborg/alltheplaces
|
e62b59fb0071b6e289c4622d368fdb203a28347e
|
[
"MIT"
] | null | null | null |
locations/spiders/carrefour.py
|
cmecklenborg/alltheplaces
|
e62b59fb0071b6e289c4622d368fdb203a28347e
|
[
"MIT"
] | null | null | null |
locations/spiders/carrefour.py
|
cmecklenborg/alltheplaces
|
e62b59fb0071b6e289c4622d368fdb203a28347e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import scrapy
import re
CITIES = [
{
"zip": 3636,
"name": "POZO CERCADO (EL CHORRO (F), DPTO. RIVADAVIA (S))",
"state": "SALTA",
"lat": -23.4933,
"lon": -61.9267,
},
{
"zip": 4123,
"name": "LAS SALADAS",
"state": "SALTA",
"lat": -25.7833,
"lon": -64.5,
},
{
"zip": 4126,
"name": "EL BRETE",
"state": "SALTA",
"lat": -26.0667,
"lon": -65.3667,
},
{
"zip": 4141,
"name": "LA CIENEGUITA",
"state": "SALTA",
"lat": -26.4367,
"lon": -65.97,
},
{
"zip": 4190,
"name": "AGUAS CALIENTES",
"state": "SALTA",
"lat": -25.8,
"lon": -65.0333,
},
{
"zip": 4191,
"name": "SAN PEDRO DE LOS CORRALES",
"state": "SALTA",
"lat": -25.9689,
"lon": -64.6583,
},
{
"zip": 4193,
"name": "PUENTE DE PLATA",
"state": "SALTA",
"lat": -25.95,
"lon": -64.7167,
},
{
"zip": 4198,
"name": "CAMARA (ARENAL, DPTO. ROSARIO DE LA FRONTERA)",
"state": "SALTA",
"lat": -25.7833,
"lon": -65.0417,
},
{
"zip": 4400,
"name": "CHAMICAL",
"state": "SALTA",
"lat": -24.8056,
"lon": -65.3417,
},
{
"zip": 4401,
"name": "VAQUEROS",
"state": "SALTA",
"lat": -24.7167,
"lon": -65.4167,
},
{"zip": 4403, "name": "CEBADOS", "state": "SALTA", "lat": -25.5, "lon": -65.2917},
{
"zip": 4405,
"name": "EL PORVENIR",
"state": "SALTA",
"lat": -25.3833,
"lon": -64.6667,
},
{
"zip": 4407,
"name": "LA SILLETA",
"state": "SALTA",
"lat": -24.8667,
"lon": -65.5833,
},
{
"zip": 4409,
"name": "SAN BERNARDO DE LAS ZORRAS",
"state": "SALTA",
"lat": -24.5485,
"lon": -65.8803,
},
{
"zip": 4411,
"name": "NACIMIENTOS",
"state": "SALTA",
"lat": -24.1367,
"lon": -66.51,
},
{
"zip": 4413,
"name": "SALAR DE POCITOS",
"state": "SALTA",
"lat": -24.3333,
"lon": -67.0167,
},
{
"zip": 4415,
"name": "VILLITAS",
"state": "SALTA",
"lat": -24.9881,
"lon": -66.0048,
},
{
"zip": 4417,
"name": "CACHI ADENTRO",
"state": "SALTA",
"lat": -25.0833,
"lon": -66.2333,
},
{"zip": 4419, "name": "LA PAYA", "state": "SALTA", "lat": -25.15, "lon": -66.2333},
{
"zip": 4421,
"name": "EL CARRIL",
"state": "SALTA",
"lat": -25.0833,
"lon": -65.4667,
},
{
"zip": 4423,
"name": "EL MOYAR",
"state": "SALTA",
"lat": -25.0917,
"lon": -65.5458,
},
{
"zip": 4425,
"name": "CASTA�ARES",
"state": "SALTA",
"lat": -25.5426,
"lon": -65.4481,
},
{
"zip": 4427,
"name": "ANGASTACO",
"state": "SALTA",
"lat": -25.6333,
"lon": -66.1833,
},
{
"zip": 4430,
"name": "AGUA CALIENTE",
"state": "SALTA",
"lat": -24.6167,
"lon": -64.8833,
},
{
"zip": 4431,
"name": "TACA TACA (ESTACION FCGB)",
"state": "SALTA",
"lat": -24.8492,
"lon": -64.8348,
},
{
"zip": 4432,
"name": "GALLINATO",
"state": "SALTA",
"lat": -24.6708,
"lon": -65.1708,
},
{
"zip": 4434,
"name": "EL NARANJO (PASO DE LA CRUZ, DPTO. ANTA)",
"state": "SALTA",
"lat": -24.9229,
"lon": -64.7448,
},
{
"zip": 4440,
"name": "VERA CRUZ",
"state": "SALTA",
"lat": -25.3583,
"lon": -65.2292,
},
{
"zip": 4441,
"name": "METAN VIEJO",
"state": "SALTA",
"lat": -25.5333,
"lon": -64.9667,
},
{
"zip": 4444,
"name": "FINCA ROCCA",
"state": "SALTA",
"lat": -25.3833,
"lon": -64.6333,
},
{
"zip": 4446,
"name": "ALGARROBAL (CHA�AR MUYO, DPTO. ANTA)",
"state": "SALTA",
"lat": -25.2095,
"lon": -64.2929,
},
{
"zip": 4448,
"name": "SAUCE SOLO",
"state": "SALTA",
"lat": -25.0083,
"lon": -64.2,
},
{
"zip": 4449,
"name": "EL BORDO (APOLINARIO SARAVIA, DPTO. ANTA)",
"state": "SALTA",
"lat": -24.4718,
"lon": -64.0782,
},
{"zip": 4452, "name": "TALAVERA", "state": "SALTA", "lat": -25.4333, "lon": -63.8},
{"zip": 4530, "name": "PARANI", "state": "SALTA", "lat": -23.2333, "lon": -64.9},
{
"zip": 4531,
"name": "COLONIA SANTA ROSA",
"state": "SALTA",
"lat": -23.3667,
"lon": -64.5,
},
{"zip": 4533, "name": "ANGELICA", "state": "SALTA", "lat": -23.2667, "lon": -64.25},
{
"zip": 4534,
"name": "EL QUIMILAR",
"state": "SALTA",
"lat": -23.3167,
"lon": -63.95,
},
{
"zip": 4535,
"name": "ALTO VERDE",
"state": "SALTA",
"lat": -23.9533,
"lon": -63.12,
},
{
"zip": 4537,
"name": "JERONIMO MATORRAS (ESTACION FCGB)",
"state": "SALTA",
"lat": -23.8,
"lon": -64.0792,
},
{
"zip": 4538,
"name": "FINCA LA TOMA",
"state": "SALTA",
"lat": -23.4333,
"lon": -64.35,
},
{"zip": 4542, "name": "EL CULCO", "state": "SALTA", "lat": -23.55, "lon": -64.4167},
{
"zip": 4550,
"name": "COLONIA OTOMANA",
"state": "SALTA",
"lat": -23.3611,
"lon": -63.8111,
},
{
"zip": 4552,
"name": "GENERAL BALLIVIAN",
"state": "SALTA",
"lat": -22.9333,
"lon": -63.8667,
},
{
"zip": 4554,
"name": "MONTE CARMELO",
"state": "SALTA",
"lat": -23.4143,
"lon": -63.2167,
},
{
"zip": 4560,
"name": "VILLA SAAVEDRA",
"state": "SALTA",
"lat": -22.3655,
"lon": -63.4976,
},
{
"zip": 4561,
"name": "AMBERES",
"state": "SALTA",
"lat": -22.5167,
"lon": -62.5333,
},
{
"zip": 4562,
"name": "GENERAL ENRIQUE MOSCONI",
"state": "SALTA",
"lat": -22.6,
"lon": -63.8167,
},
{
"zip": 4563,
"name": "CAMPAMENTO VESPUCIO",
"state": "SALTA",
"lat": -22.3655,
"lon": -63.4976,
},
{
"zip": 4564,
"name": "PIQUIRENDA",
"state": "SALTA",
"lat": -22.3333,
"lon": -63.7833,
},
{
"zip": 4566,
"name": "CAMPO DURAN",
"state": "SALTA",
"lat": -22.2333,
"lon": -63.7,
},
{"zip": 4568, "name": "ACAMBUCO", "state": "SALTA", "lat": -22.1833, "lon": -63.95},
{
"zip": 4633,
"name": "SAN ANTONIO DE IRUYA",
"state": "SALTA",
"lat": -22.425,
"lon": -65.5583,
},
{"zip": 4644, "name": "BACOYA", "state": "SALTA", "lat": -22.3818, "lon": -65.5788},
{
"zip": 4650,
"name": "NUEVO PORVENIR",
"state": "SALTA",
"lat": -22.1,
"lon": -65.7333,
},
{
"zip": 4651,
"name": "SAN FRANCISCO",
"state": "SALTA",
"lat": -22.2139,
"lon": -65.2528,
},
{
"zip": 1601,
"name": "ISLA MARTIN GARCIA",
"state": "BUENOS AIRES",
"lat": -34.5167,
"lon": -58.5389,
},
{
"zip": 1602,
"name": "FLORIDA",
"state": "BUENOS AIRES",
"lat": -34.5167,
"lon": -58.5,
},
{
"zip": 1605,
"name": "MUNRO",
"state": "BUENOS AIRES",
"lat": -34.5333,
"lon": -58.55,
},
{
"zip": 1607,
"name": "BARRIO OBRERO FERROVIARIO",
"state": "BUENOS AIRES",
"lat": -34.5167,
"lon": -58.5389,
},
{
"zip": 1609,
"name": "BOULOGNE ESTAFETA No.1",
"state": "BUENOS AIRES",
"lat": -34.5,
"lon": -58.5667,
},
{
"zip": 1611,
"name": "SOLANA DEL MONTE",
"state": "BUENOS AIRES",
"lat": -34.5,
"lon": -58.6333,
},
{
"zip": 1612,
"name": "KILOMETRO 30",
"state": "BUENOS AIRES",
"lat": -34.4696,
"lon": -58.6713,
},
{
"zip": 1613,
"name": "LOS POLVORINES",
"state": "BUENOS AIRES",
"lat": -34.5,
"lon": -58.6833,
},
{
"zip": 1615,
"name": "GRAND BOURG",
"state": "BUENOS AIRES",
"lat": -34.4833,
"lon": -58.7167,
},
{
"zip": 1617,
"name": "BARRIO EL ZORZAL",
"state": "BUENOS AIRES",
"lat": -34.4602,
"lon": -58.6345,
},
{
"zip": 1619,
"name": "GARIN",
"state": "BUENOS AIRES",
"lat": -34.4233,
"lon": -58.7619,
},
{
"zip": 1621,
"name": "LOS SANTOS VIEJOS",
"state": "BUENOS AIRES",
"lat": -34.4094,
"lon": -58.7094,
},
{
"zip": 1623,
"name": "BARRIO GARIN NORTE",
"state": "BUENOS AIRES",
"lat": -34.3676,
"lon": -58.7325,
},
{
"zip": 1625,
"name": "BELEN DE ESCOBAR",
"state": "BUENOS AIRES",
"lat": -34.3467,
"lon": -58.8186,
},
{
"zip": 1627,
"name": "MATHEU",
"state": "BUENOS AIRES",
"lat": -34.3831,
"lon": -58.8489,
},
{
"zip": 1629,
"name": "PILAR",
"state": "BUENOS AIRES",
"lat": -34.4836,
"lon": -58.9319,
},
{
"zip": 1633,
"name": "MANZONE",
"state": "BUENOS AIRES",
"lat": -34.4833,
"lon": -58.8667,
},
{
"zip": 1635,
"name": "KILOMETRO 45 (APEADERO FCGU., PTE. DERQUI, PDO. PILAR)",
"state": "BUENOS AIRES",
"lat": -34.4972,
"lon": -58.8636,
},
{
"zip": 1636,
"name": "LA LUCILA",
"state": "BUENOS AIRES",
"lat": -34.5,
"lon": -58.4833,
},
{
"zip": 1640,
"name": "MARTINEZ ESTAFETA No.5",
"state": "BUENOS AIRES",
"lat": -34.4833,
"lon": -58.5083,
},
{
"zip": 1642,
"name": "SAN ISIDRO",
"state": "BUENOS AIRES",
"lat": -34.4708,
"lon": -58.5286,
},
{
"zip": 1643,
"name": "BECCAR",
"state": "BUENOS AIRES",
"lat": -34.4667,
"lon": -58.5333,
},
{
"zip": 1644,
"name": "DOCTOR ALBERT SCHWEITZER (PARADA FCGM)",
"state": "BUENOS AIRES",
"lat": -34.3553,
"lon": -58.5427,
},
{
"zip": 1646,
"name": "SAN FERNANDO",
"state": "BUENOS AIRES",
"lat": -34.4442,
"lon": -58.5775,
},
{
"zip": 1647,
"name": "RIO PARANA GUAZU",
"state": "BUENOS AIRES",
"lat": -33.7833,
"lon": -58.6,
},
{
"zip": 1648,
"name": "NUEVO PUERTO TIGRE",
"state": "BUENOS AIRES",
"lat": -34.3553,
"lon": -58.5427,
},
{
"zip": 1649,
"name": "ARROYO ESPERA GRANDE",
"state": "BUENOS AIRES",
"lat": -34.3553,
"lon": -58.5427,
},
{
"zip": 1650,
"name": "SAN MARTIN (PDO. GRAL. SAN MARTIN)",
"state": "BUENOS AIRES",
"lat": -34.5417,
"lon": -58.5583,
},
{
"zip": 1651,
"name": "SAN ANDRES",
"state": "BUENOS AIRES",
"lat": -34.55,
"lon": -58.5333,
},
{
"zip": 1655,
"name": "JOSE LEON SUAREZ",
"state": "BUENOS AIRES",
"lat": -34.5333,
"lon": -58.5833,
},
{
"zip": 1657,
"name": "BARRIO VILLA MARIA DE LOS REMEDIOS DE ESCALADA",
"state": "BUENOS AIRES",
"lat": -34.5417,
"lon": -58.5583,
},
{
"zip": 1659,
"name": "BARRIO DE SUBOFICIALES SARGENTO CABRAL (APEADERO F.C.G.U.)",
"state": "BUENOS AIRES",
"lat": -34.5417,
"lon": -58.5583,
},
{
"zip": 1661,
"name": "BARRIO JORGE NEWBERY (BELLA VISTA, PDO. GRAL. SARMIENTO)",
"state": "BUENOS AIRES",
"lat": -34.5096,
"lon": -58.7618,
},
{
"zip": 1663,
"name": "SAN MIGUEL ESTAFETA N�6",
"state": "BUENOS AIRES",
"lat": -34.5326,
"lon": -58.753,
},
{
"zip": 1664,
"name": "BARRIO VILLA MANUELITA",
"state": "BUENOS AIRES",
"lat": -34.5096,
"lon": -58.7618,
},
{
"zip": 1665,
"name": "BARRIO SAN LUIS",
"state": "BUENOS AIRES",
"lat": -34.5,
"lon": -58.75,
},
{
"zip": 1667,
"name": "RUTA 8 KILOMETRO 37,500 AL 41",
"state": "BUENOS AIRES",
"lat": -34.5096,
"lon": -58.7618,
},
{
"zip": 1669,
"name": "LA LOMA (DEL VISO, PDO.GRAL.SARMIENTO)",
"state": "BUENOS AIRES",
"lat": -34.45,
"lon": -58.8,
},
{
"zip": 1672,
"name": "CORONEL FRANCISCO LINCH",
"state": "BUENOS AIRES",
"lat": -34.6,
"lon": -58.5333,
},
{
"zip": 1674,
"name": "SAENZ PE�A",
"state": "BUENOS AIRES",
"lat": -34.6,
"lon": -58.5333,
},
{
"zip": 1676,
"name": "ALIANZA TALLERES",
"state": "BUENOS AIRES",
"lat": -34.6,
"lon": -58.5333,
},
{
"zip": 1678,
"name": "CASEROS SUCURSAL No.1",
"state": "BUENOS AIRES",
"lat": -34.6,
"lon": -58.5333,
},
{
"zip": 1682,
"name": "MARTIN CORONADO",
"state": "BUENOS AIRES",
"lat": -34.5708,
"lon": -58.6243,
},
{
"zip": 1684,
"name": "EL PALOMAR ESTAFETA No.1",
"state": "BUENOS AIRES",
"lat": -34.5417,
"lon": -58.6153,
},
{
"zip": 1686,
"name": "BARRIO PARQUE QUIRNO",
"state": "BUENOS AIRES",
"lat": -34.6,
"lon": -58.6333,
},
{
"zip": 1688,
"name": "SANTOS TESEI",
"state": "BUENOS AIRES",
"lat": -34.5708,
"lon": -58.6243,
},
{
"zip": 1702,
"name": "CIUDADELA ESTAFETA No.1",
"state": "BUENOS AIRES",
"lat": -34.6333,
"lon": -58.5333,
},
{
"zip": 1704,
"name": "RAMOS MEJIA",
"state": "BUENOS AIRES",
"lat": -34.6333,
"lon": -58.5667,
},
{
"zip": 1706,
"name": "HAEDO",
"state": "BUENOS AIRES",
"lat": -34.6333,
"lon": -58.6,
},
{
"zip": 1708,
"name": "MORON",
"state": "BUENOS AIRES",
"lat": -34.6425,
"lon": -58.6181,
},
{
"zip": 1712,
"name": "CASTELAR",
"state": "BUENOS AIRES",
"lat": -34.6667,
"lon": -58.6667,
},
{
"zip": 1713,
"name": "PARQUE LELOIR",
"state": "BUENOS AIRES",
"lat": -34.675,
"lon": -58.6792,
},
{
"zip": 1714,
"name": "BARRIO VILLA ALBERDI",
"state": "BUENOS AIRES",
"lat": -34.6667,
"lon": -58.6667,
},
{
"zip": 1716,
"name": "LIBERTAD",
"state": "BUENOS AIRES",
"lat": -34.7,
"lon": -58.6833,
},
{
"zip": 1718,
"name": "SAN ANTONIO DE PADUA",
"state": "BUENOS AIRES",
"lat": -34.6667,
"lon": -58.7,
},
{
"zip": 1722,
"name": "BARRIO PARQUE SAN MARTIN",
"state": "BUENOS AIRES",
"lat": -34.67,
"lon": -58.7528,
},
{
"zip": 1723,
"name": "MARIANO ACOSTA",
"state": "BUENOS AIRES",
"lat": -34.7261,
"lon": -58.7908,
},
{
"zip": 1727,
"name": "COLONIA HOGAR RICARDO GUTIERREZ",
"state": "BUENOS AIRES",
"lat": -34.85,
"lon": -58.85,
},
{
"zip": 1733,
"name": "PLOMER",
"state": "BUENOS AIRES",
"lat": -34.8,
"lon": -59.0333,
},
{
"zip": 1735,
"name": "EL DURAZNO",
"state": "BUENOS AIRES",
"lat": -34.8465,
"lon": -59.0015,
},
{
"zip": 1737,
"name": "LA CHOZA",
"state": "BUENOS AIRES",
"lat": -34.7833,
"lon": -59.1167,
},
{
"zip": 1739,
"name": "HORNOS",
"state": "BUENOS AIRES",
"lat": -34.9014,
"lon": -58.9281,
},
{
"zip": 1741,
"name": "LOZANO",
"state": "BUENOS AIRES",
"lat": -34.85,
"lon": -59.05,
},
{
"zip": 1742,
"name": "PASO DEL REY ESTAFETA N�1",
"state": "BUENOS AIRES",
"lat": -34.65,
"lon": -58.7667,
},
{
"zip": 1744,
"name": "LOMAS DE SAN JOSE",
"state": "BUENOS AIRES",
"lat": -34.5483,
"lon": -58.8646,
},
{
"zip": 1746,
"name": "BARRIO LA TRADICION",
"state": "BUENOS AIRES",
"lat": -34.6333,
"lon": -58.8667,
},
{
"zip": 1748,
"name": "EL GRANADERO",
"state": "BUENOS AIRES",
"lat": -34.6181,
"lon": -58.9564,
},
{
"zip": 1752,
"name": "LOMAS DEL MIRADOR",
"state": "BUENOS AIRES",
"lat": -34.65,
"lon": -58.5333,
},
{
"zip": 1754,
"name": "SAN JUSTO",
"state": "BUENOS AIRES",
"lat": -34.6831,
"lon": -58.5519,
},
{
"zip": 1755,
"name": "RAFAEL CASTILLO ESTAFETA N�2",
"state": "BUENOS AIRES",
"lat": -34.7167,
"lon": -58.6167,
},
{
"zip": 1757,
"name": "BARRIO JORGE NEWBERY (LAFERRERE, PDO. LA MATANZA)",
"state": "BUENOS AIRES",
"lat": -34.75,
"lon": -58.5833,
},
{
"zip": 1759,
"name": "BARRIO NAHUEL",
"state": "BUENOS AIRES",
"lat": -34.7675,
"lon": -58.6428,
},
{
"zip": 1761,
"name": "BARRIO EL SOL (PONTEVEDRA, PDO. MERLO",
"state": "BUENOS AIRES",
"lat": -34.7517,
"lon": -58.7117,
},
{
"zip": 1763,
"name": "BARRIO SAN IGNACIO (VIRREY DEL PINO, PDO. LA MATANZA)",
"state": "BUENOS AIRES",
"lat": -34.7172,
"lon": -58.6094,
},
{
"zip": 1765,
"name": "ISIDRO CASANOVA ESTAFETA No.6",
"state": "BUENOS AIRES",
"lat": -34.7,
"lon": -58.5833,
},
{
"zip": 1766,
"name": "BARRIO ALMAFUERTE (TABLADA, PDO. LA MATANZA)",
"state": "BUENOS AIRES",
"lat": -34.7,
"lon": -58.5333,
},
{
"zip": 1770,
"name": "TAPIALES",
"state": "BUENOS AIRES",
"lat": -34.7,
"lon": -58.5167,
},
{
"zip": 1772,
"name": "BARRIO VICENTE LOPEZ Y PLANES",
"state": "BUENOS AIRES",
"lat": -34.7083,
"lon": -58.5083,
},
{
"zip": 1773,
"name": "INGENIERO BUDGE",
"state": "BUENOS AIRES",
"lat": -34.7167,
"lon": -58.4667,
},
{
"zip": 1776,
"name": "9 DE ABRIL",
"state": "BUENOS AIRES",
"lat": -34.7083,
"lon": -58.5083,
},
{
"zip": 1778,
"name": "JUSTO VILLEGAS (PARADA FCGB.)",
"state": "BUENOS AIRES",
"lat": -34.7167,
"lon": -58.5333,
},
{
"zip": 1802,
"name": "COLONIA LOS CHIVATOS",
"state": "BUENOS AIRES",
"lat": -34.8133,
"lon": -58.4753,
},
{
"zip": 1804,
"name": "EZEIZA",
"state": "BUENOS AIRES",
"lat": -34.8381,
"lon": -58.5172,
},
{
"zip": 1806,
"name": "BARRIO LUJAN",
"state": "BUENOS AIRES",
"lat": -34.8428,
"lon": -58.5044,
},
{
"zip": 1807,
"name": "BARRIO STELLA MARIS",
"state": "BUENOS AIRES",
"lat": -34.8428,
"lon": -58.5044,
},
{
"zip": 1808,
"name": "ALEJANDRO PETION (APEADERO FCGR)",
"state": "BUENOS AIRES",
"lat": -34.8428,
"lon": -58.5044,
},
{
"zip": 1812,
"name": "CARLOS SPEGAZZINI",
"state": "BUENOS AIRES",
"lat": -34.9333,
"lon": -58.6167,
},
{
"zip": 1814,
"name": "LA NORIA",
"state": "BUENOS AIRES",
"lat": -35.1667,
"lon": -58.8,
},
{
"zip": 1815,
"name": "KILOMETRO 88 (APEADERO FCGR.,URIBELARREA, PDO.LOBOS)",
"state": "BUENOS AIRES",
"lat": -35.049,
"lon": -58.7221,
},
{
"zip": 1816,
"name": "LOS AROMOS",
"state": "BUENOS AIRES",
"lat": -35.049,
"lon": -58.7221,
},
{
"zip": 1822,
"name": "BARRIO MARIANO MORENO (VALENTIN ALSINA, PDO. LANUS)",
"state": "BUENOS AIRES",
"lat": -34.7,
"lon": -58.4417,
},
{
"zip": 1824,
"name": "LANUS ESTAFETA No.9",
"state": "BUENOS AIRES",
"lat": -34.7153,
"lon": -58.4078,
},
{
"zip": 1825,
"name": "A. A. FERNANDEZ",
"state": "BUENOS AIRES",
"lat": -34.7167,
"lon": -58.3333,
},
{
"zip": 1826,
"name": "REMEDIOS DE ESCALADA ESTAFETA N�1",
"state": "BUENOS AIRES",
"lat": -34.7333,
"lon": -58.3833,
},
{
"zip": 1828,
"name": "BARRIO VILLA MAIPU",
"state": "BUENOS AIRES",
"lat": -34.75,
"lon": -58.4,
},
{
"zip": 1832,
"name": "LOMAS DE ZAMORA",
"state": "BUENOS AIRES",
"lat": -34.7667,
"lon": -58.4,
},
{
"zip": 1834,
"name": "PASCO (APEADERO FCGB)",
"state": "BUENOS AIRES",
"lat": -34.7833,
"lon": -58.4,
},
{
"zip": 1836,
"name": "SANTA CATALINA (ESTACION FCDFS, LLAVALLOL, PDO. L.DE ZAMORA)",
"state": "BUENOS AIRES",
"lat": -34.7833,
"lon": -58.4333,
},
{
"zip": 1838,
"name": "LUIS GUILLON",
"state": "BUENOS AIRES",
"lat": -34.8,
"lon": -58.45,
},
{
"zip": 1842,
"name": "BARRIO VILLA MARIA",
"state": "BUENOS AIRES",
"lat": -34.815,
"lon": -58.4792,
},
{
"zip": 1846,
"name": "JOSE MARMOL",
"state": "BUENOS AIRES",
"lat": -34.7833,
"lon": -58.3833,
},
{
"zip": 1847,
"name": "BARRIO ZABALA",
"state": "BUENOS AIRES",
"lat": -34.8,
"lon": -58.3667,
},
{
"zip": 1848,
"name": "LAS MALVINAS (PARADA FCDFS.)",
"state": "BUENOS AIRES",
"lat": -34.795,
"lon": -58.3861,
},
{
"zip": 1849,
"name": "BARRIO LAS TUNAS",
"state": "BUENOS AIRES",
"lat": -34.8,
"lon": -58.3333,
},
{
"zip": 1852,
"name": "BARRIO DO�A SOL",
"state": "BUENOS AIRES",
"lat": -34.8333,
"lon": -58.3833,
},
{
"zip": 1854,
"name": "LONGCHAMPS",
"state": "BUENOS AIRES",
"lat": -34.8517,
"lon": -58.3964,
},
{
"zip": 1856,
"name": "GLEW",
"state": "BUENOS AIRES",
"lat": -34.8881,
"lon": -58.3842,
},
{
"zip": 1858,
"name": "LA BICHA",
"state": "BUENOS AIRES",
"lat": -34.8516,
"lon": -58.3868,
},
{
"zip": 1862,
"name": "BARRIO SANTA MAGDALENA",
"state": "BUENOS AIRES",
"lat": -34.9172,
"lon": -58.3869,
},
{
"zip": 1864,
"name": "ALEJANDRO KORN",
"state": "BUENOS AIRES",
"lat": -34.9828,
"lon": -58.3825,
},
{
"zip": 1865,
"name": "SAN VICENTE",
"state": "BUENOS AIRES",
"lat": -34.9667,
"lon": -58.3667,
},
{
"zip": 1870,
"name": "BULLRICH",
"state": "BUENOS AIRES",
"lat": -34.6506,
"lon": -58.3822,
},
{
"zip": 1871,
"name": "ISLA MACIEL",
"state": "BUENOS AIRES",
"lat": -34.6333,
"lon": -58.3667,
},
{
"zip": 1872,
"name": "SARANDI",
"state": "BUENOS AIRES",
"lat": -34.6667,
"lon": -58.3333,
},
{
"zip": 1874,
"name": "LAS QUINTAS (VILLA DOMINICO, PDO.AVELLANEDA)",
"state": "BUENOS AIRES",
"lat": -34.6813,
"lon": -58.3225,
},
{
"zip": 1876,
"name": "DON BOSCO",
"state": "BUENOS AIRES",
"lat": -34.7,
"lon": -58.2833,
},
{
"zip": 1878,
"name": "QUILMES",
"state": "BUENOS AIRES",
"lat": -34.7203,
"lon": -58.2694,
},
{
"zip": 1879,
"name": "BARRIO SAN DOMINGO",
"state": "BUENOS AIRES",
"lat": -34.6813,
"lon": -58.3225,
},
{
"zip": 1881,
"name": "SAN FRANCISCO SOLANO",
"state": "BUENOS AIRES",
"lat": -34.7833,
"lon": -58.3167,
},
{
"zip": 1882,
"name": "EZPELETA",
"state": "BUENOS AIRES",
"lat": -34.7667,
"lon": -58.25,
},
{
"zip": 1884,
"name": "BERAZATEGUI",
"state": "BUENOS AIRES",
"lat": -34.7333,
"lon": -58.2553,
},
{
"zip": 1885,
"name": "BARRIO SNIAFA",
"state": "BUENOS AIRES",
"lat": -34.7833,
"lon": -58.175,
},
{
"zip": 1886,
"name": "BARRIO MARITIMO",
"state": "BUENOS AIRES",
"lat": -34.8,
"lon": -58.2,
},
{
"zip": 1888,
"name": "GOBERNADOR MONTEVERDE",
"state": "BUENOS AIRES",
"lat": -34.8,
"lon": -58.2667,
},
{
"zip": 1889,
"name": "BARRIO RICARDO ROJAS (EL ROCIO, PDO. FLORENCIO VARELA)",
"state": "BUENOS AIRES",
"lat": -34.8167,
"lon": -58.2333,
},
{
"zip": 1890,
"name": "RUTA 2 KILOMETRO 32 AL 36,900",
"state": "BUENOS AIRES",
"lat": -34.8333,
"lon": -58.2,
},
{
"zip": 1891,
"name": "INGENIERO JUAN ALLAN",
"state": "BUENOS AIRES",
"lat": -34.8667,
"lon": -58.0911,
},
{
"zip": 1893,
"name": "CENTRO AGRICOLA EL PATO",
"state": "BUENOS AIRES",
"lat": -34.8667,
"lon": -58.0911,
},
{
"zip": 1894,
"name": "DOCTOR RICARDO LEVENE",
"state": "BUENOS AIRES",
"lat": -34.8667,
"lon": -58.0911,
},
{
"zip": 1895,
"name": "ARTURO SEGUI",
"state": "BUENOS AIRES",
"lat": -34.8781,
"lon": -58.1358,
},
{
"zip": 1896,
"name": "CITY BELL",
"state": "BUENOS AIRES",
"lat": -34.8589,
"lon": -58.0542,
},
{
"zip": 1897,
"name": "MANUEL B. GONNET ESTAFETA No.2 REPUBLICA DE LOS NI�OS",
"state": "BUENOS AIRES",
"lat": -34.8644,
"lon": -58.0178,
},
{
"zip": 1900,
"name": "LA PLATA ESTAFETA No.9",
"state": "BUENOS AIRES",
"lat": -34.9314,
"lon": -57.9489,
},
{
"zip": 1901,
"name": "LISANDRO OLMOS",
"state": "BUENOS AIRES",
"lat": -34.9833,
"lon": -58.0333,
},
{
"zip": 1903,
"name": "SAN PONCIANO",
"state": "BUENOS AIRES",
"lat": -34.946,
"lon": -58.0964,
},
{
"zip": 1905,
"name": "KILOMETRO 82 (APEADERO FCGR., POBLET, PDO.LA PLATA)",
"state": "BUENOS AIRES",
"lat": -35.175,
"lon": -57.9083,
},
{
"zip": 1907,
"name": "NUEVA HERMOSURA",
"state": "BUENOS AIRES",
"lat": -35.0041,
"lon": -58.0001,
},
{
"zip": 1909,
"name": "IGNACIO CORREAS",
"state": "BUENOS AIRES",
"lat": -35.0333,
"lon": -57.85,
},
{
"zip": 1911,
"name": "GENERAL MANSILLA",
"state": "BUENOS AIRES",
"lat": -35.0833,
"lon": -57.75,
},
{
"zip": 1913,
"name": "JULIO ARDITI",
"state": "BUENOS AIRES",
"lat": -35.1333,
"lon": -57.65,
},
{
"zip": 1915,
"name": "KILOMETRO 103 (APEADERO FCGR)",
"state": "BUENOS AIRES",
"lat": -35.1667,
"lon": -57.65,
},
{
"zip": 1917,
"name": "MONTE VELOZ",
"state": "BUENOS AIRES",
"lat": -35.45,
"lon": -57.2833,
},
{
"zip": 1919,
"name": "BASE AERONAVAL DE PUNTA INDIO",
"state": "BUENOS AIRES",
"lat": -35.1714,
"lon": -57.519,
},
{
"zip": 1921,
"name": "LAS TAHONAS",
"state": "BUENOS AIRES",
"lat": -35.35,
"lon": -57.3833,
},
{
"zip": 1923,
"name": "BERISSO",
"state": "BUENOS AIRES",
"lat": -34.8667,
"lon": -57.8833,
},
{
"zip": 1925,
"name": "DESTILERIA FISCAL",
"state": "BUENOS AIRES",
"lat": -34.8758,
"lon": -57.8822,
},
{
"zip": 1927,
"name": "ESCUELA NAVAL MILITAR RIO SANTIAGO",
"state": "BUENOS AIRES",
"lat": -35.246,
"lon": -57.547,
},
{
"zip": 1929,
"name": "ISLA SANTIAGO",
"state": "BUENOS AIRES",
"lat": -35.246,
"lon": -57.547,
},
{
"zip": 1931,
"name": "PIRIA",
"state": "BUENOS AIRES",
"lat": -34.8167,
"lon": -57.9833,
},
{
"zip": 1980,
"name": "BARRIO LA DOLLY",
"state": "BUENOS AIRES",
"lat": -35.1942,
"lon": -58.2567,
},
{
"zip": 1981,
"name": "OLIDEN",
"state": "BUENOS AIRES",
"lat": -35.1833,
"lon": -57.95,
},
{
"zip": 1983,
"name": "GOMEZ DE LA VEGA",
"state": "BUENOS AIRES",
"lat": -35.0833,
"lon": -58.15,
},
{
"zip": 1984,
"name": "DOMSELAAR",
"state": "BUENOS AIRES",
"lat": -35.0667,
"lon": -58.3,
},
{
"zip": 1986,
"name": "JEPPENER",
"state": "BUENOS AIRES",
"lat": -35.2833,
"lon": -58.2,
},
{
"zip": 1987,
"name": "GENERAL PAZ",
"state": "BUENOS AIRES",
"lat": -35.5236,
"lon": -58.2956,
},
{
"zip": 2113,
"name": "ARROYO DEL MEDIO",
"state": "BUENOS AIRES",
"lat": -33.5833,
"lon": -60.7833,
},
{
"zip": 2346,
"name": "ESTINGANA",
"state": "BUENOS AIRES",
"lat": -30.1987,
"lon": -61.9244,
},
{
"zip": 2700,
"name": "FONTEZUELA",
"state": "BUENOS AIRES",
"lat": -33.9167,
"lon": -60.4667,
},
{
"zip": 2701,
"name": "MARIANO BENITEZ",
"state": "BUENOS AIRES",
"lat": -33.7167,
"lon": -60.5833,
},
{
"zip": 2703,
"name": "ROBERTO CANO",
"state": "BUENOS AIRES",
"lat": -34.0833,
"lon": -60.6667,
},
{
"zip": 2705,
"name": "ROJAS",
"state": "BUENOS AIRES",
"lat": -34.1953,
"lon": -60.735,
},
{
"zip": 2707,
"name": "HUNTER",
"state": "BUENOS AIRES",
"lat": -34.2333,
"lon": -60.5833,
},
{
"zip": 2709,
"name": "LOS INDIOS (PDO. ROJAS)",
"state": "BUENOS AIRES",
"lat": -33.9805,
"lon": -60.6348,
},
{
"zip": 2711,
"name": "PEARSON",
"state": "BUENOS AIRES",
"lat": -33.65,
"lon": -60.8833,
},
{
"zip": 2715,
"name": "LA VANGUARDIA",
"state": "BUENOS AIRES",
"lat": -33.7333,
"lon": -60.8,
},
{
"zip": 2717,
"name": "JUAN A. DE LA PE�A",
"state": "BUENOS AIRES",
"lat": -33.8333,
"lon": -60.4833,
},
{
"zip": 2718,
"name": "LIERRA ADJEMIRO",
"state": "BUENOS AIRES",
"lat": -33.9333,
"lon": -60.5667,
},
{
"zip": 2720,
"name": "HARAS EL PELADO",
"state": "BUENOS AIRES",
"lat": -33.8914,
"lon": -61.1097,
},
{
"zip": 2721,
"name": "SARASA",
"state": "BUENOS AIRES",
"lat": -34.0333,
"lon": -61.2,
},
{
"zip": 2723,
"name": "ESTANCIA LAS GAMAS",
"state": "BUENOS AIRES",
"lat": -33.7167,
"lon": -61.05,
},
{
"zip": 2740,
"name": "ARRECIFES",
"state": "BUENOS AIRES",
"lat": -34.05,
"lon": -60.1167,
},
{
"zip": 2741,
"name": "LAS CUATRO PUERTAS",
"state": "BUENOS AIRES",
"lat": -34.2989,
"lon": -60.2419,
},
{
"zip": 2743,
"name": "TACUARI",
"state": "BUENOS AIRES",
"lat": -34.2167,
"lon": -60.3167,
},
{
"zip": 2745,
"name": "LA INVENCIBLE",
"state": "BUENOS AIRES",
"lat": -34.2667,
"lon": -60.3833,
},
{
"zip": 2747,
"name": "INES INDART",
"state": "BUENOS AIRES",
"lat": -34.4,
"lon": -60.55,
},
{
"zip": 2751,
"name": "ALMACEN CASTRO",
"state": "BUENOS AIRES",
"lat": -33.7333,
"lon": -60.1667,
},
{
"zip": 2752,
"name": "RETIRO SAN PABLO",
"state": "BUENOS AIRES",
"lat": -34.15,
"lon": -59.8833,
},
{
"zip": 2754,
"name": "CAMPO CRISOL",
"state": "BUENOS AIRES",
"lat": -34.0111,
"lon": -59.9778,
},
{
"zip": 2760,
"name": "SAN ANTONIO DE ARECO",
"state": "BUENOS AIRES",
"lat": -34.25,
"lon": -59.4667,
},
{
"zip": 2761,
"name": "SANTA COLOMA",
"state": "BUENOS AIRES",
"lat": -34.0667,
"lon": -59.5667,
},
{
"zip": 2763,
"name": "PUESTO DEL MEDIO",
"state": "BUENOS AIRES",
"lat": -34.2083,
"lon": -59.4222,
},
{
"zip": 2764,
"name": "CHENAUT",
"state": "BUENOS AIRES",
"lat": -34.25,
"lon": -59.2167,
},
{
"zip": 2800,
"name": "ARROYO BOTIJA FALSA",
"state": "BUENOS AIRES",
"lat": -34.063,
"lon": -59.0531,
},
{
"zip": 2801,
"name": "ALTO VERDE",
"state": "BUENOS AIRES",
"lat": -34.1667,
"lon": -59.1167,
},
{
"zip": 2802,
"name": "OTAMENDI",
"state": "BUENOS AIRES",
"lat": -34.2342,
"lon": -58.8642,
},
{
"zip": 2804,
"name": "CAMPANA",
"state": "BUENOS AIRES",
"lat": -34.1769,
"lon": -58.9208,
},
{
"zip": 2805,
"name": "ARROYO TAJIBER",
"state": "BUENOS AIRES",
"lat": -33.7833,
"lon": -58.9167,
},
{"zip": 2806, "name": "LIMA", "state": "BUENOS AIRES", "lat": -34.05, "lon": -59.2},
{
"zip": 2808,
"name": "ATUCHA",
"state": "BUENOS AIRES",
"lat": -33.9667,
"lon": -59.3,
},
{
"zip": 2812,
"name": "CAPILLA",
"state": "BUENOS AIRES",
"lat": -34.3,
"lon": -59.1,
},
{
"zip": 2813,
"name": "ARROYO DE LA CRUZ",
"state": "BUENOS AIRES",
"lat": -34.3333,
"lon": -59.1167,
},
{
"zip": 2814,
"name": "LOS CARDALES",
"state": "BUENOS AIRES",
"lat": -34.3394,
"lon": -59.0133,
},
{
"zip": 2900,
"name": "SAN NICOLAS SUCURSAL N�1",
"state": "BUENOS AIRES",
"lat": -33.3303,
"lon": -60.2269,
},
{
"zip": 2901,
"name": "LA EMILIA (PDO. SAN NICOLAS)",
"state": "BUENOS AIRES",
"lat": -33.4961,
"lon": -60.2787,
},
{
"zip": 2903,
"name": "CAMPOS SALLES",
"state": "BUENOS AIRES",
"lat": -33.4,
"lon": -60.2333,
},
{
"zip": 2905,
"name": "COLONIA LA ALICIA",
"state": "BUENOS AIRES",
"lat": -33.4667,
"lon": -60.2833,
},
{
"zip": 2907,
"name": "INGENIERO URCELAY (ESTACION FCGB)",
"state": "BUENOS AIRES",
"lat": -33.6417,
"lon": -60.325,
},
{
"zip": 2909,
"name": "JUAN G. PUJOL",
"state": "BUENOS AIRES",
"lat": -33.4961,
"lon": -60.2787,
},
{
"zip": 2912,
"name": "SANTA TERESA (SANCHEZ, PDO.RAMALLO)",
"state": "BUENOS AIRES",
"lat": -33.4333,
"lon": -60.15,
},
{
"zip": 2914,
"name": "COSTA BRAVA",
"state": "BUENOS AIRES",
"lat": -33.3298,
"lon": -60.2674,
},
{
"zip": 2915,
"name": "RAMALLO",
"state": "BUENOS AIRES",
"lat": -33.4786,
"lon": -60.0208,
},
{
"zip": 2916,
"name": "HARAS EL OMBU",
"state": "BUENOS AIRES",
"lat": -33.3298,
"lon": -60.2674,
},
{
"zip": 2930,
"name": "SAN PEDRO",
"state": "BUENOS AIRES",
"lat": -33.6706,
"lon": -59.6828,
},
{
"zip": 2931,
"name": "ISLA LOS LAURELES",
"state": "BUENOS AIRES",
"lat": -33.5833,
"lon": -59.8167,
},
{
"zip": 2933,
"name": "LA BOLSA",
"state": "BUENOS AIRES",
"lat": -33.7667,
"lon": -59.9833,
},
{
"zip": 2935,
"name": "EL DESCANSO",
"state": "BUENOS AIRES",
"lat": -33.8917,
"lon": -59.8083,
},
{
"zip": 2938,
"name": "ALSINA",
"state": "BUENOS AIRES",
"lat": -33.9,
"lon": -59.3833,
},
{
"zip": 2942,
"name": "ESTACION BARADERO",
"state": "BUENOS AIRES",
"lat": -33.7967,
"lon": -59.5208,
},
{
"zip": 2943,
"name": "IRENEO PORTELA",
"state": "BUENOS AIRES",
"lat": -33.9667,
"lon": -59.6667,
},
{
"zip": 2944,
"name": "RIO TALA",
"state": "BUENOS AIRES",
"lat": -33.75,
"lon": -59.6333,
},
{
"zip": 2946,
"name": "KILOMETRO 184 (APEADERO FCGM.)",
"state": "BUENOS AIRES",
"lat": -33.65,
"lon": -59.8833,
},
{
"zip": 3403,
"name": "SANTOS LUGARES",
"state": "BUENOS AIRES",
"lat": -27.5619,
"lon": -58.3024,
},
{
"zip": 3445,
"name": "COSTA BONITA",
"state": "BUENOS AIRES",
"lat": -28.6867,
"lon": -58.6833,
},
{
"zip": 3713,
"name": "MANUEL OCAMPO",
"state": "BUENOS AIRES",
"lat": -26.1625,
"lon": -62.2222,
},
{
"zip": 6000,
"name": "JUNIN ESTAFETA No.3",
"state": "BUENOS AIRES",
"lat": -34.585,
"lon": -60.9589,
},
{
"zip": 6001,
"name": "RAFAEL OBLIGADO",
"state": "BUENOS AIRES",
"lat": -34.35,
"lon": -60.8,
},
{
"zip": 6003,
"name": "ESCUELA AGRICOLA SALESIANA (C. G. UNZUE)",
"state": "BUENOS AIRES",
"lat": -34.2944,
"lon": -60.7917,
},
{
"zip": 6005,
"name": "LA HUAYQUERIA",
"state": "BUENOS AIRES",
"lat": -34.2722,
"lon": -61.2722,
},
{
"zip": 6007,
"name": "COLONIA LOS HORNOS",
"state": "BUENOS AIRES",
"lat": -34.2111,
"lon": -61.3222,
},
{
"zip": 6013,
"name": "LAPLACETTE",
"state": "BUENOS AIRES",
"lat": -34.7167,
"lon": -61.1667,
},
{
"zip": 6015,
"name": "LOS HUESOS",
"state": "BUENOS AIRES",
"lat": -34.995,
"lon": -61.0367,
},
{
"zip": 6017,
"name": "LA DELFINA",
"state": "BUENOS AIRES",
"lat": -34.9333,
"lon": -61.1667,
},
{
"zip": 6018,
"name": "COLONIA LOS HUESOS",
"state": "BUENOS AIRES",
"lat": -34.8667,
"lon": -61.15,
},
{
"zip": 6022,
"name": "LA ORIENTAL",
"state": "BUENOS AIRES",
"lat": -34.5667,
"lon": -60.8167,
},
{
"zip": 6023,
"name": "LEANDRO N.ALEM",
"state": "BUENOS AIRES",
"lat": -34.5,
"lon": -61.4,
},
{
"zip": 6030,
"name": "EDMUNDO B. PERKINS (ESTACION FCGSM)",
"state": "BUENOS AIRES",
"lat": -34.4667,
"lon": -61.6667,
},
{
"zip": 6031,
"name": "DESVIO EL CHINGOLO",
"state": "BUENOS AIRES",
"lat": -34.4389,
"lon": -61.5444,
},
{
"zip": 6032,
"name": "BLANDENGUES",
"state": "BUENOS AIRES",
"lat": -34.5333,
"lon": -61.2667,
},
{
"zip": 6034,
"name": "JUAN BAUTISTA ALBERDI",
"state": "BUENOS AIRES",
"lat": -34.4333,
"lon": -61.8,
},
{
"zip": 6042,
"name": "IRIARTE",
"state": "BUENOS AIRES",
"lat": -34.4167,
"lon": -61.9167,
},
{
"zip": 6050,
"name": "DUSSAUD",
"state": "BUENOS AIRES",
"lat": -34.7833,
"lon": -61.7667,
},
{
"zip": 6051,
"name": "PICHINCHA",
"state": "BUENOS AIRES",
"lat": -34.5667,
"lon": -62.35,
},
{
"zip": 6053,
"name": "EL PEREGRINO",
"state": "BUENOS AIRES",
"lat": -34.575,
"lon": -61.9833,
},
{
"zip": 6058,
"name": "PAZOS KANKI",
"state": "BUENOS AIRES",
"lat": -34.7667,
"lon": -62.0833,
},
{
"zip": 6062,
"name": "LOS CALLEJONES (ESTACION FCGM.)",
"state": "BUENOS AIRES",
"lat": -34.7833,
"lon": -62.2,
},
{
"zip": 6063,
"name": "PORVENIR",
"state": "BUENOS AIRES",
"lat": -34.95,
"lon": -62.2333,
},
{
"zip": 6064,
"name": "SALALE",
"state": "BUENOS AIRES",
"lat": -34.825,
"lon": -62.4,
},
{
"zip": 6065,
"name": "BLAQUIER",
"state": "BUENOS AIRES",
"lat": -34.6333,
"lon": -62.4833,
},
{
"zip": 6070,
"name": "KILOMETRO 321 (APEADERO FCDFS.)",
"state": "BUENOS AIRES",
"lat": -34.8325,
"lon": -61.5993,
},
{
"zip": 6071,
"name": "SANTA MARIA",
"state": "BUENOS AIRES",
"lat": -33.8,
"lon": -60.1833,
},
{
"zip": 6073,
"name": "FORTIN VIGILANCIA",
"state": "BUENOS AIRES",
"lat": -35.1167,
"lon": -61.6833,
},
{
"zip": 6075,
"name": "HARAS TRUJUI",
"state": "BUENOS AIRES",
"lat": -35.0583,
"lon": -61.8583,
},
{
"zip": 6077,
"name": "NECOL (ESTACION FCGM)",
"state": "BUENOS AIRES",
"lat": -35.125,
"lon": -62.3083,
},
{
"zip": 6078,
"name": "BAYAUCA",
"state": "BUENOS AIRES",
"lat": -34.85,
"lon": -61.3,
},
{
"zip": 6105,
"name": "CA�ADA SECA",
"state": "BUENOS AIRES",
"lat": -34.4333,
"lon": -62.95,
},
{
"zip": 6223,
"name": "FERNANDO MARTI",
"state": "BUENOS AIRES",
"lat": -34.6667,
"lon": -63.3667,
},
{
"zip": 6230,
"name": "GENERAL VILLEGAS ESTAFETA No.1",
"state": "BUENOS AIRES",
"lat": -34.9917,
"lon": -62.9583,
},
{
"zip": 6231,
"name": "CUENCA",
"state": "BUENOS AIRES",
"lat": -35.2926,
"lon": -62.9981,
},
{
"zip": 6233,
"name": "SANSINENA",
"state": "BUENOS AIRES",
"lat": -35.2667,
"lon": -63.2,
},
{
"zip": 6237,
"name": "AMERICA",
"state": "BUENOS AIRES",
"lat": -35.4583,
"lon": -62.8833,
},
{
"zip": 6239,
"name": "GONZALEZ MORENO",
"state": "BUENOS AIRES",
"lat": -35.55,
"lon": -63.3667,
},
{
"zip": 6241,
"name": "EMILIO V. BUNGE",
"state": "BUENOS AIRES",
"lat": -34.7667,
"lon": -63.2,
},
{
"zip": 6242,
"name": "LOS CALDENES",
"state": "BUENOS AIRES",
"lat": -34.975,
"lon": -62.95,
},
{
"zip": 6244,
"name": "BANDERALO",
"state": "BUENOS AIRES",
"lat": -35,
"lon": -63.35,
},
{
"zip": 6332,
"name": "BENITEZ",
"state": "BUENOS AIRES",
"lat": -36.4167,
"lon": -63.287,
},
{
"zip": 6335,
"name": "GRACIARENA (PDO. SALLIQUELO)",
"state": "BUENOS AIRES",
"lat": -36.5833,
"lon": -63.1,
},
{
"zip": 6337,
"name": "MARIA P. MORENO",
"state": "BUENOS AIRES",
"lat": -36.6333,
"lon": -62.9167,
},
{
"zip": 6338,
"name": "LEUBUCO",
"state": "BUENOS AIRES",
"lat": -36.8833,
"lon": -63.0333,
},
{
"zip": 6339,
"name": "SALLIQUELO",
"state": "BUENOS AIRES",
"lat": -36.75,
"lon": -62.9333,
},
{
"zip": 6341,
"name": "LA PALA",
"state": "BUENOS AIRES",
"lat": -36.7,
"lon": -63.3667,
},
{
"zip": 6343,
"name": "MAZA",
"state": "BUENOS AIRES",
"lat": -36.8333,
"lon": -63.3167,
},
{
"zip": 6346,
"name": "PELLEGRINI",
"state": "BUENOS AIRES",
"lat": -36.2667,
"lon": -63.15,
},
{
"zip": 6348,
"name": "BOCAYUVA",
"state": "BUENOS AIRES",
"lat": -36.2167,
"lon": -63.0667,
},
{
"zip": 6400,
"name": "MARTIN FIERRO (PDO.TRENQUE LAUQUEN)",
"state": "BUENOS AIRES",
"lat": -36.0389,
"lon": -62.9389,
},
{
"zip": 6401,
"name": "SUNBLAD",
"state": "BUENOS AIRES",
"lat": -36.0819,
"lon": -62.7528,
},
{
"zip": 6403,
"name": "FORTIN OLAVARRIA",
"state": "BUENOS AIRES",
"lat": -35.7,
"lon": -63,
},
{
"zip": 6405,
"name": "CORAZZI",
"state": "BUENOS AIRES",
"lat": -36.2,
"lon": -62.4,
},
{
"zip": 6407,
"name": "LA PORTE�A (PDO.TRENQUE LAUQUEN)",
"state": "BUENOS AIRES",
"lat": -36.3667,
"lon": -62.35,
},
{
"zip": 6409,
"name": "JOSE MARIA BLANCO",
"state": "BUENOS AIRES",
"lat": -36.45,
"lon": -62.85,
},
{
"zip": 6411,
"name": "BRAVO DEL DOS",
"state": "BUENOS AIRES",
"lat": -36.55,
"lon": -62.5833,
},
{
"zip": 6417,
"name": "FORTIN PAUNERO",
"state": "BUENOS AIRES",
"lat": -36.7917,
"lon": -62.5167,
},
{
"zip": 6422,
"name": "PRIMERA JUNTA",
"state": "BUENOS AIRES",
"lat": -35.9167,
"lon": -62.6333,
},
{
"zip": 6424,
"name": "SAN RAMON (BERUTTI, PDO. TRENQUE LAUQUEN)",
"state": "BUENOS AIRES",
"lat": -35.8667,
"lon": -62.5,
},
{
"zip": 6430,
"name": "ROLITO (ESTACION FCGB)",
"state": "BUENOS AIRES",
"lat": -37.1111,
"lon": -62.7667,
},
{
"zip": 6431,
"name": "ESTACION LAGO EPECUEN",
"state": "BUENOS AIRES",
"lat": -37.1333,
"lon": -62.8,
},
{
"zip": 6433,
"name": "ARTURO VATTEONE",
"state": "BUENOS AIRES",
"lat": -37.2,
"lon": -62.8333,
},
{
"zip": 6435,
"name": "LAGUNA DEL MONTE (GUAMINI, PDO.GUAMINI)",
"state": "BUENOS AIRES",
"lat": -37.0333,
"lon": -62.4167,
},
{
"zip": 6437,
"name": "LA GREGORIA",
"state": "BUENOS AIRES",
"lat": -37,
"lon": -62.45,
},
{
"zip": 6438,
"name": "MASUREL",
"state": "BUENOS AIRES",
"lat": -36.975,
"lon": -62.4889,
},
{
"zip": 6439,
"name": "LAGUNA ALSINA",
"state": "BUENOS AIRES",
"lat": -36.8167,
"lon": -62.2167,
},
{
"zip": 6441,
"name": "COLONIA BARON HIRSCH",
"state": "BUENOS AIRES",
"lat": -37.2,
"lon": -63.2333,
},
{
"zip": 6443,
"name": "MALABIA",
"state": "BUENOS AIRES",
"lat": -37.0833,
"lon": -63.2667,
},
{
"zip": 6450,
"name": "PEHUAJO ESTAFETA N�2",
"state": "BUENOS AIRES",
"lat": -35.7074,
"lon": -61.961,
},
{
"zip": 6451,
"name": "LOS INDIOS (CURARU, PDO. CARLOS TEJEDOR)",
"state": "BUENOS AIRES",
"lat": -35.7648,
"lon": -61.9167,
},
{
"zip": 6453,
"name": "LA PRADERA",
"state": "BUENOS AIRES",
"lat": -35.3667,
"lon": -61.8917,
},
{
"zip": 6455,
"name": "HUSARES",
"state": "BUENOS AIRES",
"lat": -35.5167,
"lon": -62.3333,
},
{
"zip": 6457,
"name": "KILOMETRO 386 (APEADERO FCDFS.)",
"state": "BUENOS AIRES",
"lat": -35.5,
"lon": -62.2333,
},
{
"zip": 6459,
"name": "SANTA INES (PDO. CARLOS TEJEDOR)",
"state": "BUENOS AIRES",
"lat": -35.4333,
"lon": -62.7,
},
{
"zip": 6461,
"name": "LA COTORRA",
"state": "BUENOS AIRES",
"lat": -36.0333,
"lon": -62.2333,
},
{
"zip": 6463,
"name": "ALAGON",
"state": "BUENOS AIRES",
"lat": -35.95,
"lon": -61.9667,
},
{
"zip": 6465,
"name": "HENDERSON",
"state": "BUENOS AIRES",
"lat": -36.3,
"lon": -61.7167,
},
{
"zip": 6467,
"name": "KILOMETRO 393 (APEADERO FCDFS.)",
"state": "BUENOS AIRES",
"lat": -36.2917,
"lon": -61.7333,
},
{
"zip": 6469,
"name": "MONES CAZON",
"state": "BUENOS AIRES",
"lat": -36.2333,
"lon": -62,
},
{
"zip": 6471,
"name": "MOURAS",
"state": "BUENOS AIRES",
"lat": -36.3167,
"lon": -62.2167,
},
{
"zip": 6472,
"name": "FRANCISCO MADERO",
"state": "BUENOS AIRES",
"lat": -35.8667,
"lon": -62.05,
},
{
"zip": 6474,
"name": "EL RECADO",
"state": "BUENOS AIRES",
"lat": -35.7833,
"lon": -62.25,
},
{
"zip": 6475,
"name": "LA HIGUERA",
"state": "BUENOS AIRES",
"lat": -35.65,
"lon": -62.4167,
},
{
"zip": 6476,
"name": "PUEBLO VILLA SAN ESTEBAN",
"state": "BUENOS AIRES",
"lat": -35.6889,
"lon": -61.6944,
},
{"zip": 6500, "name": "FAUZON", "state": "BUENOS AIRES", "lat": -35.4, "lon": -61},
{
"zip": 6501,
"name": "LAGUNA DEL CURA",
"state": "BUENOS AIRES",
"lat": -35.55,
"lon": -60.8083,
},
{
"zip": 6503,
"name": "KILOMETRO 234",
"state": "BUENOS AIRES",
"lat": -35.45,
"lon": -60.7,
},
{
"zip": 6505,
"name": "DUDIGNAC",
"state": "BUENOS AIRES",
"lat": -35.65,
"lon": -60.7333,
},
{
"zip": 6507,
"name": "LAS NEGRAS",
"state": "BUENOS AIRES",
"lat": -35.6667,
"lon": -60.725,
},
{
"zip": 6508,
"name": "STROEDER",
"state": "BUENOS AIRES",
"lat": -35.6167,
"lon": -60.7667,
},
{
"zip": 6509,
"name": "ESCUELA AGRICOLA SALESIANA",
"state": "BUENOS AIRES",
"lat": -35.9,
"lon": -60.7167,
},
{"zip": 6511, "name": "HUETEL", "state": "BUENOS AIRES", "lat": -36, "lon": -60.85},
{
"zip": 6513,
"name": "COLONIA LAS YESCAS",
"state": "BUENOS AIRES",
"lat": -35.45,
"lon": -61.1833,
},
{
"zip": 6515,
"name": "CARLOS MARIA NAON",
"state": "BUENOS AIRES",
"lat": -35.2333,
"lon": -60.8333,
},
{
"zip": 6516,
"name": "BACACAY",
"state": "BUENOS AIRES",
"lat": -35.45,
"lon": -60.9333,
},
{
"zip": 6530,
"name": "SAN JUAN DE NELSON",
"state": "BUENOS AIRES",
"lat": -35.6549,
"lon": -61.4385,
},
{
"zip": 6531,
"name": "COLONIA LA ESPERANZA",
"state": "BUENOS AIRES",
"lat": -35.509,
"lon": -61.4542,
},
{
"zip": 6533,
"name": "QUIROGA",
"state": "BUENOS AIRES",
"lat": -35.2814,
"lon": -61.4192,
},
{
"zip": 6535,
"name": "BELLOCQ",
"state": "BUENOS AIRES",
"lat": -35.9167,
"lon": -61.5333,
},
{
"zip": 6537,
"name": "EL CARPINCHO",
"state": "BUENOS AIRES",
"lat": -35.9083,
"lon": -61.2167,
},
{
"zip": 6538,
"name": "LA DORITA",
"state": "BUENOS AIRES",
"lat": -35.6,
"lon": -61.25,
},
{
"zip": 6550,
"name": "BOLIVAR",
"state": "BUENOS AIRES",
"lat": -36.25,
"lon": -61.1,
},
{
"zip": 6551,
"name": "JUAN F. IBARRA",
"state": "BUENOS AIRES",
"lat": -36.35,
"lon": -61.25,
},
{
"zip": 6553,
"name": "NUEVA ESPA�A",
"state": "BUENOS AIRES",
"lat": -36.45,
"lon": -61.4333,
},
{
"zip": 6555,
"name": "ALFALAD",
"state": "BUENOS AIRES",
"lat": -36.6167,
"lon": -61.9333,
},
{
"zip": 6557,
"name": "MAPIS",
"state": "BUENOS AIRES",
"lat": -36.7833,
"lon": -61.3,
},
{
"zip": 6559,
"name": "RECALDE",
"state": "BUENOS AIRES",
"lat": -36.65,
"lon": -61.0833,
},
{
"zip": 6561,
"name": "SAN BERNARDO (PDO. TAPALQUE)",
"state": "BUENOS AIRES",
"lat": -36.4167,
"lon": -60.6667,
},
{
"zip": 6567,
"name": "COLONIA INCHAUSTI",
"state": "BUENOS AIRES",
"lat": -36.4167,
"lon": -60.6667,
},
{
"zip": 6600,
"name": "MERCEDES",
"state": "BUENOS AIRES",
"lat": -34.6544,
"lon": -59.4344,
},
{
"zip": 6601,
"name": "ESPORA",
"state": "BUENOS AIRES",
"lat": -34.5333,
"lon": -59.6,
},
{
"zip": 6603,
"name": "KILOMETRO 117 (APEADERO FCGB.)",
"state": "BUENOS AIRES",
"lat": -34.9167,
"lon": -59.5667,
},
{
"zip": 6604,
"name": "LA VERDE (PDO.MERCEDES)",
"state": "BUENOS AIRES",
"lat": -34.7865,
"lon": -59.3765,
},
{
"zip": 6605,
"name": "KILOMETRO 83",
"state": "BUENOS AIRES",
"lat": -34.9417,
"lon": -59.2417,
},
{
"zip": 6607,
"name": "ESTEBAN DIAZ",
"state": "BUENOS AIRES",
"lat": -35.0417,
"lon": -59.4583,
},
{
"zip": 6608,
"name": "OLIVERA",
"state": "BUENOS AIRES",
"lat": -34.6333,
"lon": -59.25,
},
{
"zip": 6612,
"name": "SUIPACHA",
"state": "BUENOS AIRES",
"lat": -34.7781,
"lon": -59.6889,
},
{
"zip": 6614,
"name": "RIVAS",
"state": "BUENOS AIRES",
"lat": -34.6167,
"lon": -59.7667,
},
{
"zip": 6616,
"name": "LA CALIFORNA ARGENTINA",
"state": "BUENOS AIRES",
"lat": -34.6,
"lon": -59.9,
},
{
"zip": 6620,
"name": "CHIVILCOY ESTAFETA No.3",
"state": "BUENOS AIRES",
"lat": -34.9083,
"lon": -60.0306,
},
{
"zip": 6621,
"name": "ANDERSON",
"state": "BUENOS AIRES",
"lat": -35.2833,
"lon": -60.2667,
},
{
"zip": 6623,
"name": "SAN SEBASTIAN",
"state": "BUENOS AIRES",
"lat": -34.95,
"lon": -59.7167,
},
{
"zip": 6625,
"name": "CA�ADA LA RICA",
"state": "BUENOS AIRES",
"lat": -35.0072,
"lon": -60.0531,
},
{
"zip": 6627,
"name": "MOLL",
"state": "BUENOS AIRES",
"lat": -35.0667,
"lon": -59.65,
},
{
"zip": 6628,
"name": "CORONEL MOM",
"state": "BUENOS AIRES",
"lat": -34.8333,
"lon": -60.3,
},
{
"zip": 6632,
"name": "GOROSTIAGA",
"state": "BUENOS AIRES",
"lat": -34.8167,
"lon": -59.8667,
},
{
"zip": 6634,
"name": "ANDRES VACCAREZZA",
"state": "BUENOS AIRES",
"lat": -35.0917,
"lon": -60.2833,
},
{
"zip": 6640,
"name": "LA MARIA",
"state": "BUENOS AIRES",
"lat": -35.1675,
"lon": -60.4182,
},
{
"zip": 6641,
"name": "COMODORO PY",
"state": "BUENOS AIRES",
"lat": -35.3167,
"lon": -60.5167,
},
{
"zip": 6643,
"name": "SAN JOSE (BAUDRIX, PDO. ALBERTI)",
"state": "BUENOS AIRES",
"lat": -35.275,
"lon": -60.325,
},
{
"zip": 6645,
"name": "LA LIMPIA (PDO. BRAGADO)",
"state": "BUENOS AIRES",
"lat": -35.0667,
"lon": -60.7167,
},
{
"zip": 6646,
"name": "COLONIA SAN EDUARDO",
"state": "BUENOS AIRES",
"lat": -34.9,
"lon": -60.75,
},
{
"zip": 6648,
"name": "MECHA",
"state": "BUENOS AIRES",
"lat": -35.0667,
"lon": -60.4,
},
{
"zip": 6652,
"name": "OLASCOAGA",
"state": "BUENOS AIRES",
"lat": -35.2,
"lon": -60.6,
},
{
"zip": 6660,
"name": "ORTIZ DE ROSAS",
"state": "BUENOS AIRES",
"lat": -35.4,
"lon": -60.3833,
},
{
"zip": 6661,
"name": "LUCAS MONTEVERDE",
"state": "BUENOS AIRES",
"lat": -35.5,
"lon": -59.9833,
},
{
"zip": 6663,
"name": "JUAN VELA",
"state": "BUENOS AIRES",
"lat": -35.2667,
"lon": -59.7667,
},
{
"zip": 6665,
"name": "LA GLORIA",
"state": "BUENOS AIRES",
"lat": -35.2583,
"lon": -59.6083,
},
{
"zip": 6667,
"name": "LA RABIA",
"state": "BUENOS AIRES",
"lat": -35.4417,
"lon": -60.15,
},
{
"zip": 6700,
"name": "CUARTEL IV",
"state": "BUENOS AIRES",
"lat": -34.5703,
"lon": -59.105,
},
{
"zip": 6701,
"name": "CARLOS KEEN",
"state": "BUENOS AIRES",
"lat": -34.4833,
"lon": -59.2333,
},
{
"zip": 6703,
"name": "PARADA ROBLES",
"state": "BUENOS AIRES",
"lat": -34.4333,
"lon": -59.0833,
},
{
"zip": 6706,
"name": "JAUREGUI",
"state": "BUENOS AIRES",
"lat": -34.6,
"lon": -59.1667,
},
{
"zip": 6708,
"name": "MARISCAL SUCRE",
"state": "BUENOS AIRES",
"lat": -34.5,
"lon": -59.1,
},
{
"zip": 6712,
"name": "CORTINES",
"state": "BUENOS AIRES",
"lat": -34.5667,
"lon": -59.2167,
},
{
"zip": 6720,
"name": "SAN ANDRES DE GILES",
"state": "BUENOS AIRES",
"lat": -34.4497,
"lon": -59.4433,
},
{
"zip": 6721,
"name": "TATAY",
"state": "BUENOS AIRES",
"lat": -34.3667,
"lon": -59.9333,
},
{
"zip": 6723,
"name": "KILOMETRO 108",
"state": "BUENOS AIRES",
"lat": -34.4333,
"lon": -59.5083,
},
{
"zip": 6725,
"name": "SAN ERNESTO",
"state": "BUENOS AIRES",
"lat": -34.3858,
"lon": -59.8294,
},
{
"zip": 6727,
"name": "GOUIN",
"state": "BUENOS AIRES",
"lat": -34.4667,
"lon": -59.8,
},
{
"zip": 6734,
"name": "SAN PATRICIO",
"state": "BUENOS AIRES",
"lat": -34.6,
"lon": -60.2333,
},
{
"zip": 6740,
"name": "GREGORIO VILLAFA�E",
"state": "BUENOS AIRES",
"lat": -34.7167,
"lon": -60.6,
},
{
"zip": 6743,
"name": "INGENIERO SILVEYRA",
"state": "BUENOS AIRES",
"lat": -34.5333,
"lon": -60.2,
},
{
"zip": 6746,
"name": "CUCHA CUCHA",
"state": "BUENOS AIRES",
"lat": -34.6167,
"lon": -60.4167,
},
{
"zip": 6748,
"name": "O'HIGGINS",
"state": "BUENOS AIRES",
"lat": -34.5833,
"lon": -60.7,
},
{
"zip": 7000,
"name": "TANDIL ESTAFETA N�4",
"state": "BUENOS AIRES",
"lat": -37.3167,
"lon": -59.15,
},
{
"zip": 7001,
"name": "LA PASTORA (PDO. TANDIL)",
"state": "BUENOS AIRES",
"lat": -37.5821,
"lon": -59.0885,
},
{
"zip": 7003,
"name": "MARIA IGNACIA",
"state": "BUENOS AIRES",
"lat": -37.4,
"lon": -59.5,
},
{
"zip": 7005,
"name": "KILOMETRO 404",
"state": "BUENOS AIRES",
"lat": -37.7,
"lon": -59.325,
},
{
"zip": 7007,
"name": "LA PALMA",
"state": "BUENOS AIRES",
"lat": -37.7033,
"lon": -58.7867,
},
{
"zip": 7009,
"name": "IRAOLA",
"state": "BUENOS AIRES",
"lat": -37.25,
"lon": -58.9167,
},
{
"zip": 7011,
"name": "JUAN N. FERNANDEZ",
"state": "BUENOS AIRES",
"lat": -38,
"lon": -59.2667,
},
{
"zip": 7013,
"name": "EGA�A",
"state": "BUENOS AIRES",
"lat": -36.9833,
"lon": -59.1,
},
{
"zip": 7020,
"name": "BENITO JUAREZ",
"state": "BUENOS AIRES",
"lat": -37.6667,
"lon": -59.8,
},
{
"zip": 7021,
"name": "TEDIN URIBURU",
"state": "BUENOS AIRES",
"lat": -37.3667,
"lon": -59.7667,
},
{
"zip": 7100,
"name": "LOMA DE SALOMON",
"state": "BUENOS AIRES",
"lat": -36.3917,
"lon": -57.7167,
},
{
"zip": 7101,
"name": "CERRO DE LA GLORIA",
"state": "BUENOS AIRES",
"lat": -35.9667,
"lon": -57.45,
},
{
"zip": 7103,
"name": "FARO SAN ANTONIO",
"state": "BUENOS AIRES",
"lat": -36.4,
"lon": -56.9667,
},
{
"zip": 7105,
"name": "AUTOCAMPING SAN CLEMENTE",
"state": "BUENOS AIRES",
"lat": -36.3667,
"lon": -56.7167,
},
{
"zip": 7106,
"name": "LAS TONINAS",
"state": "BUENOS AIRES",
"lat": -36.4833,
"lon": -56.7,
},
{
"zip": 7107,
"name": "SANTA TERESITA ESTAFETA N�2",
"state": "BUENOS AIRES",
"lat": -36.5333,
"lon": -56.6833,
},
{
"zip": 7108,
"name": "MAR DEL TUYU",
"state": "BUENOS AIRES",
"lat": -36.55,
"lon": -56.6833,
},
{
"zip": 7109,
"name": "BARRIO VILLA CLELIA",
"state": "BUENOS AIRES",
"lat": -36.7167,
"lon": -56.6667,
},
{
"zip": 7111,
"name": "PLAYA SAN BERNARDO (MUNICIPIO URBANO DE LA COSTA)",
"state": "BUENOS AIRES",
"lat": -36.2611,
"lon": -57.6764,
},
{
"zip": 7112,
"name": "LA POSTA",
"state": "BUENOS AIRES",
"lat": -36.2611,
"lon": -57.6764,
},
{
"zip": 7113,
"name": "LA LUCILA DEL MAR",
"state": "BUENOS AIRES",
"lat": -36.6667,
"lon": -56.6667,
},
{
"zip": 7114,
"name": "CASTELLI",
"state": "BUENOS AIRES",
"lat": -36.1,
"lon": -57.7833,
},
{
"zip": 7116,
"name": "MANUEL J.COBO",
"state": "BUENOS AIRES",
"lat": -35.8667,
"lon": -57.9,
},
{
"zip": 7118,
"name": "GENERAL GUIDO",
"state": "BUENOS AIRES",
"lat": -36.6667,
"lon": -57.7667,
},
{
"zip": 7119,
"name": "CARI LARQUEA",
"state": "BUENOS AIRES",
"lat": -36.8056,
"lon": -57.4611,
},
{
"zip": 7122,
"name": "NAPOSTA",
"state": "BUENOS AIRES",
"lat": -36.5697,
"lon": -57.5907,
},
{
"zip": 7130,
"name": "LA LIMPIA (CHASCOMUS, PDO. CHASCOMUS)",
"state": "BUENOS AIRES",
"lat": -35.5567,
"lon": -57.84,
},
{
"zip": 7134,
"name": "NICOLAS LEVALLE",
"state": "BUENOS AIRES",
"lat": -35.5567,
"lon": -57.84,
},
{
"zip": 7135,
"name": "KILOMETRO 158 (APEADERO FCGR.)",
"state": "BUENOS AIRES",
"lat": -35.5556,
"lon": -57.7111,
},
{
"zip": 7136,
"name": "HARAS SAN IGNACIO",
"state": "BUENOS AIRES",
"lat": -35.5583,
"lon": -58.0333,
},
{
"zip": 7142,
"name": "OMBUCTA",
"state": "BUENOS AIRES",
"lat": -36.5697,
"lon": -57.5907,
},
{
"zip": 7150,
"name": "AYACUCHO",
"state": "BUENOS AIRES",
"lat": -37.15,
"lon": -58.4833,
},
{
"zip": 7151,
"name": "SOLANET",
"state": "BUENOS AIRES",
"lat": -36.85,
"lon": -58.5167,
},
{
"zip": 7153,
"name": "CANGALLO",
"state": "BUENOS AIRES",
"lat": -37.2167,
"lon": -58.7,
},
{
"zip": 7160,
"name": "MAIPU",
"state": "BUENOS AIRES",
"lat": -36.8667,
"lon": -57.8667,
},
{
"zip": 7161,
"name": "LABARDEN",
"state": "BUENOS AIRES",
"lat": -36.95,
"lon": -58.1,
},
{
"zip": 7163,
"name": "SANTA TERESA (GRAL. MADARIAGA, PDO. GRAL. LAVALLE)",
"state": "BUENOS AIRES",
"lat": -36.975,
"lon": -57.1833,
},
{
"zip": 7164,
"name": "SAN MARTIN DE TOURS",
"state": "BUENOS AIRES",
"lat": -36.9806,
"lon": -57.2361,
},
{
"zip": 7165,
"name": "LIBRES DE SUD",
"state": "BUENOS AIRES",
"lat": -35.7167,
"lon": -57.7167,
},
{
"zip": 7167,
"name": "OSTENDE",
"state": "BUENOS AIRES",
"lat": -37.15,
"lon": -56.8833,
},
{
"zip": 7169,
"name": "MACEDO",
"state": "BUENOS AIRES",
"lat": -37.25,
"lon": -57.2,
},
{
"zip": 7172,
"name": "HOGAR MARIANO ORTIZ BASUALDO",
"state": "BUENOS AIRES",
"lat": -37.1833,
"lon": -57.7833,
},
{
"zip": 7174,
"name": "ESCUELA AGRICOLA RURAL N. EZEIZA",
"state": "BUENOS AIRES",
"lat": -37.45,
"lon": -57.7167,
},
{
"zip": 7200,
"name": "LAS FLORES",
"state": "BUENOS AIRES",
"lat": -36.05,
"lon": -59.1167,
},
{
"zip": 7201,
"name": "MARTIN COLMAN",
"state": "BUENOS AIRES",
"lat": -36.4167,
"lon": -59.15,
},
{
"zip": 7203,
"name": "EL CARMEN DE LANGUEYU",
"state": "BUENOS AIRES",
"lat": -36.7083,
"lon": -59.125,
},
{
"zip": 7205,
"name": "ROSAS",
"state": "BUENOS AIRES",
"lat": -35.9667,
"lon": -58.9333,
},
{
"zip": 7207,
"name": "LA PORTE�A (EL TRIGO, PDO. LAS FLORES)",
"state": "BUENOS AIRES",
"lat": -35.9167,
"lon": -59.4583,
},
{
"zip": 7208,
"name": "CORONEL BOERR",
"state": "BUENOS AIRES",
"lat": -35.95,
"lon": -59.0667,
},
{
"zip": 7212,
"name": "NARANJA",
"state": "BUENOS AIRES",
"lat": -36.15,
"lon": -59.2667,
},
{
"zip": 7214,
"name": "LAGUNA MEDINA",
"state": "BUENOS AIRES",
"lat": -36.3583,
"lon": -59.4833,
},
{
"zip": 7215,
"name": "EL SOCORRO",
"state": "BUENOS AIRES",
"lat": -36.2533,
"lon": -59.3733,
},
{
"zip": 7220,
"name": "GUARDIA DEL MONTE",
"state": "BUENOS AIRES",
"lat": -35.3333,
"lon": -58.7167,
},
{
"zip": 7221,
"name": "GOBERNADOR UDAONDO",
"state": "BUENOS AIRES",
"lat": -35.3,
"lon": -58.6,
},
{
"zip": 7223,
"name": "NEWTON",
"state": "BUENOS AIRES",
"lat": -35.9333,
"lon": -58.7833,
},
{
"zip": 7225,
"name": "LA VICTORIA (PDO. PILA)",
"state": "BUENOS AIRES",
"lat": -36.2417,
"lon": -58.5667,
},
{
"zip": 7226,
"name": "KILOMETRO 146 (APEADERO FCGR.)",
"state": "BUENOS AIRES",
"lat": -35.6833,
"lon": -58.95,
},
{
"zip": 7228,
"name": "ABBOTT",
"state": "BUENOS AIRES",
"lat": -35.2833,
"lon": -58.8,
},
{
"zip": 7240,
"name": "KILOMETRO 112 (APEADERO FCGR.)",
"state": "BUENOS AIRES",
"lat": -35.1853,
"lon": -59.0947,
},
{
"zip": 7241,
"name": "SALVADOR MARIA",
"state": "BUENOS AIRES",
"lat": -35.3,
"lon": -59.1667,
},
{
"zip": 7243,
"name": "ANTONIO CARBONI",
"state": "BUENOS AIRES",
"lat": -35.2,
"lon": -59.3333,
},
{
"zip": 7245,
"name": "ROQUE PEREZ",
"state": "BUENOS AIRES",
"lat": -35.4167,
"lon": -59.3333,
},
{
"zip": 7247,
"name": "BARRIENTOS",
"state": "BUENOS AIRES",
"lat": -35.4389,
"lon": -59.2556,
},
{
"zip": 7249,
"name": "EMPALME LOBOS",
"state": "BUENOS AIRES",
"lat": -35.15,
"lon": -59.0833,
},
{
"zip": 7260,
"name": "EMILIANO REYNOSO (ESTACION FCGB)",
"state": "BUENOS AIRES",
"lat": -35.6778,
"lon": -59.8691,
},
{
"zip": 7261,
"name": "SAN BENITO",
"state": "BUENOS AIRES",
"lat": -35.6667,
"lon": -59.9,
},
{
"zip": 7263,
"name": "EL PARCHE",
"state": "BUENOS AIRES",
"lat": -35.8667,
"lon": -60.25,
},
{
"zip": 7265,
"name": "DEL CARRIL",
"state": "BUENOS AIRES",
"lat": -35.5167,
"lon": -59.5,
},
{
"zip": 7267,
"name": "ALVAREZ DE TOLEDO",
"state": "BUENOS AIRES",
"lat": -35.6333,
"lon": -59.6167,
},
{
"zip": 7300,
"name": "LAS CORTADERAS (AZUL, PDO. AZUL)",
"state": "BUENOS AIRES",
"lat": -36.7833,
"lon": -59.85,
},
{
"zip": 7301,
"name": "ARROYO DE LOS HUESOS",
"state": "BUENOS AIRES",
"lat": -37.0333,
"lon": -59.5667,
},
{
"zip": 7303,
"name": "SANTA ROSA",
"state": "BUENOS AIRES",
"lat": -36.4,
"lon": -60.0222,
},
{
"zip": 7305,
"name": "BA�ADO DE LAS FLORES",
"state": "BUENOS AIRES",
"lat": -36.2667,
"lon": -59.7417,
},
{
"zip": 7307,
"name": "REQUENA",
"state": "BUENOS AIRES",
"lat": -36.5833,
"lon": -60.1667,
},
{
"zip": 7311,
"name": "MARTIN FIERRO (CHILLAR, PDO.AZUL)",
"state": "BUENOS AIRES",
"lat": -37.3,
"lon": -59.9833,
},
{
"zip": 7313,
"name": "DIECISEIS DE JULIO",
"state": "BUENOS AIRES",
"lat": -37.1833,
"lon": -60.1667,
},
{
"zip": 7316,
"name": "LAS NIEVES",
"state": "BUENOS AIRES",
"lat": -36.6556,
"lon": -59.7833,
},
{
"zip": 7318,
"name": "COLONIA RUSA",
"state": "BUENOS AIRES",
"lat": -36.8667,
"lon": -60.1389,
},
{
"zip": 7400,
"name": "LAS PIEDRITAS (OLAVARRIA, PDO.OLAVARRIA)",
"state": "BUENOS AIRES",
"lat": -36.9,
"lon": -60.2833,
},
{
"zip": 7401,
"name": "EMPALME QUERANDIES",
"state": "BUENOS AIRES",
"lat": -37.0722,
"lon": -60.3833,
},
{
"zip": 7403,
"name": "CERRO AGUILA",
"state": "BUENOS AIRES",
"lat": -36.9738,
"lon": -60.1786,
},
{
"zip": 7404,
"name": "FORTIN LAVALLE",
"state": "BUENOS AIRES",
"lat": -37.1208,
"lon": -60.8292,
},
{
"zip": 7406,
"name": "SANTA CLEMENTINA",
"state": "BUENOS AIRES",
"lat": -37.2889,
"lon": -61.2278,
},
{
"zip": 7407,
"name": "LIBANO",
"state": "BUENOS AIRES",
"lat": -37.5333,
"lon": -61.3,
},
{
"zip": 7408,
"name": "LA COLINA",
"state": "BUENOS AIRES",
"lat": -37.3333,
"lon": -61.5333,
},
{
"zip": 7412,
"name": "LOS PINOS (VOLUNTAD, PDO. LAPRIDA)",
"state": "BUENOS AIRES",
"lat": -37.6583,
"lon": -60.9333,
},
{
"zip": 7414,
"name": "SANTA ELENA (PDO. LAPRIDA)",
"state": "BUENOS AIRES",
"lat": -37.55,
"lon": -60.8167,
},
{
"zip": 7500,
"name": "LA HORQUETA (TRES ARROYOS, PDO. TRES ARROYOS)",
"state": "BUENOS AIRES",
"lat": -38.2,
"lon": -60.2833,
},
{
"zip": 7501,
"name": "INDIO RICO",
"state": "BUENOS AIRES",
"lat": -38.3167,
"lon": -60.8833,
},
{
"zip": 7503,
"name": "EL CRISTIANO",
"state": "BUENOS AIRES",
"lat": -38.65,
"lon": -59.7,
},
{
"zip": 7505,
"name": "ESTACION SAN FRANCISCO DE BELLOCQ",
"state": "BUENOS AIRES",
"lat": -38.7375,
"lon": -60.1417,
},
{
"zip": 7507,
"name": "MICAELA CASCALLARES",
"state": "BUENOS AIRES",
"lat": -38.4833,
"lon": -60.45,
},
{
"zip": 7509,
"name": "ORIENTE",
"state": "BUENOS AIRES",
"lat": -38.7333,
"lon": -60.6167,
},
{
"zip": 7511,
"name": "RETA",
"state": "BUENOS AIRES",
"lat": -38.8333,
"lon": -60.3667,
},
{
"zip": 7513,
"name": "ADOLFO GONZALEZ CHAVES",
"state": "BUENOS AIRES",
"lat": -38.2551,
"lon": -60.3718,
},
{
"zip": 7515,
"name": "CLAUDIO C. MOLINA",
"state": "BUENOS AIRES",
"lat": -38.1,
"lon": -60.35,
},
{
"zip": 7517,
"name": "LA SORTIJA",
"state": "BUENOS AIRES",
"lat": -38.1,
"lon": -60.6833,
},
{
"zip": 7519,
"name": "BARROW",
"state": "BUENOS AIRES",
"lat": -38.3,
"lon": -60.2333,
},
{
"zip": 7521,
"name": "LA GAVIOTA",
"state": "BUENOS AIRES",
"lat": -38.3417,
"lon": -59.7,
},
{
"zip": 7530,
"name": "TEJO (GALERA)",
"state": "BUENOS AIRES",
"lat": -38.0056,
"lon": -61.3917,
},
{
"zip": 7531,
"name": "LARTIGAU",
"state": "BUENOS AIRES",
"lat": -38.45,
"lon": -61.5667,
},
{
"zip": 7533,
"name": "QUI�IHUAL",
"state": "BUENOS AIRES",
"lat": -37.7833,
"lon": -61.6,
},
{
"zip": 7535,
"name": "PONTAUT",
"state": "BUENOS AIRES",
"lat": -37.7333,
"lon": -61.3333,
},
{
"zip": 7536,
"name": "LA RESERVA",
"state": "BUENOS AIRES",
"lat": -37.8333,
"lon": -61.2167,
},
{
"zip": 7540,
"name": "SAUCE CORTO",
"state": "BUENOS AIRES",
"lat": -37.525,
"lon": -61.8667,
},
{
"zip": 7541,
"name": "PUEBLO SAN JOSE",
"state": "BUENOS AIRES",
"lat": -37.5167,
"lon": -61.9167,
},
{
"zip": 7543,
"name": "LA PRIMAVERA (PDO.CNEL.SUAREZ)",
"state": "BUENOS AIRES",
"lat": -37.3125,
"lon": -61.9681,
},
{
"zip": 7545,
"name": "HUANGUELEN",
"state": "BUENOS AIRES",
"lat": -37.0333,
"lon": -61.95,
},
{
"zip": 7547,
"name": "PASMAN",
"state": "BUENOS AIRES",
"lat": -37.2167,
"lon": -62.1333,
},
{
"zip": 7548,
"name": "PI�EYRO (PDO. CNEL. SUAREZ)",
"state": "BUENOS AIRES",
"lat": -37.4667,
"lon": -62.1,
},
{
"zip": 7600,
"name": "BARRIO DON BOSCO",
"state": "BUENOS AIRES",
"lat": -38,
"lon": -57.55,
},
{
"zip": 7601,
"name": "LA PEREGRINA",
"state": "BUENOS AIRES",
"lat": -37.9833,
"lon": -57.7833,
},
{
"zip": 7603,
"name": "LAS LOMAS",
"state": "BUENOS AIRES",
"lat": -38.1167,
"lon": -57.85,
},
{
"zip": 7605,
"name": "HARAS CHAPADMALAL",
"state": "BUENOS AIRES",
"lat": -38.0917,
"lon": -57.9667,
},
{
"zip": 7607,
"name": "PLA Y ROGNONI",
"state": "BUENOS AIRES",
"lat": -38.2889,
"lon": -57.8944,
},
{
"zip": 7609,
"name": "COLONIA DE VACACIONES CHAPADMALAL",
"state": "BUENOS AIRES",
"lat": -37.9,
"lon": -57.5056,
},
{
"zip": 7612,
"name": "EL REFUGIO",
"state": "BUENOS AIRES",
"lat": -37.8417,
"lon": -57.6167,
},
{
"zip": 7613,
"name": "CAMPAMENTO",
"state": "BUENOS AIRES",
"lat": -37.5333,
"lon": -57.4833,
},
{
"zip": 7620,
"name": "EL VOLANTE",
"state": "BUENOS AIRES",
"lat": -37.7625,
"lon": -58.2542,
},
{
"zip": 7621,
"name": "SAN SIMON",
"state": "BUENOS AIRES",
"lat": -37.5333,
"lon": -58.3333,
},
{
"zip": 7623,
"name": "SAN AGUSTIN",
"state": "BUENOS AIRES",
"lat": -38.0167,
"lon": -58.35,
},
{
"zip": 7630,
"name": "NECOCHEA",
"state": "BUENOS AIRES",
"lat": -38.55,
"lon": -58.75,
},
{
"zip": 7631,
"name": "PUERTO QUEQUEN",
"state": "BUENOS AIRES",
"lat": -38.5667,
"lon": -58.7,
},
{
"zip": 7633,
"name": "PIERES",
"state": "BUENOS AIRES",
"lat": -38.4,
"lon": -58.6667,
},
{
"zip": 7635,
"name": "LOS CERROS",
"state": "BUENOS AIRES",
"lat": -38.1,
"lon": -58.8167,
},
{
"zip": 7637,
"name": "NICANOR OLIVERA",
"state": "BUENOS AIRES",
"lat": -38.25,
"lon": -59.1667,
},
{
"zip": 7639,
"name": "COOPER",
"state": "BUENOS AIRES",
"lat": -38.2167,
"lon": -59.3167,
},
{
"zip": 7641,
"name": "RAMON SANTAMARINA",
"state": "BUENOS AIRES",
"lat": -38.4333,
"lon": -59.3333,
},
{
"zip": 8000,
"name": "BARRIO PARQUE PATAGONIA",
"state": "BUENOS AIRES",
"lat": -38.7042,
"lon": -62.2458,
},
{
"zip": 8010,
"name": "KILOMETRO 11 (APEADERO FCGR.)",
"state": "BUENOS AIRES",
"lat": -38.7042,
"lon": -62.2458,
},
{
"zip": 8101,
"name": "GRUNBEIN",
"state": "BUENOS AIRES",
"lat": -38.75,
"lon": -62.1833,
},
{
"zip": 8103,
"name": "INGENIERO WHITE",
"state": "BUENOS AIRES",
"lat": -38.7833,
"lon": -62.2667,
},
{
"zip": 8105,
"name": "CUATREROS",
"state": "BUENOS AIRES",
"lat": -38.7,
"lon": -62.4,
},
{
"zip": 8107,
"name": "BASE AERONAVAL COMANDANTE ESPORA",
"state": "BUENOS AIRES",
"lat": -38.7819,
"lon": -62.1875,
},
{
"zip": 8109,
"name": "ALMIRANTE SOLIER",
"state": "BUENOS AIRES",
"lat": -38.8833,
"lon": -62.0667,
},
{
"zip": 8111,
"name": "ARROYO PAREJA",
"state": "BUENOS AIRES",
"lat": -38.9333,
"lon": -62.0667,
},
{
"zip": 8113,
"name": "BATERIAS",
"state": "BUENOS AIRES",
"lat": -38.5412,
"lon": -62.1373,
},
{
"zip": 8115,
"name": "BAJO HONDO",
"state": "BUENOS AIRES",
"lat": -38.7667,
"lon": -61.9,
},
{
"zip": 8117,
"name": "EL CORTA PIE",
"state": "BUENOS AIRES",
"lat": -38.3806,
"lon": -62.6611,
},
{
"zip": 8118,
"name": "COCHRANE",
"state": "BUENOS AIRES",
"lat": -38.5667,
"lon": -61.9833,
},
{
"zip": 8122,
"name": "LA VITICOLA",
"state": "BUENOS AIRES",
"lat": -38.5167,
"lon": -62.3,
},
{
"zip": 8124,
"name": "GENERAL RONDEAU",
"state": "BUENOS AIRES",
"lat": -38.2333,
"lon": -63.1333,
},
{
"zip": 8126,
"name": "ALDEA SAN ANDRES",
"state": "BUENOS AIRES",
"lat": -38.2083,
"lon": -62.875,
},
{
"zip": 8127,
"name": "ESTELA",
"state": "BUENOS AIRES",
"lat": -38.1167,
"lon": -62.9167,
},
{
"zip": 8129,
"name": "GLORIALDO",
"state": "BUENOS AIRES",
"lat": -37.9667,
"lon": -62.8833,
},
{
"zip": 8132,
"name": "MEDANOS",
"state": "BUENOS AIRES",
"lat": -38.8167,
"lon": -62.6833,
},
{
"zip": 8133,
"name": "AVESTRUZ",
"state": "BUENOS AIRES",
"lat": -37.6667,
"lon": -63.35,
},
{
"zip": 8134,
"name": "CABEZA DE BUEY",
"state": "BUENOS AIRES",
"lat": -38.8444,
"lon": -63.1222,
},
{
"zip": 8136,
"name": "LA EVA",
"state": "BUENOS AIRES",
"lat": -38.9167,
"lon": -63.35,
},
{
"zip": 8138,
"name": "LA ADELA",
"state": "BUENOS AIRES",
"lat": -38.9542,
"lon": -64.175,
},
{
"zip": 8142,
"name": "JUAN A. PRADERE",
"state": "BUENOS AIRES",
"lat": -39.5833,
"lon": -62.65,
},
{
"zip": 8144,
"name": "MONTE LA PLATA",
"state": "BUENOS AIRES",
"lat": -39.0333,
"lon": -62.5833,
},
{
"zip": 8146,
"name": "EL RINCON (MAYOR BURATOVICH, PDO. VILLARINO)",
"state": "BUENOS AIRES",
"lat": -39.25,
"lon": -62.6167,
},
{
"zip": 8148,
"name": "PEDRO LURO",
"state": "BUENOS AIRES",
"lat": -39.4833,
"lon": -62.6833,
},
{
"zip": 8150,
"name": "EL ZORRO",
"state": "BUENOS AIRES",
"lat": -38.55,
"lon": -61.0833,
},
{
"zip": 8151,
"name": "GIL",
"state": "BUENOS AIRES",
"lat": -38.7833,
"lon": -60.9,
},
{
"zip": 8153,
"name": "MONTE HERMOSO",
"state": "BUENOS AIRES",
"lat": -38.65,
"lon": -61.3,
},
{
"zip": 8154,
"name": "LA SOBERANA",
"state": "BUENOS AIRES",
"lat": -38.75,
"lon": -61.4667,
},
{
"zip": 8156,
"name": "JOSE A. GUISASOLA",
"state": "BUENOS AIRES",
"lat": -38.6667,
"lon": -61.0833,
},
{
"zip": 8158,
"name": "PARAJE LA AURORA",
"state": "BUENOS AIRES",
"lat": -38.6167,
"lon": -60.8667,
},
{
"zip": 8160,
"name": "FUERTE ARGENTINO",
"state": "BUENOS AIRES",
"lat": -38.0944,
"lon": -62.1278,
},
{
"zip": 8162,
"name": "GARCIA DEL RIO",
"state": "BUENOS AIRES",
"lat": -38.35,
"lon": -62.2,
},
{
"zip": 8164,
"name": "COLONIA SAN PEDRO",
"state": "BUENOS AIRES",
"lat": -37.9333,
"lon": -62.35,
},
{
"zip": 8166,
"name": "SALDUNGARAY",
"state": "BUENOS AIRES",
"lat": -38.2,
"lon": -61.7833,
},
{
"zip": 8168,
"name": "SIERRA DE LA VENTANA",
"state": "BUENOS AIRES",
"lat": -38.15,
"lon": -61.8,
},
{
"zip": 8170,
"name": "ABRA DE HINOJO",
"state": "BUENOS AIRES",
"lat": -37.5417,
"lon": -62.4167,
},
{
"zip": 8171,
"name": "ESPARTILLAR (PDO. SAAVEDRA)",
"state": "BUENOS AIRES",
"lat": -37.6458,
"lon": -62.4583,
},
{
"zip": 8172,
"name": "ARROYO CORTO (PDO. SAAVEDRA)",
"state": "BUENOS AIRES",
"lat": -37.6458,
"lon": -62.4583,
},
{
"zip": 8174,
"name": "LA SAUDADE",
"state": "BUENOS AIRES",
"lat": -37.75,
"lon": -62.3667,
},
{
"zip": 8175,
"name": "GOYENA",
"state": "BUENOS AIRES",
"lat": -37.75,
"lon": -62.6333,
},
{
"zip": 8180,
"name": "COLONIA DOCTOR GOBERNADOR UDAONDO",
"state": "BUENOS AIRES",
"lat": -37.55,
"lon": -62.7167,
},
{
"zip": 8181,
"name": "LAS VASCONGADAS",
"state": "BUENOS AIRES",
"lat": -37.6056,
"lon": -62.8111,
},
{
"zip": 8183,
"name": "CA�ADA MARIANO (EMBARCADERO FCDFS)",
"state": "BUENOS AIRES",
"lat": -37.7,
"lon": -63.1667,
},
{
"zip": 8185,
"name": "LA FLORIDA (ESTEBAN A. GASCON, PDO. ADOLFO ALSINA)",
"state": "BUENOS AIRES",
"lat": -37.4583,
"lon": -63.2208,
},
{
"zip": 8187,
"name": "BORDENAVE",
"state": "BUENOS AIRES",
"lat": -37.8,
"lon": -63.05,
},
{
"zip": 8504,
"name": "EL BAGUAL",
"state": "BUENOS AIRES",
"lat": -40.8,
"lon": -62.9833,
},
{
"zip": 8506,
"name": "CARDENAL CAGLIERO",
"state": "BUENOS AIRES",
"lat": -40.65,
"lon": -62.75,
},
{
"zip": 8508,
"name": "JARILLA",
"state": "BUENOS AIRES",
"lat": -40.6417,
"lon": -63.2898,
},
{
"zip": 8512,
"name": "IGARZABAL",
"state": "BUENOS AIRES",
"lat": -39.7667,
"lon": -62.6167,
},
{
"zip": 4703,
"name": "ALTO",
"state": "SAN LUIS",
"lat": -28.6275,
"lon": -65.6261,
},
{
"zip": 5216,
"name": "LA AURORA (UNION - DPTO. GOBERNADOR V. DUPUY)",
"state": "SAN LUIS",
"lat": -29.8583,
"lon": -64.6667,
},
{
"zip": 5421,
"name": "LA TRANCA",
"state": "SAN LUIS",
"lat": -32.35,
"lon": -67.2833,
},
{
"zip": 5598,
"name": "DESAGUADERO",
"state": "SAN LUIS",
"lat": -33.4167,
"lon": -67.1833,
},
{
"zip": 5700,
"name": "SANTA TERESA (SAN LUIS, DPTO. LA CAPITAL)",
"state": "SAN LUIS",
"lat": -33.3,
"lon": -66.35,
},
{
"zip": 5701,
"name": "CERCADITO",
"state": "SAN LUIS",
"lat": -33.0352,
"lon": -66.0926,
},
{
"zip": 5703,
"name": "HIPOLITO YRIGOYEN",
"state": "SAN LUIS",
"lat": -32.9167,
"lon": -66.3333,
},
{
"zip": 5705,
"name": "CAMPANARIO",
"state": "SAN LUIS",
"lat": -32.5889,
"lon": -66.1778,
},
{
"zip": 5706,
"name": "LA BAVA",
"state": "SAN LUIS",
"lat": -32.8049,
"lon": -66.2152,
},
{
"zip": 5707,
"name": "ALGARROBAL (BALDE DE PUERTAS, DPTO. AYACUCHO)",
"state": "SAN LUIS",
"lat": -32.4187,
"lon": -66.4833,
},
{
"zip": 5709,
"name": "SANTA TERESITA",
"state": "SAN LUIS",
"lat": -32.5333,
"lon": -65.9333,
},
{
"zip": 5711,
"name": "REPRESA DEL MONTE",
"state": "SAN LUIS",
"lat": -32.1667,
"lon": -65.875,
},
{
"zip": 5712,
"name": "BALDE DE LA LINEA",
"state": "SAN LUIS",
"lat": -32.2351,
"lon": -66.0825,
},
{
"zip": 5713,
"name": "LAS CHIMBAS",
"state": "SAN LUIS",
"lat": -32.1333,
"lon": -66.55,
},
{
"zip": 5714,
"name": "SANTA RITA",
"state": "SAN LUIS",
"lat": -32.2351,
"lon": -66.0825,
},
{
"zip": 5715,
"name": "SANTA LUCIA (LAS PALOMAS, DPTO.JUNIN)",
"state": "SAN LUIS",
"lat": -32.325,
"lon": -65.7042,
},
{
"zip": 5717,
"name": "EL PIMPOLLO",
"state": "SAN LUIS",
"lat": -31.8833,
"lon": -65.9333,
},
{
"zip": 5719,
"name": "ALGARROBOS GRANDES",
"state": "SAN LUIS",
"lat": -32.9333,
"lon": -66.5667,
},
{
"zip": 5721,
"name": "PUNTA DEL CERRO",
"state": "SAN LUIS",
"lat": -33.8708,
"lon": -66.4875,
},
{
"zip": 5722,
"name": "EL RIECITO",
"state": "SAN LUIS",
"lat": -33.3833,
"lon": -66.0167,
},
{
"zip": 5724,
"name": "BALDE (DPTO. LA CAPITAL)",
"state": "SAN LUIS",
"lat": -33.4111,
"lon": -66.9278,
},
{
"zip": 5730,
"name": "EL FORTIN",
"state": "SAN LUIS",
"lat": -34.7667,
"lon": -65.5167,
},
{
"zip": 5731,
"name": "EL MORRO",
"state": "SAN LUIS",
"lat": -33.2167,
"lon": -65.4833,
},
{
"zip": 5733,
"name": "VILLA REYNOLDS",
"state": "SAN LUIS",
"lat": -33.7167,
"lon": -65.3833,
},
{
"zip": 5735,
"name": "ISONDU",
"state": "SAN LUIS",
"lat": -33.2667,
"lon": -65.6167,
},
{
"zip": 5736,
"name": "SAN IGNACIO",
"state": "SAN LUIS",
"lat": -32.0833,
"lon": -66.1,
},
{
"zip": 5738,
"name": "EL CARMEN (JUSTO DARACT, DPTO. GRAL. PEDERNERA)",
"state": "SAN LUIS",
"lat": -33.875,
"lon": -65.1,
},
{
"zip": 5741,
"name": "BAJOS HONDOS",
"state": "SAN LUIS",
"lat": -33.15,
"lon": -66.35,
},
{
"zip": 5743,
"name": "NUEVA ESCOCIA",
"state": "SAN LUIS",
"lat": -33.7833,
"lon": -65.7333,
},
{
"zip": 5750,
"name": "EL PORTEZUELO",
"state": "SAN LUIS",
"lat": -33.1167,
"lon": -66.8333,
},
{
"zip": 5751,
"name": "AGUA SALADA",
"state": "SAN LUIS",
"lat": -33.2,
"lon": -65.85,
},
{
"zip": 5753,
"name": "CERRO COLORADO",
"state": "SAN LUIS",
"lat": -32.6625,
"lon": -65.7167,
},
{
"zip": 5755,
"name": "EL PEJE",
"state": "SAN LUIS",
"lat": -32.6111,
"lon": -65.9667,
},
{
"zip": 5757,
"name": "SAN LORENZO (EL ARENAL, DPTO. PRINGLES)",
"state": "SAN LUIS",
"lat": -32.7807,
"lon": -65.8,
},
{
"zip": 5759,
"name": "HUCHISSON",
"state": "SAN LUIS",
"lat": -32.7833,
"lon": -65.475,
},
{
"zip": 5763,
"name": "GENERAL URQUIZA",
"state": "SAN LUIS",
"lat": -32.9074,
"lon": -65.9665,
},
{
"zip": 5770,
"name": "EL SAUCE (CONCARAN, DPTO. CHACABUCO)",
"state": "SAN LUIS",
"lat": -32.5667,
"lon": -65.25,
},
{
"zip": 5771,
"name": "LAS AGUADAS",
"state": "SAN LUIS",
"lat": -32.3667,
"lon": -65.5,
},
{"zip": 5773, "name": "OLMO", "state": "SAN LUIS", "lat": -32.575, "lon": -65.3333},
{
"zip": 5775,
"name": "RENCA",
"state": "SAN LUIS",
"lat": -32.7667,
"lon": -65.3667,
},
{
"zip": 5777,
"name": "EL DURAZNITO",
"state": "SAN LUIS",
"lat": -32.3633,
"lon": -65.21,
},
{
"zip": 5779,
"name": "LA CHILCA (DPTO. JUNIN)",
"state": "SAN LUIS",
"lat": -32.4692,
"lon": -65.4233,
},
{
"zip": 5789,
"name": "LOS MOLLECITOS",
"state": "SAN LUIS",
"lat": -32.9074,
"lon": -65.9665,
},
{
"zip": 5831,
"name": "LA CA�ADA (SAN NICOLAS PUNILLA, DPTO. GRAL. PEDERNERA)",
"state": "SAN LUIS",
"lat": -33.3833,
"lon": -64.4917,
},
{
"zip": 5835,
"name": "LA AURORA (VILLA DEL CARMEN, DPTO. CHACABUCO)",
"state": "SAN LUIS",
"lat": -32.95,
"lon": -65.05,
},
{
"zip": 5871,
"name": "LA FINCA",
"state": "SAN LUIS",
"lat": -31.7783,
"lon": -65.3867,
},
{
"zip": 5873,
"name": "ISLA (PUNTA DE AGUA, DPTO. JUNIN)",
"state": "SAN LUIS",
"lat": -32.1458,
"lon": -65.2292,
},
{
"zip": 5881,
"name": "CERRO DE ORO",
"state": "SAN LUIS",
"lat": -32.3833,
"lon": -65,
},
{
"zip": 5883,
"name": "CHA�ARITOS",
"state": "SAN LUIS",
"lat": -32.5792,
"lon": -65.075,
},
{
"zip": 5888,
"name": "CA�ADA LA NEGRA",
"state": "SAN LUIS",
"lat": -32.1694,
"lon": -65.0278,
},
{
"zip": 6121,
"name": "LAS LAGUNAS (NUEVA GALIA, DPTO. GOBERNADOR V. DUPUY)",
"state": "SAN LUIS",
"lat": -33.7,
"lon": -63.55,
},
{
"zip": 6179,
"name": "SAN ISIDRO (BATAVIA, DPTO. GOBERNADOR V. DUPUY)",
"state": "SAN LUIS",
"lat": -34.1146,
"lon": -63.5348,
},
{
"zip": 6214,
"name": "CASIMIRO GOMEZ",
"state": "SAN LUIS",
"lat": -35.0833,
"lon": -65.1167,
},
{
"zip": 6215,
"name": "POLLADO",
"state": "SAN LUIS",
"lat": -34.947,
"lon": -65.2879,
},
{
"zip": 6216,
"name": "LAS CORTADERAS",
"state": "SAN LUIS",
"lat": -32.5,
"lon": -65,
},
{
"zip": 6269,
"name": "LA COLINA",
"state": "SAN LUIS",
"lat": -34.9224,
"lon": -64.4976,
},
{
"zip": 6277,
"name": "BUENA ESPERANZA",
"state": "SAN LUIS",
"lat": -34.75,
"lon": -65.25,
},
{
"zip": 6279,
"name": "EL AGUILA",
"state": "SAN LUIS",
"lat": -35.0667,
"lon": -66.5,
},
{
"zip": 6389,
"name": "ANCHORENA",
"state": "SAN LUIS",
"lat": -35.6833,
"lon": -65.45,
},
{
"zip": 2000,
"name": "VILLA ANGELICA",
"state": "ENTRE RIOS",
"lat": -32.9333,
"lon": -60.5833,
},
{
"zip": 2100,
"name": "CHARIGUE",
"state": "ENTRE RIOS",
"lat": -33.2677,
"lon": -60.6792,
},
{
"zip": 2381,
"name": "COLONIA N�6",
"state": "ENTRE RIOS",
"lat": -30.4782,
"lon": -61.8647,
},
{
"zip": 2820,
"name": "BARRIO VILLA MARIA",
"state": "ENTRE RIOS",
"lat": -33.0201,
"lon": -58.6436,
},
{
"zip": 2821,
"name": "ARROYO DEL CURA",
"state": "ENTRE RIOS",
"lat": -32.8967,
"lon": -58.7606,
},
{
"zip": 2823,
"name": "CEIBAS",
"state": "ENTRE RIOS",
"lat": -33.4333,
"lon": -58.75,
},
{
"zip": 2824,
"name": "FAUSTINO M. PARERA",
"state": "ENTRE RIOS",
"lat": -32.8,
"lon": -58.8833,
},
{
"zip": 2826,
"name": "ALDEA SAN ANTONIO",
"state": "ENTRE RIOS",
"lat": -32.6333,
"lon": -58.7,
},
{
"zip": 2828,
"name": "ESCRI�A",
"state": "ENTRE RIOS",
"lat": -32.5833,
"lon": -58.9,
},
{
"zip": 2840,
"name": "GUALEGUAY",
"state": "ENTRE RIOS",
"lat": -33.1444,
"lon": -59.3292,
},
{
"zip": 2841,
"name": "LAZO",
"state": "ENTRE RIOS",
"lat": -32.8667,
"lon": -59.4333,
},
{
"zip": 2843,
"name": "GALARZA",
"state": "ENTRE RIOS",
"lat": -32.7167,
"lon": -59.4,
},
{
"zip": 2845,
"name": "GOBERNADOR MANSILLA",
"state": "ENTRE RIOS",
"lat": -32.55,
"lon": -59.3667,
},
{
"zip": 2846,
"name": "PARANACITO",
"state": "ENTRE RIOS",
"lat": -33.6833,
"lon": -59.0167,
},
{
"zip": 2848,
"name": "MEDANOS",
"state": "ENTRE RIOS",
"lat": -33.4,
"lon": -59.0833,
},
{
"zip": 2852,
"name": "IRAZUSTA",
"state": "ENTRE RIOS",
"lat": -32.9333,
"lon": -58.95,
},
{
"zip": 2854,
"name": "DOS HERMANAS",
"state": "ENTRE RIOS",
"lat": -33.1667,
"lon": -58.9,
},
{
"zip": 2864,
"name": "KILOMETRO 340 (PARADA FCGU)",
"state": "ENTRE RIOS",
"lat": -33.2943,
"lon": -59.025,
},
{
"zip": 3100,
"name": "BARRIO ESCUELA HOGAR",
"state": "ENTRE RIOS",
"lat": -31.7556,
"lon": -60.5444,
},
{
"zip": 3101,
"name": "COSTA GRANDE DOLL",
"state": "ENTRE RIOS",
"lat": -32.1,
"lon": -60.4795,
},
{
"zip": 3103,
"name": "VILLA LIBERTADOR GENERAL SAN MARTIN",
"state": "ENTRE RIOS",
"lat": -32.05,
"lon": -60.45,
},
{
"zip": 3105,
"name": "DIAMANTE",
"state": "ENTRE RIOS",
"lat": -32.0644,
"lon": -60.6425,
},
{
"zip": 3107,
"name": "PRESIDENTE AVELLANEDA",
"state": "ENTRE RIOS",
"lat": -31.8083,
"lon": -60.4,
},
{
"zip": 3109,
"name": "VIALE",
"state": "ENTRE RIOS",
"lat": -31.8833,
"lon": -60.0167,
},
{
"zip": 3111,
"name": "TABOSSI",
"state": "ENTRE RIOS",
"lat": -31.8,
"lon": -59.95,
},
{
"zip": 3112,
"name": "COLONIA LOMA NEGRA",
"state": "ENTRE RIOS",
"lat": -31.8474,
"lon": -60.1912,
},
{
"zip": 3113,
"name": "CURTIEMBRE",
"state": "ENTRE RIOS",
"lat": -31.4667,
"lon": -60.1667,
},
{
"zip": 3114,
"name": "LAS DELICIAS",
"state": "ENTRE RIOS",
"lat": -31.9333,
"lon": -60.4167,
},
{
"zip": 3116,
"name": "ALDEA SAN RAFAEL",
"state": "ENTRE RIOS",
"lat": -31.95,
"lon": -60.25,
},
{
"zip": 3117,
"name": "SEGUI",
"state": "ENTRE RIOS",
"lat": -31.95,
"lon": -60.1333,
},
{
"zip": 3118,
"name": "PASO DE LA ARENA",
"state": "ENTRE RIOS",
"lat": -31.75,
"lon": -60.1667,
},
{
"zip": 3122,
"name": "MORENO",
"state": "ENTRE RIOS",
"lat": -31.5833,
"lon": -60.0667,
},
{
"zip": 3123,
"name": "COLONIA RIVADAVIA",
"state": "ENTRE RIOS",
"lat": -31.8667,
"lon": -59.9167,
},
{
"zip": 3125,
"name": "GENERAL GUEMES",
"state": "ENTRE RIOS",
"lat": -31.4,
"lon": -59.95,
},
{
"zip": 3126,
"name": "PUEBLO BRUGO",
"state": "ENTRE RIOS",
"lat": -31.3833,
"lon": -60.1,
},
{
"zip": 3127,
"name": "VILLA HERNANDARIAS",
"state": "ENTRE RIOS",
"lat": -31.2167,
"lon": -59.9833,
},
{
"zip": 3128,
"name": "COLONIA BERRO",
"state": "ENTRE RIOS",
"lat": -31.4449,
"lon": -59.9641,
},
{
"zip": 3129,
"name": "PIEDRAS BLANCAS",
"state": "ENTRE RIOS",
"lat": -31.1833,
"lon": -59.9333,
},
{
"zip": 3132,
"name": "EL PINGO",
"state": "ENTRE RIOS",
"lat": -31.5833,
"lon": -59.8833,
},
{
"zip": 3133,
"name": "SOSA",
"state": "ENTRE RIOS",
"lat": -31.7333,
"lon": -59.9167,
},
{
"zip": 3134,
"name": "HASENKAMP",
"state": "ENTRE RIOS",
"lat": -31.5167,
"lon": -59.85,
},
{
"zip": 3136,
"name": "TALITAS (APEADERO FCGU)",
"state": "ENTRE RIOS",
"lat": -31.4167,
"lon": -59.75,
},
{
"zip": 3137,
"name": "EL SOLAR",
"state": "ENTRE RIOS",
"lat": -31.5611,
"lon": -59.8167,
},
{
"zip": 3138,
"name": "ESTANCIA LA GAMA",
"state": "ENTRE RIOS",
"lat": -31.4667,
"lon": -59.6,
},
{
"zip": 3142,
"name": "BOVRIL",
"state": "ENTRE RIOS",
"lat": -31.35,
"lon": -59.4333,
},
{
"zip": 3144,
"name": "SAUCE DE LUNA",
"state": "ENTRE RIOS",
"lat": -31.2333,
"lon": -59.2167,
},
{
"zip": 3150,
"name": "NOGOYA",
"state": "ENTRE RIOS",
"lat": -32.3997,
"lon": -59.7994,
},
{
"zip": 3151,
"name": "ANTELO",
"state": "ENTRE RIOS",
"lat": -32.5333,
"lon": -60.05,
},
{
"zip": 3153,
"name": "PUENTE VICTORIA",
"state": "ENTRE RIOS",
"lat": -32.6184,
"lon": -60.1548,
},
{
"zip": 3155,
"name": "SEXTO DISTRITO",
"state": "ENTRE RIOS",
"lat": -32.7417,
"lon": -59.8417,
},
{
"zip": 3156,
"name": "TRES ESQUINAS",
"state": "ENTRE RIOS",
"lat": -32.25,
"lon": -59.95,
},
{
"zip": 3158,
"name": "LA COLINA",
"state": "ENTRE RIOS",
"lat": -32.4389,
"lon": -59.6,
},
{
"zip": 3159,
"name": "LOS PARAISOS (20 DE SETIEMBRE, DPTO. NOGOYA)",
"state": "ENTRE RIOS",
"lat": -32.4591,
"lon": -59.8567,
},
{
"zip": 3162,
"name": "CHILCAS SUD",
"state": "ENTRE RIOS",
"lat": -32.25,
"lon": -60.1667,
},
{
"zip": 3164,
"name": "RIVAS",
"state": "ENTRE RIOS",
"lat": -32.1375,
"lon": -60.1583,
},
{
"zip": 3170,
"name": "BASAVILBASO",
"state": "ENTRE RIOS",
"lat": -32.3667,
"lon": -58.8833,
},
{
"zip": 3172,
"name": "KILOMETRO 208 (APEADERO FCGU)",
"state": "ENTRE RIOS",
"lat": -32.3417,
"lon": -59.2083,
},
{
"zip": 3174,
"name": "RINCON DE LAS GUACHAS",
"state": "ENTRE RIOS",
"lat": -32.2827,
"lon": -59.1576,
},
{
"zip": 3176,
"name": "SOLA",
"state": "ENTRE RIOS",
"lat": -32.3333,
"lon": -59.3667,
},
{
"zip": 3177,
"name": "GUARDAMONTE",
"state": "ENTRE RIOS",
"lat": -32.0833,
"lon": -59.3,
},
{
"zip": 3180,
"name": "DIEGO LOPEZ",
"state": "ENTRE RIOS",
"lat": -30.95,
"lon": -58.8,
},
{"zip": 3181, "name": "CHA�AR", "state": "ENTRE RIOS", "lat": -31.2, "lon": -58.7},
{
"zip": 3183,
"name": "MI�ONES",
"state": "ENTRE RIOS",
"lat": -30.7,
"lon": -58.5833,
},
{
"zip": 3185,
"name": "JUAN B. ARRUABARRENA",
"state": "ENTRE RIOS",
"lat": -30.3333,
"lon": -58.3167,
},
{
"zip": 3187,
"name": "CHIRCALITO",
"state": "ENTRE RIOS",
"lat": -30.4389,
"lon": -58.6778,
},
{
"zip": 3188,
"name": "ALDEA SAN ISIDRO",
"state": "ENTRE RIOS",
"lat": -31.0222,
"lon": -59.0556,
},
{
"zip": 3190,
"name": "TACUARAS YACARE",
"state": "ENTRE RIOS",
"lat": -30.7167,
"lon": -59.4556,
},
{
"zip": 3191,
"name": "LAS MULITAS",
"state": "ENTRE RIOS",
"lat": -30.3667,
"lon": -58.9833,
},
{
"zip": 3192,
"name": "EL COLORADO",
"state": "ENTRE RIOS",
"lat": -30.4157,
"lon": -59.198,
},
{
"zip": 3194,
"name": "PASO TELEGRAFO",
"state": "ENTRE RIOS",
"lat": -30.4157,
"lon": -59.198,
},
{
"zip": 3200,
"name": "CONCORDIA",
"state": "ENTRE RIOS",
"lat": -31.4,
"lon": -58.0333,
},
{
"zip": 3201,
"name": "CAMBA PASO",
"state": "ENTRE RIOS",
"lat": -31.5,
"lon": -58.0278,
},
{
"zip": 3203,
"name": "BENITO LEGEREN",
"state": "ENTRE RIOS",
"lat": -31.5667,
"lon": -58.525,
},
{
"zip": 3204,
"name": "AYUI (PARADA EMBARCADERO FCGU)",
"state": "ENTRE RIOS",
"lat": -31.1833,
"lon": -57.9667,
},
{
"zip": 3206,
"name": "KILOMETRO 47 (APEADERO FCGU)",
"state": "ENTRE RIOS",
"lat": -31,
"lon": -57.9,
},
{
"zip": 3208,
"name": "SANTA ANA",
"state": "ENTRE RIOS",
"lat": -30.9,
"lon": -57.9333,
},
{
"zip": 3212,
"name": "EL REDOMON",
"state": "ENTRE RIOS",
"lat": -31.1,
"lon": -58.3,
},
{
"zip": 3214,
"name": "YUQUERI",
"state": "ENTRE RIOS",
"lat": -31.3833,
"lon": -58.1167,
},
{
"zip": 3216,
"name": "LAS MOCHAS",
"state": "ENTRE RIOS",
"lat": -31.5056,
"lon": -58.4111,
},
{
"zip": 3218,
"name": "COLONIA NUEVA ALEMANIA",
"state": "ENTRE RIOS",
"lat": -31.6,
"lon": -58.8167,
},
{
"zip": 3220,
"name": "KILOMETRO 161 (PARADA FCGU)",
"state": "ENTRE RIOS",
"lat": -30.25,
"lon": -57.65,
},
{
"zip": 3224,
"name": "LA BLANQUEADA",
"state": "ENTRE RIOS",
"lat": -30.05,
"lon": -57.8167,
},
{
"zip": 3228,
"name": "CHAJARI",
"state": "ENTRE RIOS",
"lat": -30.7667,
"lon": -57.9833,
},
{
"zip": 3229,
"name": "SAN RAMON",
"state": "ENTRE RIOS",
"lat": -30.8333,
"lon": -58.25,
},
{
"zip": 3240,
"name": "SEGUNDA SECCION LUCAS AL SUD",
"state": "ENTRE RIOS",
"lat": -31.85,
"lon": -59.0167,
},
{
"zip": 3241,
"name": "RINCON LUCAS NORTE",
"state": "ENTRE RIOS",
"lat": -31.5708,
"lon": -59.0208,
},
{
"zip": 3244,
"name": "LIBAROS",
"state": "ENTRE RIOS",
"lat": -32.2667,
"lon": -58.9167,
},
{
"zip": 3246,
"name": "COLONIA SAGASTUME",
"state": "ENTRE RIOS",
"lat": -32.05,
"lon": -58.75,
},
{
"zip": 3248,
"name": "CARAGUATA",
"state": "ENTRE RIOS",
"lat": -32.1667,
"lon": -58.8833,
},
{
"zip": 3252,
"name": "VILLA CLARA",
"state": "ENTRE RIOS",
"lat": -31.8333,
"lon": -58.8167,
},
{
"zip": 3254,
"name": "COLONIA SAN ERNESTO",
"state": "ENTRE RIOS",
"lat": -31.7333,
"lon": -58.6167,
},
{
"zip": 3260,
"name": "CONCEPCION DEL URUGUAY",
"state": "ENTRE RIOS",
"lat": -32.4833,
"lon": -58.2283,
},
{
"zip": 3261,
"name": "COLONIA ELIA",
"state": "ENTRE RIOS",
"lat": -32.6667,
"lon": -58.3167,
},
{
"zip": 3262,
"name": "CASEROS",
"state": "ENTRE RIOS",
"lat": -32.4667,
"lon": -58.4833,
},
{
"zip": 3263,
"name": "COLONIA CARMELO",
"state": "ENTRE RIOS",
"lat": -32.35,
"lon": -58.4333,
},
{
"zip": 3265,
"name": "VILLA ELISA",
"state": "ENTRE RIOS",
"lat": -32.1667,
"lon": -58.4,
},
{
"zip": 3267,
"name": "CA�ADA DE LAS OVEJAS",
"state": "ENTRE RIOS",
"lat": -32.1667,
"lon": -58.55,
},
{
"zip": 3269,
"name": "COLONIA BALLINA",
"state": "ENTRE RIOS",
"lat": -31.9389,
"lon": -58.4611,
},
{
"zip": 3272,
"name": "HERRERA",
"state": "ENTRE RIOS",
"lat": -32.4333,
"lon": -58.6333,
},
{
"zip": 3280,
"name": "COLON",
"state": "ENTRE RIOS",
"lat": -32.2167,
"lon": -58.1333,
},
{
"zip": 3281,
"name": "LIEBIG",
"state": "ENTRE RIOS",
"lat": -32.1333,
"lon": -58.2667,
},
{
"zip": 3283,
"name": "VILLA SAN JOSE",
"state": "ENTRE RIOS",
"lat": -32.2,
"lon": -58.2167,
},
{
"zip": 3285,
"name": "JUAN JORGE",
"state": "ENTRE RIOS",
"lat": -31.9389,
"lon": -58.3111,
},
{
"zip": 3287,
"name": "UBAJAY",
"state": "ENTRE RIOS",
"lat": -31.7833,
"lon": -58.3,
},
{
"zip": 3322,
"name": "PUERTO YEBEBIRI",
"state": "ENTRE RIOS",
"lat": -27.2667,
"lon": -55.5333,
},
{
"zip": 3446,
"name": "KILOMETRO 376 (APEADERO FCGU)",
"state": "ENTRE RIOS",
"lat": -28.7611,
"lon": -58.6722,
},
{
"zip": 3826,
"name": "ALDEA SAN JUAN",
"state": "ENTRE RIOS",
"lat": -32.7,
"lon": -58.7667,
},
{
"zip": 5263,
"name": "ESPERANZA DE LOS CERRILLOS",
"state": "LA RIOJA",
"lat": -29.8667,
"lon": -65.6333,
},
{
"zip": 5272,
"name": "COMANDANTE LEAL",
"state": "LA RIOJA",
"lat": -30.8833,
"lon": -65.7833,
},
{
"zip": 5274,
"name": "LA ISLA",
"state": "LA RIOJA",
"lat": -31.3667,
"lon": -65.9667,
},
{
"zip": 5275,
"name": "LOS ALANICES",
"state": "LA RIOJA",
"lat": -30.7667,
"lon": -65.9833,
},
{
"zip": 5276,
"name": "EL BORDO",
"state": "LA RIOJA",
"lat": -30.6,
"lon": -65.9056,
},
{
"zip": 5300,
"name": "LA ESPERANZA",
"state": "LA RIOJA",
"lat": -29.4667,
"lon": -66.75,
},
{
"zip": 5301,
"name": "SAN PEDRO",
"state": "LA RIOJA",
"lat": -29.5333,
"lon": -66.3333,
},
{
"zip": 5303,
"name": "ANJULLON",
"state": "LA RIOJA",
"lat": -28.7167,
"lon": -66.9333,
},
{
"zip": 5304,
"name": "LA TALA",
"state": "LA RIOJA",
"lat": -29.7167,
"lon": -66.85,
},
{
"zip": 5310,
"name": "LOS BALDES",
"state": "LA RIOJA",
"lat": -28.55,
"lon": -66.8167,
},
{
"zip": 5311,
"name": "MACHIGASTA",
"state": "LA RIOJA",
"lat": -28.55,
"lon": -66.8,
},
{
"zip": 5313,
"name": "ESTACION MAZAN",
"state": "LA RIOJA",
"lat": -28.6667,
"lon": -66.5667,
},
{
"zip": 5316,
"name": "BUENA VISTA (CAMPANA, DPTO. FAMATINA)",
"state": "LA RIOJA",
"lat": -28.3462,
"lon": -66.3654,
},
{
"zip": 5325,
"name": "ALPASINCHE",
"state": "LA RIOJA",
"lat": -28.3167,
"lon": -67.05,
},
{
"zip": 5327,
"name": "CHAUPIHUASI",
"state": "LA RIOJA",
"lat": -28.3667,
"lon": -67.05,
},
{
"zip": 5329,
"name": "SCHAQUI",
"state": "LA RIOJA",
"lat": -28.4667,
"lon": -67.1333,
},
{"zip": 5350, "name": "EL MOLLE", "state": "LA RIOJA", "lat": -29.3, "lon": -68.2},
{
"zip": 5351,
"name": "LOS PALACIOS",
"state": "LA RIOJA",
"lat": -29.3667,
"lon": -68.1833,
},
{
"zip": 5353,
"name": "LOS NACIMIENTOS",
"state": "LA RIOJA",
"lat": -29.5236,
"lon": -68.5578,
},
{
"zip": 5355,
"name": "VILLA CASTELLI",
"state": "LA RIOJA",
"lat": -29,
"lon": -68.1833,
},
{
"zip": 5357,
"name": "EL HORNO",
"state": "LA RIOJA",
"lat": -28.75,
"lon": -68.1667,
},
{
"zip": 5359,
"name": "CASA PINTADA",
"state": "LA RIOJA",
"lat": -28.6333,
"lon": -68.4,
},
{
"zip": 5360,
"name": "CHILECITO",
"state": "LA RIOJA",
"lat": -29.1667,
"lon": -67.5,
},
{
"zip": 5361,
"name": "ALTO CARRIZAL",
"state": "LA RIOJA",
"lat": -28.905,
"lon": -67.5267,
},
{
"zip": 5363,
"name": "ANGUINAN",
"state": "LA RIOJA",
"lat": -29.2,
"lon": -67.4667,
},
{
"zip": 5365,
"name": "FAMATINA",
"state": "LA RIOJA",
"lat": -28.9167,
"lon": -67.5167,
},
{"zip": 5367, "name": "SA�OGASTA", "state": "LA RIOJA", "lat": -29.3, "lon": -67.6},
{
"zip": 5369,
"name": "PAGANZILLO",
"state": "LA RIOJA",
"lat": -29.0062,
"lon": -67.5375,
},
{
"zip": 5370,
"name": "SANTA CRUZ (CHEPES, DPTO. ROSARIO VERA PE�ALOZA)",
"state": "LA RIOJA",
"lat": -29.3917,
"lon": -67.5083,
},
{"zip": 5372, "name": "NONOGASTA", "state": "LA RIOJA", "lat": -29.3, "lon": -67.5},
{
"zip": 5374,
"name": "CATINZACO (EMBARCADERO FCGB)",
"state": "LA RIOJA",
"lat": -29.4833,
"lon": -67.5167,
},
{
"zip": 5380,
"name": "GOBERNADOR GORDILLO",
"state": "LA RIOJA",
"lat": -30.35,
"lon": -66.3167,
},
{
"zip": 5381,
"name": "ILIAR",
"state": "LA RIOJA",
"lat": -30.5556,
"lon": -66.2222,
},
{
"zip": 5383,
"name": "EL ALTO",
"state": "LA RIOJA",
"lat": -30.3667,
"lon": -66.5833,
},
{
"zip": 5384,
"name": "PUNTA DE LOS LLANOS",
"state": "LA RIOJA",
"lat": -30.15,
"lon": -66.55,
},
{
"zip": 5385,
"name": "MOLLACO",
"state": "LA RIOJA",
"lat": -30.9667,
"lon": -66.4333,
},
{
"zip": 5386,
"name": "LOS COLORADOS",
"state": "LA RIOJA",
"lat": -29.9,
"lon": -67.15,
},
{
"zip": 5470,
"name": "AGUA DE PIEDRA",
"state": "LA RIOJA",
"lat": -31.2667,
"lon": -66.6,
},
{
"zip": 5471,
"name": "LA CALERA",
"state": "LA RIOJA",
"lat": -31.2167,
"lon": -66.55,
},
{
"zip": 5472,
"name": "LA AMERICA",
"state": "LA RIOJA",
"lat": -31.3841,
"lon": -66.3761,
},
{
"zip": 5473,
"name": "BALDE DEL QUEBRACHO",
"state": "LA RIOJA",
"lat": -31.6405,
"lon": -66.1024,
},
{
"zip": 5474,
"name": "DESIDERIO TELLO",
"state": "LA RIOJA",
"lat": -31.2167,
"lon": -66.3167,
},
{
"zip": 5475,
"name": "AGUA DE LA PIEDRA",
"state": "LA RIOJA",
"lat": -30.6444,
"lon": -66.1167,
},
{
"zip": 2340,
"name": "NUEVA CERES",
"state": "SANTIAGO DEL ESTERO",
"lat": -29.8833,
"lon": -61.95,
},
{
"zip": 2341,
"name": "COLONIA GERALDINA",
"state": "SANTIAGO DEL ESTERO",
"lat": -29.9167,
"lon": -62.1722,
},
{
"zip": 2354,
"name": "FORTIN LA VIUDA",
"state": "SANTIAGO DEL ESTERO",
"lat": -29.4667,
"lon": -62.3867,
},
{
"zip": 2356,
"name": "LAS ALMAS",
"state": "SANTIAGO DEL ESTERO",
"lat": -29.2933,
"lon": -62.8533,
},
{
"zip": 2357,
"name": "LAS ABRAS-MITRE",
"state": "SANTIAGO DEL ESTERO",
"lat": -29.4,
"lon": -62.7833,
},
{
"zip": 3054,
"name": "LA PANCHITA",
"state": "SANTIAGO DEL ESTERO",
"lat": -30.1083,
"lon": -60.4167,
},
{
"zip": 3061,
"name": "CUATRO BOCAS",
"state": "SANTIAGO DEL ESTERO",
"lat": -28.8667,
"lon": -61.85,
},
{
"zip": 3062,
"name": "LA AIDA",
"state": "SANTIAGO DEL ESTERO",
"lat": -29.2833,
"lon": -62.0625,
},
{
"zip": 3064,
"name": "LA EULALIA",
"state": "SANTIAGO DEL ESTERO",
"lat": -28.9583,
"lon": -62.2917,
},
{
"zip": 3165,
"name": "TOMAS YOUNG",
"state": "SANTIAGO DEL ESTERO",
"lat": -32.16,
"lon": -60.16,
},
{
"zip": 3238,
"name": "TABLEADO",
"state": "SANTIAGO DEL ESTERO",
"lat": -29.6452,
"lon": -57.131,
},
{
"zip": 3712,
"name": "CORONEL MANUEL LEONCIO RICO",
"state": "SANTIAGO DEL ESTERO",
"lat": -26.4,
"lon": -61.8333,
},
{
"zip": 3714,
"name": "LA VIRTUD",
"state": "SANTIAGO DEL ESTERO",
"lat": -26.1333,
"lon": -63.3833,
},
{
"zip": 3731,
"name": "SACHAYOJ",
"state": "SANTIAGO DEL ESTERO",
"lat": -26.6833,
"lon": -61.8333,
},
{
"zip": 3734,
"name": "EL ARBOLITO (GANCEDO, DPTO.MORENO)",
"state": "SANTIAGO DEL ESTERO",
"lat": -27.4667,
"lon": -61.6875,
},
{
"zip": 3736,
"name": "CAMPO EL ROSARIO",
"state": "SANTIAGO DEL ESTERO",
"lat": -27.6944,
"lon": -61.9778,
},
{
"zip": 3740,
"name": "LAGUNA BRAVA (EMBARCADERO FCGB)",
"state": "SANTIAGO DEL ESTERO",
"lat": -27.6333,
"lon": -62.4167,
},
{
"zip": 3741,
"name": "KILOMETRO 606 (APEADERO FCGB)",
"state": "SANTIAGO DEL ESTERO",
"lat": -27.3681,
"lon": -62.4792,
},
{
"zip": 3743,
"name": "TINTINA",
"state": "SANTIAGO DEL ESTERO",
"lat": -27.0333,
"lon": -62.7167,
},
{
"zip": 3745,
"name": "PUESTO DEL MEDIO (SANTOS LUGARES, DPTO. ALBERDI)",
"state": "SANTIAGO DEL ESTERO",
"lat": -26.92,
"lon": -63.0633,
},
{
"zip": 3747,
"name": "MONTE VERDE (CAMPO GALLO, DPTO. ALBERDI)",
"state": "SANTIAGO DEL ESTERO",
"lat": -26.6083,
"lon": -62.85,
},
{
"zip": 3749,
"name": "CUQUERO",
"state": "SANTIAGO DEL ESTERO",
"lat": -26.65,
"lon": -63.2,
},
{
"zip": 3752,
"name": "PUNA",
"state": "SANTIAGO DEL ESTERO",
"lat": -27.8,
"lon": -62.5167,
},
{
"zip": 3760,
"name": "A�ATUYA",
"state": "SANTIAGO DEL ESTERO",
"lat": -28.4667,
"lon": -62.8333,
},
{
"zip": 3761,
"name": "LOS LINARES",
"state": "SANTIAGO DEL ESTERO",
"lat": -28.3667,
"lon": -62.55,
},
{
"zip": 3763,
"name": "LOS JURIES",
"state": "SANTIAGO DEL ESTERO",
"lat": -28.4667,
"lon": -62.1,
},
{
"zip": 3765,
"name": "EL CUADRADO",
"state": "SANTIAGO DEL ESTERO",
"lat": -28.2167,
"lon": -61.9667,
},
{
"zip": 3766,
"name": "AGUA DULCE",
"state": "SANTIAGO DEL ESTERO",
"lat": -28.6917,
"lon": -62.525,
},
{
"zip": 4176,
"name": "EL ARBOLITO (ARBOLES GRANDES, DPTO.RIO HONDO)",
"state": "SANTIAGO DEL ESTERO",
"lat": -27.5944,
"lon": -65.1917,
},
{
"zip": 4178,
"name": "EL PACARA",
"state": "SANTIAGO DEL ESTERO",
"lat": -27.02,
"lon": -65.0167,
},
{
"zip": 4184,
"name": "ANIMAS",
"state": "SANTIAGO DEL ESTERO",
"lat": -27.3864,
"lon": -64.6864,
},
{
"zip": 4186,
"name": "EL PALOMAR (DPTO. JIMENEZ)",
"state": "SANTIAGO DEL ESTERO",
"lat": -26.8333,
"lon": -64.8333,
},
{
"zip": 4187,
"name": "EL BOBADAL",
"state": "SANTIAGO DEL ESTERO",
"lat": -26.7111,
"lon": -64.3944,
},
{
"zip": 4189,
"name": "SANTA MARIA DE LAS CHACRAS",
"state": "SANTIAGO DEL ESTERO",
"lat": -27.3972,
"lon": -64.1556,
},
{
"zip": 4195,
"name": "QUEBRACHO COTO",
"state": "SANTIAGO DEL ESTERO",
"lat": -26.2833,
"lon": -64.4667,
},
{
"zip": 4197,
"name": "BAGUAL MUERTO",
"state": "SANTIAGO DEL ESTERO",
"lat": -26.11,
"lon": -64.13,
},
{
"zip": 4200,
"name": "HUAICO HONDO",
"state": "SANTIAGO DEL ESTERO",
"lat": -27.75,
"lon": -64.5833,
},
{
"zip": 4201,
"name": "LOS QUIROGA",
"state": "SANTIAGO DEL ESTERO",
"lat": -27.6667,
"lon": -64.3667,
},
{
"zip": 4203,
"name": "CABRA",
"state": "SANTIAGO DEL ESTERO",
"lat": -28.1606,
"lon": -64.7318,
},
{
"zip": 4205,
"name": "LAPRIDA",
"state": "SANTIAGO DEL ESTERO",
"lat": -28.3833,
"lon": -64.5333,
},
{
"zip": 4206,
"name": "CAMPO GRANDE (SIMBOL, DPTO.CAPITAL)",
"state": "SANTIAGO DEL ESTERO",
"lat": -28.1188,
"lon": -64.2917,
},
{
"zip": 4208,
"name": "LORETO",
"state": "SANTIAGO DEL ESTERO",
"lat": -28.3,
"lon": -64.2,
},
{
"zip": 4212,
"name": "SAN VICENTE-LORETO",
"state": "SANTIAGO DEL ESTERO",
"lat": -28.7667,
"lon": -64.0833,
},
{
"zip": 4220,
"name": "LOS CASTILLOS",
"state": "SANTIAGO DEL ESTERO",
"lat": -27.5333,
"lon": -64.8833,
},
{
"zip": 4221,
"name": "YUTU YACO",
"state": "SANTIAGO DEL ESTERO",
"lat": -27.4417,
"lon": -64.9583,
},
{
"zip": 4223,
"name": "PALMA REDONDA",
"state": "SANTIAGO DEL ESTERO",
"lat": -27.3889,
"lon": -64.8167,
},
{
"zip": 4225,
"name": "ABRA DE LA CRUZ",
"state": "SANTIAGO DEL ESTERO",
"lat": -27.3833,
"lon": -64.64,
},
{
"zip": 4230,
"name": "CHA�AR LAGUNA",
"state": "SANTIAGO DEL ESTERO",
"lat": -28.8667,
"lon": -64.9542,
},
{
"zip": 4231,
"name": "25 DE MAYO (POZANCONES, DPTO.CHOYA)",
"state": "SANTIAGO DEL ESTERO",
"lat": -28.6333,
"lon": -65.25,
},
{
"zip": 4233,
"name": "RODEITO",
"state": "SANTIAGO DEL ESTERO",
"lat": -28.2958,
"lon": -64.8583,
},
{
"zip": 4234,
"name": "EL JUNCAL",
"state": "SANTIAGO DEL ESTERO",
"lat": -28.3571,
"lon": -65.1238,
},
{
"zip": 4237,
"name": "ZORRO HUARCUNA",
"state": "SANTIAGO DEL ESTERO",
"lat": -28.2667,
"lon": -64.9417,
},
{
"zip": 4238,
"name": "PROVIDENCIA",
"state": "SANTIAGO DEL ESTERO",
"lat": -28.65,
"lon": -64.55,
},
{
"zip": 4239,
"name": "POZANCON",
"state": "SANTIAGO DEL ESTERO",
"lat": -27.9667,
"lon": -64.9333,
},
{
"zip": 4296,
"name": "CARDOZOS",
"state": "SANTIAGO DEL ESTERO",
"lat": -27.9333,
"lon": -64.1667,
},
{
"zip": 4298,
"name": "ALBARDON CHU�A",
"state": "SANTIAGO DEL ESTERO",
"lat": -27.9333,
"lon": -64.1667,
},
{
"zip": 4300,
"name": "BARRIO ESTE",
"state": "SANTIAGO DEL ESTERO",
"lat": -27.7222,
"lon": -64.1944,
},
{
"zip": 4301,
"name": "SEPULTURA",
"state": "SANTIAGO DEL ESTERO",
"lat": -27.75,
"lon": -63.95,
},
{
"zip": 4302,
"name": "ALTO POZO",
"state": "SANTIAGO DEL ESTERO",
"lat": -27.3667,
"lon": -64.4667,
},
{
"zip": 4303,
"name": "ANCHORIGA",
"state": "SANTIAGO DEL ESTERO",
"lat": -27.1846,
"lon": -64.0675,
},
{
"zip": 4304,
"name": "ALGARROBALES",
"state": "SANTIAGO DEL ESTERO",
"lat": -25.7667,
"lon": -64.0167,
},
{
"zip": 4306,
"name": "CASILLA DEL MEDIO",
"state": "SANTIAGO DEL ESTERO",
"lat": -27.1967,
"lon": -64.7033,
},
{
"zip": 4308,
"name": "CACHICO",
"state": "SANTIAGO DEL ESTERO",
"lat": -27.7889,
"lon": -64.0278,
},
{
"zip": 4312,
"name": "MORELLO",
"state": "SANTIAGO DEL ESTERO",
"lat": -27.8833,
"lon": -63.9667,
},
{
"zip": 4313,
"name": "CHARQUINA",
"state": "SANTIAGO DEL ESTERO",
"lat": -28.425,
"lon": -63.7542,
},
{
"zip": 4314,
"name": "TORO POZO (GARZA, DPTO.SARMIENTO)",
"state": "SANTIAGO DEL ESTERO",
"lat": -28.4786,
"lon": -63.775,
},
{
"zip": 4315,
"name": "COLLERA HURCUNA",
"state": "SANTIAGO DEL ESTERO",
"lat": -28.4611,
"lon": -63.9056,
},
{
"zip": 4317,
"name": "SOCONCHO",
"state": "SANTIAGO DEL ESTERO",
"lat": -28.5333,
"lon": -63.7583,
},
{
"zip": 4318,
"name": "VACA HUMAN",
"state": "SANTIAGO DEL ESTERO",
"lat": -28.4786,
"lon": -63.775,
},
{
"zip": 4319,
"name": "TOLOZAS",
"state": "SANTIAGO DEL ESTERO",
"lat": -28.6667,
"lon": -63.6583,
},
{
"zip": 4321,
"name": "PASO DEL SALADILLO",
"state": "SANTIAGO DEL ESTERO",
"lat": -28.9958,
"lon": -63.35,
},
{
"zip": 4322,
"name": "SAN VICENTE (FERNANDEZ, DPTO.RBLES)",
"state": "SANTIAGO DEL ESTERO",
"lat": -27.8222,
"lon": -64.1222,
},
{
"zip": 4324,
"name": "GARZA",
"state": "SANTIAGO DEL ESTERO",
"lat": -28.15,
"lon": -63.5333,
},
{
"zip": 4326,
"name": "SAN ROQUE (LUGONES-DPTO.AVELLANEDA)",
"state": "SANTIAGO DEL ESTERO",
"lat": -28.275,
"lon": -63.5042,
},
{
"zip": 4328,
"name": "SAN JOSE-AVELLANEDA",
"state": "SANTIAGO DEL ESTERO",
"lat": -28.55,
"lon": -63.2167,
},
{
"zip": 4332,
"name": "LIBANESA",
"state": "SANTIAGO DEL ESTERO",
"lat": -28.65,
"lon": -63.0167,
},
{
"zip": 4334,
"name": "ORO PAMPA",
"state": "SANTIAGO DEL ESTERO",
"lat": -28.75,
"lon": -62.875,
},
{
"zip": 4336,
"name": "LA AURORA",
"state": "SANTIAGO DEL ESTERO",
"lat": -27.5,
"lon": -64.25,
},
{
"zip": 4338,
"name": "LAS CHACRAS (CLODOMIRA-DPTO.BANDA)",
"state": "SANTIAGO DEL ESTERO",
"lat": -27.5722,
"lon": -64.1056,
},
{
"zip": 4339,
"name": "SANTA ROSA (SIMBOLAR-DPTO.BANDA)",
"state": "SANTIAGO DEL ESTERO",
"lat": -27.97,
"lon": -63.71,
},
{
"zip": 4343,
"name": "SANTO DOMINGO (LUGONES, DPTO.AVELLANEDA)",
"state": "SANTIAGO DEL ESTERO",
"lat": -27.7196,
"lon": -63.7164,
},
{
"zip": 4349,
"name": "MONTE RICO (DPTO. ALBERDI)",
"state": "SANTIAGO DEL ESTERO",
"lat": -27.7196,
"lon": -63.7164,
},
{
"zip": 4350,
"name": "CRUZ LOMA",
"state": "SANTIAGO DEL ESTERO",
"lat": -27.8033,
"lon": -63.43,
},
{
"zip": 4351,
"name": "KISKA LORO",
"state": "SANTIAGO DEL ESTERO",
"lat": -27.4528,
"lon": -62.8417,
},
{
"zip": 4352,
"name": "ALZA NUEVA",
"state": "SANTIAGO DEL ESTERO",
"lat": -27.5853,
"lon": -63.3627,
},
{
"zip": 4353,
"name": "VILLA FIGUEROA",
"state": "SANTIAGO DEL ESTERO",
"lat": -27.7167,
"lon": -63.5333,
},
{
"zip": 4354,
"name": "CAVADO",
"state": "SANTIAGO DEL ESTERO",
"lat": -27.5183,
"lon": -63.7017,
},
{
"zip": 4355,
"name": "TIUN PUNCO",
"state": "SANTIAGO DEL ESTERO",
"lat": -28.05,
"lon": -63.3167,
},
{
"zip": 4356,
"name": "CASA ALTA",
"state": "SANTIAGO DEL ESTERO",
"lat": -28.1792,
"lon": -63.1625,
},
{
"zip": 4361,
"name": "EL PALOMAR (VILLA BRANA, DPTO. MORENO)",
"state": "SANTIAGO DEL ESTERO",
"lat": -27.7196,
"lon": -63.7164,
},
{
"zip": 4436,
"name": "ABRA CORO",
"state": "SANTIAGO DEL ESTERO",
"lat": -24.8492,
"lon": -64.8348,
},
{
"zip": 4514,
"name": "LA NORIA (ESTACION ATAMISQUI, DPTO. ATAMISQUI)",
"state": "SANTIAGO DEL ESTERO",
"lat": -23.7833,
"lon": -64.7833,
},
{
"zip": 4613,
"name": "SUNCHO POZO (BREA POZO, DPTO.SAN MARTIN)",
"state": "SANTIAGO DEL ESTERO",
"lat": -23.9538,
"lon": -65.3744,
},
{
"zip": 4643,
"name": "QUERA",
"state": "SANTIAGO DEL ESTERO",
"lat": -22.7267,
"lon": -66.4083,
},
{
"zip": 5150,
"name": "POZO CEBADO (OJO DE AGUA, DPTO. OJO DE AGUA)",
"state": "SANTIAGO DEL ESTERO",
"lat": -31.3603,
"lon": -64.6885,
},
{
"zip": 5206,
"name": "LA HIGUERA (ARRAGA, DPTO.SILIPICA)",
"state": "SANTIAGO DEL ESTERO",
"lat": -30.3402,
"lon": -64.1561,
},
{
"zip": 5249,
"name": "EL BARRIAL (CANDELARIA, DPTO. OJO DE AGUA)",
"state": "SANTIAGO DEL ESTERO",
"lat": -29.8333,
"lon": -63.0833,
},
{
"zip": 5250,
"name": "LA PRIMAVERA",
"state": "SANTIAGO DEL ESTERO",
"lat": -29.1833,
"lon": -63.8833,
},
{
"zip": 5251,
"name": "AMBARGASTA",
"state": "SANTIAGO DEL ESTERO",
"lat": -29.05,
"lon": -63.95,
},
{
"zip": 5253,
"name": "SUMAMPA",
"state": "SANTIAGO DEL ESTERO",
"lat": -29.3667,
"lon": -63.4667,
},
{
"zip": 5255,
"name": "AMOLADERAS",
"state": "SANTIAGO DEL ESTERO",
"lat": -29.5396,
"lon": -63.3292,
},
{
"zip": 5257,
"name": "RAMA PASO",
"state": "SANTIAGO DEL ESTERO",
"lat": -29.2119,
"lon": -63.3762,
},
{
"zip": 5258,
"name": "SAN FRANCISCO (PIEDRA BLANCA, DPTO.OJO DE AGUA)",
"state": "SANTIAGO DEL ESTERO",
"lat": -29.1333,
"lon": -63.9667,
},
{
"zip": 5260,
"name": "EL CHA�AR (RECREO,DPTO. CHOYA)",
"state": "SANTIAGO DEL ESTERO",
"lat": -29.225,
"lon": -65.1333,
},
{
"zip": 5266,
"name": "PARANA (QUIROS, DPTO. CHOYA)",
"state": "SANTIAGO DEL ESTERO",
"lat": -28.7833,
"lon": -65.1167,
},
{"zip": 3500, "name": "TIGRE", "state": "CHACO", "lat": -27.45, "lon": -58.9833},
{
"zip": 3501,
"name": "EL PALMAR (RESISTENCIA, SUC. N�1 PDO. SAN FERNANDO)",
"state": "CHACO",
"lat": -27.1357,
"lon": -59.1119,
},
{
"zip": 3503,
"name": "VILLA FORESTACION",
"state": "CHACO",
"lat": -27.5,
"lon": -58.9333,
},
{
"zip": 3505,
"name": "COLONIA BENITEZ",
"state": "CHACO",
"lat": -27.3333,
"lon": -58.9333,
},
{
"zip": 3506,
"name": "LOS ALGARROBOS",
"state": "CHACO",
"lat": -27.7,
"lon": -59.1167,
},
{
"zip": 3507,
"name": "PAMPA ALMIRON",
"state": "CHACO",
"lat": -26.7,
"lon": -59.1333,
},
{
"zip": 3508,
"name": "KILOMETRO 39 (EMBARACADERO FCGB)",
"state": "CHACO",
"lat": -27.1357,
"lon": -59.1119,
},
{
"zip": 3509,
"name": "CAMPO WINTER",
"state": "CHACO",
"lat": -26.4722,
"lon": -59.3556,
},
{
"zip": 3511,
"name": "PRESIDENCIA ROCA",
"state": "CHACO",
"lat": -26.1333,
"lon": -59.6,
},
{
"zip": 3513,
"name": "GENERAL OBLIGADO",
"state": "CHACO",
"lat": -27.4167,
"lon": -59.4333,
},
{"zip": 3514, "name": "CACUI", "state": "CHACO", "lat": -27.2476, "lon": -59.2619},
{
"zip": 3515,
"name": "COLONIA ELISA",
"state": "CHACO",
"lat": -26.9333,
"lon": -59.5333,
},
{"zip": 3516, "name": "EL BA�ADO", "state": "CHACO", "lat": -27.95, "lon": -59.275},
{
"zip": 3518,
"name": "CANCHA LARGA",
"state": "CHACO",
"lat": -27.0083,
"lon": -58.7583,
},
{
"zip": 3522,
"name": "GENERAL VEDIA",
"state": "CHACO",
"lat": -26.9333,
"lon": -58.6667,
},
{"zip": 3524, "name": "SOLALINDE", "state": "CHACO", "lat": -26.9333, "lon": -58.5},
{"zip": 3526, "name": "VELAZ", "state": "CHACO", "lat": -26.7, "lon": -58.6667},
{
"zip": 3530,
"name": "QUITILIPI",
"state": "CHACO",
"lat": -26.8667,
"lon": -60.2167,
},
{"zip": 3531, "name": "NAPALPI", "state": "CHACO", "lat": -26.9, "lon": -60.1333},
{
"zip": 3532,
"name": "CURANDU",
"state": "CHACO",
"lat": -27.0667,
"lon": -59.6333,
},
{
"zip": 3534,
"name": "LOTE 3 (ESCUELA 144) (MACHAGAI, PDO. 25 DE MAYO)",
"state": "CHACO",
"lat": -26.7917,
"lon": -60.0167,
},
{
"zip": 3536,
"name": "CORONEL BRANDSEN",
"state": "CHACO",
"lat": -26.9389,
"lon": -60.2889,
},
{
"zip": 3540,
"name": "VILLA ANGELA",
"state": "CHACO",
"lat": -27.5833,
"lon": -60.7167,
},
{
"zip": 3541,
"name": "SANTA SYLVINA",
"state": "CHACO",
"lat": -27.8167,
"lon": -61.15,
},
{"zip": 3543, "name": "LOTE 12", "state": "CHACO", "lat": -27.525, "lon": -60.2708},
{
"zip": 3545,
"name": "VILLA BERTHET",
"state": "CHACO",
"lat": -27.2833,
"lon": -60.4167,
},
{
"zip": 3620,
"name": "KILOMETRO 184",
"state": "CHACO",
"lat": -25.1542,
"lon": -59.3875,
},
{
"zip": 3700,
"name": "PAMPA GALPON",
"state": "CHACO",
"lat": -26.7833,
"lon": -60.45,
},
{
"zip": 3701,
"name": "COLONIA BERNARDINO RIVADAVIA",
"state": "CHACO",
"lat": -27.0667,
"lon": -60.5967,
},
{
"zip": 3703,
"name": "PAMPA FLORIDA",
"state": "CHACO",
"lat": -26.55,
"lon": -60.4,
},
{
"zip": 3705,
"name": "COLONIA JUAN JOSE CASTELLI",
"state": "CHACO",
"lat": -25.95,
"lon": -60.6167,
},
{
"zip": 3706,
"name": "COLONIA AGRICOLA PAMPA DE NAPENAY",
"state": "CHACO",
"lat": -26.65,
"lon": -60.6389,
},
{
"zip": 3708,
"name": "PAMPA HERMOSA",
"state": "CHACO",
"lat": -26.5583,
"lon": -61.0583,
},
{
"zip": 3716,
"name": "FORTIN LAS CHU�AS",
"state": "CHACO",
"lat": -26.8,
"lon": -60.8333,
},
{"zip": 3718, "name": "CORZUELA", "state": "CHACO", "lat": -26.95, "lon": -60.9667},
{
"zip": 3722,
"name": "POZO DEL INDIO",
"state": "CHACO",
"lat": -27.0167,
"lon": -61.0167,
},
{"zip": 3730, "name": "CABRAL", "state": "CHACO", "lat": -27.0722, "lon": -61.2667},
{
"zip": 3732,
"name": "COLONIA BRAVO",
"state": "CHACO",
"lat": -27.35,
"lon": -61.1833,
},
{
"zip": 3733,
"name": "VENADOS GRANDES",
"state": "CHACO",
"lat": -27.8167,
"lon": -61.3833,
},
{
"zip": 5400,
"name": "PRESBITERO FRANCISCO PEREZ HERNANDEZ",
"state": "SAN JUAN",
"lat": -31.4374,
"lon": -68.5612,
},
{"zip": 5401, "name": "ZONDA", "state": "SAN JUAN", "lat": -31.55, "lon": -68.7333},
{
"zip": 5403,
"name": "CASTA�O NUEVO",
"state": "SAN JUAN",
"lat": -31.0333,
"lon": -69.55,
},
{
"zip": 5405,
"name": "TONTAL",
"state": "SAN JUAN",
"lat": -31.6333,
"lon": -69.4667,
},
{
"zip": 5407,
"name": "VILLA OBRERA",
"state": "SAN JUAN",
"lat": -31.525,
"lon": -68.6167,
},
{
"zip": 5409,
"name": "NIQUIVIL",
"state": "SAN JUAN",
"lat": -30.4092,
"lon": -68.6889,
},
{
"zip": 5411,
"name": "LA LEGUA",
"state": "SAN JUAN",
"lat": -31.5333,
"lon": -68.4833,
},
{
"zip": 5413,
"name": "CHIMBAS DEL NORTE",
"state": "SAN JUAN",
"lat": -31.4833,
"lon": -68.5333,
},
{
"zip": 5415,
"name": "ALAMITO",
"state": "SAN JUAN",
"lat": -31.4492,
"lon": -68.3812,
},
{
"zip": 5417,
"name": "LA MAJADITA (DPTO. 9 DE JULIO)",
"state": "SAN JUAN",
"lat": -31.65,
"lon": -68.4,
},
{
"zip": 5419,
"name": "LOS PUESTOS",
"state": "SAN JUAN",
"lat": -31.4186,
"lon": -68.5128,
},
{
"zip": 5423,
"name": "VILLA SANTA ANITA",
"state": "SAN JUAN",
"lat": -31.8472,
"lon": -68.2028,
},
{
"zip": 5424,
"name": "VILLA LARGA",
"state": "SAN JUAN",
"lat": -31.8472,
"lon": -68.2028,
},
{
"zip": 5425,
"name": "SEGUNDO CUARTEL",
"state": "SAN JUAN",
"lat": -31.5667,
"lon": -68.5333,
},
{
"zip": 5427,
"name": "VILLA ABERASTAIN",
"state": "SAN JUAN",
"lat": -31.65,
"lon": -68.5833,
},
{
"zip": 5429,
"name": "RINCONADA",
"state": "SAN JUAN",
"lat": -31.7167,
"lon": -68.5833,
},
{
"zip": 5431,
"name": "PEDERNAL",
"state": "SAN JUAN",
"lat": -31.9833,
"lon": -68.7333,
},
{
"zip": 5433,
"name": "ESTACION LA RINCONADA",
"state": "SAN JUAN",
"lat": -31.9026,
"lon": -68.4343,
},
{
"zip": 5435,
"name": "LAS LAGUNAS",
"state": "SAN JUAN",
"lat": -32.0212,
"lon": -68.2672,
},
{
"zip": 5436,
"name": "PUNTA DE LAGUNA",
"state": "SAN JUAN",
"lat": -31.9026,
"lon": -68.4343,
},
{
"zip": 5438,
"name": "ALTO DE SIERRA",
"state": "SAN JUAN",
"lat": -31.5333,
"lon": -68.4167,
},
{
"zip": 5439,
"name": "SAN MARTIN",
"state": "SAN JUAN",
"lat": -31.55,
"lon": -68.3333,
},
{
"zip": 5442,
"name": "LOTES RIVERA",
"state": "SAN JUAN",
"lat": -31.6542,
"lon": -68.2853,
},
{
"zip": 5443,
"name": "TUPELI",
"state": "SAN JUAN",
"lat": -31.7024,
"lon": -68.2609,
},
{
"zip": 5444,
"name": "BERMEJO",
"state": "SAN JUAN",
"lat": -31.6167,
"lon": -67.65,
},
{
"zip": 5446,
"name": "MARAYES",
"state": "SAN JUAN",
"lat": -31.4833,
"lon": -67.3333,
},
{
"zip": 5447,
"name": "LAS DELICIAS",
"state": "SAN JUAN",
"lat": -30.8667,
"lon": -67.35,
},
{
"zip": 5449,
"name": "BALDE DEL ROSARIO",
"state": "SAN JUAN",
"lat": -30.3167,
"lon": -67.6833,
},
{
"zip": 5460,
"name": "CERRO NEGRO (JACHAL, DPTO. JACHAL)",
"state": "SAN JUAN",
"lat": -30.2425,
"lon": -68.7458,
},
{
"zip": 5461,
"name": "LA FALDA",
"state": "SAN JUAN",
"lat": -30.1667,
"lon": -68.65,
},
{
"zip": 5463,
"name": "PASO DEL LAMAR",
"state": "SAN JUAN",
"lat": -30.2,
"lon": -68.4667,
},
{
"zip": 5465,
"name": "COLOLA",
"state": "SAN JUAN",
"lat": -30.1833,
"lon": -69.0833,
},
{
"zip": 5467,
"name": "LAS FLORES",
"state": "SAN JUAN",
"lat": -30.3242,
"lon": -69.2108,
},
{
"zip": 5480,
"name": "TRANCAS",
"state": "SAN JUAN",
"lat": -31.1896,
"lon": -68.0708,
},
{
"zip": 5546,
"name": "COLANGUIL",
"state": "SAN JUAN",
"lat": -32.7896,
"lon": -68.8979,
},
{
"zip": 4139,
"name": "CERRILLOS",
"state": "CATAMARCA",
"lat": -26.7717,
"lon": -66.285,
},
{
"zip": 4235,
"name": "PUEBLITO",
"state": "CATAMARCA",
"lat": -28.4685,
"lon": -65.4204,
},
{
"zip": 4700,
"name": "LOMA CORTADA",
"state": "CATAMARCA",
"lat": -28.4633,
"lon": -65.7667,
},
{
"zip": 4701,
"name": "BARRANCAS",
"state": "CATAMARCA",
"lat": -28.7833,
"lon": -65.5139,
},
{
"zip": 4705,
"name": "HUAYCAMA (DPTO. VALLE VIEJO)",
"state": "CATAMARCA",
"lat": -28.5,
"lon": -65.75,
},
{
"zip": 4707,
"name": "SAN ANTONIO DE PIEDRA BLANCA",
"state": "CATAMARCA",
"lat": -28.4444,
"lon": -65.7333,
},
{
"zip": 4709,
"name": "SAN JOSE (PIEDRA BLANCA-DPTO.FRAY MAMERTO ESQUIU)",
"state": "CATAMARCA",
"lat": -28.3833,
"lon": -65.7,
},
{
"zip": 4711,
"name": "AMBATO",
"state": "CATAMARCA",
"lat": -27.9074,
"lon": -65.9667,
},
{
"zip": 4713,
"name": "OCHO VADOS",
"state": "CATAMARCA",
"lat": -28.1386,
"lon": -65.8088,
},
{
"zip": 4715,
"name": "VILLA QUINTIN AHUMADA",
"state": "CATAMARCA",
"lat": -28.2167,
"lon": -65.8667,
},
{
"zip": 4716,
"name": "YOCAN",
"state": "CATAMARCA",
"lat": -28.4167,
"lon": -65.6333,
},
{
"zip": 4718,
"name": "LA ESQUINA",
"state": "CATAMARCA",
"lat": -29.1,
"lon": -65.5833,
},
{
"zip": 4719,
"name": "VILLA COLLANTES",
"state": "CATAMARCA",
"lat": -27.8833,
"lon": -65.7167,
},
{
"zip": 4722,
"name": "LAS HUERTAS",
"state": "CATAMARCA",
"lat": -28.05,
"lon": -65.5167,
},
{
"zip": 4723,
"name": "LOS TRONCOS",
"state": "CATAMARCA",
"lat": -28.0852,
"lon": -65.3704,
},
{
"zip": 4724,
"name": "CONETA",
"state": "CATAMARCA",
"lat": -28.5833,
"lon": -65.9,
},
{
"zip": 4726,
"name": "CONCEPCION (DPTO. CAPAYAN)",
"state": "CATAMARCA",
"lat": -28.7375,
"lon": -66.0667,
},
{
"zip": 4728,
"name": "SAN JERONIMO",
"state": "CATAMARCA",
"lat": -28.8667,
"lon": -66.2333,
},
{
"zip": 4740,
"name": "EL COLEGIO",
"state": "CATAMARCA",
"lat": -27.6083,
"lon": -66.3208,
},
{
"zip": 4741,
"name": "CARAPUNCO",
"state": "CATAMARCA",
"lat": -27.5708,
"lon": -66.3125,
},
{
"zip": 4743,
"name": "ALUMBRERA",
"state": "CATAMARCA",
"lat": -27.5167,
"lon": -66.0333,
},
{
"zip": 4750,
"name": "AMPUJACO",
"state": "CATAMARCA",
"lat": -27.8222,
"lon": -66.3611,
},
{
"zip": 4751,
"name": "LAS JUNTAS (LAS BARRANCAS-DPTO.BELEN)",
"state": "CATAMARCA",
"lat": -27.1861,
"lon": -66.8389,
},
{
"zip": 4753,
"name": "LONDRES",
"state": "CATAMARCA",
"lat": -27.7167,
"lon": -67.1167,
},
{
"zip": 4758,
"name": "SAN BUENA VENTURA",
"state": "CATAMARCA",
"lat": -27.4455,
"lon": -66.7667,
},
{
"zip": 5139,
"name": "CAMPITOS",
"state": "CATAMARCA",
"lat": -30.9778,
"lon": -62.8556,
},
{
"zip": 5261,
"name": "LA VALENTINA",
"state": "CATAMARCA",
"lat": -28.9922,
"lon": -65.4289,
},
{"zip": 5264, "name": "TULA", "state": "CATAMARCA", "lat": -28.9333, "lon": -65.1},
{
"zip": 5265,
"name": "LA BARROSA (BAVIANO, DPTO. LA PAZ)",
"state": "CATAMARCA",
"lat": -28.7963,
"lon": -65.5593,
},
{
"zip": 5315,
"name": "ESTACION POMAN (EL PAJONAL, DPTO. POMAN)",
"state": "CATAMARCA",
"lat": -28.3833,
"lon": -66.2583,
},
{
"zip": 5317,
"name": "RETIRO DE COLANA",
"state": "CATAMARCA",
"lat": -28.3083,
"lon": -66.1542,
},
{
"zip": 5319,
"name": "SAN JOSE (COIPES-DPTO.POMAN)",
"state": "CATAMARCA",
"lat": -28.05,
"lon": -66.2167,
},
{
"zip": 5321,
"name": "EL POTRERO (SAUJIL, DPTO. POMAN)",
"state": "CATAMARCA",
"lat": -28.1111,
"lon": -66.3333,
},
{"zip": 5331, "name": "SALADO", "state": "CATAMARCA", "lat": -28.3, "lon": -67.25},
{
"zip": 5333,
"name": "BANDA DE LUCERO",
"state": "CATAMARCA",
"lat": -28.1833,
"lon": -67.575,
},
{
"zip": 5340,
"name": "TINOGASTA",
"state": "CATAMARCA",
"lat": -28.0667,
"lon": -67.5667,
},
{
"zip": 5341,
"name": "LAS PAPAS",
"state": "CATAMARCA",
"lat": -27.4905,
"lon": -67.6405,
},
{
"zip": 5343,
"name": "SANTA ROSA (DPTO.TINOGASTA)",
"state": "CATAMARCA",
"lat": -27.5759,
"lon": -67.6315,
},
{
"zip": 5345,
"name": "LOS MORTEROS (FIAMBALA-DPTO.TINOGASTA)",
"state": "CATAMARCA",
"lat": -27.6833,
"lon": -67.6333,
},
{
"zip": 5621,
"name": "AGUA DE TORRE",
"state": "LA PAMPA",
"lat": -35.7375,
"lon": -68.0792,
},
{
"zip": 6200,
"name": "REALICO",
"state": "LA PAMPA",
"lat": -35.0333,
"lon": -64.25,
},
{"zip": 6201, "name": "CHANILAO", "state": "LA PAMPA", "lat": -35.3333, "lon": -64},
{
"zip": 6203,
"name": "EL TIGRE",
"state": "LA PAMPA",
"lat": -35.4167,
"lon": -64.3,
},
{
"zip": 6205,
"name": "LOTE 5 (CALEUFU, ESCUELA 120)",
"state": "LA PAMPA",
"lat": -35.4167,
"lon": -64.4833,
},
{"zip": 6207, "name": "OJEDA", "state": "LA PAMPA", "lat": -35.3333, "lon": -64},
{
"zip": 6212,
"name": "SAN JUAN (SIMSON, DPTO. REALICO)",
"state": "LA PAMPA",
"lat": -35.0708,
"lon": -64.2667,
},
{
"zip": 6213,
"name": "LOTE 11 (ESCUELA 107)",
"state": "LA PAMPA",
"lat": -35.1333,
"lon": -64.5333,
},
{
"zip": 6220,
"name": "BERNARDO LARROUDE",
"state": "LA PAMPA",
"lat": -35.0333,
"lon": -63.5667,
},
{
"zip": 6221,
"name": "INTENDENTE ALVEAR",
"state": "LA PAMPA",
"lat": -35.2333,
"lon": -63.5833,
},
{
"zip": 6228,
"name": "COLONIA DENEVI",
"state": "LA PAMPA",
"lat": -35.0125,
"lon": -63.8458,
},
{
"zip": 6300,
"name": "SANTA ROSA",
"state": "LA PAMPA",
"lat": -36.6167,
"lon": -64.2833,
},
{
"zip": 6301,
"name": "LA DOLORES",
"state": "LA PAMPA",
"lat": -36.8833,
"lon": -63.8375,
},
{"zip": 6303, "name": "TOAY", "state": "LA PAMPA", "lat": -36.6667, "lon": -64.35},
{"zip": 6305, "name": "ROLON", "state": "LA PAMPA", "lat": -37.1833, "lon": -63.4},
{
"zip": 6307,
"name": "LA JOSEFINA",
"state": "LA PAMPA",
"lat": -37.1583,
"lon": -63.5917,
},
{
"zip": 6309,
"name": "APUYACO (APEADERO FCDFS)",
"state": "LA PAMPA",
"lat": -37.4167,
"lon": -63.6833,
},
{
"zip": 6311,
"name": "LOS TOROS",
"state": "LA PAMPA",
"lat": -37.6167,
"lon": -63.475,
},
{
"zip": 6312,
"name": "RICARDO LAVALLE",
"state": "LA PAMPA",
"lat": -35.1333,
"lon": -64.1667,
},
{
"zip": 6313,
"name": "COLONIA SAN FELIPE",
"state": "LA PAMPA",
"lat": -36.275,
"lon": -64.3667,
},
{
"zip": 6315,
"name": "MAURICIO MAYER",
"state": "LA PAMPA",
"lat": -36.2333,
"lon": -64.0333,
},
{
"zip": 6317,
"name": "LOVENTUEL",
"state": "LA PAMPA",
"lat": -36.1833,
"lon": -65.3,
},
{
"zip": 6319,
"name": "LA MOROCHA",
"state": "LA PAMPA",
"lat": -36.4611,
"lon": -65.3556,
},
{
"zip": 6321,
"name": "NANQUEL HUITRE",
"state": "LA PAMPA",
"lat": -36.6375,
"lon": -66.5208,
},
{"zip": 6323, "name": "BUTALO", "state": "LA PAMPA", "lat": -36.2, "lon": -67.1},
{
"zip": 6325,
"name": "QUEHUE",
"state": "LA PAMPA",
"lat": -37.1333,
"lon": -64.5167,
},
{
"zip": 6326,
"name": "ANGUIL",
"state": "LA PAMPA",
"lat": -36.5167,
"lon": -64.0167,
},
{
"zip": 6330,
"name": "CATRILO",
"state": "LA PAMPA",
"lat": -36.4333,
"lon": -63.4,
},
{
"zip": 6331,
"name": "MIGUEL CANE",
"state": "LA PAMPA",
"lat": -36.1667,
"lon": -63.5,
},
{
"zip": 6333,
"name": "MARI-MARI",
"state": "LA PAMPA",
"lat": -36.025,
"lon": -63.625,
},
{
"zip": 6336,
"name": "LA JAPONESA",
"state": "LA PAMPA",
"lat": -36.4167,
"lon": -63.287,
},
{
"zip": 6345,
"name": "LA BILBAINA",
"state": "LA PAMPA",
"lat": -36.5095,
"lon": -63.3214,
},
{
"zip": 6352,
"name": "COLONIA SAN MIGUEL",
"state": "LA PAMPA",
"lat": -36.4667,
"lon": -63.6167,
},
{
"zip": 6354,
"name": "LA CATALINA (URIBURU, DPTO. CATRILO)",
"state": "LA PAMPA",
"lat": -36.5,
"lon": -63.8333,
},
{
"zip": 6360,
"name": "LA GUE�ITA",
"state": "LA PAMPA",
"lat": -35.6667,
"lon": -63.7333,
},
{
"zip": 6361,
"name": "LA VICTORIA (AGUSTONI, DPTO. MARACO)",
"state": "LA PAMPA",
"lat": -35.6833,
"lon": -63.4917,
},
{
"zip": 6365,
"name": "SPELUZZI",
"state": "LA PAMPA",
"lat": -35.5333,
"lon": -63.8,
},
{
"zip": 6367,
"name": "ZONA RURAL (METILEO, DPTO. CONELO)",
"state": "LA PAMPA",
"lat": -35.75,
"lon": -63.9333,
},
{
"zip": 6369,
"name": "CAMPO SALUSSO",
"state": "LA PAMPA",
"lat": -35.7,
"lon": -64.1333,
},
{
"zip": 6380,
"name": "EDUARDO CASTEX",
"state": "LA PAMPA",
"lat": -35.9,
"lon": -64.3,
},
{
"zip": 6381,
"name": "COLONIA EL DESTINO",
"state": "LA PAMPA",
"lat": -36.0333,
"lon": -64.7083,
},
{
"zip": 6383,
"name": "SECCION PRIMERA CONELO",
"state": "LA PAMPA",
"lat": -35.8667,
"lon": -64.15,
},
{
"zip": 6385,
"name": "PICHI-HUINCA",
"state": "LA PAMPA",
"lat": -35.6333,
"lon": -64.7667,
},
{
"zip": 6387,
"name": "LOTE 15 (ESCUELA 18)",
"state": "LA PAMPA",
"lat": -35.5833,
"lon": -64.55,
},
{
"zip": 8200,
"name": "COLONIA LIA Y ALLENDE",
"state": "LA PAMPA",
"lat": -37.2667,
"lon": -64.7389,
},
{
"zip": 8201,
"name": "LA REFORMA",
"state": "LA PAMPA",
"lat": -37.55,
"lon": -66.2333,
},
{
"zip": 8203,
"name": "SAN ANTONIO",
"state": "LA PAMPA",
"lat": -37.2667,
"lon": -64.5667,
},
{
"zip": 8204,
"name": "BERNASCONI",
"state": "LA PAMPA",
"lat": -37.9,
"lon": -63.7167,
},
{
"zip": 8206,
"name": "LA PORTE�A",
"state": "LA PAMPA",
"lat": -38.2667,
"lon": -63.7333,
},
{
"zip": 8208,
"name": "JACINTO ARAUZ",
"state": "LA PAMPA",
"lat": -38.0667,
"lon": -63.4333,
},
{
"zip": 8212,
"name": "DOS AMIGOS (HUCAL, DPTO. HUCAL)",
"state": "LA PAMPA",
"lat": -37.7,
"lon": -64.0833,
},
{
"zip": 8214,
"name": "EPU PEL",
"state": "LA PAMPA",
"lat": -37.5667,
"lon": -64.2833,
},
{
"zip": 8238,
"name": "LOS MORROS",
"state": "LA PAMPA",
"lat": -37.6641,
"lon": -64.8647,
},
{
"zip": 8307,
"name": "PUELEN",
"state": "LA PAMPA",
"lat": -37.3667,
"lon": -67.6167,
},
{
"zip": 8336,
"name": "LA PROVIDENCIA",
"state": "LA PAMPA",
"lat": -39,
"lon": -66.9042,
},
{
"zip": 5500,
"name": "MENDOZA",
"state": "MENDOZA",
"lat": -32.8833,
"lon": -68.8167,
},
{
"zip": 5501,
"name": "BARRIO TRAPICHE",
"state": "MENDOZA",
"lat": -32.9167,
"lon": -68.8333,
},
{
"zip": 5503,
"name": "PASO DE LOS ANDES",
"state": "MENDOZA",
"lat": -33.0778,
"lon": -68.8556,
},
{
"zip": 5505,
"name": "CARBOMETAL",
"state": "MENDOZA",
"lat": -32.9778,
"lon": -68.8667,
},
{
"zip": 5507,
"name": "BA�OS LUNLUNTA",
"state": "MENDOZA",
"lat": -33.0667,
"lon": -68.8333,
},
{
"zip": 5509,
"name": "ANCHORIS",
"state": "MENDOZA",
"lat": -33.3333,
"lon": -68.9167,
},
{
"zip": 5511,
"name": "BARRIO SARMIENTO",
"state": "MENDOZA",
"lat": -32.95,
"lon": -68.8,
},
{
"zip": 5513,
"name": "BARRIO JARDIN LUZURIAGA",
"state": "MENDOZA",
"lat": -32.9583,
"lon": -68.7833,
},
{
"zip": 5515,
"name": "MAIPU",
"state": "MENDOZA",
"lat": -32.9667,
"lon": -68.7833,
},
{
"zip": 5517,
"name": "TRES BANDERAS",
"state": "MENDOZA",
"lat": -33.269,
"lon": -68.8524,
},
{
"zip": 5519,
"name": "BARRIO GRAFICO",
"state": "MENDOZA",
"lat": -33.1561,
"lon": -68.8288,
},
{
"zip": 5521,
"name": "BARRIO RESIDENCIAL SANTA ANA",
"state": "MENDOZA",
"lat": -32.9,
"lon": -68.7833,
},
{
"zip": 5523,
"name": "BUENA NUEVA",
"state": "MENDOZA",
"lat": -32.9,
"lon": -68.75,
},
{
"zip": 5525,
"name": "RODEO DE LA CRUZ",
"state": "MENDOZA",
"lat": -32.9167,
"lon": -68.7333,
},
{"zip": 5527, "name": "VERGEL", "state": "MENDOZA", "lat": -32.8, "lon": -68.6333},
{
"zip": 5529,
"name": "COLONIA JARA",
"state": "MENDOZA",
"lat": -32.975,
"lon": -68.6833,
},
{
"zip": 5531,
"name": "FINCA EL ARROZ",
"state": "MENDOZA",
"lat": -33.0333,
"lon": -68.6667,
},
{
"zip": 5533,
"name": "EL CHIRCAL",
"state": "MENDOZA",
"lat": -32.7833,
"lon": -68.669,
},
{
"zip": 5535,
"name": "EL ALPERO (EMBARCADERO FCGB)",
"state": "MENDOZA",
"lat": -32.7333,
"lon": -68.3556,
},
{
"zip": 5537,
"name": "SAN MIGUEL",
"state": "MENDOZA",
"lat": -32.2833,
"lon": -67.5167,
},
{
"zip": 5539,
"name": "SANCHEZ DE BUSTAMANTE",
"state": "MENDOZA",
"lat": -32.8417,
"lon": -68.8417,
},
{
"zip": 5541,
"name": "EL BORBOLLON",
"state": "MENDOZA",
"lat": -32.8,
"lon": -68.75,
},
{
"zip": 5543,
"name": "EL RESGUARDO",
"state": "MENDOZA",
"lat": -32.6833,
"lon": -68.71,
},
{
"zip": 5544,
"name": "GOBERNADOR BENEGAS",
"state": "MENDOZA",
"lat": -32.95,
"lon": -68.85,
},
{
"zip": 5545,
"name": "TERMAS VILLAVICENCIO",
"state": "MENDOZA",
"lat": -32.5167,
"lon": -69.0167,
},
{
"zip": 5547,
"name": "BARRIO VILLA DEL PARQUE",
"state": "MENDOZA",
"lat": -32.9167,
"lon": -68.8333,
},
{
"zip": 5549,
"name": "ALVAREZ CONDARCO (APEADERO FCGB)",
"state": "MENDOZA",
"lat": -32.9625,
"lon": -69.1333,
},
{
"zip": 5551,
"name": "POLVAREDA",
"state": "MENDOZA",
"lat": -32.7833,
"lon": -69.6333,
},
{
"zip": 5553,
"name": "ZANJON AMARILLO",
"state": "MENDOZA",
"lat": -32.85,
"lon": -69.75,
},
{
"zip": 5555,
"name": "PUENTE DEL INCA",
"state": "MENDOZA",
"lat": -32.8167,
"lon": -69.9167,
},
{
"zip": 5557,
"name": "LAS CUEVAS",
"state": "MENDOZA",
"lat": -32.8167,
"lon": -70.05,
},
{
"zip": 5560,
"name": "ARROYO CLARO",
"state": "MENDOZA",
"lat": -33.525,
"lon": -69.0083,
},
{
"zip": 5561,
"name": "CORDON DEL PLATA",
"state": "MENDOZA",
"lat": -33.37,
"lon": -68.9033,
},
{
"zip": 5563,
"name": "VILLA SECA (LOS ARBOLES DE VILLEGAS, DPTO. TUNUYAN)",
"state": "MENDOZA",
"lat": -33.5222,
"lon": -69.2389,
},
{
"zip": 5565,
"name": "LA PRIMAVERA (VISTA FLORES, DPTO. TUNUYAN)",
"state": "MENDOZA",
"lat": -33.6444,
"lon": -69.1333,
},
{
"zip": 5567,
"name": "LA CA�ADA",
"state": "MENDOZA",
"lat": -33.7333,
"lon": -69.1167,
},
{
"zip": 5569,
"name": "TIERRAS BLANCAS",
"state": "MENDOZA",
"lat": -33.8367,
"lon": -69.0567,
},
{
"zip": 5570,
"name": "SAN MARTIN",
"state": "MENDOZA",
"lat": -33.0667,
"lon": -68.4667,
},
{
"zip": 5571,
"name": "CHIVILCOY",
"state": "MENDOZA",
"lat": -33.2894,
"lon": -68.4576,
},
{
"zip": 5573,
"name": "VILLA DE JUNIN",
"state": "MENDOZA",
"lat": -33.15,
"lon": -68.4833,
},
{
"zip": 5575,
"name": "LOS ARBOLES",
"state": "MENDOZA",
"lat": -33.1833,
"lon": -68.5667,
},
{
"zip": 5577,
"name": "EL ALTO",
"state": "MENDOZA",
"lat": -33.1833,
"lon": -68.4667,
},
{
"zip": 5579,
"name": "MINELLI",
"state": "MENDOZA",
"lat": -33.2444,
"lon": -68.3833,
},
{
"zip": 5580,
"name": "COLONIA TABANERA",
"state": "MENDOZA",
"lat": -33.0222,
"lon": -68.5056,
},
{
"zip": 5582,
"name": "EL RAMBLON",
"state": "MENDOZA",
"lat": -33.1167,
"lon": -68.4167,
},
{
"zip": 5584,
"name": "PALMIRA",
"state": "MENDOZA",
"lat": -33.05,
"lon": -68.5667,
},
{
"zip": 5585,
"name": "LOS BARRIALES",
"state": "MENDOZA",
"lat": -33.15,
"lon": -68.6083,
},
{
"zip": 5587,
"name": "VALLE HERMOSO",
"state": "MENDOZA",
"lat": -33.0333,
"lon": -68.5833,
},
{
"zip": 5589,
"name": "EL CENTRAL",
"state": "MENDOZA",
"lat": -32.8,
"lon": -68.3667,
},
{
"zip": 5590,
"name": "MAQUINISTA LEVET",
"state": "MENDOZA",
"lat": -33.6,
"lon": -67.1167,
},
{
"zip": 5591,
"name": "VILLA LA PAZ",
"state": "MENDOZA",
"lat": -33.45,
"lon": -67.6333,
},
{
"zip": 5592,
"name": "LA DORMIDA",
"state": "MENDOZA",
"lat": -33.35,
"lon": -67.9167,
},
{
"zip": 5594,
"name": "COMANDANTE SALAS",
"state": "MENDOZA",
"lat": -33.85,
"lon": -68,
},
{
"zip": 5595,
"name": "�ACU�AN",
"state": "MENDOZA",
"lat": -34.05,
"lon": -67.9667,
},
{
"zip": 5596,
"name": "SANTA ROSA",
"state": "MENDOZA",
"lat": -33.25,
"lon": -68.15,
},
{
"zip": 5600,
"name": "EL CERRITO",
"state": "MENDOZA",
"lat": -34.6,
"lon": -68.3333,
},
{
"zip": 5601,
"name": "EL USILLAL",
"state": "MENDOZA",
"lat": -34.5667,
"lon": -68.3333,
},
{
"zip": 5603,
"name": "GOUDGE",
"state": "MENDOZA",
"lat": -34.6667,
"lon": -68.1333,
},
{
"zip": 5605,
"name": "SALINAS EL DIAMANTE (ESTACION FCGSM)",
"state": "MENDOZA",
"lat": -34.9667,
"lon": -68.5278,
},
{
"zip": 5607,
"name": "COLONIA ESPA�OLA",
"state": "MENDOZA",
"lat": -34.6167,
"lon": -68.2833,
},
{
"zip": 5609,
"name": "MONTE COMAN",
"state": "MENDOZA",
"lat": -34.6,
"lon": -67.9,
},
{
"zip": 5611,
"name": "LOS PARLAMENTOS (APEADERO FCGSM)",
"state": "MENDOZA",
"lat": -35.9444,
"lon": -69.7806,
},
{
"zip": 5613,
"name": "RIO GRANDE",
"state": "MENDOZA",
"lat": -35.6917,
"lon": -69.7,
},
{
"zip": 5615,
"name": "COLONIA PASCUAL IACARINI",
"state": "MENDOZA",
"lat": -34.5833,
"lon": -68.55,
},
{
"zip": 5620,
"name": "GENERAL ALVEAR",
"state": "MENDOZA",
"lat": -34.9667,
"lon": -67.7,
},
{
"zip": 5622,
"name": "VILLA ATUEL",
"state": "MENDOZA",
"lat": -34.8333,
"lon": -67.9,
},
{
"zip": 5623,
"name": "JAIME PRATS",
"state": "MENDOZA",
"lat": -34.9,
"lon": -67.8,
},
{
"zip": 5624,
"name": "REAL DEL PADRE",
"state": "MENDOZA",
"lat": -34.8333,
"lon": -67.7667,
},
{
"zip": 5632,
"name": "LA MARZOLINA",
"state": "MENDOZA",
"lat": -35,
"lon": -67.6167,
},
{
"zip": 5634,
"name": "LA ESCANDINAVA",
"state": "MENDOZA",
"lat": -34.9167,
"lon": -67.5333,
},
{
"zip": 5635,
"name": "KILOMETRO 43",
"state": "MENDOZA",
"lat": -34.9905,
"lon": -67.2524,
},
{
"zip": 5636,
"name": "LOS HUARPES (APEADERO FCDFS)",
"state": "MENDOZA",
"lat": -35.1583,
"lon": -66.6833,
},
{
"zip": 5637,
"name": "PAMPA DEL TIGRE (EMBARCADERO FCGSM)",
"state": "MENDOZA",
"lat": -34.7,
"lon": -67.0667,
},
{
"zip": 5677,
"name": "LOS CAMPAMENTOS (RIVADAVIA, DPTO. RIVADAVIA)",
"state": "MENDOZA",
"lat": -35.1174,
"lon": -68.3656,
},
{
"zip": 3300,
"name": "POSADAS",
"state": "MISIONES",
"lat": -27.3833,
"lon": -55.8833,
},
{
"zip": 3304,
"name": "VILLA LANUS",
"state": "MISIONES",
"lat": -27.4333,
"lon": -55.8833,
},
{
"zip": 3306,
"name": "MANANTIALES",
"state": "MISIONES",
"lat": -27.5833,
"lon": -55.5767,
},
{
"zip": 3308,
"name": "CANDELARIA",
"state": "MISIONES",
"lat": -27.4667,
"lon": -55.7333,
},
{
"zip": 3309,
"name": "BELLA VISTA",
"state": "MISIONES",
"lat": -27.5333,
"lon": -55.5333,
},
{
"zip": 3311,
"name": "PICADA GALITZIANA",
"state": "MISIONES",
"lat": -27.55,
"lon": -55.4667,
},
{
"zip": 3313,
"name": "CERRO AZUL",
"state": "MISIONES",
"lat": -27.6333,
"lon": -55.4833,
},
{
"zip": 3315,
"name": "POZO FEO",
"state": "MISIONES",
"lat": -27.6722,
"lon": -55.2444,
},
{
"zip": 3316,
"name": "YERBAL MAMBORETA",
"state": "MISIONES",
"lat": -27.3778,
"lon": -55.4667,
},
{
"zip": 3317,
"name": "ALMAFUERTE",
"state": "MISIONES",
"lat": -27.5167,
"lon": -55.4,
},
{
"zip": 3318,
"name": "COLONIA MARTIRES",
"state": "MISIONES",
"lat": -27.5512,
"lon": -55.4143,
},
{
"zip": 3324,
"name": "GOBERNADOR ROCA",
"state": "MISIONES",
"lat": -27.1833,
"lon": -55.4667,
},
{
"zip": 3326,
"name": "PUERTO NARANJITO",
"state": "MISIONES",
"lat": -26.95,
"lon": -55.3,
},
{
"zip": 3327,
"name": "PUERTO DOCE",
"state": "MISIONES",
"lat": -27.15,
"lon": -55.6167,
},
{
"zip": 3328,
"name": "JARDIN AMERICA",
"state": "MISIONES",
"lat": -27.0435,
"lon": -55.227,
},
{"zip": 3332, "name": "MBOPICUA", "state": "MISIONES", "lat": -26.9, "lon": -55.05},
{
"zip": 3334,
"name": "PUERTO ORO VERDE",
"state": "MISIONES",
"lat": -26.8667,
"lon": -55.1333,
},
{"zip": 3350, "name": "TIGRE", "state": "MISIONES", "lat": -27.2667, "lon": -54.95},
{
"zip": 3351,
"name": "CAPON BONITO",
"state": "MISIONES",
"lat": -28.0667,
"lon": -55.5833,
},
{
"zip": 3353,
"name": "ITACARUARE",
"state": "MISIONES",
"lat": -27.8667,
"lon": -55.2667,
},
{
"zip": 3355,
"name": "CONCEPCION DE LA SIERRA",
"state": "MISIONES",
"lat": -27.9833,
"lon": -55.5167,
},
{
"zip": 3357,
"name": "BARRA BONITA",
"state": "MISIONES",
"lat": -27.5833,
"lon": -54.85,
},
{
"zip": 3358,
"name": "EL RANCHO",
"state": "MISIONES",
"lat": -27.9,
"lon": -55.8583,
},
{
"zip": 3360,
"name": "PUEBLO SALTO",
"state": "MISIONES",
"lat": -27.5,
"lon": -55.2,
},
{
"zip": 3361,
"name": "YERBAL VIEJO SECCION 5",
"state": "MISIONES",
"lat": -27.5278,
"lon": -55.1278,
},
{
"zip": 3362,
"name": "YERBAL VIEJO SECCION 7",
"state": "MISIONES",
"lat": -27.3,
"lon": -55,
},
{
"zip": 3363,
"name": "COLONIA AURORA",
"state": "MISIONES",
"lat": -27.4667,
"lon": -54.4833,
},
{
"zip": 3364,
"name": "FRACRAN",
"state": "MISIONES",
"lat": -26.7667,
"lon": -54.2667,
},
{
"zip": 3366,
"name": "SAN ANTONIO",
"state": "MISIONES",
"lat": -26.1167,
"lon": -53.75,
},
{
"zip": 3370,
"name": "PUERTO AGUIRRE",
"state": "MISIONES",
"lat": -25.5991,
"lon": -54.5736,
},
{
"zip": 3371,
"name": "CABURE I",
"state": "MISIONES",
"lat": -25.6833,
"lon": -54.1333,
},
{
"zip": 3372,
"name": "CATARATAS DEL IGUAZU",
"state": "MISIONES",
"lat": -25.6667,
"lon": -54.45,
},
{
"zip": 3374,
"name": "SEGUNDA ZONA",
"state": "MISIONES",
"lat": -25.8889,
"lon": -54.5889,
},
{
"zip": 3376,
"name": "GOBERNADOR JUAN J. LANUSSE",
"state": "MISIONES",
"lat": -25.9667,
"lon": -54.4667,
},
{
"zip": 3378,
"name": "VEINTIDOS DE DICIEMBRE",
"state": "MISIONES",
"lat": -26.0292,
"lon": -54.6333,
},
{
"zip": 3380,
"name": "9 DE JULIO (ELDORADO, DPTO. ELDORADO)",
"state": "MISIONES",
"lat": -26.4,
"lon": -54.6333,
},
{
"zip": 3381,
"name": "SANTIAGO DE LINIERS",
"state": "MISIONES",
"lat": -26.3458,
"lon": -54.6458,
},
{
"zip": 3382,
"name": "COLONIA VICTORIA",
"state": "MISIONES",
"lat": -26.375,
"lon": -54.6667,
},
{
"zip": 3384,
"name": "LARRAQUE",
"state": "MISIONES",
"lat": -26.4208,
"lon": -54.6083,
},
{
"zip": 3386,
"name": "PUERTO PRESIDENTE AVELLANEDA",
"state": "MISIONES",
"lat": -26.65,
"lon": -54.7917,
},
{
"zip": 3601,
"name": "PICADITA",
"state": "MISIONES",
"lat": -26.19,
"lon": -58.5733,
},
{
"zip": 3525,
"name": "VILLA ESCOLAR",
"state": "FORMOSA",
"lat": -26.6167,
"lon": -58.6667,
},
{
"zip": 3600,
"name": "FORMOSA",
"state": "FORMOSA",
"lat": -26.1833,
"lon": -58.1833,
},
{
"zip": 3603,
"name": "GENERAL PABLO RICCHIERI",
"state": "FORMOSA",
"lat": -25.9655,
"lon": -58.5429,
},
{
"zip": 3604,
"name": "MARIANO BOEDO",
"state": "FORMOSA",
"lat": -26.1167,
"lon": -58.4833,
},
{
"zip": 3606,
"name": "LOMA SENES",
"state": "FORMOSA",
"lat": -25.7167,
"lon": -59.1,
},
{
"zip": 3608,
"name": "PALO SANTO",
"state": "FORMOSA",
"lat": -25.5667,
"lon": -59.35,
},
{
"zip": 3610,
"name": "ESTANCIA LAS HORQUETAS",
"state": "FORMOSA",
"lat": -25.2333,
"lon": -57.8417,
},
{
"zip": 3611,
"name": "COLONIA BOUVIER",
"state": "FORMOSA",
"lat": -25.45,
"lon": -57.5833,
},
{
"zip": 3613,
"name": "FRONTERA",
"state": "FORMOSA",
"lat": -25.4,
"lon": -58.1833,
},
{
"zip": 3615,
"name": "TACAAGLE",
"state": "FORMOSA",
"lat": -24.9667,
"lon": -58.8167,
},
{
"zip": 3621,
"name": "FORTIN LUGONES",
"state": "FORMOSA",
"lat": -24.2833,
"lon": -59.8333,
},
{
"zip": 3622,
"name": "TENIENTE BROWN",
"state": "FORMOSA",
"lat": -24.8,
"lon": -60.4667,
},
{
"zip": 3624,
"name": "LEGUA A",
"state": "FORMOSA",
"lat": -25.4667,
"lon": -59.8833,
},
{
"zip": 3626,
"name": "ESTANISLAO DEL CAMPO",
"state": "FORMOSA",
"lat": -25.05,
"lon": -60.1,
},
{
"zip": 3628,
"name": "POZO DEL TIGRE",
"state": "FORMOSA",
"lat": -24.9,
"lon": -60.3167,
},
{
"zip": 3630,
"name": "KILOMETRO 642 (NAVEGACION RIO BERMEJO)",
"state": "FORMOSA",
"lat": -24.19,
"lon": -60.6767,
},
{
"zip": 3632,
"name": "KILOMETRO 1695 (APEADERO FCGB)",
"state": "FORMOSA",
"lat": -24.2667,
"lon": -61.0571,
},
{
"zip": 3634,
"name": "JOAQUIN V. GONZALEZ",
"state": "FORMOSA",
"lat": -24.25,
"lon": -61.25,
},
{
"zip": 8300,
"name": "NEUQUEN",
"state": "NEUQUEN",
"lat": -38.95,
"lon": -68.0667,
},
{
"zip": 8301,
"name": "PLANICIE BANDERITA",
"state": "NEUQUEN",
"lat": -38.8083,
"lon": -68.0833,
},
{"zip": 8305, "name": "TRATAYEN", "state": "NEUQUEN", "lat": -38.4, "lon": -68.6},
{
"zip": 8309,
"name": "VISTA ALEGRE NORTE",
"state": "NEUQUEN",
"lat": -38.8,
"lon": -68.1333,
},
{
"zip": 8311,
"name": "VILLA EL CHOCON",
"state": "NEUQUEN",
"lat": -39.2333,
"lon": -68.75,
},
{
"zip": 8313,
"name": "LIMAY CENTRO",
"state": "NEUQUEN",
"lat": -39.2278,
"lon": -68.8056,
},
{
"zip": 8315,
"name": "BAJADA COLORADA",
"state": "NEUQUEN",
"lat": -39.85,
"lon": -69.7333,
},
{
"zip": 8316,
"name": "CHINA MUERTA",
"state": "NEUQUEN",
"lat": -38.9833,
"lon": -68.325,
},
{
"zip": 8318,
"name": "PLAZA HUINCUL",
"state": "NEUQUEN",
"lat": -38.9167,
"lon": -69.15,
},
{
"zip": 8319,
"name": "CAMPAMENTO SOL",
"state": "NEUQUEN",
"lat": -39.4467,
"lon": -69.3489,
},
{
"zip": 8322,
"name": "BARRIO PELIGROSO",
"state": "NEUQUEN",
"lat": -38.9333,
"lon": -69.2333,
},
{
"zip": 8340,
"name": "AGUADA FLORENCIO",
"state": "NEUQUEN",
"lat": -38.7167,
"lon": -70.1583,
},
{
"zip": 8341,
"name": "CATAN LIL",
"state": "NEUQUEN",
"lat": -39.75,
"lon": -70.6167,
},
{
"zip": 8345,
"name": "HARAS PATRIA",
"state": "NEUQUEN",
"lat": -39.0667,
"lon": -70.8333,
},
{
"zip": 8347,
"name": "PINO SOLO",
"state": "NEUQUEN",
"lat": -38.2867,
"lon": -70.49,
},
{
"zip": 8349,
"name": "CERRO DE LA PARVA",
"state": "NEUQUEN",
"lat": -37.8061,
"lon": -70.6485,
},
{
"zip": 8351,
"name": "TRAHUNCURA",
"state": "NEUQUEN",
"lat": -38.15,
"lon": -70.1139,
},
{
"zip": 8353,
"name": "CHACAY MELEHUE",
"state": "NEUQUEN",
"lat": -37.2333,
"lon": -70.3667,
},
]
from locations.items import GeojsonPointItem
DAYS = ["Mo", "Tu", "We", "Th", "Fr", "Sa", "Su"]
class CarrefourSpider(scrapy.Spider):
name = "carrefour"
item_attributes = {"brand": "Carrefour"}
allowed_domains = ["carrefour.com.ar"]
start_urls = ("https://www.carrefour.com.ar/storelocator/index/",)
def get_string(self, stri):
day_parts = (
stri.replace("horas", "")
.replace("hrs.", "")
.replace("hrs", "")
.replace("de ", "")
.split(" y ")
)
res = ""
for days in day_parts:
time_parts = days.split(" a ")
if len(time_parts) > 1:
res += time_parts[0].strip() + "-" + time_parts[1].strip() + " and "
return res.rstrip(" and ")
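    # Illustrative conversion (input format assumed from the Carrefour page):
    #   "de 9 a 21 horas y de 9 a 13"  ->  "9-21 and 9-13"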
def store_hours(self, store_hours):
lastday = DAYS[0]
lasttime = self.get_string(store_hours[0])
opening_hours = lastday
for day in range(1, 7): # loop by days
if day == len(store_hours):
break
str_curr = self.get_string(store_hours[day])
if str_curr != lasttime:
if lastday == DAYS[day - 1]:
opening_hours += " " + lasttime + ";" + DAYS[day]
else:
opening_hours += (
"-" + DAYS[day - 1] + " " + lasttime + ";" + DAYS[day]
)
lasttime = str_curr
lastday = DAYS[day]
if lasttime != "":
if lastday == DAYS[day]:
opening_hours += " " + str_curr
else:
opening_hours += "-" + DAYS[6] + " " + str_curr
else:
opening_hours = opening_hours.rstrip(DAYS[6])
return opening_hours.rstrip(";").strip()
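    # The result is an OSM-style opening_hours string; for example, five days
    # of "de 9 a 21 horas" followed by two of "de 9 a 13 horas" would yield
    # roughly "Mo-Fr 9-21;Sa-Su 9-13".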
def phone_normalize(self, phone):
r = re.search(
r"\+?(\s+)*(\d{1})?(\s|\()*(\d{3})(\s+|\))*(\d{3})(\s+|-)?(\d{2})(\s+|-)?(\d{2})",
phone,
)
return (
(
"("
+ r.group(4)
+ ") "
+ r.group(6)
+ "-"
+ r.group(8)
+ "-"
+ r.group(10)
)
if r
else phone
)
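    # Illustrative: phone_normalize("0800 333 2255") -> "(800) 333-22-55";
    # strings that do not match the pattern are returned unchanged.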
    def parse(self, response):  # query the store locator once per known city
for city in CITIES:
formdata = {
"search[address]": city["name"],
"search[geocode]": str(city["lat"]) + "," + str(city["lon"]),
}
yield scrapy.FormRequest(
"https://www.carrefour.com.ar/storelocator/index/search/",
formdata=formdata,
callback=self.parse_shops,
)
    def parse_shops(self, response):  # parse the store entries returned for one city
        shops = response.xpath('//div[@class="storelocator_item"]')
        for shop in shops:
            # skip entries that lack address data instead of aborting the page
            if not shop.xpath('./div[@class="moreData"]/div/text()'):
                continue
address = shop.xpath('./div[@class="moreData"]/div/text()').extract()
id_num = shop.xpath('./div[@class="id"]/text()').extract_first()
time_data = response.xpath('//div[@id="store-detail-' + id_num + '"]')
dates = time_data.xpath(
'./div[@class="timetable"]//td[@class="hour"]/text()'
).extract()
yield GeojsonPointItem(
lat=float(
shop.xpath('./div[@class="geodata"]/@data-lat').extract_first()
),
lon=float(
shop.xpath('./div[@class="geodata"]/@data-lng').extract_first()
),
phone=time_data.xpath('./div/div[@class="tel"]/text()')
.extract_first()
.strip(),
# website='https://www.carrefour.com.ar/storelocator/index/',
ref=id_num,
opening_hours="" if not dates else self.store_hours(dates),
addr_full=address[0],
city=address[1],
state="",
postcode="",
country="Argentina",
)
| 21.933559
| 94
| 0.379075
|
794ae2122656da6525de9364c5f00519abc36bf9
| 1,384
|
py
|
Python
|
cogs/Twitch.py
|
loukayl/virtualv-Managmentbot
|
246eb2057a1d9cd772a3115de40436bf1e46ffb6
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
cogs/Twitch.py
|
loukayl/virtualv-Managmentbot
|
246eb2057a1d9cd772a3115de40436bf1e46ffb6
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
cogs/Twitch.py
|
loukayl/virtualv-Managmentbot
|
246eb2057a1d9cd772a3115de40436bf1e46ffb6
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
import os, sys, platform, random, aiohttp, json
import discord
from discord.ext import commands
import settings
from TwitchApiPy import TwitchApiPy
if not os.path.isfile("config.py"):
sys.exit("'config.py' not found! Please add it and try again.")
else:
import config
# This Cog is not being used for now due to an issue
class Twitch(commands.Cog, TwitchApiPy):
def __init__(self, bot):
self.bot = bot
self._last_member = None
api = TwitchApiPy()
api.ClientID = settings.ClientID
api.OAuth = settings.OAuth
@commands.command(pass_context=True)
async def TwitchFollower(self, ctx, name):
Count = self.api.GetFollowerCount(name)
await ctx.send("{}{} : {}".format("Follower count of ", name, Count))
@commands.command(pass_context=True)
async def TwitchChannelInfo(self, ctx, name):
info = self.api.GetChannelInfo(name)
info2 = self.api.GetChannelStatus(name)
text = (
"Channel name : {} , Last Played Game : {} , Last Streams Title : {} , Is Live : {} , Language : {}").format(
info["name"], info["game"], info["title"], info2["islive"], info2["language"])
await ctx.send(text)
def setup(bot):
bot.add_cog(Twitch(bot))
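# Hypothetical usage once the cog is loaded (command prefix assumed to be "!"):
#   !TwitchFollower somechannel     -> "Follower count of somechannel : <count>"
#   !TwitchChannelInfo somechannel  -> name, last game, title, live flag, language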
| 30.755556
| 121
| 0.668353
|
794ae22d978bd846053688382be0abb5d4da53d7
| 66
|
py
|
Python
|
libs/p3270/__init__.py
|
rocketbot-cl/Terminal_emulator
|
34f2e50f23908fb645f93e58aa579f03ad8e02d9
|
[
"MIT"
] | null | null | null |
libs/p3270/__init__.py
|
rocketbot-cl/Terminal_emulator
|
34f2e50f23908fb645f93e58aa579f03ad8e02d9
|
[
"MIT"
] | null | null | null |
libs/p3270/__init__.py
|
rocketbot-cl/Terminal_emulator
|
34f2e50f23908fb645f93e58aa579f03ad8e02d9
|
[
"MIT"
] | 2
|
2022-03-17T20:50:04.000Z
|
2022-03-30T12:26:10.000Z
|
from p3270.p3270 import S3270, P3270Client, Config, StatusMessage
| 33
| 65
| 0.833333
|
794ae462681c2df26e0a30de9b67fc61d3a7c27c
| 5,140
|
py
|
Python
|
esmvalcore/_recipe_checks.py
|
Peter9192/ESMValCore
|
febd96a39480cc837afbf4e1f5b0ef61571af76a
|
[
"Apache-2.0"
] | null | null | null |
esmvalcore/_recipe_checks.py
|
Peter9192/ESMValCore
|
febd96a39480cc837afbf4e1f5b0ef61571af76a
|
[
"Apache-2.0"
] | null | null | null |
esmvalcore/_recipe_checks.py
|
Peter9192/ESMValCore
|
febd96a39480cc837afbf4e1f5b0ef61571af76a
|
[
"Apache-2.0"
] | null | null | null |
"""Module with functions to check a recipe."""
import logging
import os
import subprocess
import yamale
from ._data_finder import get_start_end_year
from ._task import get_flattened_tasks, which
from .preprocessor import PreprocessingTask
logger = logging.getLogger(__name__)
class RecipeError(Exception):
"""Recipe contains an error."""
def ncl_version():
"""Check the NCL version."""
ncl = which('ncl')
if not ncl:
raise RecipeError("Recipe contains NCL scripts, but cannot find "
"an NCL installation.")
try:
cmd = [ncl, '-V']
version = subprocess.check_output(cmd, universal_newlines=True)
except subprocess.CalledProcessError:
logger.error("Failed to execute '%s'", ' '.join(' '.join(cmd)))
raise RecipeError("Recipe contains NCL scripts, but your NCL "
"installation appears to be broken.")
version = version.strip()
logger.info("Found NCL version %s", version)
major, minor = (int(i) for i in version.split('.')[:2])
if major < 6 or (major == 6 and minor < 4):
raise RecipeError("NCL version 6.4 or higher is required to run "
"a recipe containing NCL scripts.")
def recipe_with_schema(filename):
"""Check if the recipe content matches schema."""
schema_file = os.path.join(os.path.dirname(__file__), 'recipe_schema.yml')
logger.debug("Checking recipe against schema %s", schema_file)
recipe = yamale.make_data(filename)
schema = yamale.make_schema(schema_file)
yamale.validate(schema, recipe)
def diagnostics(diags):
"""Check diagnostics in recipe."""
for name, diagnostic in diags.items():
if 'scripts' not in diagnostic:
raise RecipeError(
"Missing scripts section in diagnostic {}".format(name))
variable_names = tuple(diagnostic.get('variables', {}))
scripts = diagnostic.get('scripts')
if scripts is None:
scripts = {}
for script_name, script in scripts.items():
if script_name in variable_names:
raise RecipeError(
"Invalid script name {} encountered in diagnostic {}: "
"scripts cannot have the same name as variables.".format(
script_name, name))
if not script.get('script'):
raise RecipeError(
"No script defined for script {} in diagnostic {}".format(
script_name, name))
def duplicate_datasets(datasets):
"""Check for duplicate datasets."""
checked_datasets_ = []
for dataset in datasets:
if dataset in checked_datasets_:
raise RecipeError(
"Duplicate dataset {} in datasets section".format(dataset))
checked_datasets_.append(dataset)
def variable(var, required_keys):
"""Check variables as derived from recipe."""
required = set(required_keys)
missing = required - set(var)
if missing:
raise RecipeError(
"Missing keys {} from variable {} in diagnostic {}".format(
missing, var.get('short_name'), var.get('diagnostic')))
def data_availability(input_files, var):
"""Check if the required input data is available."""
if not input_files:
raise RecipeError("No input files found for variable {}".format(var))
required_years = set(range(var['start_year'], var['end_year'] + 1))
available_years = set()
for filename in input_files:
start, end = get_start_end_year(filename)
available_years.update(range(start, end + 1))
missing_years = required_years - available_years
if missing_years:
raise RecipeError(
"No input data available for years {} in files {}".format(
", ".join(str(year) for year in missing_years), input_files))
def tasks_valid(tasks):
"""Check that tasks are consistent."""
filenames = set()
msg = "Duplicate preprocessor filename {}, please file a bug report."
for task in get_flattened_tasks(tasks):
if isinstance(task, PreprocessingTask):
for product in task.products:
if product.filename in filenames:
raise ValueError(msg.format(product.filename))
filenames.add(product.filename)
def extract_shape(settings):
"""Check that `extract_shape` arguments are valid."""
shapefile = settings.get('shapefile', '')
if not os.path.exists(shapefile):
raise RecipeError("In preprocessor function `extract_shape`: "
f"Unable to find 'shapefile: {shapefile}'")
valid = {
'method': {'contains', 'representative'},
'crop': {True, False},
}
for key in valid:
value = settings.get(key)
if not (value is None or value in valid[key]):
raise RecipeError(
f"In preprocessor function `extract_shape`: Invalid value "
f"'{value}' for argument '{key}', choose from "
"{}".format(', '.join(f"'{k}'".lower() for k in valid[key])))
| 36.453901
| 78
| 0.620039
|
794ae5f8c94523c844f42ca9005708562d7e11d5
| 124
|
py
|
Python
|
run.py
|
DoubleDoorDevelopment/MultiStream
|
2cf789d03c2206f56b0d861dd8f8c3943dc7e061
|
[
"BSD-3-Clause"
] | null | null | null |
run.py
|
DoubleDoorDevelopment/MultiStream
|
2cf789d03c2206f56b0d861dd8f8c3943dc7e061
|
[
"BSD-3-Clause"
] | null | null | null |
run.py
|
DoubleDoorDevelopment/MultiStream
|
2cf789d03c2206f56b0d861dd8f8c3943dc7e061
|
[
"BSD-3-Clause"
] | null | null | null |
# Run this file to start the app during development
from app import app
if __name__ == '__main__':
app.run(debug=True)
[end of record: avg_line_length 20.666667 | max_line_length 51 | alphanum_fraction 0.725806]

[record] hexsha: 794ae610beaa167c547d84e22f613f439c8c1948 | size: 57,776 | ext: py | lang: Python
path: sympy/core/basic.py | repo: torsknod/sympy-torsknod | head_hexsha: 19425c8d2d876710413987eaa6e69ff9d47a0380
licenses: ["BSD-3-Clause"] | stars: 1 (2020-03-12T02:52:16.000Z to 2020-03-12T02:52:16.000Z) | issues: null | forks: null
"""Base class for all the objects in SymPy"""
from __future__ import print_function, division
from sympy.core.assumptions import ManagedProperties
from sympy.core.cache import cacheit
from sympy.core.core import BasicType, C
from sympy.core.sympify import _sympify, sympify, SympifyError
from sympy.core.compatibility import (reduce, iterable, Iterator, ordered,
string_types, with_metaclass)
from sympy.core.decorators import deprecated
from sympy.core.singleton import S
class Basic(with_metaclass(ManagedProperties)):
"""
Base class for all objects in SymPy.
Conventions:
1) Always use ``.args`` when accessing parameters of some instance:
>>> from sympy import cot
>>> from sympy.abc import x, y
>>> cot(x).args
(x,)
>>> cot(x).args[0]
x
>>> (x*y).args
(x, y)
>>> (x*y).args[1]
y
2) Never use internal methods or variables (the ones prefixed with ``_``):
>>> cot(x)._args # do not use this, use cot(x).args instead
(x,)
"""
__slots__ = ['_mhash', # hash value
'_args', # arguments
'_assumptions'
]
# To be overridden with True in the appropriate subclasses
is_Atom = False
is_Symbol = False
is_Dummy = False
is_Wild = False
is_Function = False
is_Add = False
is_Mul = False
is_Pow = False
is_Number = False
is_Float = False
is_Rational = False
is_Integer = False
is_NumberSymbol = False
is_Order = False
is_Derivative = False
is_Piecewise = False
is_Poly = False
is_AlgebraicNumber = False
is_Relational = False
is_Equality = False
is_Boolean = False
is_Not = False
is_Matrix = False
@property
@deprecated(useinstead="is_Float", issue=1721, deprecated_since_version="0.7.0")
def is_Real(self): # pragma: no cover
"""Deprecated alias for ``is_Float``"""
# When this is removed, remove the piece of code disabling the warning
# from test_pickling.py
return self.is_Float
def __new__(cls, *args):
obj = object.__new__(cls)
obj._assumptions = cls.default_assumptions
obj._mhash = None # will be set by __hash__ method.
obj._args = args # all items in args must be Basic objects
return obj
def copy(self):
return self.func(*self.args)
def __reduce_ex__(self, proto):
""" Pickling support."""
return type(self), self.__getnewargs__(), self.__getstate__()
def __getnewargs__(self):
return self.args
def __getstate__(self):
return {}
def __setstate__(self, state):
for k, v in state.items():
setattr(self, k, v)
def __hash__(self):
# hash cannot be cached using cache_it because infinite recurrence
# occurs as hash is needed for setting cache dictionary keys
h = self._mhash
if h is None:
h = hash((type(self).__name__,) + self._hashable_content())
self._mhash = h
return h
def _hashable_content(self):
"""Return a tuple of information about self that can be used to
compute the hash. If a class defines additional attributes,
like ``name`` in Symbol, then this method should be updated
accordingly to return such relevant attributes.
Defining more than _hashable_content is necessary if __eq__ has
been defined by a class. See note about this in Basic.__eq__."""
return self._args
@property
def assumptions0(self):
"""
Return object `type` assumptions.
For example:
Symbol('x', real=True)
Symbol('x', integer=True)
are different objects. In other words, besides Python type (Symbol in
this case), the initial assumptions are also forming their typeinfo.
Examples
========
>>> from sympy import Symbol
>>> from sympy.abc import x
>>> x.assumptions0
{'commutative': True}
>>> x = Symbol("x", positive=True)
>>> x.assumptions0
{'commutative': True, 'complex': True, 'hermitian': True,
'imaginary': False, 'negative': False, 'nonnegative': True,
'nonpositive': False, 'nonzero': True, 'positive': True, 'real': True,
'zero': False}
"""
return {}
def compare(self, other):
"""
Return -1, 0, 1 if the object is smaller, equal, or greater than other.
Not in the mathematical sense. If the object is of a different type
from the "other" then their classes are ordered according to
the sorted_classes list.
Examples
========
>>> from sympy.abc import x, y
>>> x.compare(y)
-1
>>> x.compare(x)
0
>>> y.compare(x)
1
"""
# all redefinitions of __cmp__ method should start with the
# following lines:
if self is other:
return 0
n1 = self.__class__
n2 = other.__class__
c = (n1 > n2) - (n1 < n2)
if c:
return c
#
st = self._hashable_content()
ot = other._hashable_content()
c = (len(st) > len(ot)) - (len(st) < len(ot))
if c:
return c
for l, r in zip(st, ot):
if isinstance(l, Basic):
c = l.compare(r)
elif isinstance(l, frozenset):
c = 0
else:
c = (l > r) - (l < r)
if c:
return c
return 0
@staticmethod
def _compare_pretty(a, b):
from sympy.series.order import Order
if isinstance(a, Order) and not isinstance(b, Order):
return 1
if not isinstance(a, Order) and isinstance(b, Order):
return -1
if a.is_Rational and b.is_Rational:
l = a.p * b.q
r = b.p * a.q
return (l > r) - (l < r)
else:
from sympy.core.symbol import Wild
p1, p2, p3 = Wild("p1"), Wild("p2"), Wild("p3")
r_a = a.match(p1 * p2**p3)
if r_a and p3 in r_a:
a3 = r_a[p3]
r_b = b.match(p1 * p2**p3)
if r_b and p3 in r_b:
b3 = r_b[p3]
c = Basic.compare(a3, b3)
if c != 0:
return c
return Basic.compare(a, b)
@staticmethod
@deprecated(useinstead="default_sort_key", issue=1491, deprecated_since_version="0.7.2")
def compare_pretty(a, b):
"""
Is a > b in the sense of ordering in printing?
THIS FUNCTION IS DEPRECATED. Use ``default_sort_key`` instead.
::
yes ..... return 1
no ...... return -1
equal ... return 0
Strategy:
It uses Basic.compare as a fallback, but improves it in many cases,
like ``x**3``, ``x**4``, ``O(x**3)`` etc. In those simple cases, it just parses the
expression and returns the "sane" ordering such as::
1 < x < x**2 < x**3 < O(x**4) etc.
Examples
========
>>> from sympy.abc import x
>>> from sympy import Basic, Number
>>> Basic._compare_pretty(x, x**2)
-1
>>> Basic._compare_pretty(x**2, x**2)
0
>>> Basic._compare_pretty(x**3, x**2)
1
>>> Basic._compare_pretty(Number(1, 2), Number(1, 3))
1
>>> Basic._compare_pretty(Number(0), Number(-1))
1
"""
try:
a = _sympify(a)
except SympifyError:
pass
try:
b = _sympify(b)
except SympifyError:
pass
if not isinstance(b, Basic):
return +1 # sympy > other
# now both objects are from SymPy, so we can proceed to usual comparison
a = a.sort_key()
b = b.sort_key()
return (a > b) - (a < b)
@classmethod
def fromiter(cls, args, **assumptions):
"""
Create a new object from an iterable.
This is a convenience function that allows one to create objects from
any iterable, without having to convert to a list or tuple first.
Examples
========
>>> from sympy import Tuple
>>> Tuple.fromiter(i for i in range(5))
(0, 1, 2, 3, 4)
"""
return cls(*tuple(args), **assumptions)
@classmethod
def class_key(cls):
"""Nice order of classes. """
return 5, 0, cls.__name__
@cacheit
def sort_key(self, order=None):
"""
Return a sort key.
Examples
========
>>> from sympy.core import S, I
>>> sorted([S(1)/2, I, -I], key=lambda x: x.sort_key())
[1/2, -I, I]
>>> S("[x, 1/x, 1/x**2, x**2, x**(1/2), x**(1/4), x**(3/2)]")
[x, 1/x, x**(-2), x**2, sqrt(x), x**(1/4), x**(3/2)]
>>> sorted(_, key=lambda x: x.sort_key())
[x**(-2), 1/x, x**(1/4), sqrt(x), x, x**(3/2), x**2]
"""
# XXX: remove this when issue #2070 is fixed
def inner_key(arg):
if isinstance(arg, Basic):
return arg.sort_key(order)
else:
return arg
args = self._sorted_args
args = len(args), tuple([ inner_key(arg) for arg in args ])
return self.class_key(), args, S.One.sort_key(), S.One
def __eq__(self, other):
"""Return a boolean indicating whether a == b on the basis of
their symbolic trees.
This is the same as a.compare(b) == 0 but faster.
Notes
=====
If a class that overrides __eq__() needs to retain the
implementation of __hash__() from a parent class, the
interpreter must be told this explicitly by setting __hash__ =
<ParentClass>.__hash__. Otherwise the inheritance of __hash__()
will be blocked, just as if __hash__ had been explicitly set to
None.
References
==========
from http://docs.python.org/dev/reference/datamodel.html#object.__hash__
"""
if type(self) is not type(other):
# issue 3001 a**1.0 == a like a**2.0 == a**2
while isinstance(self, C.Pow) and self.exp == 1:
self = self.base
while isinstance(other, C.Pow) and other.exp == 1:
other = other.base
try:
other = _sympify(other)
except SympifyError:
return False # sympy != other
if type(self) is not type(other):
return False
return self._hashable_content() == other._hashable_content()
def __ne__(self, other):
"""a != b -> Compare two symbolic trees and see whether they are different
this is the same as:
a.compare(b) != 0
but faster
"""
if type(self) is not type(other):
try:
other = _sympify(other)
except SympifyError:
return True # sympy != other
if type(self) is not type(other):
return True
return self._hashable_content() != other._hashable_content()
def dummy_eq(self, other, symbol=None):
"""
Compare two expressions and handle dummy symbols.
Examples
========
>>> from sympy import Dummy
>>> from sympy.abc import x, y
>>> u = Dummy('u')
>>> (u**2 + 1).dummy_eq(x**2 + 1)
True
>>> (u**2 + 1) == (x**2 + 1)
False
>>> (u**2 + y).dummy_eq(x**2 + y, x)
True
>>> (u**2 + y).dummy_eq(x**2 + y, y)
False
"""
dummy_symbols = [ s for s in self.free_symbols if s.is_Dummy ]
if not dummy_symbols:
return self == other
elif len(dummy_symbols) == 1:
dummy = dummy_symbols.pop()
else:
raise ValueError(
"only one dummy symbol allowed on the left-hand side")
if symbol is None:
symbols = other.free_symbols
if not symbols:
return self == other
elif len(symbols) == 1:
symbol = symbols.pop()
else:
raise ValueError("specify a symbol in which expressions should be compared")
tmp = dummy.__class__()
return self.subs(dummy, tmp) == other.subs(symbol, tmp)
# Note, we always use the default ordering (lex) in __str__ and __repr__,
# regardless of the global setting. See issue 2388.
def __repr__(self):
from sympy.printing import sstr
return sstr(self, order=None)
def __str__(self):
from sympy.printing import sstr
return sstr(self, order=None)
def atoms(self, *types):
"""Returns the atoms that form the current object.
By default, only objects that are truly atomic and can't
be divided into smaller pieces are returned: symbols, numbers,
and number symbols like I and pi. It is possible to request
atoms of any type, however, as demonstrated below.
Examples
========
>>> from sympy import I, pi, sin
>>> from sympy.abc import x, y
>>> (1 + x + 2*sin(y + I*pi)).atoms()
set([1, 2, I, pi, x, y])
If one or more types are given, the results will contain only
those types of atoms.
Examples
========
>>> from sympy import Number, NumberSymbol, Symbol
>>> (1 + x + 2*sin(y + I*pi)).atoms(Symbol)
set([x, y])
>>> (1 + x + 2*sin(y + I*pi)).atoms(Number)
set([1, 2])
>>> (1 + x + 2*sin(y + I*pi)).atoms(Number, NumberSymbol)
set([1, 2, pi])
>>> (1 + x + 2*sin(y + I*pi)).atoms(Number, NumberSymbol, I)
set([1, 2, I, pi])
Note that I (imaginary unit) and zoo (complex infinity) are special
types of number symbols and are not part of the NumberSymbol class.
The type can be given implicitly, too:
>>> (1 + x + 2*sin(y + I*pi)).atoms(x) # x is a Symbol
set([x, y])
Be careful to check your assumptions when using the implicit option
since ``S(1).is_Integer = True`` but ``type(S(1))`` is ``One``, a special type
of sympy atom, while ``type(S(2))`` is type ``Integer`` and will find all
integers in an expression:
>>> from sympy import S
>>> (1 + x + 2*sin(y + I*pi)).atoms(S(1))
set([1])
>>> (1 + x + 2*sin(y + I*pi)).atoms(S(2))
set([1, 2])
Finally, arguments to atoms() can select more than atomic atoms: any
sympy type (loaded in core/__init__.py) can be listed as an argument
and those types of "atoms" as found in scanning the arguments of the
expression recursively:
>>> from sympy import Function, Mul
>>> from sympy.core.function import AppliedUndef
>>> f = Function('f')
>>> (1 + f(x) + 2*sin(y + I*pi)).atoms(Function)
set([f(x), sin(y + I*pi)])
>>> (1 + f(x) + 2*sin(y + I*pi)).atoms(AppliedUndef)
set([f(x)])
>>> (1 + x + 2*sin(y + I*pi)).atoms(Mul)
set([I*pi, 2*sin(y + I*pi)])
"""
if types:
types = tuple(
[t if isinstance(t, type) else type(t) for t in types])
else:
types = (Atom,)
result = set()
for expr in preorder_traversal(self):
if isinstance(expr, types):
result.add(expr)
return result
@property
def free_symbols(self):
"""Return from the atoms of self those which are free symbols.
For most expressions, all symbols are free symbols. For some classes
this is not true. e.g. Integrals use Symbols for the dummy variables
which are bound variables, so Integral has a method to return all symbols
except those. Derivative keeps track of symbols with respect to which it
will perform a derivative; those are bound variables, too, so it has
its own symbols method.
Any other method that uses bound variables should implement a symbols
method."""
union = set.union
return reduce(union, [arg.free_symbols for arg in self.args], set())
@property
def canonical_variables(self):
"""Return a dictionary mapping any variable defined in
``self.variables`` to underscore-suffixed numbers
corresponding to their position in ``self.variables``. Enough
underscores are added to ensure that there will be no clash with
existing free symbols.
Examples
========
>>> from sympy import Lambda
>>> from sympy.abc import x
>>> Lambda(x, 2*x).canonical_variables
{x: 0_}
"""
if not hasattr(self, 'variables'):
return {}
u = "_"
while any(s.name.endswith(u) for s in self.free_symbols):
u += "_"
name = '%%i%s' % u
V = self.variables
return dict(list(zip(V, [C.Symbol(name % i, **v.assumptions0)
for i, v in enumerate(V)])))
def is_hypergeometric(self, k):
from sympy.simplify import hypersimp
return hypersimp(self, k) is not None
@property
def is_number(self):
"""Returns ``True`` if 'self' contains no free symbols.
See Also
========
is_comparable
sympy.core.expr.is_number
"""
# should be overriden by subclasses
return False
@property
def is_comparable(self):
"""Return True if self can be computed to a real number
with precision, else False.
Examples
========
>>> from sympy import exp_polar, pi, I
>>> (I*exp_polar(I*pi/2)).is_comparable
True
>>> (I*exp_polar(I*pi*2)).is_comparable
False
"""
is_real = self.is_real
if is_real is False:
return False
is_number = self.is_number
if is_number is False:
return False
if is_real and is_number:
return True
n, i = [p.evalf(2) for p in self.as_real_imag()]
if not i.is_Number or not n.is_Number:
return False
if i:
# if _prec = 1 we can't decide and if not,
# the answer is False so return False
return False
else:
return n._prec != 1
@property
def func(self):
"""
The top-level function in an expression.
The following should hold for all objects::
>> x == x.func(*x.args)
Examples
========
>>> from sympy.abc import x
>>> a = 2*x
>>> a.func
<class 'sympy.core.mul.Mul'>
>>> a.args
(2, x)
>>> a.func(*a.args)
2*x
>>> a == a.func(*a.args)
True
"""
return self.__class__
@property
def args(self):
"""Returns a tuple of arguments of 'self'.
Examples
========
>>> from sympy import cot
>>> from sympy.abc import x, y
>>> cot(x).args
(x,)
>>> cot(x).args[0]
x
>>> (x*y).args
(x, y)
>>> (x*y).args[1]
y
Notes
=====
Never use self._args, always use self.args.
Only use _args in __new__ when creating a new function.
Don't override .args() from Basic (so that it's easy to
change the interface in the future if needed).
"""
return self._args
@property
def _sorted_args(self):
"""
The same as ``args``. Derived classes which don't fix an
order on their arguments should override this method to
produce the sorted representation.
"""
return self.args
def iter_basic_args(self):
"""
Iterates arguments of ``self``.
Examples
========
>>> from sympy.abc import x
>>> a = 2*x
>>> a.iter_basic_args()
<...iterator object at 0x...>
>>> list(a.iter_basic_args())
[2, x]
"""
return iter(self.args)
def as_poly(self, *gens, **args):
"""Converts ``self`` to a polynomial or returns ``None``.
>>> from sympy import sin
>>> from sympy.abc import x, y
>>> print((x**2 + x*y).as_poly())
Poly(x**2 + x*y, x, y, domain='ZZ')
>>> print((x**2 + x*y).as_poly(x, y))
Poly(x**2 + x*y, x, y, domain='ZZ')
>>> print((x**2 + sin(y)).as_poly(x, y))
None
"""
from sympy.polys import Poly, PolynomialError
try:
poly = Poly(self, *gens, **args)
if not poly.is_Poly:
return None
else:
return poly
except PolynomialError:
return None
def as_content_primitive(self, radical=False):
"""A stub to allow Basic args (like Tuple) to be skipped when computing
the content and primitive components of an expression.
See docstring of Expr.as_content_primitive
"""
return S.One, self
def subs(self, *args, **kwargs):
"""
Substitutes old for new in an expression after sympifying args.
`args` is either:
- two arguments, e.g. foo.subs(old, new)
- one iterable argument, e.g. foo.subs(iterable). The iterable may be
o an iterable container with (old, new) pairs. In this case the
replacements are processed in the order given with successive
patterns possibly affecting replacements already made.
o a dict or set whose key/value items correspond to old/new pairs.
In this case the old/new pairs will be sorted by op count and in
case of a tie, by number of args and the default_sort_key. The
resulting sorted list is then processed as an iterable container
(see previous).
If the keyword ``simultaneous`` is True, the subexpressions will not be
evaluated until all the substitutions have been made.
Examples
========
>>> from sympy import pi, exp
>>> from sympy.abc import x, y
>>> (1 + x*y).subs(x, pi)
pi*y + 1
>>> (1 + x*y).subs({x:pi, y:2})
1 + 2*pi
>>> (1 + x*y).subs([(x, pi), (y, 2)])
1 + 2*pi
>>> reps = [(y, x**2), (x, 2)]
>>> (x + y).subs(reps)
6
>>> (x + y).subs(reversed(reps))
x**2 + 2
>>> (x**2 + x**4).subs(x**2, y)
y**2 + y
To replace only the x**2 but not the x**4, use xreplace:
>>> (x**2 + x**4).xreplace({x**2: y})
x**4 + y
To delay evaluation until all substitutions have been made,
set the keyword ``simultaneous`` to True:
>>> (x/y).subs([(x, 0), (y, 0)])
0
>>> (x/y).subs([(x, 0), (y, 0)], simultaneous=True)
nan
This has the added feature of not allowing subsequent substitutions
to affect those already made:
>>> ((x + y)/y).subs({x + y: y, y: x + y})
1
>>> ((x + y)/y).subs({x + y: y, y: x + y}, simultaneous=True)
y/(x + y)
In order to obtain a canonical result, unordered iterables are
sorted by count_op length, number of arguments and by the
default_sort_key to break any ties. All other iterables are left
unsorted.
>>> from sympy import sqrt, sin, cos
>>> from sympy.abc import a, b, c, d, e
>>> A = (sqrt(sin(2*x)), a)
>>> B = (sin(2*x), b)
>>> C = (cos(2*x), c)
>>> D = (x, d)
>>> E = (exp(x), e)
>>> expr = sqrt(sin(2*x))*sin(exp(x)*x)*cos(2*x) + sin(2*x)
>>> expr.subs(dict([A,B,C,D,E]))
a*c*sin(d*e) + b
See Also
========
replace: replacement capable of doing wildcard-like matching,
parsing of match, and conditional replacements
xreplace: exact node replacement in expr tree; also capable of
using matching rules
"""
from sympy.core.containers import Dict
from sympy.utilities import default_sort_key
unordered = False
if len(args) == 1:
sequence = args[0]
if isinstance(sequence, set):
unordered = True
elif isinstance(sequence, (Dict, dict)):
unordered = True
sequence = sequence.items()
elif not iterable(sequence):
from sympy.utilities.misc import filldedent
raise ValueError(filldedent("""
When a single argument is passed to subs
it should be a dictionary of old: new pairs or an iterable
of (old, new) tuples."""))
elif len(args) == 2:
sequence = [args]
else:
raise ValueError("subs accepts either 1 or 2 arguments")
sequence = list(sequence)
for i in range(len(sequence)):
o, n = sequence[i]
so, sn = sympify(o), sympify(n)
if not isinstance(so, Basic):
if type(o) is str:
so = C.Symbol(o)
sequence[i] = (so, sn)
if _aresame(so, sn):
sequence[i] = None
continue
sequence = list(filter(None, sequence))
if unordered:
sequence = dict(sequence)
if not all(k.is_Atom for k in sequence):
d = {}
for o, n in sequence.items():
try:
ops = o.count_ops(), len(o.args)
except TypeError:
ops = (0, 0)
d.setdefault(ops, []).append((o, n))
newseq = []
for k in sorted(d.keys(), reverse=True):
newseq.extend(
sorted([v[0] for v in d[k]], key=default_sort_key))
sequence = [(k, sequence[k]) for k in newseq]
del newseq, d
else:
sequence = sorted([(k, v) for (k, v) in sequence.items()],
key=default_sort_key)
if kwargs.pop('simultaneous', False): # XXX should this be the default for dict subs?
reps = {}
rv = self
for old, new in sequence:
d = C.Dummy()
rv = rv._subs(old, d, **kwargs)
reps[d] = new
if not isinstance(rv, Basic):
break
return rv.xreplace(reps)
else:
rv = self
for old, new in sequence:
rv = rv._subs(old, new, **kwargs)
if not isinstance(rv, Basic):
break
return rv
@cacheit
def _subs(self, old, new, **hints):
"""Substitutes an expression old -> new.
If self is not equal to old then _eval_subs is called.
If _eval_subs doesn't want to make any special replacement
then a None is received which indicates that the fallback
should be applied wherein a search for replacements is made
amongst the arguments of self.
>>> from sympy import Add
>>> from sympy.abc import x, y, z
Examples
========
Add's _eval_subs knows how to target x + y in the following
so it makes the change:
>>> (x + y + z).subs(x + y, 1)
z + 1
Add's _eval_subs doesn't need to know how to find x + y in
the following:
>>> Add._eval_subs(z*(x + y) + 3, x + y, 1) is None
True
The returned None will cause the fallback routine to traverse the args and
pass the z*(x + y) arg to Mul where the change will take place and the
substitution will succeed:
>>> (z*(x + y) + 3).subs(x + y, 1)
z + 3
** Developers Notes **
An _eval_subs routine for a class should be written if:
1) any arguments are not instances of Basic (e.g. bool, tuple);
2) some arguments should not be targeted (as in integration
variables);
3) if there is something other than a literal replacement
that should be attempted (as in Piecewise where the condition
may be updated without doing a replacement).
If it is overridden, here are some special cases that might arise:
1) If it turns out that no special change was made and all
the original sub-arguments should be checked for
replacements then None should be returned.
2) If it is necessary to do substitutions on a portion of
the expression then _subs should be called. _subs will
handle the case of any sub-expression being equal to old
(which usually would not be the case) while its fallback
will handle the recursion into the sub-arguments. For
example, after Add's _eval_subs removes some matching terms
it must process the remaining terms so it calls _subs
on each of the un-matched terms and then adds them
onto the terms previously obtained.
3) If the initial expression should remain unchanged then
the original expression should be returned. (Whenever an
expression is returned, modified or not, no further
substitution of old -> new is attempted.) Sum's _eval_subs
routine uses this strategy when a substitution is attempted
on any of its summation variables.
"""
def fallback(self, old, new):
"""
Try to replace old with new in any of self's arguments.
"""
hit = False
args = list(self.args)
for i, arg in enumerate(args):
if not hasattr(arg, '_eval_subs'):
continue
arg = arg._subs(old, new, **hints)
if arg is not args[i]:
hit = True
args[i] = arg
if hit:
rv = self.func(*args)
hack2 = hints.get('hack2', False)
if hack2 and self.is_Mul and not rv.is_Mul: # 2-arg hack
coeff = S.One
nonnumber = []
for i in args:
if i.is_Number:
coeff *= i
else:
nonnumber.append(i)
nonnumber = self.func(*nonnumber)
if coeff is S.One:
return nonnumber
else:
return self.func(coeff, nonnumber, evaluate=False)
return rv
return self
if _aresame(self, old):
return new
rv = self._eval_subs(old, new)
if rv is None:
rv = fallback(self, old, new)
return rv
def _eval_subs(self, old, new):
"""Override this stub if you want to do anything more than
attempt a replacement of old with new in the arguments of self.
See also: _subs
"""
return None
def xreplace(self, rule):
"""
Replace occurrences of objects within the expression.
Parameters
==========
rule : dict-like
Expresses a replacement rule
Returns
=======
xreplace : the result of the replacement
Examples
========
>>> from sympy import symbols, pi, exp
>>> x, y, z = symbols('x y z')
>>> (1 + x*y).xreplace({x: pi})
pi*y + 1
>>> (1 + x*y).xreplace({x:pi, y:2})
1 + 2*pi
Replacements occur only if an entire node in the expression tree is
matched:
>>> (x*y + z).xreplace({x*y: pi})
z + pi
>>> (x*y*z).xreplace({x*y: pi})
x*y*z
>>> (2*x).xreplace({2*x: y, x: z})
y
>>> (2*2*x).xreplace({2*x: y, x: z})
4*z
>>> (x + y + 2).xreplace({x + y: 2})
x + y + 2
>>> (x + 2 + exp(x + 2)).xreplace({x + 2: y})
x + exp(y) + 2
xreplace doesn't differentiate between free and bound symbols. In the
following, subs(x, y) would not change x since it is a bound symbol,
but xreplace does:
>>> from sympy import Integral
>>> Integral(x, (x, 1, 2*x)).xreplace({x: y})
Integral(y, (y, 1, 2*y))
Trying to replace x with an expression raises an error:
>>> Integral(x, (x, 1, 2*x)).xreplace({x: 2*y}) #doctest: +SKIP
ValueError: Invalid limits given: ((2*y, 1, 4*y),)
See Also
========
replace: replacement capable of doing wildcard-like matching,
parsing of match, and conditional replacements
subs: substitution of subexpressions as defined by the objects
themselves.
"""
if self in rule:
return rule[self]
elif rule:
args = []
for a in self.args:
try:
args.append(a.xreplace(rule))
except AttributeError:
args.append(a)
args = tuple(args)
if not _aresame(args, self.args):
return self.func(*args)
return self
@deprecated(useinstead="has", issue=2389, deprecated_since_version="0.7.2")
def __contains__(self, obj):
if self == obj:
return True
for arg in self.args:
try:
if obj in arg:
return True
except TypeError:
if obj == arg:
return True
return False
@cacheit
def has(self, *patterns):
"""
Test whether any subexpression matches any of the patterns.
Examples
========
>>> from sympy import sin
>>> from sympy.abc import x, y, z
>>> (x**2 + sin(x*y)).has(z)
False
>>> (x**2 + sin(x*y)).has(x, y, z)
True
>>> x.has(x)
True
Note that ``expr.has(*patterns)`` is exactly equivalent to
``any(expr.has(p) for p in patterns)``. In particular, ``False`` is
returned when the list of patterns is empty.
>>> x.has()
False
"""
return any(self._has(pattern) for pattern in patterns)
def _has(self, pattern):
"""Helper for .has()"""
from sympy.core.function import UndefinedFunction, Function
if isinstance(pattern, UndefinedFunction):
return any(f.func == pattern or f == pattern
for f in self.atoms(Function, UndefinedFunction))
pattern = sympify(pattern)
if isinstance(pattern, BasicType):
return any(isinstance(arg, pattern)
for arg in preorder_traversal(self))
try:
match = pattern._has_matcher()
return any(match(arg) for arg in preorder_traversal(self))
except AttributeError:
return any(arg == pattern for arg in preorder_traversal(self))
def _has_matcher(self):
"""Helper for .has()"""
return self.__eq__
def replace(self, query, value, map=False, simultaneous=True, exact=False):
"""
Replace matching subexpressions of ``self`` with ``value``.
If ``map = True`` then also return the mapping {old: new} where ``old``
was a sub-expression found with query and ``new`` is the replacement
value for it. If the expression itself doesn't match the query, then
the returned value will be ``self.xreplace(map)`` otherwise it should
be ``self.subs(ordered(map.items()))``.
Traverses an expression tree and performs replacement of matching
subexpressions from the bottom to the top of the tree. The default
approach is to do the replacement in a simultaneous fashion so
changes made are targeted only once. If this is not desired or causes
problems, ``simultaneous`` can be set to False. In addition, if an
expression containing more than one Wild symbol is being used to match
subexpressions and the ``exact`` flag is True, then the match will only
succeed if non-zero values are received for each Wild that appears in
the match pattern.
The list of possible combinations of queries and replacement values
is listed below:
Examples
========
Initial setup
>>> from sympy import log, sin, cos, tan, Wild, Mul, Add
>>> from sympy.abc import x, y
>>> f = log(sin(x)) + tan(sin(x**2))
1.1. type -> type
obj.replace(type, newtype)
When object of type ``type`` is found, replace it with the
result of passing its argument(s) to ``newtype``.
>>> f.replace(sin, cos)
log(cos(x)) + tan(cos(x**2))
>>> sin(x).replace(sin, cos, map=True)
(cos(x), {sin(x): cos(x)})
>>> (x*y).replace(Mul, Add)
x + y
1.2. type -> func
obj.replace(type, func)
When object of type ``type`` is found, apply ``func`` to its
argument(s). ``func`` must be written to handle the number
of arguments of ``type``.
>>> f.replace(sin, lambda arg: sin(2*arg))
log(sin(2*x)) + tan(sin(2*x**2))
>>> (x*y).replace(Mul, lambda *args: sin(2*Mul(*args)))
sin(2*x*y)
2.1. pattern -> expr
obj.replace(pattern(wild), expr(wild))
Replace subexpressions matching ``pattern`` with the expression
written in terms of the Wild symbols in ``pattern``.
>>> a = Wild('a')
>>> f.replace(sin(a), tan(a))
log(tan(x)) + tan(tan(x**2))
>>> f.replace(sin(a), tan(a/2))
log(tan(x/2)) + tan(tan(x**2/2))
>>> f.replace(sin(a), a)
log(x) + tan(x**2)
>>> (x*y).replace(a*x, a)
y
When the default value of False is used with patterns that have
more than one Wild symbol, non-intuitive results may be obtained:
>>> b = Wild('b')
>>> (2*x).replace(a*x + b, b - a)
2/x
For this reason, the ``exact`` option can be used to make the
replacement only when the match gives non-zero values for all
Wild symbols:
>>> (2*x + y).replace(a*x + b, b - a, exact=True)
y - 2
>>> (2*x).replace(a*x + b, b - a, exact=True)
2*x
2.2. pattern -> func
obj.replace(pattern(wild), lambda wild: expr(wild))
All behavior is the same as in 2.1 but now a function in terms of
pattern variables is used rather than an expression:
>>> f.replace(sin(a), lambda a: sin(2*a))
log(sin(2*x)) + tan(sin(2*x**2))
3.1. func -> func
obj.replace(filter, func)
Replace subexpression ``e`` with ``func(e)`` if ``filter(e)``
is True.
>>> g = 2*sin(x**3)
>>> g.replace(lambda expr: expr.is_Number, lambda expr: expr**2)
4*sin(x**9)
The expression itself is also targeted by the query, but the replacement
is done in such a fashion that changes are not made twice.
>>> e = x*(x*y + 1)
>>> e.replace(lambda x: x.is_Mul, lambda x: 2*x)
2*x*(2*x*y + 1)
See Also
========
subs: substitution of subexpressions as defined by the objects
themselves.
xreplace: exact node replacement in expr tree; also capable of
using matching rules
"""
from sympy.core.symbol import Dummy
from sympy.simplify.simplify import bottom_up
try:
query = sympify(query)
except SympifyError:
pass
try:
value = sympify(value)
except SympifyError:
pass
if isinstance(query, type):
_query = lambda expr: isinstance(expr, query)
if isinstance(value, type):
_value = lambda expr, result: value(*expr.args)
elif callable(value):
_value = lambda expr, result: value(*expr.args)
else:
raise TypeError(
"given a type, replace() expects another "
"type or a callable")
elif isinstance(query, Basic):
_query = lambda expr: expr.match(query)
# XXX remove the exact flag and make multi-symbol
# patterns use exact=True semantics; to do this the query must
# be tested to find out how many Wild symbols are present.
# See https://groups.google.com/forum/
# ?fromgroups=#!topic/sympy/zPzo5FtRiqI
# for a method of inspecting a function to know how many
# parameters it has.
if isinstance(value, Basic):
if exact:
_value = lambda expr, result: (value.subs(result)
if all(val for val in result.values()) else expr)
else:
_value = lambda expr, result: value.subs(result)
elif callable(value):
# match dictionary keys get the trailing underscore stripped
# from them and are then passed as keywords to the callable;
# if ``exact`` is True, only accept match if there are no null
# values amongst those matched.
if exact:
_value = lambda expr, result: (value(**dict([ (
str(key)[:-1], val) for key, val in result.items()]))
if all(val for val in result.values()) else expr)
else:
_value = lambda expr, result: value(**dict([ (
str(key)[:-1], val) for key, val in result.items()]))
else:
raise TypeError(
"given an expression, replace() expects "
"another expression or a callable")
elif callable(query):
_query = query
if callable(value):
_value = lambda expr, result: value(expr)
else:
raise TypeError(
"given a callable, replace() expects "
"another callable")
else:
raise TypeError(
"first argument to replace() must be a "
"type, an expression or a callable")
mapping = {} # changes that took place
mask = [] # the dummies that were used as change placeholders
def rec_replace(expr):
result = _query(expr)
if result or result == {}:
new = _value(expr, result)
if new is not None and new != expr:
mapping[expr] = new
if simultaneous:
# don't let this expression be changed during rebuilding
d = Dummy()
mask.append((d, new))
expr = d
else:
expr = new
return expr
rv = bottom_up(self, rec_replace, atoms=True)
# restore original expressions for Dummy symbols
if simultaneous:
mask = list(reversed(mask))
for o, n in mask:
r = {o: n}
rv = rv.xreplace(r)
if not map:
return rv
else:
if simultaneous:
# restore subexpressions in mapping
for o, n in mask:
r = {o: n}
mapping = dict([(k.xreplace(r), v.xreplace(r))
for k, v in mapping.items()])
return rv, mapping
def find(self, query, group=False):
"""Find all subexpressions matching a query. """
query = _make_find_query(query)
results = list(filter(query, preorder_traversal(self)))
if not group:
return set(results)
else:
groups = {}
for result in results:
if result in groups:
groups[result] += 1
else:
groups[result] = 1
return groups
def count(self, query):
"""Count the number of matching subexpressions. """
query = _make_find_query(query)
return sum(bool(query(sub)) for sub in preorder_traversal(self))
def matches(self, expr, repl_dict={}, old=False):
"""
Helper method for match() that looks for a match between Wild symbols
in self and expressions in expr.
Examples
========
>>> from sympy import symbols, Wild, Basic
>>> a, b, c = symbols('a b c')
>>> x = Wild('x')
>>> Basic(a + x, x).matches(Basic(a + b, c)) is None
True
>>> Basic(a + x, x).matches(Basic(a + b + c, b + c))
{x_: b + c}
"""
expr = sympify(expr)
if not isinstance(expr, self.__class__):
return None
if self == expr:
return repl_dict
if len(self.args) != len(expr.args):
return None
d = repl_dict.copy()
for arg, other_arg in zip(self.args, expr.args):
if arg == other_arg:
continue
d = arg.xreplace(d).matches(other_arg, d, old=old)
if d is None:
return None
return d
def match(self, pattern, old=False):
"""
Pattern matching.
Wild symbols match all.
Return ``None`` when expression (self) does not match
with pattern. Otherwise return a dictionary such that::
pattern.xreplace(self.match(pattern)) == self
Examples
========
>>> from sympy import Wild
>>> from sympy.abc import x, y
>>> p = Wild("p")
>>> q = Wild("q")
>>> r = Wild("r")
>>> e = (x+y)**(x+y)
>>> e.match(p**p)
{p_: x + y}
>>> e.match(p**q)
{p_: x + y, q_: x + y}
>>> e = (2*x)**2
>>> e.match(p*q**r)
{p_: 4, q_: x, r_: 2}
>>> (p*q**r).xreplace(e.match(p*q**r))
4*x**2
The ``old`` flag will give the old-style pattern matching where
expressions and patterns are essentially solved to give the
match. Both of the following give None unless ``old=True``:
>>> (x - 2).match(p - x, old=True)
{p_: 2*x - 2}
>>> (2/x).match(p*x, old=True)
{p_: 2/x**2}
"""
from sympy import signsimp
pattern = sympify(pattern)
s = signsimp(self)
p = signsimp(pattern)
# if we still have the same relationship between the types of
# input, then use the sign simplified forms
if (pattern.func == self.func) and (s.func == p.func):
rv = p.matches(s, old=old)
else:
rv = pattern.matches(self, old=old)
return rv
def count_ops(self, visual=None):
"""wrapper for count_ops that returns the operation count."""
from sympy import count_ops
return count_ops(self, visual)
def doit(self, **hints):
"""Evaluate objects that are not evaluated by default like limits,
integrals, sums and products. All objects of this kind will be
evaluated recursively, unless some species were excluded via 'hints'
or unless the 'deep' hint was set to 'False'.
>>> from sympy import Integral
>>> from sympy.abc import x
>>> 2*Integral(x, x)
2*Integral(x, x)
>>> (2*Integral(x, x)).doit()
x**2
>>> (2*Integral(x, x)).doit(deep = False)
2*Integral(x, x)
"""
if hints.get('deep', True):
terms = [ term.doit(**hints) if isinstance(term, Basic) else term
for term in self.args ]
return self.func(*terms)
else:
return self
def _eval_rewrite(self, pattern, rule, **hints):
if self.is_Atom:
if hasattr(self, rule):
return getattr(self, rule)()
return self
sargs = self.args
terms = [ t._eval_rewrite(pattern, rule, **hints)
if isinstance(t, Basic) else t
for t in sargs ]
return self.func(*terms)
def rewrite(self, *args, **hints):
""" Rewrite functions in terms of other functions.
Rewrites expression containing applications of functions
of one kind in terms of functions of different kind. For
example you can rewrite trigonometric functions as complex
exponentials or combinatorial functions as gamma function.
As a pattern this function accepts a list of functions
to rewrite (instances of DefinedFunction class). As the rule
you can use a string or a destination function instance (in
the latter case rewrite() will use the str() function).
There is also the possibility to pass hints on how to rewrite
the given expressions. For now there is only one such hint
defined, called 'deep'. When 'deep' is set to False it will
forbid functions to rewrite their contents.
Examples
========
>>> from sympy import sin, exp
>>> from sympy.abc import x
Unspecified pattern:
>>> sin(x).rewrite(exp)
-I*(exp(I*x) - exp(-I*x))/2
Pattern as a single function:
>>> sin(x).rewrite(sin, exp)
-I*(exp(I*x) - exp(-I*x))/2
Pattern as a list of functions:
>>> sin(x).rewrite([sin, ], exp)
-I*(exp(I*x) - exp(-I*x))/2
"""
if not args:
return self
else:
pattern = args[:-1]
if isinstance(args[-1], string_types):
rule = '_eval_rewrite_as_' + args[-1]
else:
rule = '_eval_rewrite_as_' + args[-1].__name__
if not pattern:
return self._eval_rewrite(None, rule, **hints)
else:
if iterable(pattern[0]):
pattern = pattern[0]
pattern = [ p.__class__ for p in pattern if self.has(p) ]
if pattern:
return self._eval_rewrite(tuple(pattern), rule, **hints)
else:
return self
class Atom(Basic):
"""
A parent class for atomic things. An atom is an expression with no subexpressions.
Examples
========
Symbol, Number, Rational, Integer, ...
But not: Add, Mul, Pow, ...
"""
is_Atom = True
__slots__ = []
def matches(self, expr, repl_dict={}, old=False):
if self == expr:
return repl_dict
def xreplace(self, rule, hack2=False):
return rule.get(self, self)
def doit(self, **hints):
return self
@classmethod
def class_key(cls):
return 2, 0, cls.__name__
@cacheit
def sort_key(self, order=None):
from sympy.core import S
return self.class_key(), (1, (str(self),)), S.One.sort_key(), S.One
def _eval_simplify(self, ratio, measure):
return self
@property
def _sorted_args(self):
# this is here as a safeguard against accidentally using _sorted_args
# on Atoms -- they cannot be rebuilt as atom.func(*atom._sorted_args)
# since there are no args. So the calling routine should be checking
# to see that this property is not called for Atoms.
raise AttributeError('Atoms have no args. It might be necessary'
' to make a check for Atoms in the calling code.')
def _aresame(a, b):
"""Return True if a and b are structurally the same, else False.
Examples
========
To SymPy, 2.0 == 2:
>>> from sympy import S
>>> 2.0 == S(2)
True
Since a simple 'same or not' result is sometimes useful, this routine was
written to provide that query:
>>> from sympy.core.basic import _aresame
>>> _aresame(S(2.0), S(2))
False
"""
for i, j in zip(preorder_traversal(a), preorder_traversal(b)):
if i != j or type(i) != type(j):
return False
else:
return True
def _atomic(e):
"""Return atom-like quantities as far as substitution is
concerned: Derivatives, Functions and Symbols. Don't
return any 'atoms' that are inside such quantities unless
they also appear outside, too.
Examples
========
>>> from sympy import Derivative, Function, cos
>>> from sympy.abc import x, y
>>> from sympy.core.basic import _atomic
>>> f = Function('f')
>>> _atomic(x + y)
set([x, y])
>>> _atomic(x + f(y))
set([x, f(y)])
>>> _atomic(Derivative(f(x), x) + cos(x) + y)
set([y, cos(x), Derivative(f(x), x)])
"""
from sympy import Derivative, Function, Symbol
pot = preorder_traversal(e)
seen = set()
try:
free = e.free_symbols
except AttributeError:
return set([e])
atoms = set()
for p in pot:
if p in seen:
pot.skip()
continue
seen.add(p)
if isinstance(p, Symbol) and p in free:
atoms.add(p)
elif isinstance(p, (Derivative, Function)):
pot.skip()
atoms.add(p)
return atoms
class preorder_traversal(Iterator):
"""
Do a pre-order traversal of a tree.
This iterator recursively yields nodes that it has visited in a pre-order
fashion. That is, it yields the current node, then descends depth-first
through the tree to yield each child's pre-order traversal in turn.
For an expression, the order of the traversal depends on the order of
.args, which in many cases can be arbitrary.
Parameters
==========
node : sympy expression
The expression to traverse.
keys : (default None) sort key(s)
The key(s) used to sort args of Basic objects. When None, args of Basic
objects are processed in arbitrary order. If key is defined, it will
be passed along to ordered() as the only key(s) to use to sort the
arguments; if ``key`` is simply True then the default keys of ordered
will be used.
Yields
======
subtree : sympy expression
All of the subtrees in the tree.
Examples
========
>>> from sympy import symbols
>>> from sympy.core.basic import preorder_traversal
>>> x, y, z = symbols('x y z')
The nodes are returned in the order that they are encountered unless key
is given; simply passing key=True will guarantee that the traversal is
unique.
>>> list(preorder_traversal((x + y)*z, keys=None)) # doctest: +SKIP
[z*(x + y), z, x + y, y, x]
>>> list(preorder_traversal((x + y)*z, keys=True))
[z*(x + y), z, x + y, x, y]
"""
def __init__(self, node, keys=None):
self._skip_flag = False
self._pt = self._preorder_traversal(node, keys)
def _preorder_traversal(self, node, keys):
yield node
if self._skip_flag:
self._skip_flag = False
return
if isinstance(node, Basic):
args = node.args
if keys:
if keys != True:
args = ordered(args, keys, default=False)
else:
args = ordered(args)
for arg in args:
for subtree in self._preorder_traversal(arg, keys):
yield subtree
elif iterable(node):
for item in node:
for subtree in self._preorder_traversal(item, keys):
yield subtree
def skip(self):
"""
Skip yielding current node's (last yielded node's) subtrees.
Examples
--------
>>> from sympy.core import symbols
>>> from sympy.core.basic import preorder_traversal
>>> x, y, z = symbols('x y z')
>>> pt = preorder_traversal((x+y*z)*z)
>>> for i in pt:
... print(i)
... if i == x+y*z:
... pt.skip()
z*(x + y*z)
z
x + y*z
"""
self._skip_flag = True
def __next__(self):
return next(self._pt)
def __iter__(self):
return self
def _make_find_query(query):
"""Convert the argument of Basic.find() into a callable"""
try:
query = sympify(query)
except SympifyError:
pass
if isinstance(query, type):
return lambda expr: isinstance(expr, query)
elif isinstance(query, Basic):
return lambda expr: expr.match(query) is not None
return query
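# Minimal usage sketch for find()/count() with the query forms handled by
# _make_find_query above (illustrative doctest-style comments, not upstream;
# the set([...]) output follows the old-style repr used in this file):
#
#     >>> from sympy.abc import x, y
#     >>> (x**2 + y).find(lambda e: e.is_Pow)    # callable query
#     set([x**2])
#     >>> (x**2 + y).count(x)                    # Basic query, exact match
#     1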
[end of record: avg_line_length 31.606127 | max_line_length 94 | alphanum_fraction 0.526966]

[record] hexsha: 794ae66ce4d1e65b2a0a4063feaace439b5480eb | size: 6,445 | ext: py | lang: Python
path: config/train.py | repo: prismformore/SDSEN | head_hexsha: 815d1afcf8091eed4c3b35e8a3d56b28b7f3979d
licenses: ["MIT"] | stars: 18 (2019-09-16T10:27:45.000Z to 2021-02-22T13:52:03.000Z) | issues: null | forks: 5 (2019-09-16T14:04:56.000Z to 2022-03-22T12:59:01.000Z)
import os
import sys
import cv2
import argparse
import numpy as np
import torch
from torch import nn
from torch.nn import MSELoss
from torch.optim import Adam
from torch.optim.lr_scheduler import MultiStepLR
from torch.autograd import Variable
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter
import settings
from dataset import TrainValDataset
from model import SDSEN
from cal_ssim import SSIM
logger = settings.logger
torch.cuda.manual_seed_all(66)
torch.manual_seed(66)
torch.cuda.set_device(settings.device_id)
def ensure_dir(dir_path):
if not os.path.isdir(dir_path):
os.makedirs(dir_path)
class Session:
def __init__(self):
self.log_dir = settings.log_dir
self.model_dir = settings.model_dir
ensure_dir(settings.log_dir)
ensure_dir(settings.model_dir)
logger.info('set log dir as %s' % settings.log_dir)
logger.info('set model dir as %s' % settings.model_dir)
self.net = SDSEN().cuda()
self.crit = MSELoss().cuda()
self.ssim = SSIM().cuda()
self.step = 0
self.save_steps = settings.save_steps
self.num_workers = settings.num_workers
self.batch_size = settings.batch_size
self.writers = {}
self.dataloaders = {}
self.opt = Adam(self.net.parameters(), lr=settings.lr)
self.sche = MultiStepLR(self.opt, milestones=[15000, 17500], gamma=0.1)
def tensorboard(self, name):
self.writers[name] = SummaryWriter(os.path.join(self.log_dir, name + '.events'))
return self.writers[name]
def write(self, name, out):
for k, v in out.items():
self.writers[name].add_scalar(k, v, self.step)
out['lr'] = self.opt.param_groups[0]['lr']
out['step'] = self.step
outputs = [
"{}:{:.4g}".format(k, v)
for k, v in out.items()
]
logger.info(name + '--' + ' '.join(outputs))
    def get_dataloader(self, dataset_name):
        # Build and cache the DataLoader once per dataset name; return a fresh
        # iterator so an exhausted epoch can simply be restarted.
        if dataset_name not in self.dataloaders:
            dataset = TrainValDataset(dataset_name)
            self.dataloaders[dataset_name] = \
                DataLoader(dataset, batch_size=self.batch_size,
                           shuffle=True, num_workers=self.num_workers,
                           drop_last=True)
        return iter(self.dataloaders[dataset_name])
def save_checkpoints(self, name):
ckp_path = os.path.join(self.model_dir, name)
obj = {
'net': self.net.state_dict(),
'clock': self.step,
'opt': self.opt.state_dict(),
}
torch.save(obj, ckp_path)
def load_checkpoints(self, name):
ckp_path = os.path.join(self.model_dir, name)
try:
obj = torch.load(ckp_path)
logger.info('Load checkpoint %s' % ckp_path)
except FileNotFoundError:
logger.info('No checkpoint %s!!' % ckp_path)
return
self.net.load_state_dict(obj['net'])
self.opt.load_state_dict(obj['opt'])
self.step = obj['clock']
self.sche.last_epoch = self.step
    def inf_batch(self, name, batch):
        # O: observed (degraded) input, B: clean ground truth; the network
        # predicts the residual R = O - B at every stage of its output O_Rs.
        O, B = batch['O'].cuda(), batch['B'].cuda()
        O, B = Variable(O, requires_grad=False), Variable(B, requires_grad=False)
        R = O - B
        O_Rs = self.net(O)
        # Deep supervision: one MSE term per stage; the SSIM of each derained
        # estimate (O - O_R) against the clean image (O - R == B) is logged.
        loss_list = [self.crit(O_R, R) for O_R in O_Rs]
        ssim_list = [self.ssim(O - O_R, O - R) for O_R in O_Rs]
if name == 'train':
self.net.zero_grad()
sum(loss_list).backward()
self.opt.step()
losses = {
'loss%d' % i: loss.item()
for i, loss in enumerate(loss_list)
}
ssimes = {
'ssim%d' % i: ssim.item()
for i, ssim in enumerate(ssim_list)
}
losses.update(ssimes)
self.write(name, losses)
return O - O_Rs[-1]
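    # Hypothetical variant (not in the original): weight later stages more
    # heavily than the plain sum used in inf_batch above, e.g.
    #
    #     weights = [0.5 ** (len(loss_list) - 1 - i) for i in range(len(loss_list))]
    #     total = sum(w * l for w, l in zip(weights, loss_list))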
    def save_image(self, name, img_lists):
        data, pred, label = img_lists
        pred = pred.cpu().data
        data, label, pred = data * 255, label * 255, pred * 255
        pred = np.clip(pred, 0, 255)
        h, w = pred.shape[-2:]
        # Tile a 6x2 grid of (input, prediction, label) triples into a single
        # image; this assumes batch_size >= 12. The original code wrapped the
        # loop below in a redundant `for img_list in img_lists:` that redrew
        # the identical grid three times; that loop is removed here.
        gen_num = (6, 2)
        img = np.zeros((gen_num[0] * h, gen_num[1] * 3 * w, 3))
        for i in range(gen_num[0]):
            row = i * h
            for j in range(gen_num[1]):
                idx = i * gen_num[1] + j
                tmp_list = [data[idx], pred[idx], label[idx]]
                for k in range(3):
                    col = (j * 3 + k) * w
                    tmp = np.transpose(tmp_list[k], (1, 2, 0))
                    img[row: row + h, col: col + w] = tmp
        img_file = os.path.join(self.log_dir, '%d_%s.jpg' % (self.step, name))
        cv2.imwrite(img_file, img)
def run_train_val(ckp_name='latest'):
sess = Session()
sess.load_checkpoints(ckp_name)
sess.tensorboard('train')
sess.tensorboard('val')
dt_train = sess.get_dataloader('train')
dt_val = sess.get_dataloader('val')
while sess.step < 20000:
sess.sche.step()
sess.net.train()
try:
batch_t = next(dt_train)
except StopIteration:
dt_train = sess.get_dataloader('train')
batch_t = next(dt_train)
pred_t = sess.inf_batch('train', batch_t)
if sess.step % 4 == 0:
sess.net.eval()
try:
batch_v = next(dt_val)
except StopIteration:
dt_val = sess.get_dataloader('val')
batch_v = next(dt_val)
pred_v = sess.inf_batch('val', batch_v)
if sess.step % int(sess.save_steps / 16) == 0:
sess.save_checkpoints('latest')
if sess.step % int(sess.save_steps / 2) == 0:
sess.save_image('train', [batch_t['O'], pred_t, batch_t['B']])
if sess.step % 4 == 0:
sess.save_image('val', [batch_v['O'], pred_v, batch_v['B']])
logger.info('save image as step_%d' % sess.step)
if sess.step % sess.save_steps == 0:
sess.save_checkpoints('step_%d' % sess.step)
logger.info('save model as step_%d' % sess.step)
sess.step += 1
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-m', '--model', default='latest')
args = parser.parse_args(sys.argv[1:])
run_train_val(args.model)
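# Example launch (assumes settings.py supplies logger, device_id, log_dir,
# model_dir, lr, batch_size, num_workers, and save_steps, as referenced above):
#
#     python train.py --model latest    # resumes from model_dir/latest if present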
[end of record: avg_line_length 31.593137 | max_line_length 88 | alphanum_fraction 0.569123]

[record] hexsha: 794ae68b2348153a632fd1c7b30fc597877a3414 | size: 9,934 | ext: py | lang: Python
path: src/m7_loops_within_loops_graphics.py | repo: wilsonta/20-Exam3Practice | head_hexsha: 840c3bf0cf73c05db83c412f71771d05d5baafab
licenses: ["MIT"] | stars: null | issues: null | forks: null
"""
PRACTICE Exam 3.
This problem provides practice at:
*** LOOPS WITHIN LOOPS in 2D GRAPHICS problems. ***
Authors: David Mutchler, Vibha Alangar, Matt Boutell, Dave Fisher,
Mark Hays, Amanda Stouder, Aaron Wilkin, their colleagues,
and Tim Wilson.
""" # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE.
###############################################################################
# Students:
#
# These problems have DIFFICULTY and TIME ratings:
# DIFFICULTY rating: 1 to 10, where:
# 1 is very easy
# 3 is an "easy" Test 2 question.
# 5 is a "typical" Test 2 question.
# 7 is a "hard" Test 2 question.
# 10 is an EXTREMELY hard problem (too hard for a Test 2 question)
#
# TIME ratings: A ROUGH estimate of the number of minutes that we
# would expect a well-prepared student to take on the problem.
#
# IMPORTANT: For ALL the problems in this module,
# if you reach the time estimate and are NOT close to a solution,
# STOP working on that problem and ASK YOUR INSTRUCTOR FOR HELP
# on it, in class or via Piazza.
###############################################################################
import rosegraphics as rg
def main():
""" Calls the TEST functions in this module. """
#run_test_hourglass()
run_test_many_hourglasses()
def run_test_hourglass():
""" Tests the hourglass function. """
print()
print('--------------------------------------------------')
print('Testing the hourglass function:')
print('--------------------------------------------------')
test1 = '(n = 3, radius = 40, blue)'
test2 = '(n = 8, radius = 15, green)'
title1 = 'Hourglass, two tests: {} and {}'.format(test1, test2)
window1 = rg.RoseWindow(600, 500, title1)
hourglass(window1, 3, rg.Point(150, 200), 40, 'blue')
hourglass(window1, 8, rg.Point(450, 250), 15, 'green')
window1.close_on_mouse_click()
test3 = '(n = 6, radius = 30, red)'
title2 = 'Hourglass, one more test: {}'.format(test3)
window2 = rg.RoseWindow(400, 700, title2)
hourglass(window2, 6, rg.Point(200, 350), 30, 'red')
window2.close_on_mouse_click()
def hourglass(window, n, point, radius, color):
"""
See hourglass_picture.pdf in this project for pictures that may
help you better understand the following specification:
Displays an "hourglass" shape of circles in the given window.
-- Each circle has the given radius and given color.
-- Each circle has a horizontal line drawn through it.
-- The middlemost of the circles is centered at the given point.
-- There is a single circle in that middlemost row.
-- There are n rows (including the middlemost row)
of circles going UP from the middlemost circle.
-- There are n rows (including the middlemost row)
of circles going DOWN from the middlemost circle.
-- Each circle barely touches its neighbor circles.
Preconditions:
:type window: rg.RoseWindow
:type n: int
:type point: rg.Point
:type radius: int
:type color: str
where n and radius are positive and color is a string that denotes
a color that rosegraphics understands.
"""
    # Upper half (including the middle row): row k has k + 1 circles and is
    # stacked upward from the given point, shifting left half a circle per row.
    for k in range(n):
        center_y = point.y - k * 2 * radius
        center_x = point.x - k * radius  # center of the row's leftmost circle
        for j in range(k + 1):
            circle_x = center_x + j * 2 * radius
            circle = rg.Circle(rg.Point(circle_x, center_y), radius)
            circle.fill_color = color
            line = rg.Line(rg.Point(circle_x - radius, center_y),
                           rg.Point(circle_x + radius, center_y))
            circle.attach_to(window)
            line.attach_to(window)
            window.render()
    # Lower half: row m (starting just below the middle row) has m + 2 circles.
    for m in range(n - 1):
        center_y = point.y + 2 * radius + m * 2 * radius
        center_x = point.x - radius - m * radius
        for j in range(m + 2):
            circle_x = center_x + j * 2 * radius
            circle = rg.Circle(rg.Point(circle_x, center_y), radius)
            circle.fill_color = color
            line = rg.Line(rg.Point(circle_x - radius, center_y),
                           rg.Point(circle_x + radius, center_y))
            circle.attach_to(window)
            line.attach_to(window)
            window.render()
# -------------------------------------------------------------------------
# DONE: 2. Implement and test this function.
# We provided some tests for you (above).
# -------------------------------------------------------------------------
###########################################################################
# BONUS: Avoid replicated code if you can. Hint: You are allowed
# to define an additional function(s) if you wish.
###########################################################################
# -------------------------------------------------------------------------
# DIFFICULTY AND TIME RATINGS (see top of this file for explanation)
# DIFFICULTY: 8
# TIME ESTIMATE: 25 minutes (warning: this problem is challenging)
# -------------------------------------------------------------------------
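# A possible helper for the BONUS above (illustrative sketch, not part of the
# exam solution): drawing one centered row of circles would remove the
# duplication between the two loops in hourglass().
#
#     def draw_row(window, count, center_x, center_y, radius, color):
#         left = center_x - (count - 1) * radius    # center of leftmost circle
#         for j in range(count):
#             cx = left + j * 2 * radius
#             circle = rg.Circle(rg.Point(cx, center_y), radius)
#             circle.fill_color = color
#             line = rg.Line(rg.Point(cx - radius, center_y),
#                            rg.Point(cx + radius, center_y))
#             circle.attach_to(window)
#             line.attach_to(window)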
def run_test_many_hourglasses():
""" Tests the many_hourglasses function. """
print()
print('--------------------------------------------------')
print('Testing the many_hourglasses function:')
print('--------------------------------------------------')
test1 = '(n = 4, radius = 30, red-blue-black-green)'
test2 = '(n = 3, radius = 70, brown-cyan-yellow-green)'
title1 = 'Many hourglasses, two tests: {} and {}'.format(test1,
test2)
window1 = rg.RoseWindow(800, 400, title1)
square1 = rg.Square(rg.Point(50, 150), 30)
square2 = rg.Square(rg.Point(400, 200), 70)
many_hourglasses(window1, square1, 4,
('red', 'blue', 'black', 'green'))
many_hourglasses(window1, square2, 3,
('brown', 'cyan', 'yellow', 'green'))
window1.close_on_mouse_click()
test3 = '(n = 7, radius = 40, red-black-blue)'
title2 = 'Many hourglasses, one more test: {}'.format(test3)
window2 = rg.RoseWindow(1200, 500, title2)
square3 = rg.Square(rg.Point(50, 250), 40)
many_hourglasses(window2, square3, 7, ('red', 'black', 'blue'))
window2.close_on_mouse_click()
def many_hourglasses(window, square, m, colors):
"""
See many_hourglasses_picture.pdf in this project for pictures that may
help you better understand the following specification:
Displays m rectangles, where:
-- Each rectangle has an hourglass of circles inside it,
per the hourglass function above.
-- The circles in the hourglasses are all the same size.
-- The leftmost rectangle is the given square, and it contains
an hourglass with a single circle that fills the square.
-- Each successive rectangle is immediately to the right of the
previous rectangle, and each contains an hourglass with
the hourglass' n being one greater than the n used
for the previous rectangle.
-- The colors for the hourglass figures use the given sequence of
colors, "wrapping" if m exceeds the length of the sequence.
Preconditions:
:type window: rg.RoseWindow
:type square: rg.Square
:type m: int
:type colors: (list | tuple) of str
where m is positive and colors is a sequence of strings,
each of which denotes a color that rosegraphics understands.
"""
    side = square.length_of_each_side
    radius = side / 2
    # Leftmost figure: the given square holds a single-circle hourglass.
    square.attach_to(window)
    window.render()
    hourglass(window, 1, square.center, radius, colors[0])
    corner1 = rg.Point(square.center.x - radius, square.center.y - radius)
    corner2 = rg.Point(square.center.x + radius, square.center.y + radius)
    new_corner1 = corner1
    new_corner2 = corner2
    i = 1
    for k in range(1, m):
        # Each rectangle grows by one row of circles in every direction and
        # sits immediately to the right of the previous one.
        growth = k * side
        new_corner1 = rg.Point(new_corner1.x + k * side, corner1.y - growth)
        new_corner2 = rg.Point(new_corner2.x + (k + 1) * side, corner2.y + growth)
        rect = rg.Rectangle(new_corner1, new_corner2)
        center = rg.Point((new_corner1.x + new_corner2.x) / 2,
                          (new_corner1.y + new_corner2.y) / 2)
        rect.attach_to(window)
        window.render()
        if i == len(colors):  # wrap around the color sequence
            i = 0
        hourglass(window, k + 1, center, radius, colors[i])
        i = i + 1
# -------------------------------------------------------------------------
# DONE: 3. Implement and test this function.
# We provided some tests for you (above).
# -------------------------------------------------------------------------
###########################################################################
# IMPORTANT:
# 1. Partial credit if you draw JUST the rectangles.
# 2. No additional credit unless you CALL the hourglass function
# in the PREVIOUS problem appropriately
# to draw the hourglass figures.
###########################################################################
# -------------------------------------------------------------------------
# DIFFICULTY AND TIME RATINGS (see top of this file for explanation)
# DIFFICULTY: 7 (assuming that you already have
# a correct "hourglass" function above)
# TIME ESTIMATE: 20 minutes (warning: this problem is challenging)
# -------------------------------------------------------------------------
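# Added sketch (not part of the assignment solution above): the color
# "wrapping" in many_hourglasses can also be expressed with the modulo
# operator instead of resetting a counter by hand.
def wrapped_color(colors, k):
    """ Returns colors[k], wrapping around when k reaches len(colors). """
    return colors[k % len(colors)]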
# -----------------------------------------------------------------------------
# Calls main to start the ball rolling.
# -----------------------------------------------------------------------------
main()
| 42.452991
| 120
| 0.54993
|
794ae6ee782c9df7ff718b035f612193ad85b487
| 7,438
|
py
|
Python
|
tests/unit/ops/test_ops_schema.py
|
davidxia/NVTabular
|
97b05ac74204d4e21fa31d522d0f84fb37cf94a9
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/ops/test_ops_schema.py
|
davidxia/NVTabular
|
97b05ac74204d4e21fa31d522d0f84fb37cf94a9
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/ops/test_ops_schema.py
|
davidxia/NVTabular
|
97b05ac74204d4e21fa31d522d0f84fb37cf94a9
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import pytest
import nvtabular as nvt
from nvtabular import ColumnSchema, ColumnSelector, Schema, dispatch, ops
@pytest.mark.parametrize("properties", [{}, {"p1": "1"}])
@pytest.mark.parametrize("tags", [[], ["TAG1", "TAG2"]])
@pytest.mark.parametrize(
"op",
[
ops.Bucketize([1]),
ops.Rename(postfix="_trim"),
ops.Categorify(),
ops.Categorify(encode_type="combo"),
ops.Clip(0),
ops.DifferenceLag("1"),
ops.FillMissing(),
ops.Groupby(["1"]),
ops.HashBucket(1),
ops.HashedCross(1),
ops.JoinGroupby(["1"]),
ops.ListSlice(0),
ops.LogOp(),
ops.Normalize(),
ops.TargetEncoding(["1"]),
ops.AddMetadata(tags=["excellent"], properties={"domain": {"min": 0, "max": 20}}),
ops.ValueCount(),
],
)
@pytest.mark.parametrize("selection", [["1"], ["2", "3"], ["1", "2", "3", "4"]])
def test_schema_out(tags, properties, selection, op):
# Create columnSchemas
column_schemas = []
all_cols = []
for x in range(5):
all_cols.append(str(x))
column_schemas.append(ColumnSchema(str(x), tags=tags, properties=properties))
# Turn to Schema
schema = Schema(column_schemas)
# run schema through op
selector = ColumnSelector(selection)
new_schema = op.compute_output_schema(schema, selector)
    # each selected column should come through with the operator's output dtype
for col_name in selector.names:
names_group = [name for name in new_schema.column_schemas if col_name in name]
if names_group:
for name in names_group:
schema1 = new_schema.column_schemas[name]
                # the output name may differ from the input, but the dtype
                # must be the operator's declared output dtype
                assert schema1.dtype == op.output_dtype()
if name in selector.names:
assert (
schema1.properties
== op._add_properties(schema.column_schemas[schema1.name]).properties
)
all_tags = op.output_tags() + tags
assert len(schema1.tags) == len(all_tags)
else:
assert set(op.output_tags()).issubset(schema1.tags)
not_used = [col for col in all_cols if col not in selector.names]
for col_name in not_used:
assert col_name not in new_schema.column_schemas
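# Added, hedged aside (not part of the original test suite): a minimal
# standalone illustration of the schema propagation exercised above, using
# the same nvtabular API already imported in this file. The column names and
# the choice of LogOp are arbitrary.
def _demo_schema_propagation():
    schema = Schema([ColumnSchema("a"), ColumnSchema("b")])
    out = ops.LogOp().compute_output_schema(schema, ColumnSelector(["a"]))
    assert "a" in out.column_schemas  # LogOp keeps the column name
    assert out.column_schemas["a"].dtype == ops.LogOp().output_dtype()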
@pytest.mark.parametrize("properties", [{"p1": "1"}])
@pytest.mark.parametrize("tags", [["TAG1", "TAG2"]])
@pytest.mark.parametrize(
"op_routine",
[
[ops.Categorify()],
[ops.Clip(min_value=10), ops.Categorify()],
[ops.Categorify(), ops.Rename(postfix="_test")],
[ops.Clip(min_value=10), ops.Categorify(), ops.Rename(postfix="_test")],
],
)
def test_categorify_schema_properties(properties, tags, op_routine):
run_op_full(properties, tags, op_routine)
@pytest.mark.parametrize("properties", [{}])
@pytest.mark.parametrize("tags", [[]])
@pytest.mark.parametrize(
"op_routine",
[
[ops.Categorify()],
[ops.Clip(min_value=10), ops.Categorify()],
[ops.Categorify(), ops.Rename(postfix="_test")],
[ops.Clip(min_value=10), ops.Categorify(), ops.Rename(postfix="_test")],
],
)
def test_categorify_schema_properties_blank(properties, tags, op_routine):
run_op_full(properties, tags, op_routine)
@pytest.mark.parametrize("properties", [{}])
@pytest.mark.parametrize("tags", [["TAG1", "TAG2"]])
@pytest.mark.parametrize(
"op_routine",
[
[ops.Categorify()],
[ops.Clip(min_value=10), ops.Categorify()],
[ops.Categorify(), ops.Rename(postfix="_test")],
[ops.Clip(min_value=10), ops.Categorify(), ops.Rename(postfix="_test")],
],
)
def test_categorify_schema_properties_tag(properties, tags, op_routine):
run_op_full(properties, tags, op_routine)
@pytest.mark.parametrize("properties", [{"p1": "1"}])
@pytest.mark.parametrize("tags", [[]])
@pytest.mark.parametrize(
"op_routine",
[
[ops.Categorify()],
[ops.Clip(min_value=10), ops.Categorify()],
[ops.Categorify(), ops.Rename(postfix="_test")],
[ops.Clip(min_value=10), ops.Categorify(), ops.Rename(postfix="_test")],
],
)
def test_categorify_schema_properties_props(properties, tags, op_routine):
run_op_full(properties, tags, op_routine)
def run_op_full(properties, tags, op_routine):
column_schemas = []
all_cols = []
for x in range(5):
all_cols.append(str(x))
column_schemas.append(ColumnSchema(str(x), tags=tags, properties=properties))
# Turn to Schema
schema = Schema(column_schemas)
df_dict = {}
num_rows = 10000
for column_name in schema.column_names:
df_dict[column_name] = np.random.randint(1, 1000, num_rows)
df = dispatch._make_df(df_dict)
dataset = nvt.Dataset(df)
test_node = ColumnSelector(schema.column_names) >> op_routine[0]
for op in op_routine[1:]:
test_node = test_node >> op
processor = nvt.Workflow(test_node)
processor.fit(dataset)
new_gdf = processor.transform(dataset).to_ddf().compute()
workflow_schema_out = processor.output_node.output_schema
for column_name in workflow_schema_out.column_names:
schema1 = workflow_schema_out.column_schemas[column_name]
assert "domain" in schema1.properties
embeddings_info = schema1.properties["domain"]
        # should always exist, represents "unknown"
assert embeddings_info["min"] == 0
assert embeddings_info["max"] == new_gdf[column_name].max() + 1
@pytest.mark.parametrize("properties", [{"p1": "1"}])
@pytest.mark.parametrize("tags", [[]])
@pytest.mark.parametrize(
"op_routine",
[
[ops.Categorify(), ops.Rename(postfix="_test"), ops.ValueCount()],
],
)
def test_ops_list_vc(properties, tags, op_routine):
column_schemas = []
all_cols = []
for x in range(5):
all_cols.append(str(x))
column_schemas.append(ColumnSchema(str(x), tags=tags, properties=properties))
# Turn to Schema
schema = Schema(column_schemas)
df_dict = {}
num_rows = 10000
for column_name in schema.column_names:
df_dict[column_name] = np.random.randint(1, 1000, num_rows)
df_dict[column_name] = [[x] * np.random.randint(1, 10) for x in df_dict[column_name]]
df = dispatch._make_df(df_dict)
dataset = nvt.Dataset(df)
test_node = ColumnSelector(schema.column_names) >> op_routine[0]
for op in op_routine[1:]:
test_node = test_node >> op
processor = nvt.Workflow(test_node)
processor.fit(dataset)
new_gdf = processor.transform(dataset).to_ddf().compute()
workflow_schema_out = processor.output_node.output_schema
for column_name in workflow_schema_out.column_names:
schema1 = workflow_schema_out.column_schemas[column_name]
assert "domain" in schema1.properties
embeddings_info = schema1.properties["domain"]
        # should always exist, represents "unknown"
assert embeddings_info["min"] == 0
assert embeddings_info["max"] == new_gdf[column_name]._column.elements.max() + 1
assert "value_count" in schema1.properties
val_c = schema1.properties["value_count"]
assert val_c["min"] == op_routine[-1].stats[column_name]["value_count"]["min"]
assert val_c["max"] == op_routine[-1].stats[column_name]["value_count"]["max"]
| 35.932367
| 93
| 0.642915
|
794ae76ed74e0e253b874598d9df1653ac0b7641
| 6,578
|
py
|
Python
|
Wrapping/Generators/Python/Tests/extras.py
|
gift-surg/ITK_NiftyMIC
|
26415ac2e6197de7b07ffcb0c3f740aa937ba7e9
|
[
"Apache-2.0"
] | 3
|
2019-11-19T09:47:25.000Z
|
2022-02-24T00:32:31.000Z
|
Wrapping/Generators/Python/Tests/extras.py
|
gift-surg/ITK_NiftyMIC
|
26415ac2e6197de7b07ffcb0c3f740aa937ba7e9
|
[
"Apache-2.0"
] | 1
|
2019-03-18T14:19:49.000Z
|
2020-01-11T13:54:33.000Z
|
Wrapping/Generators/Python/Tests/extras.py
|
gift-surg/ITK_NiftyMIC
|
26415ac2e6197de7b07ffcb0c3f740aa937ba7e9
|
[
"Apache-2.0"
] | 1
|
2022-02-24T00:32:36.000Z
|
2022-02-24T00:32:36.000Z
|
#==========================================================================
#
# Copyright Insight Software Consortium
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#==========================================================================*/
# also test the import callback feature
from __future__ import print_function
def custom_callback(name, progress):
if progress == 0:
print("Loading %s..." % name, file=sys.stderr)
if progress == 1:
print("done", file=sys.stderr)
import itkConfig
itkConfig.ImportCallback = custom_callback
import sys  # imported before itk so custom_callback can write to sys.stderr
import itk
# test the force load function
itk.force_load()
fileName = sys.argv[1]
PixelType = itk.UC
dim = 2
ImageType = itk.Image[PixelType, dim]
ReaderType = itk.ImageFileReader[ImageType]
reader = ReaderType.New(FileName=fileName)
# test echo
itk.echo(reader)
itk.echo(reader, sys.stdout)
# test class_
assert itk.class_(reader) == ReaderType
assert itk.class_("dummy") == str
# test template
assert itk.template(ReaderType) == (itk.ImageFileReader, (ImageType,))
assert itk.template(reader) == (itk.ImageFileReader, (ImageType,))
try:
itk.template(str)
raise Exception("unknown class should send an exception")
except KeyError:
pass
# test ctype
assert itk.ctype("unsigned short") == itk.US
assert itk.ctype(" unsigned \n short \t ") == itk.US
assert itk.ctype("signed short") == itk.SS
assert itk.ctype("short") == itk.SS
try:
itk.ctype("dummy")
raise Exception("unknown C type should send an exception")
except KeyError:
pass
# test output
assert itk.output(reader) == reader.GetOutput()
assert itk.output(1) == 1
# test the deprecated image
assert itk.image(reader) == reader.GetOutput()
assert itk.image(1) == 1
# test size
s = itk.size(reader)
assert s[0] == s[1] == 256
s = itk.size(reader.GetOutput())
assert s[0] == s[1] == 256
# test physical size
s = itk.physical_size(reader)
assert s[0] == s[1] == 256.0
s = itk.physical_size(reader.GetOutput())
assert s[0] == s[1] == 256.0
# test spacing
s = itk.spacing(reader)
assert s[0] == s[1] == 1.0
s = itk.spacing(reader.GetOutput())
assert s[0] == s[1] == 1.0
# test origin
s = itk.origin(reader)
assert s[0] == s[1] == 0.0
s = itk.origin(reader.GetOutput())
assert s[0] == s[1] == 0.0
# test index
s = itk.index(reader)
assert s[0] == s[1] == 0
s = itk.index(reader.GetOutput())
assert s[0] == s[1] == 0
# test region
s = itk.region(reader)
assert s.GetIndex()[0] == s.GetIndex()[1] == 0
assert s.GetSize()[0] == s.GetSize()[1] == 256
s = itk.region(reader.GetOutput())
assert s.GetIndex()[0] == s.GetIndex()[1] == 0
assert s.GetSize()[0] == s.GetSize()[1] == 256
# test range
assert itk.range(reader) == (0, 255)
assert itk.range(reader.GetOutput()) == (0, 255)
# test write
itk.imwrite(reader, sys.argv[2])
itk.write(reader, sys.argv[2])
itk.imwrite(reader, sys.argv[2], True)
# test read
image=itk.imread(fileName)
assert type(image) == itk.Image[itk.RGBPixel[itk.UC],2]
image=itk.imread(fileName, itk.F)
assert type(image) == itk.Image[itk.F,2]
# test search
res = itk.search("Index")
assert res[0] == "Index"
assert res[1] == "index"
assert "ContinuousIndex" in res
res = itk.search("index", True)
assert "Index" not in res
# test down_cast
obj = itk.Object.cast(reader)
# be sure that the reader is cast to itk::Object
assert obj.__class__ == itk.Object
down_casted = itk.down_cast(obj)
assert down_casted == reader
assert down_casted.__class__ == ReaderType
# pipeline, auto_pipeline and templated class are tested in other files
# BridgeNumPy
try:
# Images
import numpy as np
image = itk.imread(fileName)
arr = itk.GetArrayFromImage(image)
arr.fill(1)
assert np.any(arr != itk.GetArrayFromImage(image))
view = itk.GetArrayViewFromImage(image)
view.fill(1)
assert np.all(view == itk.GetArrayFromImage(image))
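    # (Added note: GetArrayFromImage returns an independent copy, while
    # GetArrayViewFromImage returns a view sharing the image buffer, so
    # writes through the view are visible in the image; that is what the
    # asserts above check. The GetImageFromArray / GetImageViewFromArray
    # calls below demonstrate the same copy-vs-view distinction in reverse.)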
image = itk.GetImageFromArray(arr)
image.FillBuffer(2)
assert np.any(arr != itk.GetArrayFromImage(image))
image = itk.GetImageViewFromArray(arr)
image.FillBuffer(2)
assert np.all(arr == itk.GetArrayFromImage(image))
image = itk.GetImageFromArray(arr, isVector=True)
assert image.GetImageDimension() == 2
image = itk.GetImageViewFromArray(arr, isVector=True)
assert image.GetImageDimension() == 2
arr = np.array([[1,2,3],[4,5,6]]).astype(np.uint8)
assert arr.shape[0] == 2
assert arr.shape[1] == 3
assert arr[1,1] == 5
image = itk.GetImageFromArray(arr)
arrKeepAxes = itk.GetArrayFromImage(image, keepAxes=True)
assert arrKeepAxes.shape[0] == 3
assert arrKeepAxes.shape[1] == 2
assert arrKeepAxes[1,1] == 4
arr = itk.GetArrayFromImage(image, keepAxes=False)
assert arr.shape[0] == 2
assert arr.shape[1] == 3
assert arr[1,1] == 5
arrKeepAxes = itk.GetArrayViewFromImage(image, keepAxes=True)
assert arrKeepAxes.shape[0] == 3
assert arrKeepAxes.shape[1] == 2
assert arrKeepAxes[1,1] == 4
arr = itk.GetArrayViewFromImage(image, keepAxes=False)
assert arr.shape[0] == 2
assert arr.shape[1] == 3
assert arr[1,1] == 5
# VNL Vectors
v1 = itk.vnl_vector.D(2)
v1.fill(1)
v_np = itk.GetArrayFromVnlVector(v1)
assert v1.get(0) == v_np[0]
v_np[0] = 0
assert v1.get(0) != v_np[0]
view = itk.GetArrayViewFromVnlVector(v1)
assert v1.get(0) == view[0]
view[0] = 0
assert v1.get(0) == view[0]
# VNL Matrices
m1 = itk.vnl_matrix.D(2,2)
m1.fill(1)
m_np = itk.GetArrayFromVnlMatrix(m1)
assert m1.get(0,0) == m_np[0,0]
m_np[0,0] = 0
assert m1.get(0,0) != m_np[0,0]
view = itk.GetArrayViewFromVnlMatrix(m1)
assert m1.get(0,0) == view[0,0]
view[0,0] = 0
assert m1.get(0,0) == view[0,0]
arr = np.zeros([3,3])
m_vnl = itk.GetVnlMatrixFromArray(arr)
assert m_vnl(0,0) == 0
m_vnl.put(0,0,3)
assert m_vnl(0,0) == 3
assert arr[0,0] == 0
except ImportError:
print("NumPy not imported. Skipping BridgeNumPy tests")
# Numpy is not available, do not run the Bridge NumPy tests
pass
| 28.23176
| 77
| 0.656887
|
794ae7d5f07e8d7af583cb71d8f547e92e1d5561
| 3,880
|
py
|
Python
|
nipype/interfaces/fsl/tests/test_auto_FNIRT.py
|
dPys/nipype
|
75030b29297808e7c9a9e91b411b685154dff60b
|
[
"Apache-2.0"
] | 1
|
2019-03-25T14:11:18.000Z
|
2019-03-25T14:11:18.000Z
|
nipype/interfaces/fsl/tests/test_auto_FNIRT.py
|
dPys/nipype
|
75030b29297808e7c9a9e91b411b685154dff60b
|
[
"Apache-2.0"
] | 1
|
2017-01-05T01:24:33.000Z
|
2017-01-05T01:24:33.000Z
|
nipype/interfaces/fsl/tests/test_auto_FNIRT.py
|
wtriplett/nipype
|
388f140fceaf55438a987e9cdfa2a8e995428afd
|
[
"Apache-2.0"
] | 1
|
2020-12-16T16:36:48.000Z
|
2020-12-16T16:36:48.000Z
|
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from ..preprocess import FNIRT
def test_FNIRT_inputs():
input_map = dict(
affine_file=dict(argstr="--aff=%s", extensions=None,),
apply_inmask=dict(argstr="--applyinmask=%s", sep=",", xor=["skip_inmask"],),
apply_intensity_mapping=dict(
argstr="--estint=%s", sep=",", xor=["skip_intensity_mapping"],
),
apply_refmask=dict(argstr="--applyrefmask=%s", sep=",", xor=["skip_refmask"],),
args=dict(argstr="%s",),
bias_regularization_lambda=dict(argstr="--biaslambda=%f",),
biasfield_resolution=dict(argstr="--biasres=%d,%d,%d",),
config_file=dict(argstr="--config=%s",),
derive_from_ref=dict(argstr="--refderiv",),
environ=dict(nohash=True, usedefault=True,),
field_file=dict(argstr="--fout=%s", hash_files=False,),
fieldcoeff_file=dict(argstr="--cout=%s",),
hessian_precision=dict(argstr="--numprec=%s",),
in_file=dict(argstr="--in=%s", extensions=None, mandatory=True,),
in_fwhm=dict(argstr="--infwhm=%s", sep=",",),
in_intensitymap_file=dict(argstr="--intin=%s", copyfile=False,),
inmask_file=dict(argstr="--inmask=%s", extensions=None,),
inmask_val=dict(argstr="--impinval=%f",),
intensity_mapping_model=dict(argstr="--intmod=%s",),
intensity_mapping_order=dict(argstr="--intorder=%d",),
inwarp_file=dict(argstr="--inwarp=%s", extensions=None,),
jacobian_file=dict(argstr="--jout=%s", hash_files=False,),
jacobian_range=dict(argstr="--jacrange=%f,%f",),
log_file=dict(
argstr="--logout=%s", extensions=None, genfile=True, hash_files=False,
),
max_nonlin_iter=dict(argstr="--miter=%s", sep=",",),
modulatedref_file=dict(argstr="--refout=%s", hash_files=False,),
out_intensitymap_file=dict(argstr="--intout=%s", hash_files=False,),
output_type=dict(),
ref_file=dict(argstr="--ref=%s", extensions=None, mandatory=True,),
ref_fwhm=dict(argstr="--reffwhm=%s", sep=",",),
refmask_file=dict(argstr="--refmask=%s", extensions=None,),
refmask_val=dict(argstr="--imprefval=%f",),
regularization_lambda=dict(argstr="--lambda=%s", sep=",",),
regularization_model=dict(argstr="--regmod=%s",),
skip_implicit_in_masking=dict(argstr="--impinm=0",),
skip_implicit_ref_masking=dict(argstr="--imprefm=0",),
skip_inmask=dict(argstr="--applyinmask=0", xor=["apply_inmask"],),
skip_intensity_mapping=dict(
argstr="--estint=0", xor=["apply_intensity_mapping"],
),
skip_lambda_ssq=dict(argstr="--ssqlambda=0",),
skip_refmask=dict(argstr="--applyrefmask=0", xor=["apply_refmask"],),
spline_order=dict(argstr="--splineorder=%d",),
subsampling_scheme=dict(argstr="--subsamp=%s", sep=",",),
warp_resolution=dict(argstr="--warpres=%d,%d,%d",),
warped_file=dict(
argstr="--iout=%s", extensions=None, genfile=True, hash_files=False,
),
)
inputs = FNIRT.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_FNIRT_outputs():
output_map = dict(
field_file=dict(extensions=None,),
fieldcoeff_file=dict(extensions=None,),
jacobian_file=dict(extensions=None,),
log_file=dict(extensions=None,),
modulatedref_file=dict(extensions=None,),
out_intensitymap_file=dict(),
warped_file=dict(extensions=None,),
)
outputs = FNIRT.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(outputs.traits()[key], metakey) == value
| 47.317073
| 87
| 0.622938
|
794ae7dd093c1a9464ff107010330d1f8903aa13
| 15,924
|
py
|
Python
|
main.py
|
SIRAJULHUDA/samples
|
2aff7aa9b1e2a5250260b2459e0ab9084c7b3f94
|
[
"MIT"
] | null | null | null |
main.py
|
SIRAJULHUDA/samples
|
2aff7aa9b1e2a5250260b2459e0ab9084c7b3f94
|
[
"MIT"
] | null | null | null |
main.py
|
SIRAJULHUDA/samples
|
2aff7aa9b1e2a5250260b2459e0ab9084c7b3f94
|
[
"MIT"
] | null | null | null |
import sys, io, os
from PyQt4 import QtCore, QtGui, uic
from PyQt4.QtGui import QPainter, QColor, QFont
from os.path import expanduser
import subprocess as sp
import numpy
from PIL import Image, ImageDraw, ImageFont
from PIL.ImageQt import ImageQt
import atexit
from queue import Queue
from PyQt4.QtCore import QSettings
import signal
import preview_thread, core, video_thread
class Command(QtCore.QObject):
videoTask = QtCore.pyqtSignal(str, str, QFont, int, int, int, int, tuple, tuple, str, str)
def __init__(self):
QtCore.QObject.__init__(self)
print("2")
import argparse
self.parser = argparse.ArgumentParser(description='Create a visualization for an audio file')
self.parser.add_argument('-i', '--input', dest='input', help='input audio file', required=True)
self.parser.add_argument('-o', '--output', dest='output', help='output video file', required=True)
self.parser.add_argument('-b', '--background', dest='bgimage', help='background image file', required=True)
self.parser.add_argument('-t', '--text', dest='text', help='title text', required=True)
self.parser.add_argument('-f', '--font', dest='font', help='title font', required=False)
self.parser.add_argument('-s', '--fontsize', dest='fontsize', help='title font size', required=False)
self.parser.add_argument('-c', '--textcolor', dest='textcolor', help='title text color in r,g,b format', required=False)
self.parser.add_argument('-C', '--viscolor', dest='viscolor', help='visualization color in r,g,b format', required=False)
self.parser.add_argument('-x', '--xposition', dest='xposition', help='x position', required=False)
self.parser.add_argument('-y', '--yposition', dest='yposition', help='y position', required=False)
self.parser.add_argument('-a', '--alignment', dest='alignment', help='title alignment', required=False, type=int, choices=[0, 1, 2])
self.args = self.parser.parse_args()
self.settings = QSettings('settings.ini', QSettings.IniFormat)
# load colours as tuples from comma-separated strings
self.textColor = core.Core.RGBFromString(self.settings.value("textColor", '255, 255, 255'))
self.visColor = core.Core.RGBFromString(self.settings.value("visColor", '255, 255, 255'))
if self.args.textcolor:
self.textColor = core.Core.RGBFromString(self.args.textcolor)
if self.args.viscolor:
self.visColor = core.Core.RGBFromString(self.args.viscolor)
# font settings
if self.args.font:
self.font = QFont(self.args.font)
else:
self.font = QFont(self.settings.value("titleFont", QFont()))
if self.args.fontsize:
self.fontsize = int(self.args.fontsize)
else:
self.fontsize = int(self.settings.value("fontSize", 35))
if self.args.alignment:
self.alignment = int(self.args.alignment)
else:
self.alignment = int(self.settings.value("alignment", 0))
if self.args.xposition:
self.textX = int(self.args.xposition)
else:
self.textX = int(self.settings.value("xPosition", 70))
if self.args.yposition:
self.textY = int(self.args.yposition)
else:
self.textY = int(self.settings.value("yPosition", 375))
ffmpeg_cmd = self.settings.value("ffmpeg_cmd", expanduser("~"))
self.videoThread = QtCore.QThread(self)
self.videoWorker = video_thread.Worker(self)
self.videoWorker.moveToThread(self.videoThread)
self.videoWorker.videoCreated.connect(self.videoCreated)
self.videoThread.start()
self.videoTask.emit(self.args.bgimage,
self.args.text,
self.font,
self.fontsize,
self.alignment,
self.textX,
self.textY,
self.textColor,
self.visColor,
self.args.input,
self.args.output)
def videoCreated(self):
self.videoThread.quit()
self.videoThread.wait()
self.cleanUp()
def cleanUp(self):
self.settings.setValue("titleFont", self.font.toString())
self.settings.setValue("alignment", str(self.alignment))
self.settings.setValue("fontSize", str(self.fontsize))
self.settings.setValue("xPosition", str(self.textX))
self.settings.setValue("yPosition", str(self.textY))
self.settings.setValue("visColor", '%s,%s,%s' % self.visColor)
self.settings.setValue("textColor", '%s,%s,%s' % self.textColor)
sys.exit(0)
class Main(QtCore.QObject):
newTask = QtCore.pyqtSignal(str, str, QFont, int, int, int, int, tuple, tuple)
processTask = QtCore.pyqtSignal()
videoTask = QtCore.pyqtSignal(str, str, QFont, int, int, int, int, tuple, tuple, str, str)
def __init__(self, window):
QtCore.QObject.__init__(self)
# print('main thread id: {}'.format(QtCore.QThread.currentThreadId()))
self.window = window
self.core = core.Core()
self.settings = QSettings('settings.ini', QSettings.IniFormat)
# load colors as tuples from a comma-separated string
self.textColor = core.Core.RGBFromString(self.settings.value("textColor", '255, 255, 255'))
self.visColor = core.Core.RGBFromString(self.settings.value("visColor", '255, 255, 255'))
self.previewQueue = Queue()
self.previewThread = QtCore.QThread(self)
self.previewWorker = preview_thread.Worker(self, self.previewQueue)
self.previewWorker.moveToThread(self.previewThread)
self.previewWorker.imageCreated.connect(self.showPreviewImage)
self.previewThread.start()
self.timer = QtCore.QTimer(self)
self.timer.timeout.connect(self.processTask.emit)
self.timer.start(500)
#window.pushButton_selectInput.clicked.connect(self.openInputFileDialog)
#window.pushButton_selectOutput.clicked.connect(self.openOutputFileDialog)
#window.pushButton_createVideo.clicked.connect(self.createAudioVisualisation)
#window.pushButton_selectBackground.clicked.connect(self.openBackgroundFileDialog)
print("5")
fileName = "siraj.mp3"
if not fileName == "":
#self.settings.setValue("inputDir", os.path.dirname(fileName))
self.window.label_input.setText(fileName)
fileName = "siraj.mkv"
if not fileName == "":
#self.settings.setValue("outputDir", os.path.dirname(fileName))
self.window.label_output.setText(fileName)
fileName = "siraj.jpg"
if not fileName == "":
#self.settings.setValue("backgroundDir", os.path.dirname(fileName))
self.window.label_background.setText(fileName)
#window.pushButton_createVideo.clicked.connect(self.createAudioVisualisation)
window.progressBar_create.setValue(0)
window.setWindowTitle("Audio Visualizer")
window.pushButton_selectInput.setText("Select Input Music File")
window.pushButton_selectOutput.setText("Select Output Video File")
window.pushButton_selectBackground.setText("Select Background Image")
window.label_font.setText("Title Font")
window.label_alignment.setText("Title Options")
window.label_colorOptions.setText("Colors")
window.label_fontsize.setText("Fontsize")
window.label_title.setText("Title Text")
window.label_textColor.setText("Text:")
window.label_visColor.setText("Visualizer:")
#window.pushButton_createVideo.setText("Create Video")
window.groupBox_create.setTitle("Create")
window.groupBox_settings.setTitle("Settings")
window.groupBox_preview.setTitle("Preview")
window.alignmentComboBox.addItem("Left")
window.alignmentComboBox.addItem("Middle")
window.alignmentComboBox.addItem("Right")
window.fontsizeSpinBox.setValue(35)
window.textXSpinBox.setValue(70)
window.textYSpinBox.setValue(375)
window.lineEdit_textColor.setText('%s,%s,%s' % self.textColor)
window.lineEdit_visColor.setText('%s,%s,%s' % self.visColor)
window.pushButton_textColor.clicked.connect(lambda: self.pickColor('text'))
window.pushButton_visColor.clicked.connect(lambda: self.pickColor('vis'))
btnStyle = "QPushButton { background-color : %s; outline: none; }" % QColor(*self.textColor).name()
window.pushButton_textColor.setStyleSheet(btnStyle)
btnStyle = "QPushButton { background-color : %s; outline: none; }" % QColor(*self.visColor).name()
window.pushButton_visColor.setStyleSheet(btnStyle)
titleFont = self.settings.value("titleFont")
if not titleFont == None:
window.fontComboBox.setCurrentFont(QFont(titleFont))
alignment = self.settings.value("alignment")
if not alignment == None:
window.alignmentComboBox.setCurrentIndex(int(alignment))
fontSize = self.settings.value("fontSize")
if not fontSize == None:
window.fontsizeSpinBox.setValue(int(fontSize))
xPosition = self.settings.value("xPosition")
if not xPosition == None:
window.textXSpinBox.setValue(int(xPosition))
yPosition = self.settings.value("yPosition")
if not yPosition == None:
window.textYSpinBox.setValue(int(yPosition))
window.fontComboBox.currentFontChanged.connect(self.drawPreview)
window.lineEdit_title.textChanged.connect(self.drawPreview)
window.alignmentComboBox.currentIndexChanged.connect(self.drawPreview)
window.textXSpinBox.valueChanged.connect(self.drawPreview)
window.textYSpinBox.valueChanged.connect(self.drawPreview)
window.fontsizeSpinBox.valueChanged.connect(self.drawPreview)
window.lineEdit_textColor.textChanged.connect(self.drawPreview)
window.lineEdit_visColor.textChanged.connect(self.drawPreview)
#self.drawPreview()
ffmpeg_cmd = self.settings.value("ffmpeg_cmd", expanduser("~"))
self.videoThread = QtCore.QThread(self)
self.videoWorker = video_thread.Worker(self)
self.videoWorker.moveToThread(self.videoThread)
self.videoWorker.videoCreated.connect(self.videoCreated)
self.videoWorker.progressBarUpdate.connect(self.progressBarUpdated)
self.videoWorker.progressBarSetText.connect(self.progressBarSetText)
self.videoThread.start()
self.videoTask.emit(self.window.label_background.text(),
self.window.lineEdit_title.text(),
self.window.fontComboBox.currentFont(),
self.window.fontsizeSpinBox.value(),
self.window.alignmentComboBox.currentIndex(),
self.window.textXSpinBox.value(),
self.window.textYSpinBox.value(),
core.Core.RGBFromString(self.window.lineEdit_textColor.text()),
core.Core.RGBFromString(self.window.lineEdit_visColor.text()),
self.window.label_input.text(),
self.window.label_output.text())
#window.show()
def cleanUp(self):
self.timer.stop()
self.previewThread.quit()
self.previewThread.wait()
self.settings.setValue("titleFont", self.window.fontComboBox.currentFont().toString())
self.settings.setValue("alignment", str(self.window.alignmentComboBox.currentIndex()))
self.settings.setValue("fontSize", str(self.window.fontsizeSpinBox.value()))
self.settings.setValue("xPosition", str(self.window.textXSpinBox.value()))
self.settings.setValue("yPosition", str(self.window.textYSpinBox.value()))
self.settings.setValue("visColor", self.window.lineEdit_visColor.text())
self.settings.setValue("textColor", self.window.lineEdit_textColor.text())
def openInputFileDialog(self):
inputDir = self.settings.value("inputDir", expanduser("~"))
#fileName = QtGui.QFileDialog.getOpenFileName(self.window,
# "Open Music File", inputDir, "Music Files (*.mp3 *.wav *.ogg *.flac)");
fileName = r"C:\Users\MSHK\Downloads\المصطفى ﷺ _ مشاري راشد العفاسي وابنه محمد (192 kbps).mp3"
if not fileName == "":
self.settings.setValue("inputDir", os.path.dirname(fileName))
self.window.label_input.setText(fileName)
def openOutputFileDialog(self):
outputDir = self.settings.value("outputDir", expanduser("~"))
#fileName = QtGui.QFileDialog.getSaveFileName(self.window,
# "Set Output Video File", outputDir, "Video Files (*.mkv)");
fileName = r"C:\Users\MSHK\Pictures\tesing.mkv"
if not fileName == "":
self.settings.setValue("outputDir", os.path.dirname(fileName))
self.window.label_output.setText(fileName)
def openBackgroundFileDialog(self):
backgroundDir = self.settings.value("backgroundDir", expanduser("~"))
#fileName = QtGui.QFileDialog.getOpenFileName(self.window,
#"Open Background Image", backgroundDir, "Image Files (*.jpg *.png);; Video Files (*.mp4)");
fileName = r"C:\Users\MSHK\Pictures\butterfly - Copy - Copy.jpg"
if not fileName == "":
self.settings.setValue("backgroundDir", os.path.dirname(fileName))
self.window.label_background.setText(fileName)
self.drawPreview()
def createAudioVisualisation(self):
ffmpeg_cmd = self.settings.value("ffmpeg_cmd", expanduser("~"))
self.videoThread = QtCore.QThread(self)
self.videoWorker = video_thread.Worker(self)
self.videoWorker.moveToThread(self.videoThread)
self.videoWorker.videoCreated.connect(self.videoCreated)
self.videoWorker.progressBarUpdate.connect(self.progressBarUpdated)
self.videoWorker.progressBarSetText.connect(self.progressBarSetText)
self.videoThread.start()
self.videoTask.emit(self.window.label_background.text(),
self.window.lineEdit_title.text(),
self.window.fontComboBox.currentFont(),
self.window.fontsizeSpinBox.value(),
self.window.alignmentComboBox.currentIndex(),
self.window.textXSpinBox.value(),
self.window.textYSpinBox.value(),
core.Core.RGBFromString(self.window.lineEdit_textColor.text()),
core.Core.RGBFromString(self.window.lineEdit_visColor.text()),
self.window.label_input.text(),
self.window.label_output.text())
def progressBarUpdated(self, value):
self.window.progressBar_create.setValue(value)
def progressBarSetText(self, value):
self.window.progressBar_create.setFormat(value)
def videoCreated(self):
self.videoThread.quit()
self.videoThread.wait()
def drawPreview(self):
self.newTask.emit(self.window.label_background.text(),
self.window.lineEdit_title.text(),
self.window.fontComboBox.currentFont(),
self.window.fontsizeSpinBox.value(),
self.window.alignmentComboBox.currentIndex(),
self.window.textXSpinBox.value(),
self.window.textYSpinBox.value(),
core.Core.RGBFromString(self.window.lineEdit_textColor.text()),
core.Core.RGBFromString(self.window.lineEdit_visColor.text()))
# self.processTask.emit()
def showPreviewImage(self, image):
self._scaledPreviewImage = image
self._previewPixmap = QtGui.QPixmap.fromImage(self._scaledPreviewImage)
self.window.label_preview.setPixmap(self._previewPixmap)
def pickColor(self, colorTarget):
color = QtGui.QColorDialog.getColor()
if color.isValid():
RGBstring = '%s,%s,%s' % (str(color.red()), str(color.green()), str(color.blue()))
btnStyle = "QPushButton { background-color : %s; outline: none; }" % color.name()
if colorTarget == 'text':
self.window.lineEdit_textColor.setText(RGBstring)
                self.window.pushButton_textColor.setStyleSheet(btnStyle)
elif colorTarget == 'vis':
self.window.lineEdit_visColor.setText(RGBstring)
                self.window.pushButton_visColor.setStyleSheet(btnStyle)
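# Example invocation in command-line mode (hypothetical file names), matching
# the argparse flags defined in Command above:
#   python main.py -i song.mp3 -o clip.mkv -b background.jpg -t "My Title"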
if len(sys.argv) > 1:
# command line mode
app = QtGui.QApplication(sys.argv, False)
command = Command()
signal.signal(signal.SIGINT, command.cleanUp)
sys.exit(app.exec_())
else:
# gui mode
if __name__ == "__main__":
app = QtGui.QApplication(sys.argv)
window = uic.loadUi("main.ui")
# window.adjustSize()
desc = QtGui.QDesktopWidget()
dpi = desc.physicalDpiX()
topMargin = 0 if (dpi == 96) else int(10 * (dpi / 96))
window.resize(window.width() * (dpi / 96), window.height() * (dpi / 96))
window.verticalLayout_2.setContentsMargins(0, topMargin, 0, 0)
print("1")
main = Main(window)
signal.signal(signal.SIGINT, main.cleanUp)
atexit.register(main.cleanUp)
sys.exit(app.exec_())
| 41.795276
| 136
| 0.716152
|
794ae9d47cd348e73a34ba5bd696f2b537a2718a
| 26,037
|
py
|
Python
|
hops/application_4_alignment.py
|
ExoWorldsSpies/hops
|
a33e434befe17318c064210a289b453c6f91b44f
|
[
"MIT"
] | 5
|
2020-02-22T13:51:47.000Z
|
2021-12-10T20:24:11.000Z
|
hops/application_4_alignment.py
|
ExoWorldsSpies/hops
|
a33e434befe17318c064210a289b453c6f91b44f
|
[
"MIT"
] | 6
|
2020-02-24T16:29:11.000Z
|
2021-11-27T22:57:19.000Z
|
hops/application_4_alignment.py
|
ExoWorldsSpies/hops
|
a33e434befe17318c064210a289b453c6f91b44f
|
[
"MIT"
] | 2
|
2020-04-04T17:33:05.000Z
|
2021-03-04T20:10:23.000Z
|
import os
import time
import numpy as np
import matplotlib.patches as mpatches
import hops.pylightcurve3 as plc
from astropy.io import fits as pf
from hops.application_windows import MainWindow
class AlignmentWindow(MainWindow):
def __init__(self, log):
MainWindow.__init__(self, log, name='HOPS - Alignment', position=2)
# set variables, create and place widgets
self.bin_fits = self.log.get_param('bin_fits')
self.burn_limit = int(1.1 * self.log.get_param('burn_limit')) * self.bin_fits * self.bin_fits
self.shift_tolerance_p = self.log.get_param('shift_tolerance_p')
self.rotation_tolerance = self.log.get_param('rotation_tolerance')
self.min_calibration_stars_number = int(self.log.get_param('min_calibration_stars_number'))
self.all_frames = plc.open_dict(self.log.all_frames)
self.science_files = []
for science_file in self.all_frames:
if not self.all_frames[science_file][self.log.skip_key]:
self.science_files.append([self.all_frames[science_file][self.log.time_key], science_file])
else:
self.all_frames[science_file][self.log.align_x0_key] = False
self.all_frames[science_file][self.log.align_y0_key] = False
self.all_frames[science_file][self.log.align_u0_key] = False
fits = pf.open(os.path.join(self.log.reduction_directory, science_file), mode='update')
fits[1].header.set(self.log.align_x0_key, False)
fits[1].header.set(self.log.align_y0_key, False)
fits[1].header.set(self.log.align_u0_key, False)
fits.flush()
fits.close()
self.science_files.sort()
self.science_files = [ff[1] for ff in self.science_files]
self.skip_time = 0
self.science_counter = 0
self.test_level = None
self.redraw = None
self.stars = None
self.science_file = None
self.fits = None
self.std = None
self.mean = None
self.star_std = None
self.int_psf = None
self.stars_detected = None
self.rotation_detected = None
self.check_num = None
self.check_num_snr = None
self.x0 = None
self.y0 = None
self.u0 = None
self.f0 = None
self.comparisons = None
self.comparisons_snr = None
self.small_angles = None
self.large_angles = None
self.circle = None
self.settings_to_check = None
self.comparisons_to_check = None
# common definitions for all images
fits = plc.open_fits(os.path.join(self.log.reduction_directory, self.science_files[0]))
self.shift_tolerance = int(max(len(fits[1].data), len(fits[1].data[0])) * (self.shift_tolerance_p / 100.0))
self.y_length, self.x_length = fits[1].data.shape
self.circles_diameter = 0.02 * max(self.y_length, self.x_length)
# progress window
y_scale = (self.root.winfo_screenheight() - 500) / self.root.winfo_screenheight()
self.progress_figure = self.FitsWindow(figsize=(0.5, y_scale, 10, 10, len(fits[1].data[0]) / len(fits[1].data)))
self.progress_figure.load_fits(fits[1], input_name=self.science_files[0])
self.progress_all_stars = self.Label(text='')
self.progress_alignment = self.Progressbar(task="Aligning frames")
# self.progress_all_frames = self.Progressbar(task="Aligning all stars in all frames")
self.setup_window([
[[self.progress_figure, 0, 2]],
[[self.progress_all_stars, 0, 2]],
[[self.progress_alignment, 0, 2]],
[[self.Button(text='STOP ALIGNMENT & RETURN TO MAIN MENU', command=self.trigger_exit), 0, 2]],
[]
])
self.set_close_button_function(self.trigger_exit)
def run_alignment(self):
self.close = self.trigger_exit
if self.log.get_param('alignment_complete'):
if self.askyesno('Overwrite files', 'Alignment has been completed, do you want to run again?'):
self.log.set_param('alignment_complete', False)
self.log.save_local_log()
else:
self.log.set_param('proceed', True)
self.show()
if not self.log.get_param('alignment_complete'):
self.progress_all_stars.set('Analysing first frame...')
self.after(self.find_all_stars)
else:
self.def_close()
def alignment_log(self, *text):
# print(*text)
pass
def find_all_stars(self):
if self.exit:
self.after(self.align)
else:
fits = plc.open_fits(os.path.join(self.log.reduction_directory, self.science_files[0]))
metadata = self.all_frames[self.science_files[0]]
self.progress_figure.load_fits(fits[1], self.science_files[0])
stars, psf = plc.find_all_stars(fits[1].data,
mean=metadata[self.log.mean_key], std=metadata[self.log.std_key],
std_limit=3, burn_limit=self.burn_limit, star_std=metadata[self.log.psf_key],
progressbar=self.progress_all_stars, progress_window=self,
verbose=True
)
if self.exit:
                self.after(self.choose_calibration_stars)
stars = np.array(stars)
self.log.save_local_log()
all_stars_dict = {'all_stars': stars}
plc.save_dict(all_stars_dict, 'all_stars.pickle')
self.progress_all_stars.set('Choosing calibrating stars...')
            self.after(self.choose_calibration_stars)
    def choose_calibration_stars(self):
if self.exit:
self.after(self.align)
else:
all_stars_dict = plc.open_dict('all_stars.pickle')
stars = np.array(all_stars_dict['all_stars'])
fits = pf.open(os.path.join(self.log.reduction_directory, self.science_files[0]), memmap=False)
metadata = self.all_frames[self.science_files[0]]
frame_mean = metadata[self.log.mean_key]
frame_std = metadata[self.log.std_key]
frame_star_psf = metadata[self.log.psf_key]
bright_stars = []
std_limit = 30
while len(bright_stars) < 100 and std_limit >= 5.0:
bright_stars = []
for star in stars:
if star[2] + star[3] < 2.0 * self.burn_limit / 3.0:
if star[-1] > (2 * np.pi * (std_limit * frame_std) * frame_star_psf * frame_star_psf):
self.alignment_log(star[0], star[1], star[-1],
2 * np.pi * (frame_mean + std_limit * frame_std) * (frame_star_psf ** 2))
bright_stars.append(star)
std_limit -= 5
if len(bright_stars) < self.min_calibration_stars_number:
bright_stars = []
std_limit = 30
while len(bright_stars) < 100 and std_limit >= 5.0:
bright_stars = []
for star in stars:
if star[-1] > (2 * np.pi * (std_limit * frame_std) * frame_star_psf * frame_star_psf):
self.alignment_log(star[0], star[1], star[-1],
2 * np.pi * (frame_mean + std_limit * frame_std) * (frame_star_psf ** 2))
bright_stars.append(star)
std_limit -= 5
stars = sorted(bright_stars, key=lambda x: -x[-1] / (x[-2] ** 3))
x_ref_position = stars[0][0]
y_ref_position = stars[0][1]
f_ref = stars[0][-1]
del stars[0]
# take the rest as calibration stars and calculate their polar coordinates relatively to the first
calibration_stars_polar = []
for star in stars:
r_position, u_position = plc.cartesian_to_polar(star[0], star[1], x_ref_position, y_ref_position)
if r_position > 5 * frame_star_psf:
calibration_stars_polar.append([r_position, u_position])
stars = sorted(stars, key=lambda x: -x[-1])
calibration_stars_polar_snr = []
for star in stars:
r_position, u_position = plc.cartesian_to_polar(star[0], star[1], x_ref_position, y_ref_position)
if r_position > 5 * frame_star_psf:
calibration_stars_polar_snr.append([r_position, u_position])
if len(calibration_stars_polar) <= self.min_calibration_stars_number:
self.check_num = len(calibration_stars_polar) - 0.5
self.check_num_snr = len(calibration_stars_polar) - 0.5
else:
self.check_num = max(self.min_calibration_stars_number - 0.5, len(calibration_stars_polar) / 10.0 - 0.5)
self.check_num_snr = max(self.min_calibration_stars_number - 0.5, len(calibration_stars_polar) / 20.0 - 0.5)
self.x0 = x_ref_position
self.y0 = y_ref_position
self.u0 = 0
self.f0 = f_ref
self.comparisons = calibration_stars_polar
self.comparisons_snr = calibration_stars_polar_snr
fits.close()
ustep = np.arcsin(float(frame_star_psf) / self.comparisons[int(len(self.comparisons) / 2)][0])
self.small_angles = np.append(np.arange(-self.rotation_tolerance, self.rotation_tolerance, ustep),
np.arange(-self.rotation_tolerance, self.rotation_tolerance, ustep) + np.pi)
self.large_angles = np.array([np.pi, 0])
for ff in range(1, int(np.pi / ustep) + 1):
self.large_angles = np.append(self.large_angles, np.pi - ff * ustep)
self.large_angles = np.append(self.large_angles, np.pi + ff * ustep)
self.large_angles = np.append(self.large_angles, 0 - ff * ustep)
self.large_angles = np.append(self.large_angles, 0 + ff * ustep)
# set the looking window and angular step
self.progress_all_stars.set(' ')
self.after(self.align)
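    # Hedged note (inferred from this class's code, not from upstream docs):
    # calibration stars are stored in polar form (r, u) relative to the
    # reference star, so a frame shift only changes (x0, y0) and a rotation
    # only offsets u0. The forward mapping used when checking stars is:
    #   check_x = x0 + r * cos(u0 + u)
    #   check_y = y0 + r * sin(u0 + u)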
def align(self):
if self.exit:
self.after(self.plot_current)
else:
if self.science_counter == 0:
self.progress_alignment.initiate(len(self.science_files))
self.stars = None
self.science_file = self.science_files[self.science_counter]
self.fits = pf.open(os.path.join(self.log.reduction_directory, self.science_file), memmap=False, mode='update')
self.std = self.fits[1].header[self.log.std_key]
self.mean = self.fits[1].header[self.log.mean_key]
self.star_std = self.fits[1].header[self.log.psf_key]
self.int_psf = int(max(1, round(self.fits[1].header[self.log.psf_key])))
self.stars_detected = False
self.rotation_detected = False
self.progress_alignment.show_message(' ')
self.progress_figure.load_fits(self.fits[1], self.science_file, draw=False)
self.circle = mpatches.Circle((self.x0, self.y0), self.circles_diameter, ec='r', fill=False)
self.progress_figure.ax.add_patch(self.circle)
self.test_level = 1
self.redraw = 0
self.skip_time = 0
self.after(self.detect_stars)
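    # Hedged summary of the escalation strategy implemented below (inferred
    # from the branches and progress messages, not from original docs):
    #   level 1: look for the reference star at its previous position
    #   level 2: search all stars inside the shift-tolerance window
    #   level 3: level 2 plus small trial rotations
    #   level 4: search the whole frame (after offering to skip the frame)
    #   level 5: level 4 plus large trial rotations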
def detect_stars(self):
if self.exit:
self.after(self.plot_current)
else:
if self.test_level == 1:
self.stars = plc.find_single_star(self.fits[1].data, self.x0, self.y0, mean=self.mean, std=self.std,
burn_limit=self.burn_limit, star_std=self.star_std)
if self.stars:
self.stars.append(2 * np.pi * self.stars[2] * self.stars[4] * self.stars[5])
self.stars = [self.stars]
elif self.test_level == 2:
self.skip_time = time.time()
self.stars = plc.find_all_stars(self.fits[1].data, x_low=self.x0 - self.shift_tolerance,
x_upper=self.x0 + self.shift_tolerance,
y_low=self.y0 - self.shift_tolerance,
y_upper=self.y0 + self.shift_tolerance, x_centre=self.x0,
y_centre=self.y0, mean=self.mean,
std=self.std, burn_limit=self.burn_limit,
star_std=self.star_std, verbose=True,
order_by_distance_and_flux=self.f0)[0]
self.progress_all_stars.set(' ')
elif self.test_level == 4:
if self.askyesno('HOPS - Alignment', 'Stars not found close to their previous positions.\n'
'Do you want to skip this frame?'):
self.after(self.plot_current)
self.stars = plc.find_all_stars(self.fits[1].data, mean=self.mean, std=self.std, burn_limit=self.burn_limit,
star_std=self.star_std, order_by_flux=self.f0, verbose=True)[0]
self.progress_all_stars.set(' ')
if self.stars:
self.settings_to_check = []
if self.test_level == 1:
self.settings_to_check.append([self.stars[0][0], self.stars[0][1], self.u0, self.stars[0]])
self.setting_checking = 0
self.comparisons_to_check = self.comparisons
elif self.test_level == 2:
for star in self.stars:
self.settings_to_check.append([star[0], star[1], self.u0, star])
self.comparisons_to_check = self.comparisons_snr
elif self.test_level == 3:
for star in self.stars:
for rotation in self.small_angles:
self.settings_to_check.append([star[0], star[1], rotation, star])
elif self.test_level == 4:
for star in self.stars:
self.settings_to_check.append([star[0], star[1], self.u0, star])
elif self.test_level == 5:
for star in self.stars:
for rotation in self.large_angles:
self.settings_to_check.append([star[0], star[1], rotation, star])
self.setting_checking = 0
self.after(self.check_star)
else:
if self.test_level == 1:
self.test_level = 2
self.progress_figure.draw()
self.progress_all_stars.set('Analysing frame...')
self.progress_alignment.show_message('Testing small shift...')
self.after(self.detect_stars)
elif self.test_level == 2:
self.test_level = 3
self.progress_alignment.show_message('Testing small shift & rotation...')
self.after(self.detect_stars)
elif self.test_level == 3:
self.test_level = 4
self.progress_all_stars.set('Analysing frame...')
self.progress_alignment.show_message('Testing large shift...')
self.after(self.detect_stars)
elif self.test_level == 4:
self.test_level = 5
self.progress_alignment.show_message('Testing large shift & rotation...')
self.after(self.detect_stars)
else:
self.after(self.plot_current)
def check_star(self):
if self.exit:
self.after(self.plot_current)
else:
x, y, u, star = self.settings_to_check[self.setting_checking]
self.alignment_log('Checking star at: ', x, y, ', with rotation:', u)
if self.redraw >= 1:
self.circle.set_center((x, y))
self.progress_figure.draw()
self.redraw = 0
else:
self.redraw += 0.01
test = 0
for comp in self.comparisons_to_check:
check_x = int(x + comp[0] * np.cos(u + comp[1]))
check_y = int(y + comp[0] * np.sin(u + comp[1]))
if 0 < check_x < self.x_length and 0 < check_y < self.y_length:
check_sum = np.sum(self.fits[1].data[check_y - self.int_psf:check_y + self.int_psf + 1,
check_x - self.int_psf:check_x + self.int_psf + 1])
check_lim = (self.fits[1].header[self.log.mean_key] +
3 * self.fits[1].header[self.log.std_key]) * ((2 * self.int_psf + 1) ** 2)
if check_sum > check_lim:
test += 1
else:
test -= 1
self.alignment_log('Check ref. star at: ', check_x, check_y, ', Test: ', test)
if abs(test) > self.check_num:
break
if test >= self.check_num:
self.stars_detected = True
if self.test_level > 1:
self.rotation_detected = True
self.x0 = x
self.y0 = y
self.u0 = u
self.f0 = star[-1]
self.after(self.plot_current)
else:
self.setting_checking += 1
if self.setting_checking < len(self.settings_to_check):
self.after(self.check_star)
else:
if self.test_level == 1:
self.test_level = 2
self.progress_figure.draw()
self.progress_all_stars.set('Analysing frame...')
self.progress_alignment.show_message('Testing small shift...')
self.after(self.detect_stars)
elif self.test_level == 2:
self.test_level = 3
self.progress_alignment.show_message('Testing small shift & rotation...')
self.after(self.detect_stars)
elif self.test_level == 3:
self.test_level = 4
self.progress_all_stars.set('Analysing frame...')
self.progress_alignment.show_message('Testing large shift...')
self.after(self.detect_stars)
elif self.test_level == 4:
self.test_level = 5
self.progress_alignment.show_message('Testing large shift & rotation...')
self.after(self.detect_stars)
else:
self.after(self.plot_current)
def plot_current(self):
if self.exit:
self.after(self.save)
else:
if self.stars_detected:
if self.rotation_detected:
test_u0 = []
test_cos = []
test_sin = []
for ii in self.comparisons[:int(self.check_num + 0.5)]:
check_x = self.x0 + ii[0] * np.cos(self.u0 + ii[1])
check_y = self.y0 + ii[0] * np.sin(self.u0 + ii[1])
star = plc.find_single_star(self.fits[1].data, check_x, check_y, mean=self.mean, std=self.std,
burn_limit=self.burn_limit, star_std=self.star_std)
if star:
diff = plc.cartesian_to_polar(star[0], star[1], self.x0, self.y0)[1] - ii[1]
if diff < 0:
diff += 2 * np.pi
test_u0.append(diff)
test_cos.append(np.cos(diff))
test_sin.append(np.sin(diff))
if len(test_u0) > 0:
test_cos = np.median(test_cos)
test_sin = np.median(test_sin)
self.u0 = np.arccos(test_cos)
if test_sin < 0:
self.u0 = np.pi + (np.pi - self.u0)
self.fits[1].header.set(self.log.align_x0_key, self.x0)
self.fits[1].header.set(self.log.align_y0_key, self.y0)
self.fits[1].header.set(self.log.align_u0_key, self.u0)
self.circle.set_center((self.x0, self.y0))
for ii in self.comparisons[:int(self.check_num + 0.5)]:
circle = mpatches.Circle((self.x0 + ii[0] * np.cos(self.u0 + ii[1]),
self.y0 + ii[0] * np.sin(self.u0 + ii[1])),
self.circles_diameter, ec='w', fill=False)
self.progress_figure.ax.add_patch(circle)
else:
self.fits[1].header.set(self.log.align_x0_key, False)
self.fits[1].header.set(self.log.align_y0_key, False)
self.fits[1].header.set(self.log.align_u0_key, False)
self.all_frames[self.science_file][self.log.align_x0_key] = self.fits[1].header[self.log.align_x0_key]
self.all_frames[self.science_file][self.log.align_y0_key] = self.fits[1].header[self.log.align_y0_key]
self.all_frames[self.science_file][self.log.align_u0_key] = self.fits[1].header[self.log.align_u0_key]
if not self.fits[1].header[self.log.align_x0_key]:
self.all_frames[self.science_file][self.log.skip_key] = True
self.fits.flush()
self.fits.close()
self.progress_figure.draw()
if self.skip_time == 0:
self.progress_alignment.update()
else:
self.progress_alignment.update(skip=time.time() - self.skip_time)
self.skip_time = 0
self.science_counter += 1
if self.science_counter >= len(self.science_files):
self.progress_all_stars.set('Aligning all stars in all frames...')
self.after(self.save)
else:
self.after(self.align)
def save(self):
if self.exit:
self.after(self.check_visibility)
else:
plc.save_dict(self.all_frames, self.log.all_frames)
self.after(self.check_visibility)
def check_visibility(self):
if self.exit:
self.def_close()
else:
all_stars_dict = plc.open_dict('all_stars.pickle')
stars = np.array(all_stars_dict['all_stars'])
fits = plc.open_fits(os.path.join(self.log.reduction_directory, self.science_files[0]))
polar_coords = []
for star in all_stars_dict['all_stars']:
polar_coords.append(plc.cartesian_to_polar(star[0], star[1],
self.all_frames[self.science_files[0]][
self.log.align_x0_key],
self.all_frames[self.science_files[0]][
self.log.align_y0_key]))
in_fov = np.ones(len(polar_coords))
for science_file in self.science_files:
metadata = self.all_frames[science_file]
if self.exit:
self.def_close()
ref_x_position = metadata[self.log.align_x0_key]
ref_y_position = metadata[self.log.align_y0_key]
ref_u_position = metadata[self.log.align_u0_key]
star_std = metadata[self.log.psf_key]
if ref_x_position:
in_fov_single = []
for star in polar_coords:
cartesian_x = ref_x_position + star[0] * np.cos(ref_u_position + star[1])
cartesian_y = ref_y_position + star[0] * np.sin(ref_u_position + star[1])
if (3 * star_std < cartesian_x < len(fits[1].data[0]) - 3 * star_std
and 3 * star_std < cartesian_y < len(fits[1].data) - 3 * star_std):
in_fov_single.append(1)
else:
in_fov_single.append(0)
in_fov *= in_fov_single
all_stars_dict['in_fov'] = np.array(in_fov)
visible_fov_x_min = np.min(stars[np.where(in_fov), 0]) - 3 * star_std
visible_fov_x_max = np.max(stars[np.where(in_fov), 0]) + 3 * star_std
visible_fov_y_min = np.min(stars[np.where(in_fov), 1]) - 3 * star_std
visible_fov_y_max = np.max(stars[np.where(in_fov), 1]) + 3 * star_std
self.log.set_param('min_x', float(visible_fov_x_min))
self.log.set_param('min_y', float(visible_fov_y_min))
self.log.set_param('max_x', float(visible_fov_x_max))
self.log.set_param('max_y', float(visible_fov_y_max))
plc.save_dict(all_stars_dict, 'all_stars.pickle')
self.log.set_param('alignment_complete', True)
self.log.set_param('alignment_version', self.log.version)
self.log.save_local_log()
self.log.set_param('proceed', True)
self.def_close()
| 43.395
| 124
| 0.53616
|
794aeb145ac4f80ebe094c058e7ec3173c5a14a9
| 3,161
|
py
|
Python
|
mdanalysis/runConfGen.py
|
otayfuroglu/mdutils
|
481148316d9347c1136c22f581a3668da4192168
|
[
"MIT"
] | null | null | null |
mdanalysis/runConfGen.py
|
otayfuroglu/mdutils
|
481148316d9347c1136c22f581a3668da4192168
|
[
"MIT"
] | null | null | null |
mdanalysis/runConfGen.py
|
otayfuroglu/mdutils
|
481148316d9347c1136c22f581a3668da4192168
|
[
"MIT"
] | null | null | null |
#! /home/omert/miniconda3/bin/python
from gmx_md_utils import *
import sys, os, shutil
from rdkit import Chem
from rdkit.Chem import AllChem, TorsionFingerprints
from rdkit.ML.Cluster import Butina
def mainCalcRMS(mols_path, ref_path):
ref_mol = Chem.MolFromMolFile(ref_path)
suppl = Chem.SDMolSupplier(mols_path)
for i, mol in enumerate(suppl):
print("RMSD for %d. mol" %i, calcRMS(mol, ref_mol))
def mainGenConf():
mol_path = "Fabienne_project/Aa12b12BCD/ZnPc_SCH3_Aa1a2b1b2BCD_1.mol2"
    # use os.path.splitext so the extension is stripped correctly;
    # str.replace(".mol", "") would leave a stray "2" for .mol2 files
    fileBase = os.path.splitext(os.path.basename(mol_path))[0]
print(fileBase)
numConfs = 500
maxAttempts = 5000
pruneRmsThresh = 0.3
clusterMethod = "RMSD"
clusterThreshold = 2.0
minimizeIterations = 0
# suppl = Chem.ForwardSDMolSupplier(input_file)
print(mol_path)
# suppl = Chem.MolFromMolFile(mol_path)
suppl = Chem.MolFromMol2File(mol_path)
i=0
xyzDIR = "xyz"
if os.path.exists(xyzDIR):
shutil.rmtree(xyzDIR)
os.mkdir(xyzDIR)
sdfDIR = "sdf"
if os.path.exists(sdfDIR):
shutil.rmtree(sdfDIR)
os.mkdir(sdfDIR)
for mol in [suppl]:
i = i+1
if mol is None: continue
m = Chem.AddHs(mol, addCoords=True)
        # generate the conformers
conformerIds = genGonformers(m, numConfs, maxAttempts, pruneRmsThresh, True, True, True)
# align conformers
# AllChem.AlignMolConformers(m, conformerIds)
conformerPropsDict = {}
for j, conformerId in enumerate(conformerIds):
conf_file_base = fileBase + "_conf_" + str(j)
writeConf2sdf(m, "%s/%s.sdf" % (sdfDIR, conf_file_base), conformerId)
mol = read("%s/%s.sdf" % (sdfDIR, conf_file_base))
write("%s/%s.xyz" % (xyzDIR, conf_file_base), mol)
# energy minimise (optional) and energy calculation
# props = calcEnergy(m, conformerId, minimizeIterations)
# conformerPropsDict[conformerId] = props
# # cluster the conformers
# rmsClusters = getClusterConf(m, clusterMethod, clusterThreshold)
# print("Molecule", i, ": generated", len(conformerIds), "conformers and", len(rmsClusters), "clusters")
# rmsClustersPerCluster = []
# clusterNumber = 0
# minEnergy = 9999999999999
# for cluster in rmsClusters:
# clusterNumber = clusterNumber+1
# rmsWithinCluster = alignConfs(m, cluster)
# for conformerId in cluster:
# e = props["energy_abs"]
# if e < minEnergy:
# minEnergy = e
# props = conformerPropsDict[conformerId]
# props["cluster_no"] = clusterNumber
# props["cluster_centroid"] = cluster[0] + 1
# idx = cluster.index(conformerId)
# if idx > 0:
# props["rms_to_centroid"] = rmsWithinCluster[idx-1]
# else:
# props["rms_to_centroid"] = 0.0
# writeMinEConf2sdf(m, "target_conf_" + str(i) + ".sdf", rmsClusters, conformerPropsDict, minEnergy)
mainGenConf()
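# Example usage (hypothetical paths) of the RMSD helper defined above, which
# this script defines but never calls:
#   mainCalcRMS("sdf/ZnPc_confs.sdf", "reference.mol")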
| 36.755814
| 113
| 0.608035
|
794aeb15e34e805bb4cb518de4515271d3f2f13f
| 360
|
py
|
Python
|
pmu-tools-master/ucevent/ucmsg.py
|
patinnc/60secs
|
45ad68e4359e0dfd506f9e3a898c216ed38e7fd0
|
[
"MIT"
] | null | null | null |
pmu-tools-master/ucevent/ucmsg.py
|
patinnc/60secs
|
45ad68e4359e0dfd506f9e3a898c216ed38e7fd0
|
[
"MIT"
] | null | null | null |
pmu-tools-master/ucevent/ucmsg.py
|
patinnc/60secs
|
45ad68e4359e0dfd506f9e3a898c216ed38e7fd0
|
[
"MIT"
] | 1
|
2021-03-22T20:38:10.000Z
|
2021-03-22T20:38:10.000Z
|
# Handle warnings and errors
# Separate module to avoid circular imports
import sys
import fnmatch
quiet = False
debug = None
def debug_msg(x, y):
if debug and any(map(lambda p: fnmatch.fnmatch(x, p), debug.split(","))):
print >>sys.stderr, "debug:", x + ": " + str(y)
def warning(x):
if not quiet:
print >>sys.stderr, "WARNING:", x
| 22.5
| 77
| 0.638889
|
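ucmsg.py above is written for Python 2 (print statements redirected with >>). The same two helpers in Python 3 syntax, behavior unchanged:
import sys
import fnmatch

quiet = False
debug = None  # comma-separated fnmatch patterns, e.g. "uncore*,cbo*"

def debug_msg(topic, payload):
    # emit only when the topic matches one of the configured debug patterns
    if debug and any(fnmatch.fnmatch(topic, p) for p in debug.split(",")):
        print("debug: " + topic + ": " + str(payload), file=sys.stderr)

def warning(msg):
    if not quiet:
        print("WARNING:", msg, file=sys.stderr)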
794aeb1606271051d05972f7162bf659643d7dc4
| 7,547
|
py
|
Python
|
python/netsnmp/client.py
|
aristanetworks/net-snmp
|
49efeb8092af1d6be8b7535a7556ac24b25d2b6d
|
[
"Net-SNMP"
] | 1
|
2015-07-08T20:43:18.000Z
|
2015-07-08T20:43:18.000Z
|
python/netsnmp/client.py
|
aristanetworks/net-snmp
|
49efeb8092af1d6be8b7535a7556ac24b25d2b6d
|
[
"Net-SNMP"
] | 1
|
2016-11-14T16:42:51.000Z
|
2016-11-14T19:55:59.000Z
|
python/netsnmp/client.py
|
aristanetworks/net-snmp
|
49efeb8092af1d6be8b7535a7556ac24b25d2b6d
|
[
"Net-SNMP"
] | null | null | null |
import client_intf
import string
import re
import types
from sys import stderr
from client_intf import read_module, read_mib
# control verbosity of error output
verbose = 1
secLevelMap = { 'noAuthNoPriv':1, 'authNoPriv':2, 'authPriv':3 }
def _parse_session_args(kargs):
sessArgs = {
'Version':3,
'DestHost':'localhost',
'Community':'public',
'Timeout':1000000,
'Retries':3,
'RemotePort':161,
'LocalPort':0,
'SecLevel':'noAuthNoPriv',
'SecName':'initial',
'PrivProto':'DEFAULT',
'PrivPass':'',
'AuthProto':'DEFAULT',
'AuthPass':'',
'ContextEngineId':'',
'SecEngineId':'',
'Context':'',
'Engineboots':0,
'Enginetime':0,
'UseNumeric':0,
'OurIdentity':'',
'TheirIdentity':'',
'TheirHostname':'',
'TrustCert':''
}
keys = kargs.keys()
for key in keys:
if sessArgs.has_key(key):
sessArgs[key] = kargs[key]
else:
print >>stderr, "ERROR: unknown key", key
return sessArgs
def STR(obj):
if obj != None:
obj = str(obj)
return obj
class Varbind(object):
def __init__(self, tag=None, iid=None, val=None, type=None):
self.tag = STR(tag)
self.iid = STR(iid)
self.val = STR(val)
self.type = STR(type)
# parse iid out of tag if needed
if iid == None and tag != None:
regex = re.compile(r'^((?:\.\d+)+|(?:\w+(?:[-:]*\w+)+))\.?(.*)$')
match = regex.match(tag)
if match:
(self.tag, self.iid) = match.group(1,2)
def __setattr__(self, name, val):
self.__dict__[name] = val
def print_str(self):
return self.tag, self.iid, self.val, self.type
class VarList(object):
def __init__(self, *vs):
self.varbinds = []
for var in vs:
if isinstance(var, netsnmp.client.Varbind):
self.varbinds.append(var)
else:
self.varbinds.append(Varbind(var))
def __len__(self):
return len(self.varbinds)
def __getitem__(self, index):
return self.varbinds[index]
def __setitem__(self, index, val):
if isinstance(val, netsnmp.client.Varbind):
self.varbinds[index] = val
else:
raise TypeError
def __iter__(self):
return iter(self.varbinds)
def __delitem__(self, index):
del self.varbinds[index]
def __repr__(self):
return repr(self.varbinds)
def __getslice__(self, i, j):
return self.varbinds[i:j]
def append(self, *vars):
for var in vars:
if isinstance(var, netsnmp.client.Varbind):
self.varbinds.append(var)
else:
raise TypeError
class Session(object):
def __init__(self, **args):
self.sess_ptr = None
self.UseLongNames = 0
self.UseNumeric = 0
self.UseSprintValue = 0
self.UseEnums = 0
self.BestGuess = 0
self.RetryNoSuch = 0
self.ErrorStr = ''
self.ErrorNum = 0
self.ErrorInd = 0
sess_args = _parse_session_args(args)
for k,v in sess_args.items():
self.__dict__[k] = v
# check for transports that may be tunneled
transportCheck = re.compile('^(tls|dtls|ssh)');
match = transportCheck.match(sess_args['DestHost'])
if match:
self.sess_ptr = client_intf.session_tunneled(
sess_args['Version'],
sess_args['DestHost'],
sess_args['LocalPort'],
sess_args['Retries'],
sess_args['Timeout'],
sess_args['SecName'],
secLevelMap[sess_args['SecLevel']],
sess_args['ContextEngineId'],
sess_args['Context'],
sess_args['OurIdentity'],
sess_args['TheirIdentity'],
sess_args['TheirHostname'],
sess_args['TrustCert'],
);
elif sess_args['Version'] == 3:
self.sess_ptr = client_intf.session_v3(
sess_args['Version'],
sess_args['DestHost'],
sess_args['LocalPort'],
sess_args['Retries'],
sess_args['Timeout'],
sess_args['SecName'],
secLevelMap[sess_args['SecLevel']],
sess_args['SecEngineId'],
sess_args['ContextEngineId'],
sess_args['Context'],
sess_args['AuthProto'],
sess_args['AuthPass'],
sess_args['PrivProto'],
sess_args['PrivPass'],
sess_args['Engineboots'],
sess_args['Enginetime'])
else:
self.sess_ptr = client_intf.session(
sess_args['Version'],
sess_args['Community'],
sess_args['DestHost'],
sess_args['LocalPort'],
sess_args['Retries'],
sess_args['Timeout'])
def get(self, varlist):
res = client_intf.get(self, varlist)
return res
def set(self, varlist):
res = client_intf.set(self, varlist)
return res
def getnext(self, varlist):
res = client_intf.getnext(self, varlist)
return res
def getbulk(self, nonrepeaters, maxrepetitions, varlist):
if self.Version == 1:
return None
res = client_intf.getbulk(self, nonrepeaters, maxrepetitions, varlist)
return res
def walk(self, varlist):
res = client_intf.walk(self, varlist)
return res
def __del__(self):
res = client_intf.delete_session(self)
return res
import netsnmp
def snmpget(*args, **kargs):
sess = Session(**kargs)
var_list = VarList()
for arg in args:
if isinstance(arg, netsnmp.client.Varbind):
var_list.append(arg)
else:
var_list.append(Varbind(arg))
res = sess.get(var_list)
return res
def snmpset(*args, **kargs):
sess = Session(**kargs)
var_list = VarList()
for arg in args:
if isinstance(arg, netsnmp.client.Varbind):
var_list.append(arg)
else:
var_list.append(Varbind(arg))
res = sess.set(var_list)
return res
def snmpgetnext(*args, **kargs):
sess = Session(**kargs)
var_list = VarList()
for arg in args:
if isinstance(arg, netsnmp.client.Varbind):
var_list.append(arg)
else:
var_list.append(Varbind(arg))
res = sess.getnext(var_list)
return res
def snmpgetbulk(nonrepeaters, maxrepetitions,*args, **kargs):
sess = Session(**kargs)
var_list = VarList()
for arg in args:
if isinstance(arg, netsnmp.client.Varbind):
var_list.append(arg)
else:
var_list.append(Varbind(arg))
res = sess.getbulk(nonrepeaters, maxrepetitions, var_list)
return res
def snmpwalk(*args, **kargs):
sess = Session(**kargs)
if isinstance(args[0], netsnmp.client.VarList):
var_list = args[0]
else:
var_list = VarList()
for arg in args:
if isinstance(arg, netsnmp.client.Varbind):
var_list.append(arg)
else:
var_list.append(Varbind(arg))
res = sess.walk(var_list)
return res
| 28.055762
| 78
| 0.543395
|
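A hypothetical usage sketch for the client module above; the host, community string, and OIDs are placeholders, and a reachable SNMP agent plus the compiled client_intf extension are assumed.
import netsnmp

# one-shot GET of two scalars over SNMPv2c
session = netsnmp.Session(Version=2, DestHost="192.0.2.10", Community="public")
varlist = netsnmp.VarList(netsnmp.Varbind("sysDescr", "0"),
                          netsnmp.Varbind("sysUpTime", "0"))
print(session.get(varlist))  # tuple of returned values

# walk a whole subtree with the module-level convenience wrapper
print(netsnmp.snmpwalk(netsnmp.Varbind("ifDescr"),
                       Version=2, DestHost="192.0.2.10", Community="public"))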
794aeb575b765228142bcdec088214db472bf427
| 2,311
|
py
|
Python
|
venv/Lib/site-packages/win32comext/shell/demos/viewstate.py
|
ajayiagbebaku/NFL-Model
|
afcc67a85ca7138c58c3334d45988ada2da158ed
|
[
"MIT"
] | 150
|
2021-11-02T05:31:51.000Z
|
2022-03-24T06:22:22.000Z
|
venv/Lib/site-packages/win32comext/shell/demos/viewstate.py
|
ajayiagbebaku/NFL-Model
|
afcc67a85ca7138c58c3334d45988ada2da158ed
|
[
"MIT"
] | 4
|
2021-12-01T11:55:58.000Z
|
2022-02-24T16:14:37.000Z
|
venv/Lib/site-packages/win32comext/shell/demos/viewstate.py
|
ajayiagbebaku/NFL-Model
|
afcc67a85ca7138c58c3334d45988ada2da158ed
|
[
"MIT"
] | 33
|
2021-11-03T00:29:41.000Z
|
2022-03-15T13:15:56.000Z
|
"""
Demonstrates how to propagate a folder's view state to all its subfolders
The format of the ColInfo stream is apparently undocumented, but
it can be read raw from one folder and copied to another's view state.
"""
from win32com.shell import shell, shellcon
import pythoncom
import os, sys
template_folder = os.path.split(sys.executable)[0]
print("Template folder:", template_folder)
template_pidl = shell.SHILCreateFromPath(template_folder, 0)[0]
template_pb = shell.SHGetViewStatePropertyBag(
template_pidl,
"Shell",
shellcon.SHGVSPB_FOLDERNODEFAULTS,
pythoncom.IID_IPropertyBag,
)
# Column info has to be read as a stream
# This may blow up if folder has never been opened in Explorer and has no ColInfo yet
template_iunk = template_pb.Read("ColInfo", pythoncom.VT_UNKNOWN)
template_stream = template_iunk.QueryInterface(pythoncom.IID_IStream)
streamsize = template_stream.Stat()[2]
template_colinfo = template_stream.Read(streamsize)
def update_colinfo(not_used, dir_name, fnames):
for fname in fnames:
full_fname = os.path.join(dir_name, fname)
if os.path.isdir(full_fname):
print(full_fname)
pidl = shell.SHILCreateFromPath(full_fname, 0)[0]
pb = shell.SHGetViewStatePropertyBag(
pidl,
"Shell",
shellcon.SHGVSPB_FOLDERNODEFAULTS,
pythoncom.IID_IPropertyBag,
)
## not all folders already have column info, and we're replacing it anyway
pb.Write("ColInfo", template_stream)
iunk = pb.Read("ColInfo", pythoncom.VT_UNKNOWN)
s = iunk.QueryInterface(pythoncom.IID_IStream)
s.Write(template_colinfo)
s = None
## attribute names read from registry, can't find any way to enumerate IPropertyBag
for attr in (
"Address",
"Buttons",
"Col",
"Vid",
"WFlags",
"FFlags",
"Sort",
"SortDir",
"ShowCmd",
"FolderType",
"Mode",
"Rev",
):
pb.Write(attr, template_pb.Read(attr))
pb = None
os.path.walk(template_folder, update_colinfo, None)
| 34.492537
| 95
| 0.621809
|
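os.path.walk on the last line was removed in Python 3, so the demo only runs on Python 2 as written. A sketch of the same subdirectory traversal with os.walk; apply_template is a hypothetical stand-in for the property-bag logic in update_colinfo.
import os

def walk_folders(root, visit):
    # visit every subdirectory under root, mirroring the isdir() filter above
    for dirpath, dirnames, _filenames in os.walk(root):
        for name in dirnames:
            visit(os.path.join(dirpath, name))

# walk_folders(template_folder, apply_template)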
794aeb8d7f0ab1256b43c35ea4fb64c0820c38ee
| 1,940
|
py
|
Python
|
nowcasting_dataset/square.py
|
lenassero/nowcasting_dataset
|
deaf098c4d318f3ef532bac73f9cc4fa2858479b
|
[
"MIT"
] | null | null | null |
nowcasting_dataset/square.py
|
lenassero/nowcasting_dataset
|
deaf098c4d318f3ef532bac73f9cc4fa2858479b
|
[
"MIT"
] | null | null | null |
nowcasting_dataset/square.py
|
lenassero/nowcasting_dataset
|
deaf098c4d318f3ef532bac73f9cc4fa2858479b
|
[
"MIT"
] | null | null | null |
""" Square objects """
from numbers import Number
from typing import NamedTuple, Union
from nowcasting_dataset.consts import Array
class BoundingBox(NamedTuple):
"""Bounding box tuple"""
top: Union[Number, float]
bottom: Union[Number, float]
left: Union[Number, float]
right: Union[Number, float]
class Square:
"""Class for computing bounding box for satellite imagery."""
def __init__(self, size_pixels: int, meters_per_pixel: Number):
"""
Init
Args:
size_pixels: number of pixels
meters_per_pixel: how many meters for each pixel
"""
self.size_pixels = size_pixels
size_meters = size_pixels * meters_per_pixel
self._half_size_meters = size_meters / 2
def bounding_box_centered_on(
self, x_meters_center: Number, y_meters_center: Number
) -> BoundingBox:
"""
Get bounding box from a centre
Args:
x_meters_center: x center of the bounding box
y_meters_center: y center of the bounding box
Returns: Bounding box
"""
return BoundingBox(
top=y_meters_center + self._half_size_meters,
bottom=y_meters_center - self._half_size_meters,
left=x_meters_center - self._half_size_meters,
right=x_meters_center + self._half_size_meters,
)
def get_bounding_box_mask(bounding_box: BoundingBox, x: Array, y: Array) -> Array:
"""
    Get a bounding box mask from x and y locations, i.e. whether the x, y coords lie in the bounding box.
Args:
bounding_box: Bounding box
x: x coordinates
y: y coordinates
    Returns: boolean array indicating whether each (x, y) coordinate lies in the bounding box
"""
mask = (
(x >= bounding_box.left)
& (x <= bounding_box.right)
& (y >= bounding_box.bottom)
& (y <= bounding_box.top)
)
return mask
| 26.944444
| 94
| 0.63299
|
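An illustrative use of the classes above (the coordinate values are made up): a 128-pixel square at 2000 m per pixel spans 256 km, so the half-size is 128 km.
import numpy as np

square = Square(size_pixels=128, meters_per_pixel=2000)
box = square.bounding_box_centered_on(x_meters_center=0, y_meters_center=0)

x = np.array([-100_000, 0, 90_000, 500_000])
y = np.array([-100_000, 0, 120_000, 0])
print(get_bounding_box_mask(box, x, y))  # [ True  True  True False]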
794aeba8187a07b167c1891ce4e55acf97fbc78e
| 2,812
|
py
|
Python
|
test/helpers.py
|
DalavanCloud/hpc-container-maker
|
555093c0a5c98bd2b0114831b8c676c0c3c50dd7
|
[
"Apache-2.0"
] | 1
|
2019-02-25T22:54:31.000Z
|
2019-02-25T22:54:31.000Z
|
test/helpers.py
|
DalavanCloud/hpc-container-maker
|
555093c0a5c98bd2b0114831b8c676c0c3c50dd7
|
[
"Apache-2.0"
] | null | null | null |
test/helpers.py
|
DalavanCloud/hpc-container-maker
|
555093c0a5c98bd2b0114831b8c676c0c3c50dd7
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
"""Unit testing helpers"""
from __future__ import unicode_literals
from __future__ import print_function
from distutils.version import StrictVersion
import logging # pylint: disable=unused-import
import hpccm.config
from hpccm.common import container_type, linux_distro
def centos(function):
"""Decorator to set the Linux distribution to CentOS 7"""
def wrapper(*args, **kwargs):
hpccm.config.g_linux_distro = linux_distro.CENTOS
hpccm.config.g_linux_version = StrictVersion('7.0')
return function(*args, **kwargs)
return wrapper
def docker(function):
"""Decorator to set the global container type to docker"""
def wrapper(*args, **kwargs):
hpccm.config.g_ctype = container_type.DOCKER
return function(*args, **kwargs)
return wrapper
def invalid_ctype(function):
"""Decorator to set the global container type to an invalid value"""
def wrapper(*args, **kwargs):
hpccm.config.g_ctype = None
return function(*args, **kwargs)
return wrapper
def invalid_distro(function):
"""Decorator to set the global Linux distribution to an invalid value"""
def wrapper(*args, **kwargs):
hpccm.config.g_linux_distro = None
return function(*args, **kwargs)
return wrapper
def singularity(function):
"""Decorator to set the global container type to singularity"""
def wrapper(*args, **kwargs):
hpccm.config.g_ctype = container_type.SINGULARITY
return function(*args, **kwargs)
return wrapper
def ubuntu(function):
"""Decorator to set the Linux distribution to Ubuntu 16.04"""
def wrapper(*args, **kwargs):
hpccm.config.g_linux_distro = linux_distro.UBUNTU
hpccm.config.g_linux_version = StrictVersion('16.04')
return function(*args, **kwargs)
return wrapper
def ubuntu18(function):
"""Decorator to set the Linux distribution to Ubuntu 18.04"""
def wrapper(*args, **kwargs):
hpccm.config.g_linux_distro = linux_distro.UBUNTU
hpccm.config.g_linux_version = StrictVersion('18.04')
return function(*args, **kwargs)
return wrapper
| 32.321839
| 76
| 0.711238
|
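A minimal sketch of how these decorators stack on a test method, assuming the sketch sits next to the decorator definitions above; the assertions simply read back the config globals the decorators set.
import unittest

import hpccm.config
from hpccm.common import container_type, linux_distro

class ConfigTest(unittest.TestCase):
    @ubuntu  # sets g_linux_distro and g_linux_version
    @docker  # sets g_ctype
    def test_globals_are_set(self):
        self.assertEqual(hpccm.config.g_ctype, container_type.DOCKER)
        self.assertEqual(hpccm.config.g_linux_distro, linux_distro.UBUNTU)
        self.assertEqual(str(hpccm.config.g_linux_version), "16.04")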
794aec20bcb82f4b0a81082338dbfaee65785904
| 7,477
|
py
|
Python
|
infiltrate/models/deck.py
|
Qazzquimby/eternalCardEvaluator
|
ef8640ed819a89e5198f8aedf0861a29c57c5720
|
[
"MIT"
] | 4
|
2019-04-08T09:30:10.000Z
|
2020-09-15T19:25:30.000Z
|
infiltrate/models/deck.py
|
Qazzquimby/eternalCardEvaluator
|
ef8640ed819a89e5198f8aedf0861a29c57c5720
|
[
"MIT"
] | 19
|
2019-04-09T19:02:14.000Z
|
2020-12-25T05:22:45.000Z
|
infiltrate/models/deck.py
|
Qazzquimby/eternalCardEvaluator
|
ef8640ed819a89e5198f8aedf0861a29c57c5720
|
[
"MIT"
] | null | null | null |
"""The Deck model and related utilities"""
import enum
import logging
import typing as t
import urllib.error
from datetime import datetime
import tqdm
import infiltrate.browsers as browsers
import infiltrate.global_data as global_data
import infiltrate.models.card as card
# todo replace application with config injection
from infiltrate import application, db
class DeckHasCard(db.Model):
"""A table showing how many copies of a card a deck has."""
deck_id = db.Column(
"deck_id", db.String(length=100), db.ForeignKey("decks.id"), primary_key=True
)
set_num = db.Column("set_num", db.Integer, primary_key=True)
card_num = db.Column("card_num", db.Integer, primary_key=True)
num_played = db.Column("num_played", db.Integer, nullable=False)
__table_args__ = (
db.ForeignKeyConstraint(
[set_num, card_num], [card.Card.set_num, card.Card.card_num]
),
{},
)
def to_card_id(self) -> card.CardId:
return card.CardId(set_num=self.set_num, card_num=self.card_num)
class DeckType(enum.Enum):
"""Enum for deck types matching Warcry"""
unknown = 0
standard = 1
throne = 1
draft = 2
gauntlet = 3
forge = 4
campaign = 5
event = 6
_ = 7
expedition = 8
other = 9
class Archetype(enum.Enum):
"""Enum for deck archetypes matching Warcry"""
unknown = 0
aggro = 1
midrange = 2
combo = 3
control = 4
tempo = 5
aggro_control = 6
aggro_combo = 7
aggro_midrange = 8
control_combo = 9
control_midrange = 10
tempo_combo = 11
tempo_control = 12
combo_midrange = 13
class Deck(db.Model):
"""Model representing an Eternal Deck from Warcry"""
__tablename__ = "decks"
id = db.Column("id", db.String(length=100), primary_key=True)
archetype = db.Column("archetype", db.Enum(Archetype), nullable=True)
date_added = db.Column("date_added", db.DateTime)
date_updated = db.Column("date_updated", db.DateTime)
deck_type = db.Column("deck_type", db.Enum(DeckType))
description = db.Column("description", db.Text, nullable=True)
patch = db.Column("patch", db.String(length=10))
username = db.Column("username", db.String(length=30))
views = db.Column("views", db.Integer)
rating = db.Column("rating", db.Integer)
cards = db.relationship("DeckHasCard")
@classmethod
def get_from_id(cls, deck_id: str):
"""Gets the deck matching the deck id."""
return Deck.query.filter_by(id=deck_id).first()
# noinspection PyMissingOrEmptyDocstring
class _WarcryNewIdGetter:
ITEMS_PER_PAGE = 50
def get_new_ids(self, max_decks=None):
if max_decks is not None:
max_pages = max_decks / self.ITEMS_PER_PAGE
else:
max_pages = None
logging.info("Getting new deck ids")
new_ids = []
page = 0
while True:
ids_on_page = self.get_ids_from_page(page)
new_ids_on_page = self.remove_old_ids(ids_on_page)
new_ids += new_ids_on_page
            if not new_ids_on_page or (max_pages is not None and page >= max_pages):
# todo this may need testing.
break
page += 1
logging.info(f"Pages of deck ids ready: {page}")
return new_ids
def get_ids_from_page(self, page: int):
url = (
"https://api.eternalwarcry.com/v1/decks/SearchDecks"
+ f"?starting={self.ITEMS_PER_PAGE * page}"
+ f"&perpage={self.ITEMS_PER_PAGE}"
+ f"&key={application.config['WARCRY_KEY']}"
)
page_json = browsers.get_json_from_url(url)
ids = self.get_ids_from_page_json(page_json)
return ids
@staticmethod
def get_ids_from_page_json(page_json: t.Dict):
decks = page_json["decks"]
ids = [deck["deck_id"] for deck in decks]
return ids
@staticmethod
def remove_old_ids(ids: t.List[str]) -> t.List[str]:
new_ids = []
for deck_id in ids:
if not Deck.get_from_id(deck_id):
new_ids.append(deck_id)
else:
break
return new_ids
def get_new_warcry_ids(max_decks=1_000):
"""Return all Warcry deck IDs newer than any in the database."""
id_getter = _WarcryNewIdGetter()
ids = id_getter.get_new_ids(max_decks=max_decks)
return ids
def update_decks():
"""Updates the database with all new Warcry decks"""
# noinspection PyMissingOrEmptyDocstring
    class _WarcryDeckUpdater:
def run(self):
ids = get_new_warcry_ids(1_000)
for deck_id in tqdm.tqdm(ids, desc="Updating decks"):
self.update_deck(deck_id)
def update_deck(self, deck_id: str):
url = (
"https://api.eternalwarcry.com/v1/decks/details"
+ f"?key={application.config['WARCRY_KEY']}"
+ f"&deck_id={deck_id}"
)
try:
page_json = browsers.get_json_from_url(url)
except (ConnectionError, urllib.error.HTTPError):
return
self.make_deck_from_details_json(page_json)
def make_deck_from_details_json(self, page_json: t.Dict):
archetype = Archetype[page_json["archetype"].lower().replace(" ", "_")]
try:
deck_type = DeckType.__dict__[
page_json["deck_type"].lower().replace(" ", "_")
]
except KeyError: # not sure this is the right exception
deck_type = DeckType(int(page_json["deck_type"]))
deck = Deck(
id=page_json["deck_id"],
archetype=archetype,
date_added=datetime.strptime(
page_json["date_added_full"][:19], "%Y-%m-%dT%H:%M:%S"
),
date_updated=datetime.strptime(
page_json["date_updated_full"][:19], "%Y-%m-%dT%H:%M:%S"
),
deck_type=deck_type,
description=page_json["description"].encode("ascii", errors="ignore"),
patch=page_json["patch"],
username=page_json["username"],
views=page_json["views"],
rating=page_json["rating"],
)
self.add_cards_to_deck(deck, page_json)
db.session.merge(deck)
db.session.commit()
@staticmethod
def add_cards_to_deck(deck: Deck, page_json: t.Dict):
cards_json = (
page_json["deck_cards"]
+ page_json["sideboard_cards"]
+ page_json["market_cards"]
)
for card_json in cards_json:
set_num = card_json["set_number"]
card_num = card_json["eternal_id"]
card_id = card.CardId(set_num, card_num)
# todo better to pass all_cards to this than use the global
if global_data.all_cards.card_exists(card_id):
deck_has_card = DeckHasCard(
deck_id=page_json["deck_id"],
set_num=set_num,
card_num=card_num,
num_played=card_json["count"],
)
deck.cards.append(deck_has_card)
logging.info("Updating decks")
    updater = _WarcryDeckUpdater()
updater.run()
| 31.284519
| 86
| 0.589408
|
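_WarcryNewIdGetter stops paging as soon as it meets an ID that is already stored, which works because the API lists decks newest-first. A stripped-down sketch of that incremental-sync pattern, where fetch_page and known are stand-ins for the Warcry API and the database:
def new_ids(fetch_page, known, per_page=50, max_pages=None):
    """Collect IDs newest-first until one is already known or pages run out."""
    page, fresh = 0, []
    while max_pages is None or page < max_pages:
        batch = fetch_page(page)      # newest IDs first
        for item in batch:
            if item in known:         # everything after this is older: stop
                return fresh
            fresh.append(item)
        if len(batch) < per_page:     # short page means end of the listing
            break
        page += 1
    return fresh

# new_ids(lambda p: [["d9", "d8"], []][p], known={"d8"}, per_page=2)  ->  ["d9"]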
794aec21d21c068ce8a50d5b32a032b7b1b4ebf1
| 874
|
py
|
Python
|
libralli/circcuitpython/adafruit-circuitpython-bundle-7.x-mpy-20211225/examples/tmp117_limits_test.py
|
Yarik9008/SoftAcademic
|
118c9dc4620ca444c1557edd141a838820577202
|
[
"MIT"
] | 1
|
2021-04-24T05:25:43.000Z
|
2021-04-24T05:25:43.000Z
|
libralli/circcuitpython/adafruit-circuitpython-bundle-7.x-mpy-20211225/examples/tmp117_limits_test.py
|
Yarik9008/SoftAcademic
|
118c9dc4620ca444c1557edd141a838820577202
|
[
"MIT"
] | 4
|
2021-01-07T17:25:13.000Z
|
2021-12-14T20:23:00.000Z
|
libralli/circcuitpython/adafruit-circuitpython-bundle-7.x-mpy-20211225/examples/tmp117_limits_test.py
|
Yarik9008/SoftAcademic
|
118c9dc4620ca444c1557edd141a838820577202
|
[
"MIT"
] | 6
|
2021-01-07T07:18:13.000Z
|
2021-11-20T06:23:14.000Z
|
# SPDX-FileCopyrightText: 2020 Bryan Siepert, written for Adafruit Industries
#
# SPDX-License-Identifier: Unlicense
import time
import board
from adafruit_tmp117 import TMP117, AlertMode
i2c = board.I2C() # uses board.SCL and board.SDA
tmp117 = TMP117(i2c)
tmp117.high_limit = 25
tmp117.low_limit = 10
print("\nHigh limit", tmp117.high_limit)
print("Low limit", tmp117.low_limit)
# Try changing `alert_mode` to see how it modifies the behavior of the alerts.
# tmp117.alert_mode = AlertMode.WINDOW #default
# tmp117.alert_mode = AlertMode.HYSTERESIS
print("Alert mode:", AlertMode.string[tmp117.alert_mode])
print("\n\n")
while True:
print("Temperature: %.2f degrees C" % tmp117.temperature)
alert_status = tmp117.alert_status
print("High alert:", alert_status.high_alert)
print("Low alert:", alert_status.low_alert)
print("")
time.sleep(1)
| 28.193548
| 79
| 0.745995
|
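A hedged extension of the loop above: branch on the alert flags rather than only printing them. The responses are illustrative; a real deployment would toggle a fan or heater pin instead.
while True:
    status = tmp117.alert_status
    if status.high_alert:
        print("above the %.1f C high limit - cool down" % tmp117.high_limit)
    elif status.low_alert:
        print("below the %.1f C low limit - warm up" % tmp117.low_limit)
    else:
        print("temperature OK: %.2f C" % tmp117.temperature)
    time.sleep(1)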
794aec60e5f189a7da5a8f7df61d20197417bf21
| 108,580
|
py
|
Python
|
databricks/koalas/tests/test_groupby.py
|
AishwaryaKalloli/koalas
|
8d35a74508c1319996c8c27e2a5e24af52b9ee31
|
[
"Apache-2.0"
] | null | null | null |
databricks/koalas/tests/test_groupby.py
|
AishwaryaKalloli/koalas
|
8d35a74508c1319996c8c27e2a5e24af52b9ee31
|
[
"Apache-2.0"
] | null | null | null |
databricks/koalas/tests/test_groupby.py
|
AishwaryaKalloli/koalas
|
8d35a74508c1319996c8c27e2a5e24af52b9ee31
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import inspect
from distutils.version import LooseVersion
from itertools import product
import numpy as np
import pandas as pd
from databricks import koalas as ks
from databricks.koalas.config import option_context
from databricks.koalas.exceptions import PandasNotImplementedError, DataError
from databricks.koalas.missing.groupby import (
MissingPandasLikeDataFrameGroupBy,
MissingPandasLikeSeriesGroupBy,
)
from databricks.koalas.testing.utils import ReusedSQLTestCase, TestUtils
from databricks.koalas.groupby import is_multi_agg_with_relabel
class GroupByTest(ReusedSQLTestCase, TestUtils):
def test_groupby_simple(self):
pdf = pd.DataFrame(
{
"a": [1, 2, 6, 4, 4, 6, 4, 3, 7],
"b": [4, 2, 7, 3, 3, 1, 1, 1, 2],
"c": [4, 2, 7, 3, None, 1, 1, 1, 2],
"d": list("abcdefght"),
},
index=[0, 1, 3, 5, 6, 8, 9, 9, 9],
)
kdf = ks.from_pandas(pdf)
for as_index in [True, False]:
if as_index:
sort = lambda df: df.sort_index()
else:
sort = lambda df: df.sort_values("a").reset_index(drop=True)
self.assert_eq(
sort(kdf.groupby("a", as_index=as_index).sum()),
sort(pdf.groupby("a", as_index=as_index).sum()),
)
self.assert_eq(
sort(kdf.groupby("a", as_index=as_index).b.sum()),
sort(pdf.groupby("a", as_index=as_index).b.sum()),
)
self.assert_eq(
sort(kdf.groupby("a", as_index=as_index)["b"].sum()),
sort(pdf.groupby("a", as_index=as_index)["b"].sum()),
)
self.assert_eq(
sort(kdf.groupby("a", as_index=as_index)[["b", "c"]].sum()),
sort(pdf.groupby("a", as_index=as_index)[["b", "c"]].sum()),
)
self.assert_eq(
sort(kdf.groupby("a", as_index=as_index)[[]].sum()),
sort(pdf.groupby("a", as_index=as_index)[[]].sum()),
)
self.assert_eq(
sort(kdf.groupby("a", as_index=as_index)["c"].sum()),
sort(pdf.groupby("a", as_index=as_index)["c"].sum()),
)
self.assert_eq(kdf.groupby("a").a.sum().sort_index(), pdf.groupby("a").a.sum().sort_index())
self.assert_eq(
kdf.groupby("a")["a"].sum().sort_index(), pdf.groupby("a")["a"].sum().sort_index()
)
self.assert_eq(
kdf.groupby("a")[["a"]].sum().sort_index(), pdf.groupby("a")[["a"]].sum().sort_index()
)
self.assert_eq(
kdf.groupby("a")[["a", "c"]].sum().sort_index(),
pdf.groupby("a")[["a", "c"]].sum().sort_index(),
)
self.assert_eq(
kdf.a.groupby(kdf.b).sum().sort_index(), pdf.a.groupby(pdf.b).sum().sort_index()
)
for axis in [0, "index"]:
self.assert_eq(
kdf.groupby("a", axis=axis).a.sum().sort_index(),
pdf.groupby("a", axis=axis).a.sum().sort_index(),
)
self.assert_eq(
kdf.groupby("a", axis=axis)["a"].sum().sort_index(),
pdf.groupby("a", axis=axis)["a"].sum().sort_index(),
)
self.assert_eq(
kdf.groupby("a", axis=axis)[["a"]].sum().sort_index(),
pdf.groupby("a", axis=axis)[["a"]].sum().sort_index(),
)
self.assert_eq(
kdf.groupby("a", axis=axis)[["a", "c"]].sum().sort_index(),
pdf.groupby("a", axis=axis)[["a", "c"]].sum().sort_index(),
)
self.assert_eq(
kdf.a.groupby(kdf.b, axis=axis).sum().sort_index(),
pdf.a.groupby(pdf.b, axis=axis).sum().sort_index(),
)
self.assertRaises(ValueError, lambda: kdf.groupby("a", as_index=False).a)
self.assertRaises(ValueError, lambda: kdf.groupby("a", as_index=False)["a"])
self.assertRaises(ValueError, lambda: kdf.groupby("a", as_index=False)[["a"]])
self.assertRaises(ValueError, lambda: kdf.groupby("a", as_index=False)[["a", "c"]])
self.assertRaises(KeyError, lambda: kdf.groupby("z", as_index=False)[["a", "c"]])
self.assertRaises(KeyError, lambda: kdf.groupby(["z"], as_index=False)[["a", "c"]])
self.assertRaises(TypeError, lambda: kdf.a.groupby(kdf.b, as_index=False))
self.assertRaises(NotImplementedError, lambda: kdf.groupby("a", axis=1))
self.assertRaises(NotImplementedError, lambda: kdf.groupby("a", axis="columns"))
self.assertRaises(ValueError, lambda: kdf.groupby("a", "b"))
self.assertRaises(TypeError, lambda: kdf.a.groupby(kdf.a, kdf.b))
# we can't use column name/names as a parameter `by` for `SeriesGroupBy`.
self.assertRaises(KeyError, lambda: kdf.a.groupby(by="a"))
self.assertRaises(KeyError, lambda: kdf.a.groupby(by=["a", "b"]))
self.assertRaises(KeyError, lambda: kdf.a.groupby(by=("a", "b")))
# we can't use DataFrame as a parameter `by` for `DataFrameGroupBy`/`SeriesGroupBy`.
self.assertRaises(ValueError, lambda: kdf.groupby(kdf))
self.assertRaises(ValueError, lambda: kdf.a.groupby(kdf))
self.assertRaises(ValueError, lambda: kdf.a.groupby((kdf,)))
# non-string names
pdf = pd.DataFrame(
{
10: [1, 2, 6, 4, 4, 6, 4, 3, 7],
20: [4, 2, 7, 3, 3, 1, 1, 1, 2],
30: [4, 2, 7, 3, None, 1, 1, 1, 2],
40: list("abcdefght"),
},
index=[0, 1, 3, 5, 6, 8, 9, 9, 9],
)
kdf = ks.from_pandas(pdf)
for as_index in [True, False]:
if as_index:
sort = lambda df: df.sort_index()
else:
sort = lambda df: df.sort_values(10).reset_index(drop=True)
self.assert_eq(
sort(kdf.groupby(10, as_index=as_index).sum()),
sort(pdf.groupby(10, as_index=as_index).sum()),
)
self.assert_eq(
sort(kdf.groupby(10, as_index=as_index)[20].sum()),
sort(pdf.groupby(10, as_index=as_index)[20].sum()),
)
self.assert_eq(
sort(kdf.groupby(10, as_index=as_index)[[20, 30]].sum()),
sort(pdf.groupby(10, as_index=as_index)[[20, 30]].sum()),
)
def test_groupby_multiindex_columns(self):
pdf = pd.DataFrame(
{
(10, "a"): [1, 2, 6, 4, 4, 6, 4, 3, 7],
(10, "b"): [4, 2, 7, 3, 3, 1, 1, 1, 2],
(20, "c"): [4, 2, 7, 3, None, 1, 1, 1, 2],
(30, "d"): list("abcdefght"),
},
index=[0, 1, 3, 5, 6, 8, 9, 9, 9],
)
kdf = ks.from_pandas(pdf)
self.assert_eq(
kdf.groupby((10, "a")).sum().sort_index(), pdf.groupby((10, "a")).sum().sort_index()
)
self.assert_eq(
kdf.groupby((10, "a"), as_index=False)
.sum()
.sort_values((10, "a"))
.reset_index(drop=True),
pdf.groupby((10, "a"), as_index=False)
.sum()
.sort_values((10, "a"))
.reset_index(drop=True),
)
self.assert_eq(
kdf.groupby((10, "a"))[[(20, "c")]].sum().sort_index(),
pdf.groupby((10, "a"))[[(20, "c")]].sum().sort_index(),
)
# TODO: a pandas bug?
# expected = pdf.groupby((10, "a"))[(20, "c")].sum().sort_index()
expected = pd.Series(
[4.0, 2.0, 1.0, 4.0, 8.0, 2.0],
name=(20, "c"),
index=pd.Index([1, 2, 3, 4, 6, 7], name=(10, "a")),
)
self.assert_eq(kdf.groupby((10, "a"))[(20, "c")].sum().sort_index(), expected)
if LooseVersion(pd.__version__) < LooseVersion("1.1.3"):
self.assert_eq(
kdf[(20, "c")].groupby(kdf[(10, "a")]).sum().sort_index(),
pdf[(20, "c")].groupby(pdf[(10, "a")]).sum().sort_index(),
)
else:
# seems like a pandas bug introduced in pandas 1.1.3.
self.assert_eq(kdf[(20, "c")].groupby(kdf[(10, "a")]).sum().sort_index(), expected)
def test_split_apply_combine_on_series(self):
pdf = pd.DataFrame(
{
"a": [1, 2, 6, 4, 4, 6, 4, 3, 7],
"b": [4, 2, 7, 3, 3, 1, 1, 1, 2],
"c": [4, 2, 7, 3, None, 1, 1, 1, 2],
"d": list("abcdefght"),
},
index=[0, 1, 3, 5, 6, 8, 9, 9, 9],
)
kdf = ks.from_pandas(pdf)
funcs = [
((True, False), ["sum", "min", "max", "count", "first", "last"]),
((True, True), ["mean"]),
((False, False), ["var", "std"]),
]
funcs = [(check_exact, almost, f) for (check_exact, almost), fs in funcs for f in fs]
for as_index in [True, False]:
if as_index:
sort = lambda df: df.sort_index()
else:
sort = lambda df: df.sort_values(list(df.columns)).reset_index(drop=True)
for check_exact, almost, func in funcs:
for kkey, pkey in [("b", "b"), (kdf.b, pdf.b)]:
with self.subTest(as_index=as_index, func=func, key=pkey):
if as_index is True or func != "std":
self.assert_eq(
sort(getattr(kdf.groupby(kkey, as_index=as_index).a, func)()),
sort(getattr(pdf.groupby(pkey, as_index=as_index).a, func)()),
check_exact=check_exact,
almost=almost,
)
self.assert_eq(
sort(getattr(kdf.groupby(kkey, as_index=as_index), func)()),
sort(getattr(pdf.groupby(pkey, as_index=as_index), func)()),
check_exact=check_exact,
almost=almost,
)
else:
                            # seems like a pandas bug for as_index=False and func == "std"?
self.assert_eq(
sort(getattr(kdf.groupby(kkey, as_index=as_index).a, func)()),
sort(pdf.groupby(pkey, as_index=True).a.std().reset_index()),
check_exact=check_exact,
almost=almost,
)
self.assert_eq(
sort(getattr(kdf.groupby(kkey, as_index=as_index), func)()),
sort(pdf.groupby(pkey, as_index=True).std().reset_index()),
check_exact=check_exact,
almost=almost,
)
for kkey, pkey in [(kdf.b + 1, pdf.b + 1), (kdf.copy().b, pdf.copy().b)]:
with self.subTest(as_index=as_index, func=func, key=pkey):
self.assert_eq(
sort(getattr(kdf.groupby(kkey, as_index=as_index).a, func)()),
sort(getattr(pdf.groupby(pkey, as_index=as_index).a, func)()),
check_exact=check_exact,
almost=almost,
)
self.assert_eq(
sort(getattr(kdf.groupby(kkey, as_index=as_index), func)()),
sort(getattr(pdf.groupby(pkey, as_index=as_index), func)()),
check_exact=check_exact,
almost=almost,
)
for check_exact, almost, func in funcs:
for i in [0, 4, 7]:
with self.subTest(as_index=as_index, func=func, i=i):
self.assert_eq(
sort(getattr(kdf.groupby(kdf.b > i, as_index=as_index).a, func)()),
sort(getattr(pdf.groupby(pdf.b > i, as_index=as_index).a, func)()),
check_exact=check_exact,
almost=almost,
)
self.assert_eq(
sort(getattr(kdf.groupby(kdf.b > i, as_index=as_index), func)()),
sort(getattr(pdf.groupby(pdf.b > i, as_index=as_index), func)()),
check_exact=check_exact,
almost=almost,
)
for check_exact, almost, func in funcs:
for kkey, pkey in [
(kdf.b, pdf.b),
(kdf.b + 1, pdf.b + 1),
(kdf.copy().b, pdf.copy().b),
(kdf.b.rename(), pdf.b.rename()),
]:
with self.subTest(func=func, key=pkey):
self.assert_eq(
getattr(kdf.a.groupby(kkey), func)().sort_index(),
getattr(pdf.a.groupby(pkey), func)().sort_index(),
check_exact=check_exact,
almost=almost,
)
self.assert_eq(
getattr((kdf.a + 1).groupby(kkey), func)().sort_index(),
getattr((pdf.a + 1).groupby(pkey), func)().sort_index(),
check_exact=check_exact,
almost=almost,
)
self.assert_eq(
getattr((kdf.b + 1).groupby(kkey), func)().sort_index(),
getattr((pdf.b + 1).groupby(pkey), func)().sort_index(),
check_exact=check_exact,
almost=almost,
)
self.assert_eq(
getattr(kdf.a.rename().groupby(kkey), func)().sort_index(),
getattr(pdf.a.rename().groupby(pkey), func)().sort_index(),
check_exact=check_exact,
almost=almost,
)
def test_aggregate(self):
pdf = pd.DataFrame(
{"A": [1, 1, 2, 2], "B": [1, 2, 3, 4], "C": [0.362, 0.227, 1.267, -0.562]}
)
kdf = ks.from_pandas(pdf)
for as_index in [True, False]:
if as_index:
sort = lambda df: df.sort_index()
else:
sort = lambda df: df.sort_values(list(df.columns)).reset_index(drop=True)
for kkey, pkey in [("A", "A"), (kdf.A, pdf.A)]:
with self.subTest(as_index=as_index, key=pkey):
self.assert_eq(
sort(kdf.groupby(kkey, as_index=as_index).agg("sum")),
sort(pdf.groupby(pkey, as_index=as_index).agg("sum")),
)
self.assert_eq(
sort(kdf.groupby(kkey, as_index=as_index).agg({"B": "min", "C": "sum"})),
sort(pdf.groupby(pkey, as_index=as_index).agg({"B": "min", "C": "sum"})),
)
self.assert_eq(
sort(
kdf.groupby(kkey, as_index=as_index).agg(
{"B": ["min", "max"], "C": "sum"}
)
),
sort(
pdf.groupby(pkey, as_index=as_index).agg(
{"B": ["min", "max"], "C": "sum"}
)
),
)
if as_index:
self.assert_eq(
sort(kdf.groupby(kkey, as_index=as_index).agg(["sum"])),
sort(pdf.groupby(pkey, as_index=as_index).agg(["sum"])),
)
else:
                        # seems like a pandas bug for as_index=False and func_or_funcs is a list?
self.assert_eq(
sort(kdf.groupby(kkey, as_index=as_index).agg(["sum"])),
sort(pdf.groupby(pkey, as_index=True).agg(["sum"]).reset_index()),
)
for kkey, pkey in [(kdf.A + 1, pdf.A + 1), (kdf.copy().A, pdf.copy().A)]:
with self.subTest(as_index=as_index, key=pkey):
self.assert_eq(
sort(kdf.groupby(kkey, as_index=as_index).agg("sum")),
sort(pdf.groupby(pkey, as_index=as_index).agg("sum")),
)
self.assert_eq(
sort(kdf.groupby(kkey, as_index=as_index).agg({"B": "min", "C": "sum"})),
sort(pdf.groupby(pkey, as_index=as_index).agg({"B": "min", "C": "sum"})),
)
self.assert_eq(
sort(
kdf.groupby(kkey, as_index=as_index).agg(
{"B": ["min", "max"], "C": "sum"}
)
),
sort(
pdf.groupby(pkey, as_index=as_index).agg(
{"B": ["min", "max"], "C": "sum"}
)
),
)
self.assert_eq(
sort(kdf.groupby(kkey, as_index=as_index).agg(["sum"])),
sort(pdf.groupby(pkey, as_index=as_index).agg(["sum"])),
)
expected_error_message = (
r"aggs must be a dict mapping from column name to aggregate functions "
r"\(string or list of strings\)."
)
with self.assertRaisesRegex(ValueError, expected_error_message):
kdf.groupby("A", as_index=as_index).agg(0)
# multi-index columns
columns = pd.MultiIndex.from_tuples([(10, "A"), (10, "B"), (20, "C")])
pdf.columns = columns
kdf.columns = columns
for as_index in [True, False]:
stats_kdf = kdf.groupby((10, "A"), as_index=as_index).agg(
{(10, "B"): "min", (20, "C"): "sum"}
)
stats_pdf = pdf.groupby((10, "A"), as_index=as_index).agg(
{(10, "B"): "min", (20, "C"): "sum"}
)
self.assert_eq(
stats_kdf.sort_values(by=[(10, "B"), (20, "C")]).reset_index(drop=True),
stats_pdf.sort_values(by=[(10, "B"), (20, "C")]).reset_index(drop=True),
)
stats_kdf = kdf.groupby((10, "A")).agg({(10, "B"): ["min", "max"], (20, "C"): "sum"})
stats_pdf = pdf.groupby((10, "A")).agg({(10, "B"): ["min", "max"], (20, "C"): "sum"})
self.assert_eq(
stats_kdf.sort_values(
by=[(10, "B", "min"), (10, "B", "max"), (20, "C", "sum")]
).reset_index(drop=True),
stats_pdf.sort_values(
by=[(10, "B", "min"), (10, "B", "max"), (20, "C", "sum")]
).reset_index(drop=True),
)
# non-string names
pdf.columns = [10, 20, 30]
kdf.columns = [10, 20, 30]
for as_index in [True, False]:
stats_kdf = kdf.groupby(10, as_index=as_index).agg({20: "min", 30: "sum"})
stats_pdf = pdf.groupby(10, as_index=as_index).agg({20: "min", 30: "sum"})
self.assert_eq(
stats_kdf.sort_values(by=[20, 30]).reset_index(drop=True),
stats_pdf.sort_values(by=[20, 30]).reset_index(drop=True),
)
stats_kdf = kdf.groupby(10).agg({20: ["min", "max"], 30: "sum"})
stats_pdf = pdf.groupby(10).agg({20: ["min", "max"], 30: "sum"})
self.assert_eq(
stats_kdf.sort_values(by=[(20, "min"), (20, "max"), (30, "sum")]).reset_index(
drop=True
),
stats_pdf.sort_values(by=[(20, "min"), (20, "max"), (30, "sum")]).reset_index(
drop=True
),
)
def test_aggregate_func_str_list(self):
        # this tests cases where only a string or a list is assigned
pdf = pd.DataFrame(
{
"kind": ["cat", "dog", "cat", "dog"],
"height": [9.1, 6.0, 9.5, 34.0],
"weight": [7.9, 7.5, 9.9, 198.0],
}
)
kdf = ks.from_pandas(pdf)
agg_funcs = ["max", "min", ["min", "max"]]
for aggfunc in agg_funcs:
            # Since the order of rows in a Koalas groupby might differ from pandas,
            # sort on index to ensure both have the same output
sorted_agg_kdf = kdf.groupby("kind").agg(aggfunc).sort_index()
sorted_agg_pdf = pdf.groupby("kind").agg(aggfunc).sort_index()
self.assert_eq(sorted_agg_kdf, sorted_agg_pdf)
# test on multi index column case
pdf = pd.DataFrame(
{"A": [1, 1, 2, 2], "B": [1, 2, 3, 4], "C": [0.362, 0.227, 1.267, -0.562]}
)
kdf = ks.from_pandas(pdf)
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C")])
pdf.columns = columns
kdf.columns = columns
for aggfunc in agg_funcs:
sorted_agg_kdf = kdf.groupby(("X", "A")).agg(aggfunc).sort_index()
sorted_agg_pdf = pdf.groupby(("X", "A")).agg(aggfunc).sort_index()
self.assert_eq(sorted_agg_kdf, sorted_agg_pdf)
@unittest.skipIf(pd.__version__ < "0.25.0", "not supported before pandas 0.25.0")
def test_aggregate_relabel(self):
# this is to test named aggregation in groupby
pdf = pd.DataFrame({"group": ["a", "a", "b", "b"], "A": [0, 1, 2, 3], "B": [5, 6, 7, 8]})
kdf = ks.from_pandas(pdf)
# different agg column, same function
agg_pdf = pdf.groupby("group").agg(a_max=("A", "max"), b_max=("B", "max")).sort_index()
agg_kdf = kdf.groupby("group").agg(a_max=("A", "max"), b_max=("B", "max")).sort_index()
self.assert_eq(agg_pdf, agg_kdf)
# same agg column, different functions
agg_pdf = pdf.groupby("group").agg(b_max=("B", "max"), b_min=("B", "min")).sort_index()
agg_kdf = kdf.groupby("group").agg(b_max=("B", "max"), b_min=("B", "min")).sort_index()
self.assert_eq(agg_pdf, agg_kdf)
# test on NamedAgg
agg_pdf = (
pdf.groupby("group").agg(b_max=pd.NamedAgg(column="B", aggfunc="max")).sort_index()
)
agg_kdf = (
kdf.groupby("group").agg(b_max=ks.NamedAgg(column="B", aggfunc="max")).sort_index()
)
self.assert_eq(agg_kdf, agg_pdf)
# test on NamedAgg multi columns aggregation
agg_pdf = (
pdf.groupby("group")
.agg(
b_max=pd.NamedAgg(column="B", aggfunc="max"),
b_min=pd.NamedAgg(column="B", aggfunc="min"),
)
.sort_index()
)
agg_kdf = (
kdf.groupby("group")
.agg(
b_max=ks.NamedAgg(column="B", aggfunc="max"),
b_min=ks.NamedAgg(column="B", aggfunc="min"),
)
.sort_index()
)
self.assert_eq(agg_kdf, agg_pdf)
def test_dropna(self):
pdf = pd.DataFrame(
{"A": [None, 1, None, 1, 2], "B": [1, 2, 3, None, None], "C": [4, 5, 6, 7, None]}
)
kdf = ks.from_pandas(pdf)
# pd.DataFrame.groupby with dropna parameter is implemented since pandas 1.1.0
if LooseVersion(pd.__version__) >= LooseVersion("1.1.0"):
for dropna in [True, False]:
for as_index in [True, False]:
if as_index:
sort = lambda df: df.sort_index()
else:
sort = lambda df: df.sort_values("A").reset_index(drop=True)
self.assert_eq(
sort(kdf.groupby("A", as_index=as_index, dropna=dropna).std()),
sort(pdf.groupby("A", as_index=as_index, dropna=dropna).std()),
)
self.assert_eq(
sort(kdf.groupby("A", as_index=as_index, dropna=dropna).B.std()),
sort(pdf.groupby("A", as_index=as_index, dropna=dropna).B.std()),
)
self.assert_eq(
sort(kdf.groupby("A", as_index=as_index, dropna=dropna)["B"].std()),
sort(pdf.groupby("A", as_index=as_index, dropna=dropna)["B"].std()),
)
self.assert_eq(
sort(
kdf.groupby("A", as_index=as_index, dropna=dropna).agg(
{"B": "min", "C": "std"}
)
),
sort(
pdf.groupby("A", as_index=as_index, dropna=dropna).agg(
{"B": "min", "C": "std"}
)
),
)
for dropna in [True, False]:
for as_index in [True, False]:
if as_index:
sort = lambda df: df.sort_index()
else:
sort = lambda df: df.sort_values(["A", "B"]).reset_index(drop=True)
self.assert_eq(
sort(
kdf.groupby(["A", "B"], as_index=as_index, dropna=dropna).agg(
{"C": ["min", "std"]}
)
),
sort(
pdf.groupby(["A", "B"], as_index=as_index, dropna=dropna).agg(
{"C": ["min", "std"]}
)
),
almost=True,
)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C")])
pdf.columns = columns
kdf.columns = columns
for dropna in [True, False]:
for as_index in [True, False]:
if as_index:
sort = lambda df: df.sort_index()
else:
sort = lambda df: df.sort_values(("X", "A")).reset_index(drop=True)
sorted_stats_kdf = sort(
kdf.groupby(("X", "A"), as_index=as_index, dropna=dropna).agg(
{("X", "B"): "min", ("Y", "C"): "std"}
)
)
sorted_stats_pdf = sort(
pdf.groupby(("X", "A"), as_index=as_index, dropna=dropna).agg(
{("X", "B"): "min", ("Y", "C"): "std"}
)
)
self.assert_eq(sorted_stats_kdf, sorted_stats_pdf)
else:
# Testing dropna=True (pandas default behavior)
for as_index in [True, False]:
if as_index:
sort = lambda df: df.sort_index()
else:
sort = lambda df: df.sort_values("A").reset_index(drop=True)
self.assert_eq(
sort(kdf.groupby("A", as_index=as_index, dropna=True)["B"].min()),
sort(pdf.groupby("A", as_index=as_index)["B"].min()),
)
if as_index:
sort = lambda df: df.sort_index()
else:
sort = lambda df: df.sort_values(["A", "B"]).reset_index(drop=True)
self.assert_eq(
sort(
kdf.groupby(["A", "B"], as_index=as_index, dropna=True).agg(
{"C": ["min", "std"]}
)
),
sort(pdf.groupby(["A", "B"], as_index=as_index).agg({"C": ["min", "std"]})),
almost=True,
)
# Testing dropna=False
index = pd.Index([1.0, 2.0, np.nan], name="A")
expected = pd.Series([2.0, np.nan, 1.0], index=index, name="B")
result = kdf.groupby("A", as_index=True, dropna=False)["B"].min().sort_index()
self.assert_eq(expected, result)
expected = pd.DataFrame({"A": [1.0, 2.0, np.nan], "B": [2.0, np.nan, 1.0]})
result = (
kdf.groupby("A", as_index=False, dropna=False)["B"]
.min()
.sort_values("A")
.reset_index(drop=True)
)
self.assert_eq(expected, result)
index = pd.MultiIndex.from_tuples(
[(1.0, 2.0), (1.0, None), (2.0, None), (None, 1.0), (None, 3.0)], names=["A", "B"]
)
expected = pd.DataFrame(
{
("C", "min"): [5.0, 7.0, np.nan, 4.0, 6.0],
("C", "std"): [np.nan, np.nan, np.nan, np.nan, np.nan],
},
index=index,
)
result = (
kdf.groupby(["A", "B"], as_index=True, dropna=False)
.agg({"C": ["min", "std"]})
.sort_index()
)
self.assert_eq(expected, result)
expected = pd.DataFrame(
{
("A", ""): [1.0, 1.0, 2.0, np.nan, np.nan],
("B", ""): [2.0, np.nan, np.nan, 1.0, 3.0],
("C", "min"): [5.0, 7.0, np.nan, 4.0, 6.0],
("C", "std"): [np.nan, np.nan, np.nan, np.nan, np.nan],
}
)
result = (
kdf.groupby(["A", "B"], as_index=False, dropna=False)
.agg({"C": ["min", "std"]})
.sort_values(["A", "B"])
.reset_index(drop=True)
)
self.assert_eq(expected, result)
def test_describe(self):
        # supported for numeric types; string types are not supported yet
datas = []
datas.append({"a": [1, 1, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
datas.append({"a": [-1, -1, -3], "b": [-4, -5, -6], "c": [-7, -8, -9]})
datas.append({"a": [0, 0, 0], "b": [0, 0, 0], "c": [0, 8, 0]})
        # it is okay for a string-type column to be the group key
datas.append({"a": ["a", "a", "c"], "b": [4, 5, 6], "c": [7, 8, 9]})
percentiles = [0.25, 0.5, 0.75]
formatted_percentiles = ["25%", "50%", "75%"]
non_percentile_stats = ["count", "mean", "std", "min", "max"]
for data in datas:
pdf = pd.DataFrame(data)
kdf = ks.from_pandas(pdf)
describe_pdf = pdf.groupby("a").describe().sort_index()
describe_kdf = kdf.groupby("a").describe().sort_index()
            # since the results of the percentile columns differ slightly from pandas,
            # we should check them separately: non-percentile columns & percentile columns
# 1. Check that non-percentile columns are equal.
agg_cols = [col.name for col in kdf.groupby("a")._agg_columns]
self.assert_eq(
describe_kdf.drop(list(product(agg_cols, formatted_percentiles))),
describe_pdf.drop(columns=formatted_percentiles, level=1),
check_exact=False,
)
# 2. Check that percentile columns are equal.
# The interpolation argument is yet to be implemented in Koalas.
quantile_pdf = pdf.groupby("a").quantile(percentiles, interpolation="nearest")
quantile_pdf = quantile_pdf.unstack(level=1).astype(float)
self.assert_eq(
describe_kdf.drop(list(product(agg_cols, non_percentile_stats))),
quantile_pdf.rename(columns="{:.0%}".format, level=1),
)
        # string types are not supported yet
datas = []
datas.append({"a": ["a", "a", "c"], "b": ["d", "e", "f"], "c": ["g", "h", "i"]})
datas.append({"a": ["a", "a", "c"], "b": [4, 0, 1], "c": ["g", "h", "i"]})
for data in datas:
pdf = pd.DataFrame(data)
kdf = ks.from_pandas(pdf)
self.assertRaises(NotImplementedError, lambda: kdf.groupby("a").describe().sort_index())
# multi-index columns
pdf = pd.DataFrame({("x", "a"): [1, 1, 3], ("x", "b"): [4, 5, 6], ("y", "c"): [7, 8, 9]})
kdf = ks.from_pandas(pdf)
describe_pdf = pdf.groupby(("x", "a")).describe().sort_index()
describe_kdf = kdf.groupby(("x", "a")).describe().sort_index()
# 1. Check that non-percentile columns are equal.
agg_column_labels = [col._column_label for col in kdf.groupby(("x", "a"))._agg_columns]
self.assert_eq(
describe_kdf.drop(
[
tuple(list(label) + [s])
for label, s in product(agg_column_labels, formatted_percentiles)
]
),
describe_pdf.drop(columns=formatted_percentiles, level=2),
check_exact=False,
)
# 2. Check that percentile columns are equal.
# The interpolation argument is yet to be implemented in Koalas.
quantile_pdf = pdf.groupby(("x", "a")).quantile(percentiles, interpolation="nearest")
quantile_pdf = quantile_pdf.unstack(level=1).astype(float)
self.assert_eq(
describe_kdf.drop(
[
tuple(list(label) + [s])
for label, s in product(agg_column_labels, non_percentile_stats)
]
),
quantile_pdf.rename(columns="{:.0%}".format, level=2),
)
def test_aggregate_relabel_multiindex(self):
pdf = pd.DataFrame({"A": [0, 1, 2, 3], "B": [5, 6, 7, 8], "group": ["a", "a", "b", "b"]})
pdf.columns = pd.MultiIndex.from_tuples([("y", "A"), ("y", "B"), ("x", "group")])
kdf = ks.from_pandas(pdf)
if LooseVersion(pd.__version__) < LooseVersion("1.0.0"):
agg_pdf = pd.DataFrame(
{"a_max": [1, 3]}, index=pd.Index(["a", "b"], name=("x", "group"))
)
elif LooseVersion(pd.__version__) >= LooseVersion("1.0.0"):
agg_pdf = pdf.groupby(("x", "group")).agg(a_max=(("y", "A"), "max")).sort_index()
agg_kdf = kdf.groupby(("x", "group")).agg(a_max=(("y", "A"), "max")).sort_index()
self.assert_eq(agg_pdf, agg_kdf)
# same column, different methods
if LooseVersion(pd.__version__) < LooseVersion("1.0.0"):
agg_pdf = pd.DataFrame(
{"a_max": [1, 3], "a_min": [0, 2]}, index=pd.Index(["a", "b"], name=("x", "group"))
)
elif LooseVersion(pd.__version__) >= LooseVersion("1.0.0"):
agg_pdf = (
pdf.groupby(("x", "group"))
.agg(a_max=(("y", "A"), "max"), a_min=(("y", "A"), "min"))
.sort_index()
)
agg_kdf = (
kdf.groupby(("x", "group"))
.agg(a_max=(("y", "A"), "max"), a_min=(("y", "A"), "min"))
.sort_index()
)
self.assert_eq(agg_pdf, agg_kdf)
# different column, different methods
if LooseVersion(pd.__version__) < LooseVersion("1.0.0"):
agg_pdf = pd.DataFrame(
{"a_max": [6, 8], "a_min": [0, 2]}, index=pd.Index(["a", "b"], name=("x", "group"))
)
elif LooseVersion(pd.__version__) >= LooseVersion("1.0.0"):
agg_pdf = (
pdf.groupby(("x", "group"))
.agg(a_max=(("y", "B"), "max"), a_min=(("y", "A"), "min"))
.sort_index()
)
agg_kdf = (
kdf.groupby(("x", "group"))
.agg(a_max=(("y", "B"), "max"), a_min=(("y", "A"), "min"))
.sort_index()
)
self.assert_eq(agg_pdf, agg_kdf)
def test_all_any(self):
pdf = pd.DataFrame(
{
"A": [1, 1, 2, 2, 3, 3, 4, 4, 5, 5],
"B": [True, True, True, False, False, False, None, True, None, False],
}
)
kdf = ks.from_pandas(pdf)
for as_index in [True, False]:
if as_index:
sort = lambda df: df.sort_index()
else:
sort = lambda df: df.sort_values("A").reset_index(drop=True)
self.assert_eq(
sort(kdf.groupby("A", as_index=as_index).all()),
sort(pdf.groupby("A", as_index=as_index).all()),
)
self.assert_eq(
sort(kdf.groupby("A", as_index=as_index).any()),
sort(pdf.groupby("A", as_index=as_index).any()),
)
self.assert_eq(
sort(kdf.groupby("A", as_index=as_index).all()).B,
sort(pdf.groupby("A", as_index=as_index).all()).B,
)
self.assert_eq(
sort(kdf.groupby("A", as_index=as_index).any()).B,
sort(pdf.groupby("A", as_index=as_index).any()).B,
)
self.assert_eq(
kdf.B.groupby(kdf.A).all().sort_index(), pdf.B.groupby(pdf.A).all().sort_index()
)
self.assert_eq(
kdf.B.groupby(kdf.A).any().sort_index(), pdf.B.groupby(pdf.A).any().sort_index()
)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("Y", "B")])
pdf.columns = columns
kdf.columns = columns
for as_index in [True, False]:
if as_index:
sort = lambda df: df.sort_index()
else:
sort = lambda df: df.sort_values(("X", "A")).reset_index(drop=True)
self.assert_eq(
sort(kdf.groupby(("X", "A"), as_index=as_index).all()),
sort(pdf.groupby(("X", "A"), as_index=as_index).all()),
)
self.assert_eq(
sort(kdf.groupby(("X", "A"), as_index=as_index).any()),
sort(pdf.groupby(("X", "A"), as_index=as_index).any()),
)
def test_raises(self):
kdf = ks.DataFrame(
{"a": [1, 2, 6, 4, 4, 6, 4, 3, 7], "b": [4, 2, 7, 3, 3, 1, 1, 1, 2]},
index=[0, 1, 3, 5, 6, 8, 9, 9, 9],
)
# test raises with incorrect key
self.assertRaises(ValueError, lambda: kdf.groupby([]))
self.assertRaises(KeyError, lambda: kdf.groupby("x"))
self.assertRaises(KeyError, lambda: kdf.groupby(["a", "x"]))
self.assertRaises(KeyError, lambda: kdf.groupby("a")["x"])
self.assertRaises(KeyError, lambda: kdf.groupby("a")["b", "x"])
self.assertRaises(KeyError, lambda: kdf.groupby("a")[["b", "x"]])
def test_nunique(self):
pdf = pd.DataFrame(
{"a": [1, 1, 1, 1, 1, 0, 0, 0, 0, 0], "b": [2, 2, 2, 3, 3, 4, 4, 5, 5, 5]}
)
kdf = ks.from_pandas(pdf)
self.assert_eq(
kdf.groupby("a").agg({"b": "nunique"}).sort_index(),
pdf.groupby("a").agg({"b": "nunique"}).sort_index(),
)
if LooseVersion(pd.__version__) < LooseVersion("1.1.0"):
expected = ks.DataFrame({"b": [2, 2]}, index=pd.Index([0, 1], name="a"))
self.assert_eq(kdf.groupby("a").nunique().sort_index(), expected)
self.assert_eq(
kdf.groupby("a").nunique(dropna=False).sort_index(), expected,
)
else:
self.assert_eq(
kdf.groupby("a").nunique().sort_index(), pdf.groupby("a").nunique().sort_index()
)
self.assert_eq(
kdf.groupby("a").nunique(dropna=False).sort_index(),
pdf.groupby("a").nunique(dropna=False).sort_index(),
)
self.assert_eq(
kdf.groupby("a")["b"].nunique().sort_index(),
pdf.groupby("a")["b"].nunique().sort_index(),
)
self.assert_eq(
kdf.groupby("a")["b"].nunique(dropna=False).sort_index(),
pdf.groupby("a")["b"].nunique(dropna=False).sort_index(),
)
nunique_kdf = kdf.groupby("a", as_index=False).agg({"b": "nunique"})
nunique_pdf = pdf.groupby("a", as_index=False).agg({"b": "nunique"})
self.assert_eq(
nunique_kdf.sort_values(["a", "b"]).reset_index(drop=True),
nunique_pdf.sort_values(["a", "b"]).reset_index(drop=True),
)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("y", "b")])
pdf.columns = columns
kdf.columns = columns
if LooseVersion(pd.__version__) < LooseVersion("1.1.0"):
expected = ks.DataFrame({("y", "b"): [2, 2]}, index=pd.Index([0, 1], name=("x", "a")))
self.assert_eq(
kdf.groupby(("x", "a")).nunique().sort_index(), expected,
)
self.assert_eq(
kdf.groupby(("x", "a")).nunique(dropna=False).sort_index(), expected,
)
else:
self.assert_eq(
kdf.groupby(("x", "a")).nunique().sort_index(),
pdf.groupby(("x", "a")).nunique().sort_index(),
)
self.assert_eq(
kdf.groupby(("x", "a")).nunique(dropna=False).sort_index(),
pdf.groupby(("x", "a")).nunique(dropna=False).sort_index(),
)
def test_unique(self):
for pdf in [
pd.DataFrame(
{"a": [1, 1, 1, 1, 1, 0, 0, 0, 0, 0], "b": [2, 2, 2, 3, 3, 4, 4, 5, 5, 5]}
),
pd.DataFrame(
{
"a": [1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
"b": ["w", "w", "w", "x", "x", "y", "y", "z", "z", "z"],
}
),
]:
with self.subTest(pdf=pdf):
kdf = ks.from_pandas(pdf)
actual = kdf.groupby("a")["b"].unique().sort_index().to_pandas()
expect = pdf.groupby("a")["b"].unique().sort_index()
self.assert_eq(len(actual), len(expect))
for act, exp in zip(actual, expect):
self.assertTrue(sorted(act) == sorted(exp))
def test_value_counts(self):
pdf = pd.DataFrame({"A": [1, 2, 2, 3, 3, 3], "B": [1, 1, 2, 3, 3, 3]}, columns=["A", "B"])
kdf = ks.from_pandas(pdf)
self.assert_eq(
kdf.groupby("A")["B"].value_counts().sort_index(),
pdf.groupby("A")["B"].value_counts().sort_index(),
)
self.assert_eq(
kdf.groupby("A")["B"].value_counts(sort=True, ascending=False).sort_index(),
pdf.groupby("A")["B"].value_counts(sort=True, ascending=False).sort_index(),
)
self.assert_eq(
kdf.groupby("A")["B"].value_counts(sort=True, ascending=True).sort_index(),
pdf.groupby("A")["B"].value_counts(sort=True, ascending=True).sort_index(),
)
self.assert_eq(
kdf.B.rename().groupby(kdf.A).value_counts().sort_index(),
pdf.B.rename().groupby(pdf.A).value_counts().sort_index(),
)
self.assert_eq(
kdf.B.groupby(kdf.A.rename()).value_counts().sort_index(),
pdf.B.groupby(pdf.A.rename()).value_counts().sort_index(),
)
self.assert_eq(
kdf.B.rename().groupby(kdf.A.rename()).value_counts().sort_index(),
pdf.B.rename().groupby(pdf.A.rename()).value_counts().sort_index(),
)
def test_size(self):
pdf = pd.DataFrame({"A": [1, 2, 2, 3, 3, 3], "B": [1, 1, 2, 3, 3, 3]})
kdf = ks.from_pandas(pdf)
self.assert_eq(kdf.groupby("A").size().sort_index(), pdf.groupby("A").size().sort_index())
self.assert_eq(
kdf.groupby("A")["B"].size().sort_index(), pdf.groupby("A")["B"].size().sort_index()
)
self.assert_eq(
kdf.groupby("A")[["B"]].size().sort_index(), pdf.groupby("A")[["B"]].size().sort_index()
)
self.assert_eq(
kdf.groupby(["A", "B"]).size().sort_index(), pdf.groupby(["A", "B"]).size().sort_index()
)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("Y", "B")])
pdf.columns = columns
kdf.columns = columns
self.assert_eq(
kdf.groupby(("X", "A")).size().sort_index(), pdf.groupby(("X", "A")).size().sort_index()
)
self.assert_eq(
kdf.groupby([("X", "A"), ("Y", "B")]).size().sort_index(),
pdf.groupby([("X", "A"), ("Y", "B")]).size().sort_index(),
)
def test_diff(self):
pdf = pd.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6] * 3,
"b": [1, 1, 2, 3, 5, 8] * 3,
"c": [1, 4, 9, 16, 25, 36] * 3,
}
)
kdf = ks.from_pandas(pdf)
self.assert_eq(kdf.groupby("b").diff().sort_index(), pdf.groupby("b").diff().sort_index())
self.assert_eq(
kdf.groupby(["a", "b"]).diff().sort_index(), pdf.groupby(["a", "b"]).diff().sort_index()
)
self.assert_eq(
kdf.groupby(["b"])["a"].diff().sort_index(), pdf.groupby(["b"])["a"].diff().sort_index()
)
self.assert_eq(
kdf.groupby(["b"])[["a", "b"]].diff().sort_index(),
pdf.groupby(["b"])[["a", "b"]].diff().sort_index(),
)
self.assert_eq(
kdf.groupby(kdf.b // 5).diff().sort_index(), pdf.groupby(pdf.b // 5).diff().sort_index()
)
self.assert_eq(
kdf.groupby(kdf.b // 5)["a"].diff().sort_index(),
pdf.groupby(pdf.b // 5)["a"].diff().sort_index(),
)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
kdf.columns = columns
self.assert_eq(
kdf.groupby(("x", "b")).diff().sort_index(), pdf.groupby(("x", "b")).diff().sort_index()
)
self.assert_eq(
kdf.groupby([("x", "a"), ("x", "b")]).diff().sort_index(),
pdf.groupby([("x", "a"), ("x", "b")]).diff().sort_index(),
)
def test_rank(self):
pdf = pd.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6] * 3,
"b": [1, 1, 2, 3, 5, 8] * 3,
"c": [1, 4, 9, 16, 25, 36] * 3,
},
index=np.random.rand(6 * 3),
)
kdf = ks.from_pandas(pdf)
self.assert_eq(kdf.groupby("b").rank().sort_index(), pdf.groupby("b").rank().sort_index())
self.assert_eq(
kdf.groupby(["a", "b"]).rank().sort_index(), pdf.groupby(["a", "b"]).rank().sort_index()
)
self.assert_eq(
kdf.groupby(["b"])["a"].rank().sort_index(), pdf.groupby(["b"])["a"].rank().sort_index()
)
self.assert_eq(
kdf.groupby(["b"])[["a", "c"]].rank().sort_index(),
pdf.groupby(["b"])[["a", "c"]].rank().sort_index(),
)
self.assert_eq(
kdf.groupby(kdf.b // 5).rank().sort_index(), pdf.groupby(pdf.b // 5).rank().sort_index()
)
self.assert_eq(
kdf.groupby(kdf.b // 5)["a"].rank().sort_index(),
pdf.groupby(pdf.b // 5)["a"].rank().sort_index(),
)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
kdf.columns = columns
self.assert_eq(
kdf.groupby(("x", "b")).rank().sort_index(), pdf.groupby(("x", "b")).rank().sort_index()
)
self.assert_eq(
kdf.groupby([("x", "a"), ("x", "b")]).rank().sort_index(),
pdf.groupby([("x", "a"), ("x", "b")]).rank().sort_index(),
)
def test_cumcount(self):
pdf = pd.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6] * 3,
"b": [1, 1, 2, 3, 5, 8] * 3,
"c": [1, 4, 9, 16, 25, 36] * 3,
},
index=np.random.rand(6 * 3),
)
kdf = ks.from_pandas(pdf)
for ascending in [True, False]:
self.assert_eq(
kdf.groupby("b").cumcount(ascending=ascending).sort_index(),
pdf.groupby("b").cumcount(ascending=ascending).sort_index(),
)
self.assert_eq(
kdf.groupby(["a", "b"]).cumcount(ascending=ascending).sort_index(),
pdf.groupby(["a", "b"]).cumcount(ascending=ascending).sort_index(),
)
self.assert_eq(
kdf.groupby(["b"])["a"].cumcount(ascending=ascending).sort_index(),
pdf.groupby(["b"])["a"].cumcount(ascending=ascending).sort_index(),
)
self.assert_eq(
kdf.groupby(["b"])[["a", "c"]].cumcount(ascending=ascending).sort_index(),
pdf.groupby(["b"])[["a", "c"]].cumcount(ascending=ascending).sort_index(),
)
self.assert_eq(
kdf.groupby(kdf.b // 5).cumcount(ascending=ascending).sort_index(),
pdf.groupby(pdf.b // 5).cumcount(ascending=ascending).sort_index(),
)
self.assert_eq(
kdf.groupby(kdf.b // 5)["a"].cumcount(ascending=ascending).sort_index(),
pdf.groupby(pdf.b // 5)["a"].cumcount(ascending=ascending).sort_index(),
)
self.assert_eq(
kdf.groupby("b").cumcount(ascending=ascending).sum(),
pdf.groupby("b").cumcount(ascending=ascending).sum(),
)
self.assert_eq(
kdf.a.rename().groupby(kdf.b).cumcount(ascending=ascending).sort_index(),
pdf.a.rename().groupby(pdf.b).cumcount(ascending=ascending).sort_index(),
)
self.assert_eq(
kdf.a.groupby(kdf.b.rename()).cumcount(ascending=ascending).sort_index(),
pdf.a.groupby(pdf.b.rename()).cumcount(ascending=ascending).sort_index(),
)
self.assert_eq(
kdf.a.rename().groupby(kdf.b.rename()).cumcount(ascending=ascending).sort_index(),
pdf.a.rename().groupby(pdf.b.rename()).cumcount(ascending=ascending).sort_index(),
)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
kdf.columns = columns
for ascending in [True, False]:
self.assert_eq(
kdf.groupby(("x", "b")).cumcount(ascending=ascending).sort_index(),
pdf.groupby(("x", "b")).cumcount(ascending=ascending).sort_index(),
)
self.assert_eq(
kdf.groupby([("x", "a"), ("x", "b")]).cumcount(ascending=ascending).sort_index(),
pdf.groupby([("x", "a"), ("x", "b")]).cumcount(ascending=ascending).sort_index(),
)
def test_cummin(self):
pdf = pd.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6] * 3,
"b": [1, 1, 2, 3, 5, 8] * 3,
"c": [1, 4, 9, 16, 25, 36] * 3,
},
index=np.random.rand(6 * 3),
)
kdf = ks.from_pandas(pdf)
self.assert_eq(
kdf.groupby("b").cummin().sort_index(), pdf.groupby("b").cummin().sort_index()
)
self.assert_eq(
kdf.groupby(["a", "b"]).cummin().sort_index(),
pdf.groupby(["a", "b"]).cummin().sort_index(),
)
self.assert_eq(
kdf.groupby(["b"])["a"].cummin().sort_index(),
pdf.groupby(["b"])["a"].cummin().sort_index(),
)
self.assert_eq(
kdf.groupby(["b"])[["a", "c"]].cummin().sort_index(),
pdf.groupby(["b"])[["a", "c"]].cummin().sort_index(),
)
self.assert_eq(
kdf.groupby(kdf.b // 5).cummin().sort_index(),
pdf.groupby(pdf.b // 5).cummin().sort_index(),
)
self.assert_eq(
kdf.groupby(kdf.b // 5)["a"].cummin().sort_index(),
pdf.groupby(pdf.b // 5)["a"].cummin().sort_index(),
)
self.assert_eq(
kdf.groupby("b").cummin().sum().sort_index(),
pdf.groupby("b").cummin().sum().sort_index(),
)
self.assert_eq(
kdf.a.rename().groupby(kdf.b).cummin().sort_index(),
pdf.a.rename().groupby(pdf.b).cummin().sort_index(),
)
self.assert_eq(
kdf.a.groupby(kdf.b.rename()).cummin().sort_index(),
pdf.a.groupby(pdf.b.rename()).cummin().sort_index(),
)
self.assert_eq(
kdf.a.rename().groupby(kdf.b.rename()).cummin().sort_index(),
pdf.a.rename().groupby(pdf.b.rename()).cummin().sort_index(),
)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
kdf.columns = columns
self.assert_eq(
kdf.groupby(("x", "b")).cummin().sort_index(),
pdf.groupby(("x", "b")).cummin().sort_index(),
)
self.assert_eq(
kdf.groupby([("x", "a"), ("x", "b")]).cummin().sort_index(),
pdf.groupby([("x", "a"), ("x", "b")]).cummin().sort_index(),
)
kdf = ks.DataFrame([["a"], ["b"], ["c"]], columns=["A"])
self.assertRaises(DataError, lambda: kdf.groupby(["A"]).cummin())
kdf = ks.DataFrame([[1, "a"], [2, "b"], [3, "c"]], columns=["A", "B"])
self.assertRaises(DataError, lambda: kdf.groupby(["A"])["B"].cummin())
def test_cummax(self):
pdf = pd.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6] * 3,
"b": [1, 1, 2, 3, 5, 8] * 3,
"c": [1, 4, 9, 16, 25, 36] * 3,
},
index=np.random.rand(6 * 3),
)
kdf = ks.from_pandas(pdf)
self.assert_eq(
kdf.groupby("b").cummax().sort_index(), pdf.groupby("b").cummax().sort_index()
)
self.assert_eq(
kdf.groupby(["a", "b"]).cummax().sort_index(),
pdf.groupby(["a", "b"]).cummax().sort_index(),
)
self.assert_eq(
kdf.groupby(["b"])["a"].cummax().sort_index(),
pdf.groupby(["b"])["a"].cummax().sort_index(),
)
self.assert_eq(
kdf.groupby(["b"])[["a", "c"]].cummax().sort_index(),
pdf.groupby(["b"])[["a", "c"]].cummax().sort_index(),
)
self.assert_eq(
kdf.groupby(kdf.b // 5).cummax().sort_index(),
pdf.groupby(pdf.b // 5).cummax().sort_index(),
)
self.assert_eq(
kdf.groupby(kdf.b // 5)["a"].cummax().sort_index(),
pdf.groupby(pdf.b // 5)["a"].cummax().sort_index(),
)
self.assert_eq(
kdf.groupby("b").cummax().sum().sort_index(),
pdf.groupby("b").cummax().sum().sort_index(),
)
self.assert_eq(
kdf.a.rename().groupby(kdf.b).cummax().sort_index(),
pdf.a.rename().groupby(pdf.b).cummax().sort_index(),
)
self.assert_eq(
kdf.a.groupby(kdf.b.rename()).cummax().sort_index(),
pdf.a.groupby(pdf.b.rename()).cummax().sort_index(),
)
self.assert_eq(
kdf.a.rename().groupby(kdf.b.rename()).cummax().sort_index(),
pdf.a.rename().groupby(pdf.b.rename()).cummax().sort_index(),
)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
kdf.columns = columns
self.assert_eq(
kdf.groupby(("x", "b")).cummax().sort_index(),
pdf.groupby(("x", "b")).cummax().sort_index(),
)
self.assert_eq(
kdf.groupby([("x", "a"), ("x", "b")]).cummax().sort_index(),
pdf.groupby([("x", "a"), ("x", "b")]).cummax().sort_index(),
)
kdf = ks.DataFrame([["a"], ["b"], ["c"]], columns=["A"])
self.assertRaises(DataError, lambda: kdf.groupby(["A"]).cummax())
kdf = ks.DataFrame([[1, "a"], [2, "b"], [3, "c"]], columns=["A", "B"])
self.assertRaises(DataError, lambda: kdf.groupby(["A"])["B"].cummax())
def test_cumsum(self):
pdf = pd.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6] * 3,
"b": [1, 1, 2, 3, 5, 8] * 3,
"c": [1, 4, 9, 16, 25, 36] * 3,
},
index=np.random.rand(6 * 3),
)
kdf = ks.from_pandas(pdf)
self.assert_eq(
kdf.groupby("b").cumsum().sort_index(), pdf.groupby("b").cumsum().sort_index()
)
self.assert_eq(
kdf.groupby(["a", "b"]).cumsum().sort_index(),
pdf.groupby(["a", "b"]).cumsum().sort_index(),
)
self.assert_eq(
kdf.groupby(["b"])["a"].cumsum().sort_index(),
pdf.groupby(["b"])["a"].cumsum().sort_index(),
)
self.assert_eq(
kdf.groupby(["b"])[["a", "c"]].cumsum().sort_index(),
pdf.groupby(["b"])[["a", "c"]].cumsum().sort_index(),
)
self.assert_eq(
kdf.groupby(kdf.b // 5).cumsum().sort_index(),
pdf.groupby(pdf.b // 5).cumsum().sort_index(),
)
self.assert_eq(
kdf.groupby(kdf.b // 5)["a"].cumsum().sort_index(),
pdf.groupby(pdf.b // 5)["a"].cumsum().sort_index(),
)
self.assert_eq(
kdf.groupby("b").cumsum().sum().sort_index(),
pdf.groupby("b").cumsum().sum().sort_index(),
)
self.assert_eq(
kdf.a.rename().groupby(kdf.b).cumsum().sort_index(),
pdf.a.rename().groupby(pdf.b).cumsum().sort_index(),
)
self.assert_eq(
kdf.a.groupby(kdf.b.rename()).cumsum().sort_index(),
pdf.a.groupby(pdf.b.rename()).cumsum().sort_index(),
)
self.assert_eq(
kdf.a.rename().groupby(kdf.b.rename()).cumsum().sort_index(),
pdf.a.rename().groupby(pdf.b.rename()).cumsum().sort_index(),
)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
kdf.columns = columns
self.assert_eq(
kdf.groupby(("x", "b")).cumsum().sort_index(),
pdf.groupby(("x", "b")).cumsum().sort_index(),
)
self.assert_eq(
kdf.groupby([("x", "a"), ("x", "b")]).cumsum().sort_index(),
pdf.groupby([("x", "a"), ("x", "b")]).cumsum().sort_index(),
)
kdf = ks.DataFrame([["a"], ["b"], ["c"]], columns=["A"])
self.assertRaises(DataError, lambda: kdf.groupby(["A"]).cumsum())
kdf = ks.DataFrame([[1, "a"], [2, "b"], [3, "c"]], columns=["A", "B"])
self.assertRaises(DataError, lambda: kdf.groupby(["A"])["B"].cumsum())
def test_cumprod(self):
pdf = pd.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6] * 3,
"b": [1, 1, 2, 3, 5, 8] * 3,
"c": [1, 4, 9, 16, 25, 36] * 3,
},
index=np.random.rand(6 * 3),
)
kdf = ks.from_pandas(pdf)
self.assert_eq(
kdf.groupby("b").cumprod().sort_index(),
pdf.groupby("b").cumprod().sort_index(),
almost=True,
)
self.assert_eq(
kdf.groupby(["a", "b"]).cumprod().sort_index(),
pdf.groupby(["a", "b"]).cumprod().sort_index(),
almost=True,
)
self.assert_eq(
kdf.groupby(["b"])["a"].cumprod().sort_index(),
pdf.groupby(["b"])["a"].cumprod().sort_index(),
almost=True,
)
self.assert_eq(
kdf.groupby(["b"])[["a", "c"]].cumprod().sort_index(),
pdf.groupby(["b"])[["a", "c"]].cumprod().sort_index(),
almost=True,
)
self.assert_eq(
kdf.groupby(kdf.b // 3).cumprod().sort_index(),
pdf.groupby(pdf.b // 3).cumprod().sort_index(),
almost=True,
)
self.assert_eq(
kdf.groupby(kdf.b // 3)["a"].cumprod().sort_index(),
pdf.groupby(pdf.b // 3)["a"].cumprod().sort_index(),
almost=True,
)
self.assert_eq(
kdf.groupby("b").cumprod().sum().sort_index(),
pdf.groupby("b").cumprod().sum().sort_index(),
almost=True,
)
self.assert_eq(
kdf.a.rename().groupby(kdf.b).cumprod().sort_index(),
pdf.a.rename().groupby(pdf.b).cumprod().sort_index(),
almost=True,
)
self.assert_eq(
kdf.a.groupby(kdf.b.rename()).cumprod().sort_index(),
pdf.a.groupby(pdf.b.rename()).cumprod().sort_index(),
almost=True,
)
self.assert_eq(
kdf.a.rename().groupby(kdf.b.rename()).cumprod().sort_index(),
pdf.a.rename().groupby(pdf.b.rename()).cumprod().sort_index(),
almost=True,
)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
kdf.columns = columns
self.assert_eq(
kdf.groupby(("x", "b")).cumprod().sort_index(),
pdf.groupby(("x", "b")).cumprod().sort_index(),
almost=True,
)
self.assert_eq(
kdf.groupby([("x", "a"), ("x", "b")]).cumprod().sort_index(),
pdf.groupby([("x", "a"), ("x", "b")]).cumprod().sort_index(),
almost=True,
)
kdf = ks.DataFrame([["a"], ["b"], ["c"]], columns=["A"])
self.assertRaises(DataError, lambda: kdf.groupby(["A"]).cumprod())
kdf = ks.DataFrame([[1, "a"], [2, "b"], [3, "c"]], columns=["A", "B"])
self.assertRaises(DataError, lambda: kdf.groupby(["A"])["B"].cumprod())
def test_nsmallest(self):
pdf = pd.DataFrame(
{
"a": [1, 1, 1, 2, 2, 2, 3, 3, 3] * 3,
"b": [1, 2, 2, 2, 3, 3, 3, 4, 4] * 3,
"c": [1, 2, 2, 2, 3, 3, 3, 4, 4] * 3,
"d": [1, 2, 2, 2, 3, 3, 3, 4, 4] * 3,
},
index=np.random.rand(9 * 3),
)
kdf = ks.from_pandas(pdf)
self.assert_eq(
kdf.groupby(["a"])["b"].nsmallest(1).sort_values(),
pdf.groupby(["a"])["b"].nsmallest(1).sort_values(),
)
self.assert_eq(
kdf.groupby(["a"])["b"].nsmallest(2).sort_index(),
pdf.groupby(["a"])["b"].nsmallest(2).sort_index(),
)
self.assert_eq(
(kdf.b * 10).groupby(kdf.a).nsmallest(2).sort_index(),
(pdf.b * 10).groupby(pdf.a).nsmallest(2).sort_index(),
)
self.assert_eq(
kdf.b.rename().groupby(kdf.a).nsmallest(2).sort_index(),
pdf.b.rename().groupby(pdf.a).nsmallest(2).sort_index(),
)
self.assert_eq(
kdf.b.groupby(kdf.a.rename()).nsmallest(2).sort_index(),
pdf.b.groupby(pdf.a.rename()).nsmallest(2).sort_index(),
)
self.assert_eq(
kdf.b.rename().groupby(kdf.a.rename()).nsmallest(2).sort_index(),
pdf.b.rename().groupby(pdf.a.rename()).nsmallest(2).sort_index(),
)
with self.assertRaisesRegex(ValueError, "nsmallest do not support multi-index now"):
kdf.set_index(["a", "b"]).groupby(["c"])["d"].nsmallest(1)
def test_nlargest(self):
pdf = pd.DataFrame(
{
"a": [1, 1, 1, 2, 2, 2, 3, 3, 3] * 3,
"b": [1, 2, 2, 2, 3, 3, 3, 4, 4] * 3,
"c": [1, 2, 2, 2, 3, 3, 3, 4, 4] * 3,
"d": [1, 2, 2, 2, 3, 3, 3, 4, 4] * 3,
},
index=np.random.rand(9 * 3),
)
kdf = ks.from_pandas(pdf)
self.assert_eq(
kdf.groupby(["a"])["b"].nlargest(1).sort_values(),
pdf.groupby(["a"])["b"].nlargest(1).sort_values(),
)
self.assert_eq(
kdf.groupby(["a"])["b"].nlargest(2).sort_index(),
pdf.groupby(["a"])["b"].nlargest(2).sort_index(),
)
self.assert_eq(
(kdf.b * 10).groupby(kdf.a).nlargest(2).sort_index(),
(pdf.b * 10).groupby(pdf.a).nlargest(2).sort_index(),
)
self.assert_eq(
kdf.b.rename().groupby(kdf.a).nlargest(2).sort_index(),
pdf.b.rename().groupby(pdf.a).nlargest(2).sort_index(),
)
self.assert_eq(
kdf.b.groupby(kdf.a.rename()).nlargest(2).sort_index(),
pdf.b.groupby(pdf.a.rename()).nlargest(2).sort_index(),
)
self.assert_eq(
kdf.b.rename().groupby(kdf.a.rename()).nlargest(2).sort_index(),
pdf.b.rename().groupby(pdf.a.rename()).nlargest(2).sort_index(),
)
with self.assertRaisesRegex(ValueError, "nlargest do not support multi-index now"):
kdf.set_index(["a", "b"]).groupby(["c"])["d"].nlargest(1)
def test_fillna(self):
pdf = pd.DataFrame(
{
"A": [1, 1, 2, 2] * 3,
"B": [2, 4, None, 3] * 3,
"C": [None, None, None, 1] * 3,
"D": [0, 1, 5, 4] * 3,
}
)
kdf = ks.from_pandas(pdf)
self.assert_eq(
kdf.groupby("A").fillna(0).sort_index(), pdf.groupby("A").fillna(0).sort_index()
)
self.assert_eq(
kdf.groupby("A")["C"].fillna(0).sort_index(),
pdf.groupby("A")["C"].fillna(0).sort_index(),
)
self.assert_eq(
kdf.groupby("A")[["C"]].fillna(0).sort_index(),
pdf.groupby("A")[["C"]].fillna(0).sort_index(),
)
self.assert_eq(
kdf.groupby("A").fillna(method="bfill").sort_index(),
pdf.groupby("A").fillna(method="bfill").sort_index(),
)
self.assert_eq(
kdf.groupby("A")["C"].fillna(method="bfill").sort_index(),
pdf.groupby("A")["C"].fillna(method="bfill").sort_index(),
)
self.assert_eq(
kdf.groupby("A")[["C"]].fillna(method="bfill").sort_index(),
pdf.groupby("A")[["C"]].fillna(method="bfill").sort_index(),
)
self.assert_eq(
kdf.groupby("A").fillna(method="ffill").sort_index(),
pdf.groupby("A").fillna(method="ffill").sort_index(),
)
self.assert_eq(
kdf.groupby("A")["C"].fillna(method="ffill").sort_index(),
pdf.groupby("A")["C"].fillna(method="ffill").sort_index(),
)
self.assert_eq(
kdf.groupby("A")[["C"]].fillna(method="ffill").sort_index(),
pdf.groupby("A")[["C"]].fillna(method="ffill").sort_index(),
)
self.assert_eq(
kdf.groupby(kdf.A // 5).fillna(method="bfill").sort_index(),
pdf.groupby(pdf.A // 5).fillna(method="bfill").sort_index(),
)
self.assert_eq(
kdf.groupby(kdf.A // 5)["C"].fillna(method="bfill").sort_index(),
pdf.groupby(pdf.A // 5)["C"].fillna(method="bfill").sort_index(),
)
self.assert_eq(
kdf.groupby(kdf.A // 5)[["C"]].fillna(method="bfill").sort_index(),
pdf.groupby(pdf.A // 5)[["C"]].fillna(method="bfill").sort_index(),
)
self.assert_eq(
kdf.groupby(kdf.A // 5).fillna(method="ffill").sort_index(),
pdf.groupby(pdf.A // 5).fillna(method="ffill").sort_index(),
)
self.assert_eq(
kdf.groupby(kdf.A // 5)["C"].fillna(method="ffill").sort_index(),
pdf.groupby(pdf.A // 5)["C"].fillna(method="ffill").sort_index(),
)
self.assert_eq(
kdf.groupby(kdf.A // 5)[["C"]].fillna(method="ffill").sort_index(),
pdf.groupby(pdf.A // 5)[["C"]].fillna(method="ffill").sort_index(),
)
self.assert_eq(
kdf.C.rename().groupby(kdf.A).fillna(0).sort_index(),
pdf.C.rename().groupby(pdf.A).fillna(0).sort_index(),
)
self.assert_eq(
kdf.C.groupby(kdf.A.rename()).fillna(0).sort_index(),
pdf.C.groupby(pdf.A.rename()).fillna(0).sort_index(),
)
self.assert_eq(
kdf.C.rename().groupby(kdf.A.rename()).fillna(0).sort_index(),
pdf.C.rename().groupby(pdf.A.rename()).fillna(0).sort_index(),
)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C"), ("Z", "D")])
pdf.columns = columns
kdf.columns = columns
self.assert_eq(
kdf.groupby(("X", "A")).fillna(0).sort_index(),
pdf.groupby(("X", "A")).fillna(0).sort_index(),
)
self.assert_eq(
kdf.groupby(("X", "A")).fillna(method="bfill").sort_index(),
pdf.groupby(("X", "A")).fillna(method="bfill").sort_index(),
)
self.assert_eq(
kdf.groupby(("X", "A")).fillna(method="ffill").sort_index(),
pdf.groupby(("X", "A")).fillna(method="ffill").sort_index(),
)
def test_ffill(self):
idx = np.random.rand(4 * 3)
pdf = pd.DataFrame(
{
"A": [1, 1, 2, 2] * 3,
"B": [2, 4, None, 3] * 3,
"C": [None, None, None, 1] * 3,
"D": [0, 1, 5, 4] * 3,
},
index=idx,
)
kdf = ks.from_pandas(pdf)
if LooseVersion(pd.__version__) <= LooseVersion("0.24.2"):
self.assert_eq(
kdf.groupby("A").ffill().sort_index(),
pdf.groupby("A").ffill().sort_index().drop("A", 1),
)
self.assert_eq(
kdf.groupby("A")[["B"]].ffill().sort_index(),
pdf.groupby("A")[["B"]].ffill().sort_index().drop("A", 1),
)
else:
self.assert_eq(
kdf.groupby("A").ffill().sort_index(), pdf.groupby("A").ffill().sort_index()
)
self.assert_eq(
kdf.groupby("A")[["B"]].ffill().sort_index(),
pdf.groupby("A")[["B"]].ffill().sort_index(),
)
self.assert_eq(
kdf.groupby("A")["B"].ffill().sort_index(), pdf.groupby("A")["B"].ffill().sort_index()
)
self.assert_eq(kdf.groupby("A")["B"].ffill()[idx[6]], pdf.groupby("A")["B"].ffill()[idx[6]])
# multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C"), ("Z", "D")])
pdf.columns = columns
kdf.columns = columns
if LooseVersion(pd.__version__) <= LooseVersion("0.24.2"):
self.assert_eq(
kdf.groupby(("X", "A")).ffill().sort_index(),
pdf.groupby(("X", "A")).ffill().sort_index().drop(("X", "A"), 1),
)
else:
self.assert_eq(
kdf.groupby(("X", "A")).ffill().sort_index(),
pdf.groupby(("X", "A")).ffill().sort_index(),
)
def test_bfill(self):
idx = np.random.rand(4 * 3)
pdf = pd.DataFrame(
{
"A": [1, 1, 2, 2] * 3,
"B": [2, 4, None, 3] * 3,
"C": [None, None, None, 1] * 3,
"D": [0, 1, 5, 4] * 3,
},
index=idx,
)
kdf = ks.from_pandas(pdf)
if LooseVersion(pd.__version__) <= LooseVersion("0.24.2"):
self.assert_eq(
kdf.groupby("A").bfill().sort_index(),
pdf.groupby("A").bfill().sort_index().drop("A", 1),
)
self.assert_eq(
kdf.groupby("A")[["B"]].bfill().sort_index(),
pdf.groupby("A")[["B"]].bfill().sort_index().drop("A", 1),
)
else:
self.assert_eq(
kdf.groupby("A").bfill().sort_index(), pdf.groupby("A").bfill().sort_index()
)
self.assert_eq(
kdf.groupby("A")[["B"]].bfill().sort_index(),
pdf.groupby("A")[["B"]].bfill().sort_index(),
)
self.assert_eq(
kdf.groupby("A")["B"].bfill().sort_index(), pdf.groupby("A")["B"].bfill().sort_index(),
)
self.assert_eq(kdf.groupby("A")["B"].bfill()[idx[6]], pdf.groupby("A")["B"].bfill()[idx[6]])
# multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C"), ("Z", "D")])
pdf.columns = columns
kdf.columns = columns
if LooseVersion(pd.__version__) <= LooseVersion("0.24.2"):
self.assert_eq(
kdf.groupby(("X", "A")).bfill().sort_index(),
pdf.groupby(("X", "A")).bfill().sort_index().drop(("X", "A"), 1),
)
else:
self.assert_eq(
kdf.groupby(("X", "A")).bfill().sort_index(),
pdf.groupby(("X", "A")).bfill().sort_index(),
)
    @unittest.skipIf(
        LooseVersion(pd.__version__) < LooseVersion("0.24.0"),
        "not supported before pandas 0.24.0",
    )
def test_shift(self):
pdf = pd.DataFrame(
{
"a": [1, 1, 2, 2, 3, 3] * 3,
"b": [1, 1, 2, 2, 3, 4] * 3,
"c": [1, 4, 9, 16, 25, 36] * 3,
},
index=np.random.rand(6 * 3),
)
kdf = ks.from_pandas(pdf)
self.assert_eq(kdf.groupby("a").shift().sort_index(), pdf.groupby("a").shift().sort_index())
        # TODO: this seems to be a pandas bug when fill_value is not None?
# self.assert_eq(kdf.groupby(['a', 'b']).shift(periods=-1, fill_value=0).sort_index(),
# pdf.groupby(['a', 'b']).shift(periods=-1, fill_value=0).sort_index())
self.assert_eq(
kdf.groupby(["b"])["a"].shift().sort_index(),
pdf.groupby(["b"])["a"].shift().sort_index(),
)
self.assert_eq(
kdf.groupby(["a", "b"])["c"].shift().sort_index(),
pdf.groupby(["a", "b"])["c"].shift().sort_index(),
)
self.assert_eq(
kdf.groupby(kdf.b // 5).shift().sort_index(),
pdf.groupby(pdf.b // 5).shift().sort_index(),
)
self.assert_eq(
kdf.groupby(kdf.b // 5)["a"].shift().sort_index(),
pdf.groupby(pdf.b // 5)["a"].shift().sort_index(),
)
# TODO: known pandas' bug when fill_value is not None pandas>=1.0.0
# https://github.com/pandas-dev/pandas/issues/31971#issue-565171762
if LooseVersion(pd.__version__) < LooseVersion("1.0.0"):
self.assert_eq(
kdf.groupby(["b"])[["a", "c"]].shift(periods=-1, fill_value=0).sort_index(),
pdf.groupby(["b"])[["a", "c"]].shift(periods=-1, fill_value=0).sort_index(),
)
self.assert_eq(
kdf.a.rename().groupby(kdf.b).shift().sort_index(),
pdf.a.rename().groupby(pdf.b).shift().sort_index(),
)
self.assert_eq(
kdf.a.groupby(kdf.b.rename()).shift().sort_index(),
pdf.a.groupby(pdf.b.rename()).shift().sort_index(),
)
self.assert_eq(
kdf.a.rename().groupby(kdf.b.rename()).shift().sort_index(),
pdf.a.rename().groupby(pdf.b.rename()).shift().sort_index(),
)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
kdf.columns = columns
self.assert_eq(
kdf.groupby(("x", "a")).shift().sort_index(),
pdf.groupby(("x", "a")).shift().sort_index(),
)
        # TODO: this seems to be a pandas bug when fill_value is not None?
# self.assert_eq(kdf.groupby([('x', 'a'), ('x', 'b')]).shift(periods=-1,
# fill_value=0).sort_index(),
# pdf.groupby([('x', 'a'), ('x', 'b')]).shift(periods=-1,
# fill_value=0).sort_index())
def test_apply(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [1, 1, 2, 3, 5, 8], "c": [1, 4, 9, 16, 25, 36]},
columns=["a", "b", "c"],
)
kdf = ks.from_pandas(pdf)
self.assert_eq(
kdf.groupby("b").apply(lambda x: x + x.min()).sort_index(),
pdf.groupby("b").apply(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
kdf.groupby("b").apply(len).sort_index(), pdf.groupby("b").apply(len).sort_index(),
)
self.assert_eq(
kdf.groupby("b")["a"].apply(lambda x, y, z: x + x.min() + y * z, 10, z=20).sort_index(),
pdf.groupby("b")["a"].apply(lambda x, y, z: x + x.min() + y * z, 10, z=20).sort_index(),
)
self.assert_eq(
kdf.groupby("b")[["a"]].apply(lambda x: x + x.min()).sort_index(),
pdf.groupby("b")[["a"]].apply(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
kdf.groupby(["a", "b"]).apply(lambda x, y, z: x + x.min() + y + z, 1, z=2).sort_index(),
pdf.groupby(["a", "b"]).apply(lambda x, y, z: x + x.min() + y + z, 1, z=2).sort_index(),
)
self.assert_eq(
kdf.groupby(["b"])["c"].apply(lambda x: 1).sort_index(),
pdf.groupby(["b"])["c"].apply(lambda x: 1).sort_index(),
)
self.assert_eq(
kdf.groupby(["b"])["c"].apply(len).sort_index(),
pdf.groupby(["b"])["c"].apply(len).sort_index(),
)
self.assert_eq(
kdf.groupby(kdf.b // 5).apply(lambda x: x + x.min()).sort_index(),
pdf.groupby(pdf.b // 5).apply(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
kdf.groupby(kdf.b // 5)["a"].apply(lambda x: x + x.min()).sort_index(),
pdf.groupby(pdf.b // 5)["a"].apply(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
kdf.groupby(kdf.b // 5)[["a"]].apply(lambda x: x + x.min()).sort_index(),
pdf.groupby(pdf.b // 5)[["a"]].apply(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
kdf.groupby(kdf.b // 5)[["a"]].apply(len).sort_index(),
pdf.groupby(pdf.b // 5)[["a"]].apply(len).sort_index(),
almost=True,
)
self.assert_eq(
kdf.a.rename().groupby(kdf.b).apply(lambda x: x + x.min()).sort_index(),
pdf.a.rename().groupby(pdf.b).apply(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
kdf.a.groupby(kdf.b.rename()).apply(lambda x: x + x.min()).sort_index(),
pdf.a.groupby(pdf.b.rename()).apply(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
kdf.a.rename().groupby(kdf.b.rename()).apply(lambda x: x + x.min()).sort_index(),
pdf.a.rename().groupby(pdf.b.rename()).apply(lambda x: x + x.min()).sort_index(),
)
with self.assertRaisesRegex(TypeError, "int object is not callable"):
kdf.groupby("b").apply(1)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
kdf.columns = columns
self.assert_eq(
kdf.groupby(("x", "b")).apply(lambda x: 1).sort_index(),
pdf.groupby(("x", "b")).apply(lambda x: 1).sort_index(),
)
self.assert_eq(
kdf.groupby([("x", "a"), ("x", "b")]).apply(lambda x: x + x.min()).sort_index(),
pdf.groupby([("x", "a"), ("x", "b")]).apply(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
kdf.groupby(("x", "b")).apply(len).sort_index(),
pdf.groupby(("x", "b")).apply(len).sort_index(),
)
self.assert_eq(
kdf.groupby([("x", "a"), ("x", "b")]).apply(len).sort_index(),
pdf.groupby([("x", "a"), ("x", "b")]).apply(len).sort_index(),
)
def test_apply_without_shortcut(self):
with option_context("compute.shortcut_limit", 0):
self.test_apply()
def test_apply_negative(self):
def func(_) -> ks.Series[int]:
return pd.Series([1])
with self.assertRaisesRegex(TypeError, "Series as a return type hint at frame groupby"):
ks.range(10).groupby("id").apply(func)
def test_apply_with_new_dataframe(self):
pdf = pd.DataFrame(
{"timestamp": [0.0, 0.5, 1.0, 0.0, 0.5], "car_id": ["A", "A", "A", "B", "B"]}
)
kdf = ks.from_pandas(pdf)
self.assert_eq(
kdf.groupby("car_id").apply(lambda _: pd.DataFrame({"column": [0.0]})).sort_index(),
pdf.groupby("car_id").apply(lambda _: pd.DataFrame({"column": [0.0]})).sort_index(),
)
self.assert_eq(
kdf.groupby("car_id")
.apply(lambda df: pd.DataFrame({"mean": [df["timestamp"].mean()]}))
.sort_index(),
pdf.groupby("car_id")
.apply(lambda df: pd.DataFrame({"mean": [df["timestamp"].mean()]}))
.sort_index(),
)
# dataframe with 1000+ records
pdf = pd.DataFrame(
{
"timestamp": [0.0, 0.5, 1.0, 0.0, 0.5] * 300,
"car_id": ["A", "A", "A", "B", "B"] * 300,
}
)
kdf = ks.from_pandas(pdf)
self.assert_eq(
kdf.groupby("car_id").apply(lambda _: pd.DataFrame({"column": [0.0]})).sort_index(),
pdf.groupby("car_id").apply(lambda _: pd.DataFrame({"column": [0.0]})).sort_index(),
)
self.assert_eq(
kdf.groupby("car_id")
.apply(lambda df: pd.DataFrame({"mean": [df["timestamp"].mean()]}))
.sort_index(),
pdf.groupby("car_id")
.apply(lambda df: pd.DataFrame({"mean": [df["timestamp"].mean()]}))
.sort_index(),
)
def test_apply_with_new_dataframe_without_shortcut(self):
with option_context("compute.shortcut_limit", 0):
self.test_apply_with_new_dataframe()
def test_apply_key_handling(self):
pdf = pd.DataFrame(
{"d": [1.0, 1.0, 1.0, 2.0, 2.0, 2.0], "v": [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]}
)
kdf = ks.from_pandas(pdf)
self.assert_eq(
kdf.groupby("d").apply(sum).sort_index(), pdf.groupby("d").apply(sum).sort_index()
)
with ks.option_context("compute.shortcut_limit", 1):
self.assert_eq(
kdf.groupby("d").apply(sum).sort_index(), pdf.groupby("d").apply(sum).sort_index()
)
def test_apply_with_side_effect(self):
pdf = pd.DataFrame(
{"d": [1.0, 1.0, 1.0, 2.0, 2.0, 2.0], "v": [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]}
)
kdf = ks.from_pandas(pdf)
acc = ks.utils.default_session().sparkContext.accumulator(0)
def sum_with_acc_frame(x) -> ks.DataFrame[np.float64, np.float64]:
nonlocal acc
acc += 1
return np.sum(x)
actual = kdf.groupby("d").apply(sum_with_acc_frame).sort_index()
actual.columns = ["d", "v"]
self.assert_eq(actual, pdf.groupby("d").apply(sum).sort_index().reset_index(drop=True))
self.assert_eq(acc.value, 2)
def sum_with_acc_series(x) -> np.float64:
nonlocal acc
acc += 1
return np.sum(x)
self.assert_eq(
kdf.groupby("d")["v"].apply(sum_with_acc_series).sort_index(),
pdf.groupby("d")["v"].apply(sum).sort_index().reset_index(drop=True),
)
self.assert_eq(acc.value, 4)
def test_transform(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [1, 1, 2, 3, 5, 8], "c": [1, 4, 9, 16, 25, 36]},
columns=["a", "b", "c"],
)
kdf = ks.from_pandas(pdf)
self.assert_eq(
kdf.groupby("b").transform(lambda x: x + x.min()).sort_index(),
pdf.groupby("b").transform(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
kdf.groupby("b")["a"].transform(lambda x: x + x.min()).sort_index(),
pdf.groupby("b")["a"].transform(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
kdf.groupby("b")[["a"]].transform(lambda x: x + x.min()).sort_index(),
pdf.groupby("b")[["a"]].transform(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
kdf.groupby(["a", "b"]).transform(lambda x: x + x.min()).sort_index(),
pdf.groupby(["a", "b"]).transform(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
kdf.groupby(["b"])["c"].transform(lambda x: x + x.min()).sort_index(),
pdf.groupby(["b"])["c"].transform(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
kdf.groupby(kdf.b // 5).transform(lambda x: x + x.min()).sort_index(),
pdf.groupby(pdf.b // 5).transform(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
kdf.groupby(kdf.b // 5)["a"].transform(lambda x: x + x.min()).sort_index(),
pdf.groupby(pdf.b // 5)["a"].transform(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
kdf.groupby(kdf.b // 5)[["a"]].transform(lambda x: x + x.min()).sort_index(),
pdf.groupby(pdf.b // 5)[["a"]].transform(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
kdf.a.rename().groupby(kdf.b).transform(lambda x: x + x.min()).sort_index(),
pdf.a.rename().groupby(pdf.b).transform(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
kdf.a.groupby(kdf.b.rename()).transform(lambda x: x + x.min()).sort_index(),
pdf.a.groupby(pdf.b.rename()).transform(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
kdf.a.rename().groupby(kdf.b.rename()).transform(lambda x: x + x.min()).sort_index(),
pdf.a.rename().groupby(pdf.b.rename()).transform(lambda x: x + x.min()).sort_index(),
)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
kdf.columns = columns
self.assert_eq(
kdf.groupby(("x", "b")).transform(lambda x: x + x.min()).sort_index(),
pdf.groupby(("x", "b")).transform(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
kdf.groupby([("x", "a"), ("x", "b")]).transform(lambda x: x + x.min()).sort_index(),
pdf.groupby([("x", "a"), ("x", "b")]).transform(lambda x: x + x.min()).sort_index(),
)
def test_transform_without_shortcut(self):
with option_context("compute.shortcut_limit", 0):
self.test_transform()
def test_filter(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [1, 1, 2, 3, 5, 8], "c": [1, 4, 9, 16, 25, 36]},
columns=["a", "b", "c"],
)
kdf = ks.from_pandas(pdf)
self.assert_eq(
kdf.groupby("b").filter(lambda x: any(x.a == 2)).sort_index(),
pdf.groupby("b").filter(lambda x: any(x.a == 2)).sort_index(),
)
self.assert_eq(
kdf.groupby("b")["a"].filter(lambda x: any(x == 2)).sort_index(),
pdf.groupby("b")["a"].filter(lambda x: any(x == 2)).sort_index(),
)
self.assert_eq(
kdf.groupby("b")[["a"]].filter(lambda x: any(x.a == 2)).sort_index(),
pdf.groupby("b")[["a"]].filter(lambda x: any(x.a == 2)).sort_index(),
)
self.assert_eq(
kdf.groupby(["a", "b"]).filter(lambda x: any(x.a == 2)).sort_index(),
pdf.groupby(["a", "b"]).filter(lambda x: any(x.a == 2)).sort_index(),
)
self.assert_eq(
kdf.groupby(kdf["b"] // 5).filter(lambda x: any(x.a == 2)).sort_index(),
pdf.groupby(pdf["b"] // 5).filter(lambda x: any(x.a == 2)).sort_index(),
)
self.assert_eq(
kdf.groupby(kdf["b"] // 5)["a"].filter(lambda x: any(x == 2)).sort_index(),
pdf.groupby(pdf["b"] // 5)["a"].filter(lambda x: any(x == 2)).sort_index(),
)
self.assert_eq(
kdf.groupby(kdf["b"] // 5)[["a"]].filter(lambda x: any(x.a == 2)).sort_index(),
pdf.groupby(pdf["b"] // 5)[["a"]].filter(lambda x: any(x.a == 2)).sort_index(),
)
self.assert_eq(
kdf.a.rename().groupby(kdf.b).filter(lambda x: any(x == 2)).sort_index(),
pdf.a.rename().groupby(pdf.b).filter(lambda x: any(x == 2)).sort_index(),
)
self.assert_eq(
kdf.a.groupby(kdf.b.rename()).filter(lambda x: any(x == 2)).sort_index(),
pdf.a.groupby(pdf.b.rename()).filter(lambda x: any(x == 2)).sort_index(),
)
self.assert_eq(
kdf.a.rename().groupby(kdf.b.rename()).filter(lambda x: any(x == 2)).sort_index(),
pdf.a.rename().groupby(pdf.b.rename()).filter(lambda x: any(x == 2)).sort_index(),
)
with self.assertRaisesRegex(TypeError, "int object is not callable"):
kdf.groupby("b").filter(1)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
kdf.columns = columns
self.assert_eq(
kdf.groupby(("x", "b")).filter(lambda x: any(x[("x", "a")] == 2)).sort_index(),
pdf.groupby(("x", "b")).filter(lambda x: any(x[("x", "a")] == 2)).sort_index(),
)
self.assert_eq(
kdf.groupby([("x", "a"), ("x", "b")])
.filter(lambda x: any(x[("x", "a")] == 2))
.sort_index(),
pdf.groupby([("x", "a"), ("x", "b")])
.filter(lambda x: any(x[("x", "a")] == 2))
.sort_index(),
)
def test_idxmax(self):
pdf = pd.DataFrame(
{"a": [1, 1, 2, 2, 3] * 3, "b": [1, 2, 3, 4, 5] * 3, "c": [5, 4, 3, 2, 1] * 3}
)
kdf = ks.from_pandas(pdf)
self.assert_eq(
pdf.groupby(["a"]).idxmax().sort_index(), kdf.groupby(["a"]).idxmax().sort_index()
)
self.assert_eq(
pdf.groupby(["a"]).idxmax(skipna=False).sort_index(),
kdf.groupby(["a"]).idxmax(skipna=False).sort_index(),
)
self.assert_eq(
pdf.groupby(["a"])["b"].idxmax().sort_index(),
kdf.groupby(["a"])["b"].idxmax().sort_index(),
)
self.assert_eq(
pdf.b.rename().groupby(pdf.a).idxmax().sort_index(),
kdf.b.rename().groupby(kdf.a).idxmax().sort_index(),
)
self.assert_eq(
pdf.b.groupby(pdf.a.rename()).idxmax().sort_index(),
kdf.b.groupby(kdf.a.rename()).idxmax().sort_index(),
)
self.assert_eq(
pdf.b.rename().groupby(pdf.a.rename()).idxmax().sort_index(),
kdf.b.rename().groupby(kdf.a.rename()).idxmax().sort_index(),
)
with self.assertRaisesRegex(ValueError, "idxmax only support one-level index now"):
kdf.set_index(["a", "b"]).groupby(["c"]).idxmax()
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
kdf.columns = columns
self.assert_eq(
pdf.groupby(("x", "a")).idxmax().sort_index(),
kdf.groupby(("x", "a")).idxmax().sort_index(),
)
self.assert_eq(
pdf.groupby(("x", "a")).idxmax(skipna=False).sort_index(),
kdf.groupby(("x", "a")).idxmax(skipna=False).sort_index(),
)
def test_idxmin(self):
pdf = pd.DataFrame(
{"a": [1, 1, 2, 2, 3] * 3, "b": [1, 2, 3, 4, 5] * 3, "c": [5, 4, 3, 2, 1] * 3}
)
kdf = ks.from_pandas(pdf)
self.assert_eq(
pdf.groupby(["a"]).idxmin().sort_index(), kdf.groupby(["a"]).idxmin().sort_index()
)
self.assert_eq(
pdf.groupby(["a"]).idxmin(skipna=False).sort_index(),
kdf.groupby(["a"]).idxmin(skipna=False).sort_index(),
)
self.assert_eq(
pdf.groupby(["a"])["b"].idxmin().sort_index(),
kdf.groupby(["a"])["b"].idxmin().sort_index(),
)
self.assert_eq(
pdf.b.rename().groupby(pdf.a).idxmin().sort_index(),
kdf.b.rename().groupby(kdf.a).idxmin().sort_index(),
)
self.assert_eq(
pdf.b.groupby(pdf.a.rename()).idxmin().sort_index(),
kdf.b.groupby(kdf.a.rename()).idxmin().sort_index(),
)
self.assert_eq(
pdf.b.rename().groupby(pdf.a.rename()).idxmin().sort_index(),
kdf.b.rename().groupby(kdf.a.rename()).idxmin().sort_index(),
)
with self.assertRaisesRegex(ValueError, "idxmin only support one-level index now"):
kdf.set_index(["a", "b"]).groupby(["c"]).idxmin()
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
kdf.columns = columns
self.assert_eq(
pdf.groupby(("x", "a")).idxmin().sort_index(),
kdf.groupby(("x", "a")).idxmin().sort_index(),
)
self.assert_eq(
pdf.groupby(("x", "a")).idxmin(skipna=False).sort_index(),
kdf.groupby(("x", "a")).idxmin(skipna=False).sort_index(),
)
def test_head(self):
pdf = pd.DataFrame(
{
"a": [1, 1, 1, 1, 2, 2, 2, 3, 3, 3] * 3,
"b": [2, 3, 1, 4, 6, 9, 8, 10, 7, 5] * 3,
"c": [3, 5, 2, 5, 1, 2, 6, 4, 3, 6] * 3,
},
index=np.random.rand(10 * 3),
)
kdf = ks.from_pandas(pdf)
self.assert_eq(pdf.groupby("a").head(2).sort_index(), kdf.groupby("a").head(2).sort_index())
self.assert_eq(
pdf.groupby("a").head(-2).sort_index(), kdf.groupby("a").head(-2).sort_index()
)
self.assert_eq(
pdf.groupby("a").head(100000).sort_index(), kdf.groupby("a").head(100000).sort_index()
)
self.assert_eq(
pdf.groupby("a")["b"].head(2).sort_index(), kdf.groupby("a")["b"].head(2).sort_index()
)
self.assert_eq(
pdf.groupby("a")["b"].head(-2).sort_index(), kdf.groupby("a")["b"].head(-2).sort_index()
)
self.assert_eq(
pdf.groupby("a")["b"].head(100000).sort_index(),
kdf.groupby("a")["b"].head(100000).sort_index(),
)
self.assert_eq(
pdf.groupby("a")[["b"]].head(2).sort_index(),
kdf.groupby("a")[["b"]].head(2).sort_index(),
)
self.assert_eq(
pdf.groupby("a")[["b"]].head(-2).sort_index(),
kdf.groupby("a")[["b"]].head(-2).sort_index(),
)
self.assert_eq(
pdf.groupby("a")[["b"]].head(100000).sort_index(),
kdf.groupby("a")[["b"]].head(100000).sort_index(),
)
self.assert_eq(
pdf.groupby(pdf.a // 2).head(2).sort_index(),
kdf.groupby(kdf.a // 2).head(2).sort_index(),
)
self.assert_eq(
pdf.groupby(pdf.a // 2)["b"].head(2).sort_index(),
kdf.groupby(kdf.a // 2)["b"].head(2).sort_index(),
)
self.assert_eq(
pdf.groupby(pdf.a // 2)[["b"]].head(2).sort_index(),
kdf.groupby(kdf.a // 2)[["b"]].head(2).sort_index(),
)
self.assert_eq(
pdf.b.rename().groupby(pdf.a).head(2).sort_index(),
kdf.b.rename().groupby(kdf.a).head(2).sort_index(),
)
self.assert_eq(
pdf.b.groupby(pdf.a.rename()).head(2).sort_index(),
kdf.b.groupby(kdf.a.rename()).head(2).sort_index(),
)
self.assert_eq(
pdf.b.rename().groupby(pdf.a.rename()).head(2).sort_index(),
kdf.b.rename().groupby(kdf.a.rename()).head(2).sort_index(),
)
# multi-index
midx = pd.MultiIndex(
[["x", "y"], ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j"]],
[[0, 0, 0, 0, 0, 1, 1, 1, 1, 1], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]],
)
pdf = pd.DataFrame(
{
"a": [1, 1, 1, 1, 2, 2, 2, 3, 3, 3],
"b": [2, 3, 1, 4, 6, 9, 8, 10, 7, 5],
"c": [3, 5, 2, 5, 1, 2, 6, 4, 3, 6],
},
columns=["a", "b", "c"],
index=midx,
)
kdf = ks.from_pandas(pdf)
self.assert_eq(pdf.groupby("a").head(2).sort_index(), kdf.groupby("a").head(2).sort_index())
self.assert_eq(
pdf.groupby("a").head(-2).sort_index(), kdf.groupby("a").head(-2).sort_index()
)
self.assert_eq(
pdf.groupby("a").head(100000).sort_index(), kdf.groupby("a").head(100000).sort_index()
)
self.assert_eq(
pdf.groupby("a")["b"].head(2).sort_index(), kdf.groupby("a")["b"].head(2).sort_index()
)
self.assert_eq(
pdf.groupby("a")["b"].head(-2).sort_index(), kdf.groupby("a")["b"].head(-2).sort_index()
)
self.assert_eq(
pdf.groupby("a")["b"].head(100000).sort_index(),
kdf.groupby("a")["b"].head(100000).sort_index(),
)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
kdf.columns = columns
self.assert_eq(
pdf.groupby(("x", "a")).head(2).sort_index(),
kdf.groupby(("x", "a")).head(2).sort_index(),
)
self.assert_eq(
pdf.groupby(("x", "a")).head(-2).sort_index(),
kdf.groupby(("x", "a")).head(-2).sort_index(),
)
self.assert_eq(
pdf.groupby(("x", "a")).head(100000).sort_index(),
kdf.groupby(("x", "a")).head(100000).sort_index(),
)
def test_missing(self):
kdf = ks.DataFrame({"a": [1, 2, 3, 4, 5, 6, 7, 8, 9]})
# DataFrameGroupBy functions
missing_functions = inspect.getmembers(
MissingPandasLikeDataFrameGroupBy, inspect.isfunction
)
unsupported_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "unsupported_function"
]
for name in unsupported_functions:
with self.assertRaisesRegex(
PandasNotImplementedError,
"method.*GroupBy.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(kdf.groupby("a"), name)()
deprecated_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "deprecated_function"
]
for name in deprecated_functions:
with self.assertRaisesRegex(
PandasNotImplementedError, "method.*GroupBy.*{}.*is deprecated".format(name)
):
getattr(kdf.groupby("a"), name)()
# SeriesGroupBy functions
missing_functions = inspect.getmembers(MissingPandasLikeSeriesGroupBy, inspect.isfunction)
unsupported_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "unsupported_function"
]
for name in unsupported_functions:
with self.assertRaisesRegex(
PandasNotImplementedError,
"method.*GroupBy.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(kdf.a.groupby(kdf.a), name)()
deprecated_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "deprecated_function"
]
for name in deprecated_functions:
with self.assertRaisesRegex(
PandasNotImplementedError, "method.*GroupBy.*{}.*is deprecated".format(name)
):
getattr(kdf.a.groupby(kdf.a), name)()
# DataFrameGroupBy properties
missing_properties = inspect.getmembers(
MissingPandasLikeDataFrameGroupBy, lambda o: isinstance(o, property)
)
unsupported_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "unsupported_property"
]
for name in unsupported_properties:
with self.assertRaisesRegex(
PandasNotImplementedError,
"property.*GroupBy.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(kdf.groupby("a"), name)
deprecated_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "deprecated_property"
]
for name in deprecated_properties:
with self.assertRaisesRegex(
PandasNotImplementedError, "property.*GroupBy.*{}.*is deprecated".format(name)
):
getattr(kdf.groupby("a"), name)
# SeriesGroupBy properties
missing_properties = inspect.getmembers(
MissingPandasLikeSeriesGroupBy, lambda o: isinstance(o, property)
)
unsupported_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "unsupported_property"
]
for name in unsupported_properties:
with self.assertRaisesRegex(
PandasNotImplementedError,
"property.*GroupBy.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(kdf.a.groupby(kdf.a), name)
deprecated_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "deprecated_property"
]
for name in deprecated_properties:
with self.assertRaisesRegex(
PandasNotImplementedError, "property.*GroupBy.*{}.*is deprecated".format(name)
):
getattr(kdf.a.groupby(kdf.a), name)
@staticmethod
def test_is_multi_agg_with_relabel():
assert is_multi_agg_with_relabel(a="max") is False
assert is_multi_agg_with_relabel(a_min=("a", "max"), a_max=("a", "min")) is True
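    # Hedged note, not part of the original assertions: is_multi_agg_with_relabel
    # detects pandas "named aggregation", i.e. agg() calls where every keyword
    # value is a (column, func) tuple, for example
    #     df.groupby("a").agg(b_max=("b", "max"), b_min=("b", "min"))
    # A plain mapping such as agg(a="max") is not relabeling, hence the False case.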
def test_get_group(self):
pdf = pd.DataFrame(
[
("falcon", "bird", 389.0),
("parrot", "bird", 24.0),
("lion", "mammal", 80.5),
("monkey", "mammal", np.nan),
],
columns=["name", "class", "max_speed"],
index=[0, 2, 3, 1],
)
pdf.columns.name = "Koalas"
kdf = ks.from_pandas(pdf)
self.assert_eq(
kdf.groupby("class").get_group("bird"), pdf.groupby("class").get_group("bird"),
)
self.assert_eq(
kdf.groupby("class")["name"].get_group("mammal"),
pdf.groupby("class")["name"].get_group("mammal"),
)
self.assert_eq(
kdf.groupby("class")[["name"]].get_group("mammal"),
pdf.groupby("class")[["name"]].get_group("mammal"),
)
self.assert_eq(
kdf.groupby(["class", "name"]).get_group(("mammal", "lion")),
pdf.groupby(["class", "name"]).get_group(("mammal", "lion")),
)
self.assert_eq(
kdf.groupby(["class", "name"])["max_speed"].get_group(("mammal", "lion")),
pdf.groupby(["class", "name"])["max_speed"].get_group(("mammal", "lion")),
)
self.assert_eq(
kdf.groupby(["class", "name"])[["max_speed"]].get_group(("mammal", "lion")),
pdf.groupby(["class", "name"])[["max_speed"]].get_group(("mammal", "lion")),
)
self.assert_eq(
(kdf.max_speed + 1).groupby(kdf["class"]).get_group("mammal"),
(pdf.max_speed + 1).groupby(pdf["class"]).get_group("mammal"),
)
self.assert_eq(
kdf.groupby("max_speed").get_group(80.5), pdf.groupby("max_speed").get_group(80.5),
)
self.assertRaises(KeyError, lambda: kdf.groupby("class").get_group("fish"))
self.assertRaises(TypeError, lambda: kdf.groupby("class").get_group(["bird", "mammal"]))
self.assertRaises(KeyError, lambda: kdf.groupby("class")["name"].get_group("fish"))
self.assertRaises(
TypeError, lambda: kdf.groupby("class")["name"].get_group(["bird", "mammal"])
)
self.assertRaises(
KeyError, lambda: kdf.groupby(["class", "name"]).get_group(("lion", "mammal"))
)
self.assertRaises(ValueError, lambda: kdf.groupby(["class", "name"]).get_group(("lion",)))
self.assertRaises(ValueError, lambda: kdf.groupby(["class", "name"]).get_group(("mammal",)))
self.assertRaises(ValueError, lambda: kdf.groupby(["class", "name"]).get_group("mammal"))
# MultiIndex columns
pdf.columns = pd.MultiIndex.from_tuples([("A", "name"), ("B", "class"), ("C", "max_speed")])
pdf.columns.names = ["Hello", "Koalas"]
kdf = ks.from_pandas(pdf)
self.assert_eq(
kdf.groupby(("B", "class")).get_group("bird"),
pdf.groupby(("B", "class")).get_group("bird"),
)
self.assert_eq(
kdf.groupby(("B", "class"))[[("A", "name")]].get_group("mammal"),
pdf.groupby(("B", "class"))[[("A", "name")]].get_group("mammal"),
)
self.assert_eq(
kdf.groupby([("B", "class"), ("A", "name")]).get_group(("mammal", "lion")),
pdf.groupby([("B", "class"), ("A", "name")]).get_group(("mammal", "lion")),
)
self.assert_eq(
kdf.groupby([("B", "class"), ("A", "name")])[[("C", "max_speed")]].get_group(
("mammal", "lion")
),
pdf.groupby([("B", "class"), ("A", "name")])[[("C", "max_speed")]].get_group(
("mammal", "lion")
),
)
self.assert_eq(
(kdf[("C", "max_speed")] + 1).groupby(kdf[("B", "class")]).get_group("mammal"),
(pdf[("C", "max_speed")] + 1).groupby(pdf[("B", "class")]).get_group("mammal"),
)
self.assert_eq(
kdf.groupby(("C", "max_speed")).get_group(80.5),
pdf.groupby(("C", "max_speed")).get_group(80.5),
)
self.assertRaises(KeyError, lambda: kdf.groupby(("B", "class")).get_group("fish"))
self.assertRaises(
TypeError, lambda: kdf.groupby(("B", "class")).get_group(["bird", "mammal"])
)
self.assertRaises(
KeyError, lambda: kdf.groupby(("B", "class"))[("A", "name")].get_group("fish")
)
self.assertRaises(
KeyError,
lambda: kdf.groupby([("B", "class"), ("A", "name")]).get_group(("lion", "mammal")),
)
self.assertRaises(
ValueError, lambda: kdf.groupby([("B", "class"), ("A", "name")]).get_group(("lion",)),
)
self.assertRaises(
ValueError, lambda: kdf.groupby([("B", "class"), ("A", "name")]).get_group(("mammal",))
)
self.assertRaises(
ValueError, lambda: kdf.groupby([("B", "class"), ("A", "name")]).get_group("mammal")
)
# ---------------------------------------------------------------------------
# Dataset row boundary. Next file:
#   python_modules/dagster/dagster/core/storage/output_manager.py
#   repo: ericct/dagster @ dd2c9f05751e1bae212a30dbc54381167a14f6c5
#   license: Apache-2.0, size: 2,558 bytes
# ---------------------------------------------------------------------------
from abc import ABC, abstractmethod, abstractproperty
from dagster import check
from dagster.core.definitions.definition_config_schema import (
convert_user_facing_definition_config_schema,
)
from dagster.core.definitions.resource import ResourceDefinition
class IOutputManagerDefinition:
@abstractproperty
def output_config_schema(self):
"""The schema for per-output configuration for outputs that are managed by this
manager"""
class OutputManagerDefinition(ResourceDefinition, IOutputManagerDefinition):
"""Definition of an output manager resource.
An OutputManagerDefinition is a :py:class:`ResourceDefinition` whose resource_fn returns an
:py:class:`OutputManager`. OutputManagers are used to handle the outputs of solids.
"""
def __init__(
self,
resource_fn=None,
config_schema=None,
description=None,
output_config_schema=None,
required_resource_keys=None,
version=None,
):
self._output_config_schema = convert_user_facing_definition_config_schema(
output_config_schema
)
super(OutputManagerDefinition, self).__init__(
resource_fn=resource_fn,
config_schema=config_schema,
description=description,
required_resource_keys=required_resource_keys,
version=version,
)
@property
def output_config_schema(self):
return self._output_config_schema
def copy_for_configured(self, name, description, config_schema, _):
        check.invariant(name is None, "ResourceDefinitions do not have names")
return OutputManagerDefinition(
config_schema=config_schema,
description=description or self.description,
resource_fn=self.resource_fn,
required_resource_keys=self.required_resource_keys,
output_config_schema=self.output_config_schema,
)
class OutputManager(ABC):
"""Base class for user-provided output managers. OutputManagers are used to handle the outputs
of solids.
    The easiest way to define an OutputManager is with the :py:func:`output_manager` decorator.
"""
@abstractmethod
def handle_output(self, context, obj):
"""Handles an output produced by a solid. Usually, this means materializing it to persistent
storage.
Args:
context (OutputContext): The context of the step output that produces this object.
obj (Any): The data object to be handled.
"""
# ---------------------------------------------------------------------------
# Dataset row boundary. Next file:
#   boxes/secnotes/psexec.py
#   repo: jasonperhaps/HTB @ 85c2b6ea551b8f20b72dad0f1941278e10be015d
#   license: MIT, size: 19,542 bytes
# ---------------------------------------------------------------------------
#!/usr/bin/env python
# SECUREAUTH LABS. Copyright 2018 SecureAuth Corporation. All rights reserved.
#
# This software is provided under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
# PSEXEC like functionality example using RemComSvc (https://github.com/kavika13/RemCom)
#
# Author:
# beto (@agsolino)
#
# Reference for:
# DCE/RPC and SMB.
import sys
import os
import cmd
import logging
from threading import Thread, Lock
import argparse
import random
import string
import time
from impacket.examples import logger
from impacket import version, smb
from impacket.smbconnection import SMBConnection
from impacket.dcerpc.v5 import transport
from impacket.structure import Structure
from impacket.examples import remcomsvc, serviceinstall
class RemComMessage(Structure):
structure = (
('Command','4096s=""'),
('WorkingDir','260s=""'),
('Priority','<L=0x20'),
('ProcessID','<L=0x01'),
('Machine','260s=""'),
('NoWait','<L=0'),
)
class RemComResponse(Structure):
structure = (
('ErrorCode','<L=0'),
('ReturnCode','<L=0'),
)
RemComSTDOUT = "RemCom_stdout"
RemComSTDIN = "RemCom_stdin"
RemComSTDERR = "RemCom_stderr"
lock = Lock()
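# Hedged sketch, not part of the original tool: shows how the request that
# doStuff() writes to \RemCom_communicaton is built. impacket Structure
# instances serialize to their fixed-size wire layout via str() under
# Python 2 (4096 + 260 + 4 + 4 + 260 + 4 = 4628 bytes for RemComMessage,
# per the field declarations above). The command, machine tag, and pid
# below are made-up example values.
def _example_remcom_request():
    msg = RemComMessage()
    msg['Command'] = 'ipconfig'      # command RemComSvc should execute
    msg['Machine'] = 'ABCD'          # random tag; also embedded in the pipe names
    msg['ProcessID'] = 1234          # caller pid, disambiguates sessions
    # The per-session stdout pipe name is derived the same way doStuff() does it.
    stdout_pipe = '\\%s%s%d' % (RemComSTDOUT, msg['Machine'], msg['ProcessID'])
    return str(msg), stdout_pipe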
class PSEXEC:
def __init__(self, command, path, exeFile, copyFile, port=445,
username='', password='', domain='', hashes=None, aesKey=None, doKerberos=False, kdcHost=None, serviceName=None):
self.__username = username
self.__password = password
self.__port = port
self.__command = command
self.__path = path
self.__domain = domain
self.__lmhash = ''
self.__nthash = ''
self.__aesKey = aesKey
self.__exeFile = exeFile
self.__copyFile = copyFile
self.__doKerberos = doKerberos
self.__kdcHost = kdcHost
self.__serviceName = serviceName
if hashes is not None:
self.__lmhash, self.__nthash = hashes.split(':')
def run(self, remoteName, remoteHost):
stringbinding = 'ncacn_np:%s[\pipe\svcctl]' % remoteName
logging.debug('StringBinding %s'%stringbinding)
rpctransport = transport.DCERPCTransportFactory(stringbinding)
rpctransport.set_dport(self.__port)
rpctransport.setRemoteHost(remoteHost)
if hasattr(rpctransport, 'set_credentials'):
# This method exists only for selected protocol sequences.
rpctransport.set_credentials(self.__username, self.__password, self.__domain, self.__lmhash,
self.__nthash, self.__aesKey)
rpctransport.set_kerberos(self.__doKerberos, self.__kdcHost)
self.doStuff(rpctransport)
def openPipe(self, s, tid, pipe, accessMask):
pipeReady = False
tries = 50
while pipeReady is False and tries > 0:
try:
s.waitNamedPipe(tid,pipe)
pipeReady = True
except:
tries -= 1
time.sleep(2)
pass
if tries == 0:
raise Exception('Pipe not ready, aborting')
fid = s.openFile(tid,pipe,accessMask, creationOption = 0x40, fileAttributes = 0x80)
return fid
def doStuff(self, rpctransport):
dce = rpctransport.get_dce_rpc()
try:
dce.connect()
except Exception, e:
if logging.getLogger().level == logging.DEBUG:
import traceback
traceback.print_exc()
logging.critical(str(e))
sys.exit(1)
global dialect
dialect = rpctransport.get_smb_connection().getDialect()
try:
unInstalled = False
s = rpctransport.get_smb_connection()
# We don't wanna deal with timeouts from now on.
s.setTimeout(100000)
if self.__exeFile is None:
installService = serviceinstall.ServiceInstall(rpctransport.get_smb_connection(), remcomsvc.RemComSvc(), self.__serviceName)
else:
try:
f = open(self.__exeFile)
except Exception, e:
logging.critical(str(e))
sys.exit(1)
installService = serviceinstall.ServiceInstall(rpctransport.get_smb_connection(), f)
if installService.install() is False:
return
if self.__exeFile is not None:
f.close()
# Check if we need to copy a file for execution
if self.__copyFile is not None:
installService.copy_file(self.__copyFile, installService.getShare(), os.path.basename(self.__copyFile))
# And we change the command to be executed to this filename
self.__command = os.path.basename(self.__copyFile) + ' ' + self.__command
tid = s.connectTree('IPC$')
fid_main = self.openPipe(s,tid,'\RemCom_communicaton',0x12019f)
packet = RemComMessage()
pid = os.getpid()
packet['Machine'] = ''.join([random.choice(string.letters) for _ in range(4)])
if self.__path is not None:
packet['WorkingDir'] = self.__path
packet['Command'] = self.__command
packet['ProcessID'] = pid
s.writeNamedPipe(tid, fid_main, str(packet))
# Here we'll store the command we type so we don't print it back ;)
# ( I know.. globals are nasty :P )
global LastDataSent
LastDataSent = ''
# Create the pipes threads
stdin_pipe = RemoteStdInPipe(rpctransport,
'\%s%s%d' % (RemComSTDIN, packet['Machine'], packet['ProcessID']),
smb.FILE_WRITE_DATA | smb.FILE_APPEND_DATA, installService.getShare())
stdin_pipe.start()
stdout_pipe = RemoteStdOutPipe(rpctransport,
'\%s%s%d' % (RemComSTDOUT, packet['Machine'], packet['ProcessID']),
smb.FILE_READ_DATA)
stdout_pipe.start()
stderr_pipe = RemoteStdErrPipe(rpctransport,
'\%s%s%d' % (RemComSTDERR, packet['Machine'], packet['ProcessID']),
smb.FILE_READ_DATA)
stderr_pipe.start()
# And we stay here till the end
ans = s.readNamedPipe(tid,fid_main,8)
if len(ans):
retCode = RemComResponse(ans)
logging.info("Process %s finished with ErrorCode: %d, ReturnCode: %d" % (
self.__command, retCode['ErrorCode'], retCode['ReturnCode']))
installService.uninstall()
if self.__copyFile is not None:
# We copied a file for execution, let's remove it
s.deleteFile(installService.getShare(), os.path.basename(self.__copyFile))
unInstalled = True
sys.exit(retCode['ErrorCode'])
except SystemExit:
raise
except Exception as e:
if logging.getLogger().level == logging.DEBUG:
import traceback
traceback.print_exc()
logging.debug(str(e))
if unInstalled is False:
installService.uninstall()
if self.__copyFile is not None:
s.deleteFile(installService.getShare(), os.path.basename(self.__copyFile))
sys.stdout.flush()
sys.exit(1)
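# Hedged usage sketch (hypothetical target and credentials, not from the
# original file): the upstream CLI entry point wires argparse options into
# the class above roughly like this.
#
#   executer = PSEXEC('cmd.exe', None, None, None, port=445,
#                     username='administrator', password='Password1!',
#                     domain='')
#   executer.run('TARGET-HOST', '10.10.10.10')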
class Pipes(Thread):
def __init__(self, transport, pipe, permissions, share=None):
Thread.__init__(self)
self.server = 0
self.transport = transport
self.credentials = transport.get_credentials()
self.tid = 0
self.fid = 0
self.share = share
self.port = transport.get_dport()
self.pipe = pipe
self.permissions = permissions
self.daemon = True
def connectPipe(self):
try:
lock.acquire()
global dialect
#self.server = SMBConnection('*SMBSERVER', self.transport.get_smb_connection().getRemoteHost(), sess_port = self.port, preferredDialect = SMB_DIALECT)
self.server = SMBConnection(self.transport.get_smb_connection().getRemoteName(), self.transport.get_smb_connection().getRemoteHost(),
sess_port=self.port, preferredDialect=dialect)
user, passwd, domain, lm, nt, aesKey, TGT, TGS = self.credentials
if self.transport.get_kerberos() is True:
self.server.kerberosLogin(user, passwd, domain, lm, nt, aesKey, kdcHost=self.transport.get_kdcHost(), TGT=TGT, TGS=TGS)
else:
self.server.login(user, passwd, domain, lm, nt)
lock.release()
self.tid = self.server.connectTree('IPC$')
self.server.waitNamedPipe(self.tid, self.pipe)
self.fid = self.server.openFile(self.tid,self.pipe,self.permissions, creationOption = 0x40, fileAttributes = 0x80)
self.server.setTimeout(1000000)
except:
if logging.getLogger().level == logging.DEBUG:
import traceback
traceback.print_exc()
            logging.error("Something went wrong connecting the pipes (%s), try again" % self.__class__)
class RemoteStdOutPipe(Pipes):
    def __init__(self, transport, pipe, permissions):
        Pipes.__init__(self, transport, pipe, permissions)
def run(self):
self.connectPipe()
while True:
try:
ans = self.server.readFile(self.tid,self.fid, 0, 1024)
except:
pass
else:
try:
global LastDataSent
if ans != LastDataSent:
sys.stdout.write(ans.decode('cp437'))
sys.stdout.flush()
else:
# Don't echo what I sent, and clear it up
LastDataSent = ''
                        # Just in case this got out of sync, clear the echo buffer once it
                        # grows past 10 chars; crude, and it can still suppress real output.
                        if len(LastDataSent) > 10:
                            LastDataSent = ''
except:
pass
class RemoteStdErrPipe(Pipes):
    def __init__(self, transport, pipe, permissions):
        Pipes.__init__(self, transport, pipe, permissions)
def run(self):
self.connectPipe()
while True:
try:
ans = self.server.readFile(self.tid,self.fid, 0, 1024)
except:
pass
else:
try:
sys.stderr.write(str(ans))
sys.stderr.flush()
except:
pass
class RemoteShell(cmd.Cmd):
def __init__(self, server, port, credentials, tid, fid, share, transport):
cmd.Cmd.__init__(self, False)
self.prompt = '\x08'
self.server = server
self.transferClient = None
self.tid = tid
self.fid = fid
self.credentials = credentials
self.share = share
self.port = port
self.transport = transport
self.intro = '[!] Press help for extra shell commands'
def connect_transferClient(self):
#self.transferClient = SMBConnection('*SMBSERVER', self.server.getRemoteHost(), sess_port = self.port, preferredDialect = SMB_DIALECT)
self.transferClient = SMBConnection('*SMBSERVER', self.server.getRemoteHost(), sess_port=self.port,
preferredDialect=dialect)
user, passwd, domain, lm, nt, aesKey, TGT, TGS = self.credentials
if self.transport.get_kerberos() is True:
self.transferClient.kerberosLogin(user, passwd, domain, lm, nt, aesKey,
kdcHost=self.transport.get_kdcHost(), TGT=TGT, TGS=TGS)
else:
self.transferClient.login(user, passwd, domain, lm, nt)
def do_help(self, line):
print """
lcd {path} - changes the current local directory to {path}
exit - terminates the server process (and this session)
put {src_file, dst_path} - uploads a local file to the dst_path RELATIVE to the connected share (%s)
get {file} - downloads pathname RELATIVE to the connected share (%s) to the current local dir
! {cmd} - executes a local shell cmd
""" % (self.share, self.share)
self.send_data('\r\n', False)
def do_shell(self, s):
os.system(s)
self.send_data('\r\n')
def do_get(self, src_path):
try:
if self.transferClient is None:
self.connect_transferClient()
import ntpath
filename = ntpath.basename(src_path)
fh = open(filename,'wb')
logging.info("Downloading %s\%s" % (self.share, src_path))
self.transferClient.getFile(self.share, src_path, fh.write)
fh.close()
except Exception, e:
logging.critical(str(e))
pass
self.send_data('\r\n')
def do_put(self, s):
try:
if self.transferClient is None:
self.connect_transferClient()
params = s.split(' ')
if len(params) > 1:
src_path = params[0]
dst_path = params[1]
elif len(params) == 1:
src_path = params[0]
dst_path = '/'
src_file = os.path.basename(src_path)
fh = open(src_path, 'rb')
f = dst_path + '/' + src_file
pathname = string.replace(f,'/','\\')
logging.info("Uploading %s to %s\%s" % (src_file, self.share, dst_path))
self.transferClient.putFile(self.share, pathname.decode(sys.stdin.encoding), fh.read)
fh.close()
except Exception, e:
logging.error(str(e))
pass
self.send_data('\r\n')
def do_lcd(self, s):
if s == '':
print os.getcwd()
else:
os.chdir(s)
self.send_data('\r\n')
def emptyline(self):
self.send_data('\r\n')
return
def default(self, line):
self.send_data(line.decode(sys.stdin.encoding).encode('cp437')+'\r\n')
def send_data(self, data, hideOutput = True):
if hideOutput is True:
global LastDataSent
LastDataSent = data
else:
LastDataSent = ''
self.server.writeFile(self.tid, self.fid, data)
class RemoteStdInPipe(Pipes):
    def __init__(self, transport, pipe, permissions, share=None):
        self.shell = None
        Pipes.__init__(self, transport, pipe, permissions, share)
def run(self):
self.connectPipe()
self.shell = RemoteShell(self.server, self.port, self.credentials, self.tid, self.fid, self.share, self.transport)
self.shell.cmdloop()
# Process command-line arguments.
if __name__ == '__main__':
# Init the example's logger theme
logger.init()
print version.BANNER
parser = argparse.ArgumentParser(add_help = True, description = "PSEXEC like functionality example using RemComSvc.")
parser.add_argument('target', action='store', help='[[domain/]username[:password]@]<targetName or address>')
parser.add_argument('command', nargs='*', default = ' ', help='command (or arguments if -c is used) to execute at '
'the target (w/o path) - (default:cmd.exe)')
parser.add_argument('-c', action='store',metavar = "pathname", help='copy the filename for later execution, '
'arguments are passed in the command option')
parser.add_argument('-path', action='store', help='path of the command to execute')
parser.add_argument('-file', action='store', help="alternative RemCom binary (be sure it doesn't require CRT)")
parser.add_argument('-debug', action='store_true', help='Turn DEBUG output ON')
group = parser.add_argument_group('authentication')
group.add_argument('-hashes', action="store", metavar = "LMHASH:NTHASH", help='NTLM hashes, format is LMHASH:NTHASH')
group.add_argument('-no-pass', action="store_true", help='don\'t ask for password (useful for -k)')
group.add_argument('-k', action="store_true", help='Use Kerberos authentication. Grabs credentials from ccache file '
'(KRB5CCNAME) based on target parameters. If valid credentials cannot be found, it will use the '
'ones specified in the command line')
group.add_argument('-aesKey', action="store", metavar = "hex key", help='AES key to use for Kerberos Authentication '
'(128 or 256 bits)')
group = parser.add_argument_group('connection')
group.add_argument('-dc-ip', action='store', metavar="ip address",
help='IP Address of the domain controller. If omitted it will use the domain part (FQDN) specified in '
'the target parameter')
group.add_argument('-target-ip', action='store', metavar="ip address",
help='IP Address of the target machine. If omitted it will use whatever was specified as target. '
'This is useful when target is the NetBIOS name and you cannot resolve it')
group.add_argument('-port', choices=['139', '445'], nargs='?', default='445', metavar="destination port",
help='Destination port to connect to SMB Server')
group.add_argument('-service-name', action='store', metavar="service name", default = '', help='This will be the name of the service')
if len(sys.argv)==1:
parser.print_help()
sys.exit(1)
options = parser.parse_args()
if options.debug is True:
logging.getLogger().setLevel(logging.DEBUG)
else:
logging.getLogger().setLevel(logging.INFO)
import re
domain, username, password, remoteName = re.compile('(?:(?:([^/@:]*)/)?([^@:]*)(?::([^@]*))?@)?(.*)').match(
options.target).groups('')
#In case the password contains '@'
if '@' in remoteName:
password = password + '@' + remoteName.rpartition('@')[0]
remoteName = remoteName.rpartition('@')[2]
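    # Illustrative example (added for clarity, not part of the original script):
    # the regex above splits 'CONTOSO/alice:S3cret@10.0.0.5' into
    # domain='CONTOSO', username='alice', password='S3cret' and
    # remoteName='10.0.0.5'. When the password itself contains '@'
    # (e.g. 'alice:p@ss@10.0.0.5'), the regex cuts the password at the first
    # '@', so the rpartition step glues the extra pieces back onto the password
    # and keeps only the final host component as remoteName.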
if domain is None:
domain = ''
if options.target_ip is None:
options.target_ip = remoteName
if password == '' and username != '' and options.hashes is None and options.no_pass is False and options.aesKey is None:
from getpass import getpass
password = getpass("Password:")
if options.aesKey is not None:
options.k = True
command = ' '.join(options.command)
if command == ' ':
command = 'cmd.exe'
executer = PSEXEC(command, options.path, options.file, options.c, int(options.port), username, password, domain, options.hashes,
options.aesKey, options.k, options.dc_ip, options.service_name)
executer.run(remoteName, options.target_ip)
| 39.638945
| 162
| 0.579623
|
794af0b36086db972b14e2b4ada89a0b64e84ce0
| 3,100
|
py
|
Python
|
source/scripts/python/frontend/source/frontend/settings.py
|
Tabzz98/core
|
02ddfe5e0f7ecaa833a8c36dbc059a968479d8ce
|
[
"Apache-2.0"
] | 1
|
2022-02-08T17:56:53.000Z
|
2022-02-08T17:56:53.000Z
|
source/scripts/python/frontend/source/frontend/settings.py
|
Tabzz98/core
|
02ddfe5e0f7ecaa833a8c36dbc059a968479d8ce
|
[
"Apache-2.0"
] | null | null | null |
source/scripts/python/frontend/source/frontend/settings.py
|
Tabzz98/core
|
02ddfe5e0f7ecaa833a8c36dbc059a968479d8ce
|
[
"Apache-2.0"
] | null | null | null |
"""
Django settings for frontend project.
Generated by 'django-admin startproject' using Django 1.10.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '+rr0rprnfxfg_bi3f4mhn9=t)5kw2wzk7ya$0gj0b2jdz+*cz^'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'frontend.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'frontend.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
| 25.619835
| 91
| 0.699032
|
794af12c7d480e2d45fcc4cb8504bacffdbc7f2c
| 4,211
|
py
|
Python
|
tests/layers/graph/test_usage.py
|
FrostByte266/neupy
|
4b7127e5e4178b0cce023ba36542f5ad3f1d798c
|
[
"MIT"
] | 801
|
2015-09-23T09:24:47.000Z
|
2022-03-29T19:19:03.000Z
|
tests/layers/graph/test_usage.py
|
FrostByte266/neupy
|
4b7127e5e4178b0cce023ba36542f5ad3f1d798c
|
[
"MIT"
] | 277
|
2015-09-22T19:48:50.000Z
|
2022-03-11T23:25:32.000Z
|
tests/layers/graph/test_usage.py
|
FrostByte266/neupy
|
4b7127e5e4178b0cce023ba36542f5ad3f1d798c
|
[
"MIT"
] | 194
|
2015-09-23T15:03:57.000Z
|
2022-03-31T13:54:46.000Z
|
import numpy as np
from neupy import layers
from neupy.utils import asfloat
from base import BaseTestCase
class UsageTestCase(BaseTestCase):
def test_network_wrong_number_of_input_values(self):
network = layers.join(
layers.Input(2),
layers.Relu(10),
layers.Relu(1),
)
input_value_1 = asfloat(np.random.random((10, 2)))
input_value_2 = asfloat(np.random.random((10, 2)))
with self.assertRaisesRegexp(ValueError, "but 2 inputs was provided"):
network.output(input_value_1, input_value_2)
def test_multi_outputs_propagation(self):
network = layers.join(
layers.Input(4),
layers.parallel(
layers.Linear(2),
layers.Linear(3),
layers.Linear(4),
)
)
x = asfloat(np.random.random((7, 4)))
out1, out2, out3 = self.eval(network.output(x))
self.assertEqual((7, 2), out1.shape)
self.assertEqual((7, 3), out2.shape)
self.assertEqual((7, 4), out3.shape)
def test_multi_inputs_propagation(self):
network = layers.join(
layers.parallel(
layers.Input(10, name='input-1'),
layers.Input(4, name='input-2'),
),
layers.Concatenate(),
)
x1 = asfloat(np.random.random((3, 10)))
x2 = asfloat(np.random.random((3, 4)))
out1 = self.eval(network.output(x1, x2))
out2 = self.eval(network.output({'input-2': x2, 'input-1': x1}))
self.assertEqual((3, 14), out1.shape)
np.testing.assert_array_almost_equal(out1, out2)
def test_different_input_types(self):
input_layer = layers.Input(10, name='input')
network = layers.join(
input_layer,
layers.Sigmoid(5),
layers.Sigmoid(4),
)
x_matrix = asfloat(np.random.random((3, 10)))
out1 = self.eval(network.output(x_matrix))
self.assertEqual((3, 4), out1.shape)
out2 = self.eval(network.output({input_layer: x_matrix}))
np.testing.assert_array_almost_equal(out1, out2)
out3 = self.eval(network.output({'input': x_matrix}))
np.testing.assert_array_almost_equal(out2, out3)
unknown_layer = layers.Input(5, name='unk')
message = "The `unk` layer doesn't appear in the network"
with self.assertRaisesRegexp(ValueError, message):
network.output({unknown_layer: x_matrix})
def test_not_an_input_layer_exception(self):
network = layers.join(
layers.Input(10),
layers.Sigmoid(2, name='sigmoid-2'),
layers.Sigmoid(10),
)
x_test = asfloat(np.ones((7, 5)))
with self.assertRaisesRegexp(ValueError, "is not an input layer"):
network.output({'sigmoid-2': x_test})
def test_if_layer_in_the_graph(self):
network = layers.join(
layers.Input(10),
layers.Relu(2),
)
final_layer = layers.Sigmoid(1)
self.assertNotIn(final_layer, network)
network_2 = layers.join(network, final_layer)
self.assertIn(final_layer, network_2)
def test_graph_length(self):
network = layers.join(
layers.Input(10),
layers.Relu(3),
)
self.assertEqual(2, len(network))
network_2 = layers.join(
network,
layers.parallel(
layers.Relu(1),
layers.Relu(2),
),
)
self.assertEqual(2, len(network))
self.assertEqual(4, len(network_2))
def test_graph_predictions(self):
network = layers.join(
layers.Input(10),
layers.Relu(5),
layers.Relu(3),
)
input = np.random.random((100, 10))
output = network.predict(input, verbose=False)
self.assertEqual(output.shape, (100, 3))
output = network.predict(input, batch_size=10, verbose=False)
self.assertEqual(output.shape, (100, 3))
with self.assertRaisesRegexp(TypeError, "Unknown arguments"):
network.predict(input, batchsize=10)
| 31.425373
| 78
| 0.580622
|
794af1d47040ea5ff6943c00215c0fcc0668353b
| 2,902
|
py
|
Python
|
scripts/cross_f1.py
|
JD-AI-Research-NLP/RoR
|
201a7cc08c8b2911204d0dd966039fe99cce15a4
|
[
"Apache-2.0"
] | 15
|
2021-11-18T10:10:32.000Z
|
2022-03-16T07:58:06.000Z
|
scripts/cross_f1.py
|
JD-AI-Research-NLP/RoR
|
201a7cc08c8b2911204d0dd966039fe99cce15a4
|
[
"Apache-2.0"
] | 2
|
2021-11-30T12:15:49.000Z
|
2022-01-19T09:21:48.000Z
|
scripts/cross_f1.py
|
JD-AI-Research-NLP/RoR
|
201a7cc08c8b2911204d0dd966039fe99cce15a4
|
[
"Apache-2.0"
] | 2
|
2021-12-03T01:27:17.000Z
|
2021-12-22T02:44:16.000Z
|
import json
import numpy as np
import re
import string
from collections import Counter
import argparse
def add_arguments(parser):
parser.add_argument("--regional_answer", help="path to regional answer", required=True)
parser.add_argument("--global_answer", help="path to global answer", required=True)
parser.add_argument("--output_file", help="path to output file", required=True)
def normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
return re.sub(r'\b(a|an|the)\b', ' ', text)
def white_space_fix(text):
return ' '.join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return ''.join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
def f1_score(prediction, ground_truth):
prediction_tokens = normalize_answer(prediction).split()
ground_truth_tokens = normalize_answer(ground_truth).split()
common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
num_same = sum(common.values())
if num_same == 0:
return 0
precision = 1.0 * num_same / len(prediction_tokens)
recall = 1.0 * num_same / len(ground_truth_tokens)
f1 = (2 * precision * recall) / (precision + recall)
return f1
def cross_f1_max(predictions):
    scores = []
    for i in range(len(predictions)):
        index = list(range(len(predictions)))
        index.pop(i)
        scores.append(max(f1_score(predictions[i], predictions[j]) for j in index))
    return scores
def cross_f1_mean(predictions):
    scores = []
    for i in range(len(predictions)):
        index = list(range(len(predictions)))
        index.pop(i)
        scores.append(sum(f1_score(predictions[i], predictions[j]) for j in index) / len(index))
    return scores
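# Illustrative sanity check (added for clarity, not part of the original
# script): with toy predictions, article removal in normalize_answer makes the
# first two answers identical, so they reinforce each other while the outlier
# gets zero consensus.
#   >>> cross_f1_max(['the red fox', 'a red fox', 'blue whale'])
#   [1.0, 1.0, 0]
#   >>> cross_f1_mean(['the red fox', 'a red fox', 'blue whale'])
#   [0.5, 0.5, 0.0]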
if __name__ == "__main__":
parser = argparse.ArgumentParser()
add_arguments(parser)
args = parser.parse_args()
with open(args.regional_answer,'r') as f:
regional_answer = json.load(f)
with open(args.global_answer,'r') as f:
global_answer = json.load(f)
cross_answer = {}
delta = 0.1
gamma = 0.8
for (qid, answer),(_, g_answer) in zip(regional_answer.items(),global_answer.items()):
score = [i['score']*gamma for i in answer][:10]
text = [i['text'] for i in answer][:10]
score1 = [i['score']*(1-gamma) for i in g_answer][:10]
text1 = [i['text'] for i in g_answer][:10]
score = score + score1
text = text + text1
cross_f1 = cross_f1_mean(text)
score_list = [delta*i + (1-delta) *j for i,j in zip(score,cross_f1)]
max_idx = np.argmax(score_list)
cross_answer[qid] = text[max_idx]
with open(args.output_file,'w') as f:
json.dump(cross_answer,f)
| 34.963855
| 103
| 0.670917
|
794af2056466bf6f25fe5d53468c520fed4c79eb
| 22,921
|
py
|
Python
|
google/estimators/importance_sampling_ci.py
|
SnowflyLXF/FedDICE
|
a63a3233037e37ae27d6c130f37ffc4b92190d5e
|
[
"Apache-2.0"
] | null | null | null |
google/estimators/importance_sampling_ci.py
|
SnowflyLXF/FedDICE
|
a63a3233037e37ae27d6c130f37ffc4b92190d5e
|
[
"Apache-2.0"
] | null | null | null |
google/estimators/importance_sampling_ci.py
|
SnowflyLXF/FedDICE
|
a63a3233037e37ae27d6c130f37ffc4b92190d5e
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import stats as stats
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tf_agents.specs import tensor_spec
from tf_agents.policies import tf_policy
from tf_agents.utils import common as tfagents_common
from typing import Any, Callable, Iterable, Optional, Sequence, Text, Tuple, Union
import dice_rl.data.dataset as dataset_lib
import dice_rl.utils.common as common_lib
class ImportanceSamplingCI(object):
"""Approximate average reward of policy using importance sampling."""
def __init__(self,
dataset_spec,
policy_optimizer,
policy_network,
mode,
ci_method,
delta_tail,
gamma: Union[float, tf.Tensor],
reward_fn: Callable = None,
clipping: Optional[float] = 2000.,
policy_regularizer: float = 0.,
q_network=None,
q_optimizer=None,
target_update_tau: Union[float, tf.Tensor] = 0.01,
target_update_period: int = 1,
num_samples: Optional[int] = None):
"""Initializes the importance sampling estimator.
Args:
dataset_spec: The spec of the dataset that will be given.
policy_optimizer: The optimizer to use for learning policy.
policy_network: The policy NN network.
mode: Importance sampling estimator (e.g., "weighted-step-wise").
ci_method: Method for constructing confidence intervals (e.g., "CH" for
Chernoff-Hoeffding).
delta_tail: Total probability quantile threshold (will be halved in code
for 2-tail)
gamma: The discount factor to use.
reward_fn: A function that takes in an EnvStep and returns the reward for
that step. If not specified, defaults to just EnvStep.reward.
clipping: Threshold for clipping IS factor.
policy_regularizer: float on policy regularizer.
q_network: A function that returns the values for each observation and
action. If specified, the Q-values are learned and used for
doubly-robust estimation.
q_optimizer: TF optimizer for q_network.
target_update_tau: Rate at which to set target network parameters.
target_update_period: Rate at which to set target network parameters.
num_samples: Number of samples to take from policy to estimate average
next state value. If actions are discrete, this defaults to computing
average explicitly. If actions are not discrete, this defaults to using
a single sample.
"""
self._dataset_spec = dataset_spec
self._policy_optimizer = policy_optimizer
self._policy_network = policy_network
if self._policy_network is not None:
self._policy_network.create_variables()
self._mode = mode
self._ci_method = ci_method
self._delta_tail = delta_tail
self._gamma = gamma
if reward_fn is None:
reward_fn = lambda env_step: env_step.reward
self._reward_fn = reward_fn
self._clipping = clipping
self._policy_regularizer = policy_regularizer
self._q_network = q_network
if self._q_network is not None:
self._q_network.create_variables()
self._target_network = self._q_network.copy(name='TargetQNetwork')
self._target_network.create_variables()
self._target_update_tau = target_update_tau
self._target_update_period = target_update_period
self._update_targets = self._get_target_updater(
tau=self._target_update_tau, period=self._target_update_period)
self._q_optimizer = q_optimizer
self._initialize()
self._num_samples = num_samples
self._categorical_action = common_lib.is_categorical_spec(self._dataset_spec.action)
if not self._categorical_action and self._num_samples is None:
self._num_samples = 1
def _get_target_updater(self, tau=1.0, period=1):
def update():
return tfagents_common.soft_variables_update(
self._q_network.variables,
self._target_network.variables,
tau,
tau_non_trainable=1.0)
return tfagents_common.Periodically(update, period, 'update_targets')
def _initialize(self):
tfagents_common.soft_variables_update(
self._q_network.variables, self._target_network.variables, tau=1.0)
def _orthogonal_regularization(self, network):
reg = 0
for layer in network.layers:
if isinstance(layer, tf.keras.layers.Dense):
prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)
reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))
return reg
def _get_q_value(self, env_step):
if self._q_network is None:
return tf.zeros_like(env_step.reward)
return self._q_network((env_step.observation, env_step.action))[0]
def _get_v_value(self, env_step, policy):
return self._get_average_value(self._q_network, env_step, policy)
def _get_target_value(self, env_step, policy):
return self._get_average_value(self._target_network, env_step, policy)
def _get_average_value(self, network, env_step, policy):
if self._q_network is None:
return tf.zeros_like(env_step.reward)
tfagents_step = dataset_lib.convert_to_tfagents_timestep(env_step)
if self._categorical_action and self._num_samples is None:
action_weights = policy.distribution(
tfagents_step).action.probs_parameter()
action_dtype = self._dataset_spec.action.dtype
batch_size = tf.shape(action_weights)[0]
num_actions = tf.shape(action_weights)[-1]
actions = ( # Broadcast actions
tf.ones([batch_size, 1], dtype=action_dtype) *
tf.range(num_actions, dtype=action_dtype)[None, :])
else:
batch_size = tf.shape(env_step.observation)[0]
num_actions = self._num_samples
action_weights = tf.ones([batch_size, num_actions]) / num_actions
actions = tf.stack(
[policy.action(tfagents_step).action for _ in range(num_actions)],
axis=1)
flat_actions = tf.reshape(
actions,
tf.concat([[batch_size * num_actions], tf.shape(actions)[2:]], axis=0))
flat_observations = tf.reshape(
tf.tile(env_step.observation[:, None, ...],
[1, num_actions] + [1] * len(env_step.observation.shape[1:])),
tf.concat([[batch_size * num_actions], tf.shape(env_step.observation)[1:]], axis=0))
flat_values, _ = network((flat_observations, flat_actions))
values = tf.reshape(
flat_values,
tf.concat([[batch_size, num_actions], tf.shape(flat_values)[1:]], axis=0))
return tf.reduce_sum(values * action_weights, axis=1)
def _get_log_prob(self, policy_network, env_step):
# TODO(ofirnachum): env_step.action is shaped [B] but network's action_spec
# is BoundedTensorSpec(shape=[1], ...); which leads network to use a
# MVNDiag distribution here with event_shape=[1]. MVNDiag expects inputs of
# shape [B, 1].
return policy_network(env_step.observation)[0].log_prob(
env_step.action[..., tf.newaxis])
def clip_is_factor(self, is_factor):
return tf.minimum(self._clipping, tf.maximum(-self._clipping, is_factor))
def clip_log_factor(self, log_factor):
return tf.minimum(tf.math.log(self._clipping),
tf.maximum(-tf.math.log(self._clipping), log_factor))
def get_is_weighted_reward_samples(self,
dataset: dataset_lib.OffpolicyDataset,
target_policy: tf_policy.TFPolicy,
episode_limit: Optional[int] = None,
eps: Optional[float] = 1e-8):
"""Get the IS weighted reweard samples."""
episodes, valid_steps = dataset.get_all_episodes(limit=episode_limit)
total_num_steps_per_episode = tf.shape(valid_steps)[1] - 1
num_episodes = tf.shape(valid_steps)[0]
num_samples = num_episodes * total_num_steps_per_episode
init_env_step = tf.nest.map_structure(
lambda t: t[:, 0, ...], episodes)
env_step = tf.nest.map_structure(
lambda t: tf.squeeze(
tf.reshape(t[:, 0:total_num_steps_per_episode, ...],
[num_samples, -1])), episodes)
next_env_step = tf.nest.map_structure(
lambda t: tf.squeeze(
tf.reshape(t[:, 1:1 + total_num_steps_per_episode, ...],
[num_samples, -1])), episodes)
tfagents_env_step = dataset_lib.convert_to_tfagents_timestep(env_step)
gamma_weights = tf.reshape(
tf.pow(self._gamma, tf.cast(env_step.step_num, tf.float32)),
[num_episodes, total_num_steps_per_episode])
rewards = (-self._get_q_value(env_step) +
self._reward_fn(env_step) +
self._gamma * next_env_step.discount *
self._get_v_value(next_env_step, target_policy))
rewards = tf.reshape(rewards, [num_episodes, total_num_steps_per_episode])
init_values = self._get_v_value(init_env_step, target_policy)
init_offset = (1 - self._gamma) * init_values
target_log_probabilities = target_policy.distribution(
tfagents_env_step).action.log_prob(env_step.action)
if tf.rank(target_log_probabilities) > 1:
target_log_probabilities = tf.reduce_sum(target_log_probabilities, -1)
if self._policy_network is not None:
baseline_policy_log_probability = self._get_log_prob(
self._policy_network, env_step)
if tf.rank(baseline_policy_log_probability) > 1:
baseline_policy_log_probability = tf.reduce_sum(
baseline_policy_log_probability, -1)
policy_log_ratios = tf.reshape(
tf.maximum(-1.0 / eps, target_log_probabilities -
baseline_policy_log_probability),
[num_episodes, total_num_steps_per_episode])
else:
policy_log_ratios = tf.reshape(
tf.maximum(-1.0 / eps,
target_log_probabilities - env_step.get_log_probability()),
[num_episodes, total_num_steps_per_episode])
valid_steps_in = valid_steps[:, 0:total_num_steps_per_episode]
mask = tf.cast(
tf.logical_and(valid_steps_in, episodes.discount[:, :-1] > 0.),
tf.float32)
masked_rewards = tf.where(mask > 0, rewards, tf.zeros_like(rewards))
clipped_policy_log_ratios = mask * self.clip_log_factor(policy_log_ratios)
if self._mode in ['trajectory-wise', 'weighted-trajectory-wise']:
trajectory_avg_rewards = tf.reduce_sum(
masked_rewards * gamma_weights, axis=1) / tf.reduce_sum(
gamma_weights, axis=1)
trajectory_log_ratios = tf.reduce_sum(clipped_policy_log_ratios, axis=1)
if self._mode == 'trajectory-wise':
trajectory_avg_rewards *= tf.exp(trajectory_log_ratios)
return init_offset + trajectory_avg_rewards
else:
offset = tf.reduce_max(trajectory_log_ratios)
normalized_clipped_ratios = tf.exp(trajectory_log_ratios - offset)
normalized_clipped_ratios /= tf.maximum(
eps, tf.reduce_mean(normalized_clipped_ratios))
trajectory_avg_rewards *= normalized_clipped_ratios
return init_offset + trajectory_avg_rewards
elif self._mode in ['step-wise', 'weighted-step-wise']:
trajectory_log_ratios = mask * tf.cumsum(policy_log_ratios, axis=1)
if self._mode == 'step-wise':
trajectory_avg_rewards = tf.reduce_sum(
masked_rewards * gamma_weights * tf.exp(trajectory_log_ratios),
axis=1) / tf.reduce_sum(
gamma_weights, axis=1)
return init_offset + trajectory_avg_rewards
else:
# Average over data, for each time step.
offset = tf.reduce_max(trajectory_log_ratios, axis=0) # TODO: Handle mask.
normalized_imp_weights = tf.exp(trajectory_log_ratios - offset)
normalized_imp_weights /= tf.maximum(
eps,
tf.reduce_sum(mask * normalized_imp_weights, axis=0) /
tf.maximum(eps, tf.reduce_sum(mask, axis=0)))[None, :]
trajectory_avg_rewards = tf.reduce_sum(
masked_rewards * gamma_weights * normalized_imp_weights,
axis=1) / tf.reduce_sum(
gamma_weights, axis=1)
return init_offset + trajectory_avg_rewards
else:
      raise ValueError('Estimator is not implemented!')
def estimate_average_reward(self,
dataset: dataset_lib.OffpolicyDataset,
target_policy: tf_policy.TFPolicy,
episode_limit: Optional[int] = None):
is_weighted_reward_samples = self.get_is_weighted_reward_samples(
dataset, target_policy, episode_limit)
return tf.reduce_mean(is_weighted_reward_samples)
def estimate_reward_ci(self,
dataset: dataset_lib.OffpolicyDataset,
target_policy: tf_policy.TFPolicy,
episode_limit: Optional[int] = None,
num_grid: Optional[int] = 100,
eps: Optional[float] = 1e-6,
num_bootstraps: Optional[int] = 10000,
num_bootstrap_samples: Optional[int] = 10000):
"""Estimate the confidence interval of reward."""
is_weighted_reward_samples = self.get_is_weighted_reward_samples(
dataset, target_policy, episode_limit)
episodes, valid_steps = dataset.get_all_episodes(limit=episode_limit)
num_episodes = tf.shape(valid_steps)[0]
max_abs_reward = tf.reduce_max(
tf.where(valid_steps, tf.abs(self._reward_fn(episodes)), 0.))
# mean estimate
center = self.estimate_average_reward(dataset, target_policy)
delta_tail_half = self._delta_tail / 2.0
num_episodes_float = tf.cast(num_episodes, tf.float32)
if self._ci_method == 'CH': # Chernoff-Hoeffding
width = max_abs_reward * tf.math.sqrt(
tf.math.log(1.0 / delta_tail_half) / num_episodes_float)
lb = center - width
ub = center + width
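      # Illustrative note (added for clarity, not in the original): the
      # Chernoff-Hoeffding width above scales as R * sqrt(log(1/(delta/2)) / n),
      # where R bounds |reward| over valid steps and n is the number of
      # episodes, so the interval shrinks at a 1/sqrt(n) rate and depends on
      # the data only through the reward bound R and the sample mean.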
elif self._ci_method == 'BE': # Empirical Bernstein
constant_term = 7 * max_abs_reward * tf.math.log(
2.0 / delta_tail_half) / (3 * (num_episodes_float - 1))
variance_term = tf.reduce_sum(
tf.square(is_weighted_reward_samples - center))
variance_term *= tf.math.log(2.0 / delta_tail_half) / (
num_episodes_float - 1)
width = constant_term + tf.math.sqrt(variance_term) / num_episodes_float
lb = center - width
ub = center + width
elif self._ci_method == 'C-BE': # Clipped empirical Bernstein
# need to learn c
def compute_center_width(c_const):
"""Compute the center and width of CI."""
c_vec = c_const * tf.ones_like(is_weighted_reward_samples)
c_is_weighted_reward_samples = tf.minimum(is_weighted_reward_samples,
c_vec) / c_vec
constant_term = 7 * num_episodes_float * tf.math.log(
2.0 / delta_tail_half) / (3 * (num_episodes_float - 1))
center = tf.reduce_sum(c_is_weighted_reward_samples) / tf.reduce_sum(
1.0 / c_vec)
variance_term = tf.reduce_sum(
tf.square(c_is_weighted_reward_samples - center))
variance_term *= tf.math.log(2.0 / delta_tail_half) / (
num_episodes_float - 1)
width = (constant_term + tf.math.sqrt(variance_term)) / tf.reduce_sum(
1.0 / c_vec)
return center, width
def compute_bdd(c_const):
center, width = compute_center_width(c_const)
return center - width, center + width
def compute_obj(c_const, obj='width'):
center, width = compute_center_width(c_const)
if obj == 'lb':
return center - width
elif obj == 'ub': # minimize ub
return -(center + width)
elif obj == 'width':
return width
elif obj == 'lb_ub':
return -2 * width
else:
          raise ValueError('Objective is not implemented')
c_grid = tf.linspace(eps, max_abs_reward, num_grid)
objs = tf.map_fn(compute_obj, c_grid, dtype=tf.float32)
star_index = tf.argmax(objs)
c_star = tf.gather(c_grid, star_index)
lb, ub = compute_bdd(c_star)
elif self._ci_method == 'TT': # Student-t test
# Two-tailed confidence intervals
t_statistic_quantile = stats.t.ppf(1 - delta_tail_half,
num_episodes_float - 1)
std_term = tf.math.sqrt(
tf.reduce_sum(tf.square(is_weighted_reward_samples - center)) /
(num_episodes_float - 1))
width = t_statistic_quantile * std_term / tf.math.sqrt(num_episodes_float)
lb = center - width
ub = center + width
elif self._ci_method == 'BCa': # Bootstrap
# see references
# https://faculty.washington.edu/heagerty/Courses/b572/public/GregImholte-1.pdf
# http://users.stat.umn.edu/~helwig/notes/bootci-Notes.pdf
gaussian_rv = tfp.distributions.Normal(loc=0, scale=1)
def _compute_bootstrap_lb_ub(reward_samples):
"""Compute Efron's bootstrap lb."""
sample_mean = tf.reduce_mean(reward_samples)
# Step 1, sample with replacement and compute subsampled mean
uniform_log_prob = tf.tile(
tf.expand_dims(tf.zeros(num_episodes), 0), [num_bootstraps, 1])
ind = tf.random.categorical(uniform_log_prob, num_bootstrap_samples)
bootstrap_subsamples = tf.gather(reward_samples, ind)
subsample_means = tf.reduce_mean(bootstrap_subsamples, axis=1)
# Step 2, sort subsample means, compute y, z_0, and a
sorted_subsample_means = tf.sort(
subsample_means, axis=0, direction='ASCENDING')
# bias factor
z_0 = gaussian_rv.quantile(
tf.reduce_sum(
tf.cast(
tf.greater(sample_mean, sorted_subsample_means),
tf.float32)) / float(num_bootstraps))
# y is the leave-one-out, jackknife sample mean
mask_matrix = tf.ones([num_episodes, num_episodes
]) - tf.eye(num_episodes)
leave_one_out_subsample_sums = tf.einsum('j,jk->k', reward_samples,
mask_matrix)
ys = leave_one_out_subsample_sums / (num_episodes_float - 1)
# average of jackknife estimate
y_bar = tf.reduce_mean(ys)
# acceleration factor
d_ys = y_bar - ys
a = tf.reduce_sum(tf.pow(d_ys, 3.0)) / tf.maximum(
eps, 6.0 * tf.pow(tf.reduce_sum(tf.pow(d_ys, 2.0)), 1.5))
# Step 3, compute z_scores for lb and ub
z_score_delta_tail = gaussian_rv.quantile(delta_tail_half)
z_score_1_delta_tail = gaussian_rv.quantile(1.0 - delta_tail_half)
z_lb = z_0 + (z_score_delta_tail + z_0) / tf.maximum(
eps, 1 - a * (z_score_delta_tail + z_0))
z_ub = z_0 + (z_score_1_delta_tail + z_0) / tf.maximum(
eps, 1 - a * (z_score_1_delta_tail + z_0))
# Step 4, compute corresponding quantiles and get bootstrap intervals
lb_index = tf.cast(
tf.maximum(
tf.minimum(
tf.floor(num_bootstraps * gaussian_rv.cdf(z_lb)),
num_bootstraps - 1), 1), tf.int64)
ub_index = tf.cast(
tf.maximum(
tf.minimum(
tf.floor(num_bootstraps * gaussian_rv.cdf(z_ub)),
num_bootstraps - 1), 1), tf.int64)
lb = tf.gather(sorted_subsample_means, lb_index)
ub = tf.gather(sorted_subsample_means, ub_index)
return lb, ub
lb, ub = _compute_bootstrap_lb_ub(is_weighted_reward_samples)
else:
      raise ValueError('Confidence interval is not implemented!')
return [lb, ub]
@tf.function
def train_step(self, experience: dataset_lib.EnvStep,
target_policy: tf_policy.TFPolicy):
"""Performs a single training step based on batch and MLE.
Args:
experience: A batch of transitions. Elements must have shape [batch_size,
2, ...].
target_policy: The policy whose value we want to estimate.
Returns:
The losses and the train op.
"""
env_step = tf.nest.map_structure(lambda t: t[:, 0, ...], experience)
next_env_step = tf.nest.map_structure(lambda t: t[:, 1, ...], experience)
if self._policy_network is not None:
assert self._policy_optimizer is not None
with tf.GradientTape(
watch_accessed_variables=False, persistent=True) as tape:
tape.watch(self._policy_network.variables)
policy_loss = self.compute_policy_loss(env_step)
policy_loss += self._policy_regularizer * self._orthogonal_regularization(
self._policy_network)
policy_grads = tape.gradient(policy_loss, self._policy_network.variables)
policy_grad_op = self._policy_optimizer.apply_gradients(
zip(policy_grads, self._policy_network.variables))
else:
policy_loss = 0.0
if self._q_network is not None:
assert self._q_optimizer is not None
with tf.GradientTape(
watch_accessed_variables=False, persistent=True) as tape:
tape.watch(self._q_network.variables)
q_loss = self.compute_q_loss(env_step, next_env_step, target_policy)
q_grads = tape.gradient(q_loss, self._q_network.variables)
q_grad_op = self._q_optimizer.apply_gradients(
zip(q_grads, self._q_network.variables))
update_op = self._update_targets()
else:
q_loss = 0.0
return (tf.reduce_mean(policy_loss), tf.reduce_mean(q_loss))
def compute_policy_loss(self, env_step):
policy_loss = -tf.reduce_mean(
self._get_log_prob(self._policy_network, env_step))
return policy_loss
def compute_q_loss(self, env_step, next_env_step, target_policy):
q_value = self._get_q_value(env_step)
target_value = tf.stop_gradient(
self._get_target_value(next_env_step, target_policy))
reward = self._reward_fn(env_step)
td_error = (-q_value + reward +
self._gamma * next_env_step.discount * target_value)
return tf.math.square(td_error)
| 42.76306
| 92
| 0.66306
|
794af20f72d7184e76e6ca2f4cf13acda37ebcfe
| 1,811
|
py
|
Python
|
fwtest/comms/grbl_comms.py
|
firmware-testing/fwtest
|
4ab5303ebe94efd224ee21370200dd4f3e1fa997
|
[
"MIT"
] | null | null | null |
fwtest/comms/grbl_comms.py
|
firmware-testing/fwtest
|
4ab5303ebe94efd224ee21370200dd4f3e1fa997
|
[
"MIT"
] | null | null | null |
fwtest/comms/grbl_comms.py
|
firmware-testing/fwtest
|
4ab5303ebe94efd224ee21370200dd4f3e1fa997
|
[
"MIT"
] | null | null | null |
import time
import serial
class GrblComms:
def __init__(self, port: str) -> None:
# This ought to either look at the usb tree, or trigger a re-enumeration
# and watch dmesg... for now, require hardcoding the port.
self.port = port
self.serial = serial.Serial(self.port, timeout=0.1, xonxoff=1, baudrate=115200)
    def communicate(self, line: bytes) -> None:
print("dump")
while self.chat(b''):
pass
# Restore "defaults" for the build
print("reset settings")
self.chat(b"\n$RST=$\n")
self.wait_for_idle(alarm_ok=True)
print("ctrl-x")
self.chat(b"\x18") # Ctrl-X to reset
self.wait_for_idle(alarm_ok=True)
print("unlock")
self.chat(b"$X\n")
self.wait_for_idle()
self.chat(b"$$\n")
print("write")
self.chat(line) # can be multiple lines
print("waiting")
self.wait_for_idle()
def configure(
self, x_steps_per_mm=400, x_axis_max_feed=1000, x_axis_accel=45,
max_jerk=0.5
):
return f"$100={x_steps_per_mm}\n$110={x_axis_max_feed}\n$120={x_axis_accel}\n".encode()
def chat(self, data):
print(">>", data)
self.serial.write(data)
self.serial.flush()
tmp = self.serial.read(1024)
print("<<", tmp)
return tmp
def wait_for_idle(self, alarm_ok=False):
# TODO: check timeout instead
for i in range(100):
data = self.chat(b"?\n")
if data.startswith(b"ok\r\n"):
data = data[len(b"ok\r\n"):]
if data.startswith(b"<Idle"):
break
elif alarm_ok and data.startswith(b"<Alarm"):
break
else:
raise Exception("Timeout")
| 28.746032
| 95
| 0.556599
|
794af36ab2e0b0dc3e9178242c3cf74bf0d467da
| 8,417
|
py
|
Python
|
greenglacier.py
|
TobyAbel/greenglacier
|
602fdd59ddb7d6bbc34d1ca38504c7d3d7bfe404
|
[
"Apache-2.0"
] | null | null | null |
greenglacier.py
|
TobyAbel/greenglacier
|
602fdd59ddb7d6bbc34d1ca38504c7d3d7bfe404
|
[
"Apache-2.0"
] | null | null | null |
greenglacier.py
|
TobyAbel/greenglacier
|
602fdd59ddb7d6bbc34d1ca38504c7d3d7bfe404
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python2.7
from __future__ import print_function
import os
import hashlib
import math
import binascii
import gevent
import gevent.pool
import gevent.queue
import gevent.monkey
import pprint
gevent.monkey.patch_socket()
gevent.monkey.patch_ssl()
gevent.monkey.patch_os()
from retrying import retry
# the following helper functions are (temporarily) shamelessly stolen from boto.glacier.utils
_MEGABYTE = 1024 * 1024
DEFAULT_PART_SIZE = 4 * _MEGABYTE
MAXIMUM_NUMBER_OF_PARTS = 10000
# This is in USD and is correct for eu-west-1 at the time of writing
# CHECK THIS FOR YOURSELF!
PRICE_PER_THOUSAND_REQUESTS = 0.055
STORAGE_PRICE_PER_GB_MONTH = 0.004
RETRIEVAL_PRICE_PER_THOUSAND_REQUESTS = 0.055
RETRIEVAL_PRICE_PER_GB = 0.01
def tree_hash(fo):
"""
Given a hash of each 1MB chunk (from chunk_hashes) this will hash
together adjacent hashes until it ends up with one big one. So a
tree of hashes.
"""
hashes = []
hashes.extend(fo)
while len(hashes) > 1:
new_hashes = []
while True:
if len(hashes) > 1:
first = hashes.pop(0)
second = hashes.pop(0)
new_hashes.append(hashlib.sha256(first + second).digest())
elif len(hashes) == 1:
only = hashes.pop(0)
new_hashes.append(only)
else:
break
hashes.extend(new_hashes)
return hashes[0]
def chunk_hashes(bytestring, chunk_size=_MEGABYTE):
chunk_count = int(math.ceil(len(bytestring) / float(chunk_size)))
hashes = []
for i in range(chunk_count):
start = i * chunk_size
end = (i + 1) * chunk_size
hashes.append(hashlib.sha256(bytestring[start:end]).digest())
if not hashes:
return [hashlib.sha256(b'').digest()]
return hashes
def bytes_to_hex(str_as_bytes):
return binascii.hexlify(str_as_bytes)
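# Illustrative sketch (added for clarity, not part of the original module):
# computing the Glacier tree hash of an in-memory payload with the helpers
# above.
#   data = b'x' * (2 * _MEGABYTE + 1)   # three chunks: 1 MB, 1 MB, 1 byte
#   digest = bytes_to_hex(tree_hash(chunk_hashes(data)))
# With three leaves, the first two digests are folded together and the odd one
# is carried up unchanged, then the two remaining digests are folded into the
# root -- the same pairing rule AWS Glacier specifies for its tree hash.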
def minimum_part_size(size_in_bytes, default_part_size=DEFAULT_PART_SIZE):
"""Calculate the minimum part size needed for a multipart upload.
Glacier allows a maximum of 10,000 parts per upload. It also
states that the maximum archive size is 10,000 * 4 GB, which means
    the part size can range from 1MB to 4GB (provided it is 1MB
    multiplied by a power of 2).
This function will compute what the minimum part size must be in
order to upload a file of size ``size_in_bytes``.
It will first check if ``default_part_size`` is sufficient for
a part size given the ``size_in_bytes``. If this is not the case,
    then the smallest part size that can accommodate a file of size
``size_in_bytes`` will be returned.
If the file size is greater than the maximum allowed archive
size of 10,000 * 4GB, a ``ValueError`` will be raised.
"""
# The default part size (4 MB) will be too small for a very large
# archive, as there is a limit of 10,000 parts in a multipart upload.
# This puts the maximum allowed archive size with the default part size
# at 40,000 MB. We need to do a sanity check on the part size, and find
# one that works if the default is too small.
part_size = _MEGABYTE
if (default_part_size * MAXIMUM_NUMBER_OF_PARTS) < size_in_bytes:
if size_in_bytes > (4096 * _MEGABYTE * 10000):
raise ValueError("File size too large: %s" % size_in_bytes)
min_part_size = size_in_bytes / 10000
power = 3
while part_size < min_part_size:
part_size = math.ldexp(_MEGABYTE, power)
power += 1
part_size = int(part_size)
else:
part_size = default_part_size
return part_size
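# Worked example (illustrative, not part of the original module): for a 100 GB
# archive, 4 MB * 10000 parts (~41.9 GB) is too small, the minimum viable part
# is 100e9 / 10000 = 10 MB, and the loop above lands on 16 MB -- the first
# 1 MB * 2**k value tried (starting at 2**3) that accommodates the file:
#   minimum_part_size(100 * 1000 ** 3) == 16777216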
# TODO: progress callbacks using basesubscriber
class MultipartUploadPart(object):
"""
Represent a part - have a part number, the upload, etc.
self.upload - does what you'd expect
this should be the first phase in subclassing below to handle S3
"""
pass
class MultipartPartUploader(gevent.Greenlet):
def __init__(self, upload, work, callback=None, retries=8):
gevent.Greenlet.__init__(self)
self.upload = upload
self.work = work
self.retries = retries
if callback:
self.link(callback)
def _run(self):
filename, offset, size = self.work
print('Loading chunk %s' % offset)
chunk = self.readfile(filename, offset, size)
return self.upload_part(chunk, offset, size)
def readfile(self, filename, offset, size):
filesize = os.stat(filename).st_size
print('Reading bytes %s to %s (or less, if we run out of file to read) of %s' % (offset * size, offset * size + size, filesize))
with open(filename, 'rb') as fileobj:
fileobj.seek(offset * size)
return fileobj.read(size)
def upload_part(self, chunk, offset, size):
@retry(stop_max_attempt_number=self.retries)
def retry_upload(range, checksum, body):
print('Uploading chunk %s - hashstring %s - range %s' % (offset, checksum, range))
self.upload.upload_part(range=range, checksum=str(checksum), body=body)
hashbytes = tree_hash(chunk_hashes(chunk))
hashstring = bytes_to_hex(hashbytes)
first_byte = offset * size
last_byte = first_byte + len(chunk) - 1
rangestr = 'bytes %d-%d/*' % (first_byte, last_byte)
retry_upload(rangestr, hashstring, chunk)
return offset, hashbytes
class GreenGlacierUploader(object):
class UploadFailedException(Exception):
pass
def __init__(self, vault, concurrent_uploads=10, part_size=4194304):
self.vault = vault
self.part_size = part_size # will be overridden on upload
self.concurrent_uploads = concurrent_uploads
def prepare(self, filename, description=None):
"""
Allows you to check the vital stats (including cost) of an upload
before you commit to it.
"""
self.filename = filename
self.description = description or filename
self.filesize = os.stat(self.filename).st_size
self.minimum = minimum_part_size(self.filesize)
self.part_size = max(self.part_size, self.minimum) if self.part_size else self.minimum
self.total_parts = int((self.filesize / self.part_size) + 1)
print('Preparing to upload %s with %s %s-sized parts' % (filename, self.total_parts, self.part_size))
print('This is expected to cost $%s in request fees, transfer is free' % (PRICE_PER_THOUSAND_REQUESTS * self.total_parts / 1000))
print('Storing this archive will cost $%s per month' % (STORAGE_PRICE_PER_GB_MONTH * self.filesize / 1000000000))
print('Retrieving this archive will cost $%s in request fees, and $%s in transfer fees' % (RETRIEVAL_PRICE_PER_THOUSAND_REQUESTS / 1000, RETRIEVAL_PRICE_PER_GB * self.filesize / 1000000000))
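        # Worked example (illustrative): a 10 GB archive with 4 MB parts needs
        # roughly 2,400 upload requests, i.e. about $0.13 in request fees at
        # $0.055 per thousand, plus about $0.04/month of storage at $0.004/GB.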
def upload(self, filename=None, description=None):
if filename and filename != self.filename:
self.prepare(filename, description)
else:
self.description = description or self.description
work_queue = gevent.queue.Queue()
print('Uploading %s with %s %s-sized parts...' % (self.filename, self.total_parts, self.part_size))
self.res = [None] * self.total_parts
multipart_upload = self.vault.initiate_multipart_upload(archiveDescription=self.description,
partSize=str(self.part_size))
for part in range(self.total_parts):
work_queue.put((self.filename, part, self.part_size))
active = gevent.pool.Pool(self.concurrent_uploads, MultipartPartUploader)
while not work_queue.empty(): # TODO: replace with list e.g. if work: spawn(m, work.pop())
work = work_queue.get()
active.spawn(multipart_upload, work, self.callback)
active.join() # wait for final chunks to upload..
print('Completing uploading with total size %s' % (self.filesize))
final_checksum = bytes_to_hex(tree_hash(self.res))
multipart_upload.complete(archiveSize=str(self.filesize), checksum=final_checksum)
def callback(self, g):
print('greenlet finished, saving value')
try:
part_num, chunk_hash = g.get()
self.res[part_num] = chunk_hash
except:
g.upload.abort()
raise
| 37.914414
| 198
| 0.663538
|