hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
546ee763a66e875517fa6ca313b6ecb60b91f69b | 1,617 | py | Python | assets/python_scripts/xml2txt.py | weiSupreme/weiSupreme.github.io | 59d419f2d8207c7da0a427330b21546f4f0d85d0 | [
"CC-BY-4.0"
] | 3 | 2018-09-15T05:47:35.000Z | 2019-04-08T07:00:02.000Z | assets/python_scripts/xml2txt.py | weiSupreme/weiSupreme.github.io | 59d419f2d8207c7da0a427330b21546f4f0d85d0 | [
"CC-BY-4.0"
] | null | null | null | assets/python_scripts/xml2txt.py | weiSupreme/weiSupreme.github.io | 59d419f2d8207c7da0a427330b21546f4f0d85d0 | [
"CC-BY-4.0"
] | null | null | null | #!/usr/bin/evn python
# coding:utf-8
"""Convert Pascal-VOC style XML annotation files into one text file.

For every <object> in every .xml file under ``xml_dir``, one line is written
to mytxt.txt in the form:

    <filename>.png basketball <xmin> <ymin> <xmax> <ymax>
"""
import os
try:
    import xml.etree.cElementTree as ET
except ImportError:
    import xml.etree.ElementTree as ET
import sys
xml_dir = r"D:\Python\PythonProjects\333_xml"
xml_paths = [os.path.join(xml_dir, f) for f in os.listdir(xml_dir) if
             os.path.isfile(os.path.join(xml_dir, f)) and f.endswith(".xml")]
# Open the output once in "w" mode: this both clears any stale contents and
# avoids re-opening the file in append mode for every single <object>.
with open(os.path.join(xml_dir, "mytxt.txt"), "w") as out:
    for xml_path in xml_paths:
        tree = ET.parse(xml_path)  # parse the XML document
        root = tree.getroot()      # get the root node
        # The original Python 2 `print` statements had been mangled into a
        # bare `print` followed by a dead expression line; restored here as
        # proper Python 3 function calls.
        print("*" * 10)
        filename = root.find('filename').text
        #filename = filename[:-4]
        print(filename)
        for size in root.findall('size'):  # <size> node under root
            width = size.find('width').text
            height = size.find('height').text
            print(width, height)
        for obj in root.findall('object'):  # renamed: `object` shadowed the builtin
            name = obj.find('name').text
            print(name)
            bndbox = obj.find('bndbox')
            xmin = bndbox.find('xmin').text
            ymin = bndbox.find('ymin').text
            xmax = bndbox.find('xmax').text
            ymax = bndbox.find('ymax').text
            print(xmin, ymin, xmax, ymax)
            # NOTE(review): label 'basketball' is hard-coded, as in the original.
            out.write(filename + '.png ' + 'basketball' + ' ' + str(xmin) + ' ' +
                      str(ymin) + ' ' + str(xmax) + ' ' + str(ymax) + '\n')
| 29.4 | 102 | 0.577613 |
310be3133b5a907e7e7cbd37d3c67bd8cd6b05b5 | 1,153 | py | Python | tests/unit/test_utils.py | Informasjonsforvaltning/organization-page-bffe | 473dc9606649f864618f4f8bfc4a6a2a035f06d7 | [
"Apache-2.0"
] | null | null | null | tests/unit/test_utils.py | Informasjonsforvaltning/organization-page-bffe | 473dc9606649f864618f4f8bfc4a6a2a035f06d7 | [
"Apache-2.0"
] | 47 | 2020-05-14T07:54:48.000Z | 2022-03-29T22:17:08.000Z | tests/unit/test_utils.py | Informasjonsforvaltning/organization-page-bffe | 473dc9606649f864618f4f8bfc4a6a2a035f06d7 | [
"Apache-2.0"
] | null | null | null | """Unit test cases for utils."""
import pytest
from fdk_organization_bff.utils.utils import resource_is_new, url_with_params
@pytest.mark.unit
def test_url_with_params() -> None:
    """Query parameters are appended to the URL correctly."""
    base = "http://localhost:8000/endpoint"
    cases = [
        (None, base),
        ({}, base),
        ({"key0": "value0"}, base + "?key0=value0"),
        ({"key0": "value0", "key1": "value1"}, base + "?key0=value0&key1=value1"),
    ]
    for params, expected in cases:
        assert url_with_params(base, params) == expected
@pytest.mark.unit
def test_resource_is_new_handles_bad_date_format() -> None:
    """resource_is_new returns False for missing or malformed issued dates."""
    bad_resources = (
        {},
        {"issued": {}},
        {"issued": {"value": "2020/10/10"}},
    )
    for bad_resource in bad_resources:
        assert resource_is_new(bad_resource) is False
| 34.939394 | 81 | 0.690373 |
a39ec4558c0977b5ee346411c8ee3f8e0b782534 | 1,221 | py | Python | mycli/clistyle.py | steverobbins/mycli | ad19b143da483f97a657a69eeff88f9a74966520 | [
"BSD-3-Clause"
] | 2 | 2015-08-01T03:39:58.000Z | 2015-11-08T07:54:59.000Z | mycli/clistyle.py | steverobbins/mycli | ad19b143da483f97a657a69eeff88f9a74966520 | [
"BSD-3-Clause"
] | null | null | null | mycli/clistyle.py | steverobbins/mycli | ad19b143da483f97a657a69eeff88f9a74966520 | [
"BSD-3-Clause"
] | null | null | null | from pygments.token import Token
from pygments.style import Style
from pygments.util import ClassNotFound
from prompt_toolkit.styles import default_style_extensions
import pygments.styles
def style_factory(name):
    """Build a prompt_toolkit Style class from a Pygments style name.

    Falls back to the 'native' Pygments style when `name` is unknown, then
    layers the prompt_toolkit default extensions and mycli-specific token
    styles on top of the Pygments styles.
    """
    try:
        style = pygments.styles.get_style_by_name(name)
    except ClassNotFound:
        style = pygments.styles.get_style_by_name('native')

    class CLIStyle(Style):
        styles = {}
        styles.update(style.styles)
        styles.update(default_style_extensions)
        styles.update({
            Token.Menu.Completions.Completion.Current: 'bg:#00aaaa #000000',
            Token.Menu.Completions.Completion: 'bg:#008888 #ffffff',
            Token.Menu.Completions.ProgressButton: 'bg:#003333',
            Token.Menu.Completions.ProgressBar: 'bg:#00aaaa',
            Token.SelectedText: '#ffffff bg:#6666aa',
            Token.IncrementalSearchMatch: '#ffffff bg:#4444aa',
            Token.IncrementalSearchMatch.Current: '#ffffff bg:#44aa44',
            # Bug fix: the dict literal previously listed Token.Toolbar twice
            # ('bg:#440044 #ffffff' then 'bg:#222222 #aaaaaa'). With duplicate
            # keys the last value wins, so the first entry was dead code; it
            # has been removed and the effective value kept.
            Token.Toolbar: 'bg:#222222 #aaaaaa',
            Token.Toolbar.Off: 'bg:#222222 #888888',
            Token.Toolbar.On: 'bg:#222222 #ffffff',
        })

    return CLIStyle
| 35.911765 | 76 | 0.657658 |
94a18da4560be081da6b3059d998fab14276d9a8 | 1,052 | py | Python | api/test/api/responders_test/test_data/constants.py | korenlev/calipso-cvim | 39278a5cf09c40b26a8a143ccc0c8d437961abc2 | [
"Apache-2.0"
] | null | null | null | api/test/api/responders_test/test_data/constants.py | korenlev/calipso-cvim | 39278a5cf09c40b26a8a143ccc0c8d437961abc2 | [
"Apache-2.0"
] | null | null | null | api/test/api/responders_test/test_data/constants.py | korenlev/calipso-cvim | 39278a5cf09c40b26a8a143ccc0c8d437961abc2 | [
"Apache-2.0"
] | null | null | null | ###############################################################################
# Copyright (c) 2017-2020 Koren Lev (Cisco Systems), #
# Yaron Yogev (Cisco Systems), Ilia Abashin (Cisco Systems) and others #
# #
# All rights reserved. This program and the accompanying materials #
# are made available under the terms of the Apache License, Version 2.0 #
# which accompanies this distribution, and is available at #
# http://www.apache.org/licenses/LICENSE-2.0 #
###############################################################################
# API route under test.
URL = "/constants"
# A constant name that is not expected to match the fixture below.
UNKNOWN_NAME = "unknown constant"
# A constant name that matches the fixture document below.
NAME = "distributions"
# Fixture: the document expected back when querying constants by NAME.
CONSTANTS_WITH_SPECIFIC_NAME = [{
    "id": "YmPDAQAchr39KjECQ",
    "name": NAME,
    "data": [{
        "value": "Canonical-icehouse",
        "label": "Canonical-icehouse"
    }, {
        "value": "Canonical-juno",
        "label": "Canonical-juno"
    }],
}]
| 43.833333 | 79 | 0.455323 |
14180ef85d9e9750aa4cb6f06a8af20b49de6651 | 1,754 | py | Python | eggs/bx_python-0.7.2-py2.6-linux-x86_64-ucs4.egg/EGG-INFO/scripts/maf_to_concat_fasta.py | psnehal/MethylSig | 5efad71e71ff2515feff2e49579c856ef9a1bbd8 | [
"CC-BY-3.0"
] | null | null | null | eggs/bx_python-0.7.2-py2.6-linux-x86_64-ucs4.egg/EGG-INFO/scripts/maf_to_concat_fasta.py | psnehal/MethylSig | 5efad71e71ff2515feff2e49579c856ef9a1bbd8 | [
"CC-BY-3.0"
] | null | null | null | eggs/bx_python-0.7.2-py2.6-linux-x86_64-ucs4.egg/EGG-INFO/scripts/maf_to_concat_fasta.py | psnehal/MethylSig | 5efad71e71ff2515feff2e49579c856ef9a1bbd8 | [
"CC-BY-3.0"
] | null | null | null | #!/afs/bx.psu.edu/project/pythons/py2.6-linux-x86_64-ucs4/bin/python2.6
"""
Read a maf and print the text as a fasta file, concatenating blocks. A
specific subset of species can be chosen.
usage %prog [options] species1,species2,... < maf_file > fasta_file
--fill="expression": Insert this between blocks
--wrap=columns: Wrap FASTA to this many columns
"""
from optparse import OptionParser
import textwrap
import sys
from bx.align import maf
def __main__():
    """Read a MAF alignment from stdin and print the concatenated block text
    of the requested species to stdout as FASTA.

    NOTE(review): this is Python 2 code (print statements); it will not run
    under Python 3 without porting.
    """
    # Parse command line arguments
    parser = OptionParser()
    parser.add_option( "--fill", action="store", default=None, type="string", help="" )
    parser.add_option( "--wrap", action="store", default=None, type="int", help="" )
    parser.add_option( "--nowrap", action="store_true", default=False, dest="nowrap", help="" )
    ( options, args ) = parser.parse_args()
    # Positional arguments are comma-separated species lists.
    species = []
    for arg in args: species.extend(arg.split(','))
    # HACK: --fill is eval'ed (per the module docstring it is an expression),
    # so this option must never receive untrusted input.
    fill = ""
    if options.fill: fill = eval( options.fill )
    # Default wrap is 50 columns; --wrap overrides it, --nowrap disables it.
    wrap = 50
    if (options.wrap != None): wrap = options.wrap
    elif (options.nowrap): wrap = 0
    # create the concatenated sequences
    texts = {}
    for s in species: texts[s] = []
    maf_reader = maf.Reader( sys.stdin )
    for m in maf_reader:
        for s in species:
            c = m.get_component_by_src_start( s )
            # Pad with gap characters when a block lacks this species so all
            # concatenated sequences stay the same length.
            if c: texts[s].append( c.text )
            else: texts[s].append( "-" * m.text_size )
    for s in species:
        print ">" + s
        print_n( fill.join( texts[s] ), wrap )
def print_n( s, n, f = sys.stdout ):
    """Write string `s` to file `f`, wrapped to at most `n` characters per line.

    A non-positive `n` writes `s` on a single line. Uses `f.write` instead of
    the Python 2 only `print >> f` statement, so this helper now works on both
    Python 2 and Python 3 (slicing past the end of a string is safe, so no
    explicit `min(p + n, len(s))` clamp is needed).
    """
    if n <= 0:
        f.write( s + "\n" )
    else:
        p = 0
        while p < len( s ):
            f.write( s[p:p + n] + "\n" )
            p += n
# Script entry point.
if __name__ == "__main__": __main__()
| 28.290323 | 95 | 0.600912 |
a74c597dc4960b95584b13a311f8948e26638c61 | 3,549 | py | Python | sdk/servicebus/azure-servicebus/samples/async_samples/mgmt_subscription_async.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 2,728 | 2015-01-09T10:19:32.000Z | 2022-03-31T14:50:33.000Z | sdk/servicebus/azure-servicebus/samples/async_samples/mgmt_subscription_async.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 17,773 | 2015-01-05T15:57:17.000Z | 2022-03-31T23:50:25.000Z | sdk/servicebus/azure-servicebus/samples/async_samples/mgmt_subscription_async.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 1,916 | 2015-01-19T05:05:41.000Z | 2022-03-31T19:36:44.000Z | #!/usr/bin/env python
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
"""
Example to show managing subscription entities under a ServiceBus namespace, including
- Create a subscription
- Get subscription properties and runtime information
- Update a subscription
- Delete a subscription
- List subscriptions under the given ServiceBus Namespace
"""
# pylint: disable=C0111
import os
import asyncio
import uuid
from azure.servicebus.aio.management import ServiceBusAdministrationClient
CONNECTION_STR = os.environ['SERVICE_BUS_CONNECTION_STR']
TOPIC_NAME = os.environ['SERVICE_BUS_TOPIC_NAME']
SUBSCRIPTION_NAME = "sb_mgmt_sub" + str(uuid.uuid4())
async def create_subscription(servicebus_mgmt_client):
    """Create the sample subscription SUBSCRIPTION_NAME under TOPIC_NAME."""
    print("-- Create Subscription")
    await servicebus_mgmt_client.create_subscription(TOPIC_NAME, SUBSCRIPTION_NAME)
    print("Subscription {} is created.".format(SUBSCRIPTION_NAME))
    print("")
async def delete_subscription(servicebus_mgmt_client):
    """Delete the sample subscription SUBSCRIPTION_NAME from TOPIC_NAME."""
    print("-- Delete Subscription")
    await servicebus_mgmt_client.delete_subscription(TOPIC_NAME, SUBSCRIPTION_NAME)
    print("Subscription {} is deleted.".format(SUBSCRIPTION_NAME))
    print("")
async def list_subscriptions(servicebus_mgmt_client):
    """Print the name of every subscription under TOPIC_NAME."""
    print("-- List Subscriptions")
    async for props in servicebus_mgmt_client.list_subscriptions(TOPIC_NAME):
        print("Subscription Name:", props.name)
    print("")
async def get_and_update_subscription(servicebus_mgmt_client):
    """Fetch the subscription's properties and update them two different ways."""
    print("-- Get and Update Subscription")
    props = await servicebus_mgmt_client.get_subscription(TOPIC_NAME, SUBSCRIPTION_NAME)
    print("Subscription Name:", props.name)
    print("Please refer to SubscriptionDescription for complete available settings.")
    print("")

    # First approach: mutate the fetched properties model, then persist it.
    props.max_delivery_count = 5
    await servicebus_mgmt_client.update_subscription(TOPIC_NAME, props)

    # Second approach: pass the override as a keyword argument.
    props = await servicebus_mgmt_client.get_subscription(TOPIC_NAME, SUBSCRIPTION_NAME)
    await servicebus_mgmt_client.update_subscription(TOPIC_NAME, props, max_delivery_count=3)
async def get_subscription_runtime_properties(servicebus_mgmt_client):
    """Fetch and display the subscription's runtime properties."""
    print("-- Get Subscription Runtime Properties")
    # Renamed local: the original shadowed this function's own name.
    runtime_props = await servicebus_mgmt_client.get_subscription_runtime_properties(TOPIC_NAME, SUBSCRIPTION_NAME)
    print("Subscription Name:", runtime_props.name)
    print("Please refer to SubscriptionRuntimeProperties from complete available runtime properties.")
    print("")
async def main():
    """Run every subscription management operation in order."""
    async with ServiceBusAdministrationClient.from_connection_string(CONNECTION_STR) as mgmt_client:
        await create_subscription(mgmt_client)
        await list_subscriptions(mgmt_client)
        await get_and_update_subscription(mgmt_client)
        await get_subscription_runtime_properties(mgmt_client)
        await delete_subscription(mgmt_client)
# Run the async sample to completion.
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
| 43.280488 | 137 | 0.75796 |
15e89279227612cb8f967e13981f4d21eb54e8d4 | 2,423 | py | Python | oscar/lib/python2.7/site-packages/prompt_toolkit/styles/from_pygments.py | sainjusajan/django-oscar | 466e8edc807be689b0a28c9e525c8323cc48b8e1 | [
"BSD-3-Clause"
] | null | null | null | oscar/lib/python2.7/site-packages/prompt_toolkit/styles/from_pygments.py | sainjusajan/django-oscar | 466e8edc807be689b0a28c9e525c8323cc48b8e1 | [
"BSD-3-Clause"
] | null | null | null | oscar/lib/python2.7/site-packages/prompt_toolkit/styles/from_pygments.py | sainjusajan/django-oscar | 466e8edc807be689b0a28c9e525c8323cc48b8e1 | [
"BSD-3-Clause"
] | null | null | null | """
Adaptor for building prompt_toolkit styles, starting from a Pygments style.
Usage::
from pygments.styles.tango import TangoStyle
style = style_from_pygments(pygments_style_cls=TangoStyle)
"""
from __future__ import unicode_literals
from .base import Style
from .from_dict import style_from_dict
__all__ = (
'PygmentsStyle',
'style_from_pygments',
)
# Following imports are only needed when a ``PygmentsStyle`` class is used.
try:
from pygments.style import Style as pygments_Style
from pygments.styles.default import DefaultStyle as pygments_DefaultStyle
except ImportError:
pygments_Style = None
pygments_DefaultStyle = None
def style_from_pygments(style_cls=pygments_DefaultStyle,
                        style_dict=None,
                        include_defaults=True):
    """
    Build a :class:`.Style` from a Pygments style class plus an optional
    override dictionary.

    :param style_cls: Pygments style class to start from (may be `None`).
    :param style_dict: Extra `{Token: style}` entries layered on top.
    :param include_defaults: (`bool`) Include the prompt_toolkit extensions.
    """
    assert style_dict is None or isinstance(style_dict, dict)
    assert style_cls is None or issubclass(style_cls, pygments_Style)

    # Later updates win, so explicit style_dict entries override the
    # Pygments class styles.
    merged = {}
    if style_cls is not None:
        merged.update(style_cls.styles)
    if style_dict is not None:
        merged.update(style_dict)

    return style_from_dict(merged, include_defaults=include_defaults)
class PygmentsStyle(Style):
    " Deprecated. Kept only for backwards compatibility. "
    def __new__(cls, pygments_style_cls):
        # Compatibility shim: constructing PygmentsStyle returns the result of
        # style_from_dict instead of a PygmentsStyle instance.
        assert issubclass(pygments_style_cls, pygments_Style)
        return style_from_dict(pygments_style_cls.styles)

    def invalidation_hash(self):
        # Present to satisfy the Style interface; appears unreachable since
        # __new__ above never returns a PygmentsStyle instance.
        pass

    @classmethod
    def from_defaults(cls, style_dict=None,
                      pygments_style_cls=pygments_DefaultStyle,
                      include_extensions=True):
        " Deprecated. Maps the legacy parameter names onto style_from_pygments. "
        return style_from_pygments(
            style_cls=pygments_style_cls,
            style_dict=style_dict,
            include_defaults=include_extensions)
| 31.064103 | 78 | 0.689641 |
0152fec4750f3db6055010ef2270dd2c335f8656 | 1,354 | py | Python | backend/corpora/common/utils/cloudfront.py | chanzuckerberg/dcp-prototype | 24d2323ba5ae1482395da35ea11c42708e3a52ce | [
"MIT"
] | 2 | 2020-02-07T18:12:12.000Z | 2020-02-11T14:59:03.000Z | backend/corpora/common/utils/cloudfront.py | HumanCellAtlas/dcp-prototype | 44ca66a266004124f39d7d3e3dd75e9076012ff0 | [
"MIT"
] | 173 | 2020-01-29T17:48:02.000Z | 2020-03-20T02:52:58.000Z | backend/corpora/common/utils/cloudfront.py | HumanCellAtlas/dcp-prototype | 44ca66a266004124f39d7d3e3dd75e9076012ff0 | [
"MIT"
] | 1 | 2020-03-20T17:06:54.000Z | 2020-03-20T17:06:54.000Z | import boto3
import uuid
from typing import List
from backend.corpora.common.corpora_config import CorporaCloudfrontConfig
import logging
client = boto3.client("cloudfront")
# Since Cloudfront is only used in deployed environments (dev, staging, prod),
# only trigger an invalidation if the distribution_id is defined in secrets manager.
# Otherwise this will be a no-op
def create_invalidation(paths: List[str]):
    """Invalidate `paths` on the configured Cloudfront distribution.

    Cloudfront is only used in deployed environments, so when no
    distribution id is present in secrets manager this is a no-op.
    """
    try:
        distribution_id = CorporaCloudfrontConfig().distribution_id
    except RuntimeError:
        # Raised when the attribute is not found (i.e. in rdev).
        logging.debug("No Cloudfront distribution found in secrets, will not invalidate")
        return None
    return _create_invalidation(distribution_id, paths)
def _create_invalidation(distribution: str, paths: List[str]):
    """Submit a Cloudfront invalidation for `paths` on `distribution`."""
    # A fresh UUID doubles as the idempotency token (CallerReference).
    invalidation_id = str(uuid.uuid4())
    logging.info(f"Requesting invalidation {invalidation_id} for distribution {distribution}")
    batch = {
        "Paths": {
            "Quantity": len(paths),
            "Items": paths,
        },
        "CallerReference": invalidation_id,
    }
    return client.create_invalidation(
        DistributionId=distribution,
        InvalidationBatch=batch,
    )
def create_invalidation_for_index_paths():
    """Invalidate the cached dataset and collection index endpoints."""
    index_paths = ["/dp/v1/datasets/index", "/dp/v1/collections/index"]
    return create_invalidation(index_paths)
| 33.02439 | 94 | 0.70901 |
3890e1baae75008f888c9888e11fda7dee555729 | 4,897 | py | Python | axiol/functions.py | ramnreddy15/Axiol | 39c6828c74709ac4dc27af666945c4a48d37258d | [
"MIT"
] | null | null | null | axiol/functions.py | ramnreddy15/Axiol | 39c6828c74709ac4dc27af666945c4a48d37258d | [
"MIT"
] | null | null | null | axiol/functions.py | ramnreddy15/Axiol | 39c6828c74709ac4dc27af666945c4a48d37258d | [
"MIT"
] | null | null | null | import asyncio
import random
from variables import DEFAULT_PREFIX
from database import PREFIXES, LEVELDATABASE, PLUGINS, PERMISSIONS
async def get_prefix(ctx):
    """Return the guild's configured command prefix, or the default one."""
    doc = await PREFIXES.find_one({"_id": ctx.guild.id})
    if doc is None:
        return DEFAULT_PREFIX
    return doc["prefix"]
async def get_xprange(guild_id):
    """Fetch the configured 'xprange' value from the guild's leveling settings."""
    guild_levels = LEVELDATABASE.get_collection(str(guild_id))
    config = await guild_levels.find_one({"_id": 0})
    return config["xprange"]
async def get_randomtext(typing_time):
    """Return a space-joined string of random words for a typing challenge.

    The number of words depends on `typing_time` (seconds): 60 -> 60 words,
    30 -> 40, 15 -> 25, 10 -> 15, anything else -> 1.
    """
    # Bug fix: the file handle was previously never closed; a `with` block
    # guarantees it is released.
    with open("resources/words.txt") as f:
        words = f.read().split("\n")
    # Mapping replaces the old if/elif chain; .get() keeps the 1-word default.
    word_counts = {60: 60, 30: 40, 15: 25, 10: 15}
    count = word_counts.get(typing_time, 1)
    return " ".join([random.choice(words) for _ in range(count)])
def get_code(amount):
    """Return a random code of `amount` uppercase letters and digits."""
    alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + "1234567890"
    picks = random.choices(alphabet, k=amount)
    return ''.join(picks)
"""
Some functions to counter errors and warnings while working locally :p
To get everything work properly database needs to be updates even if it's working locally
on a single guild, this is because lots of places have major database dependencies.
First function simply updates all plugin and permissions documents with a new plugin, only used when some new plugin is added,
not required to use this function to fix any errors or warnings.
Second function does the main job, it checks for all plugin, permission, leveling (if enabled) and prefix documents,
then updates/adds them if they aren't there.
I would have loved to say that I did this intentionally to avoid people from stealing code but it was just me writing bad code
which ended up benefiting ¯\_(ツ)_/¯
"""
#Adding new plugin and permissions
async def update_plugins_and_permissions(plugin):
    """Add a newly introduced `plugin` key to every guild document lacking it.

    The plugin state defaults to enabled (True) and its permissions default
    to an empty dict.
    """
    # Only touch documents that do not have the key yet.
    await PLUGINS.update_many(
        {plugin: {"$exists": False}},
        {"$set": {plugin: True}},
    )
    await PERMISSIONS.update_many(
        {plugin: {"$exists": False}},
        {"$set": {plugin: {}}},
    )
#updating leveling, plugin, prefix and permission data
async def update_db(guild_ids):
    """Backfill plugin, permission, leveling and prefix documents for guilds.

    Intended for local development: for each guild id this inserts any missing
    PLUGINS / PERMISSIONS documents, creates the per-guild leveling collection
    when the Leveling plugin is enabled, and inserts a default prefix.
    """
    plugins_update = []
    permissions_update = []
    leveling_update = []
    for guild_id in guild_ids:
        Guild_Plugins = await PLUGINS.find_one({"_id": guild_id})
        if not await PLUGINS.count_documents({"_id": guild_id}, limit=1):
            # Bug fix: insert_one is a coroutine and never executed without
            # being awaited (sibling calls in this function were awaited).
            await PLUGINS.insert_one({
                "_id": guild_id,
                "Leveling": False,
                "Moderation": True,
                "ReactionRoles": True,
                "Welcome": False,
                "Verification": False,
                "Chatbot": True,
                "AutoMod": False,
                "Karma": False,
                "Fun": True,
                "Giveaway": True
            })
            plugins_update.append(guild_id)
            print(f"✅{guild_id} - Plugins 🔧")
        if not await PERMISSIONS.count_documents({"_id": guild_id}, limit=1):
            # Bug fix: same missing await as above.
            await PERMISSIONS.insert_one({
                "_id": guild_id,
                "Leveling": {},
                "Moderation": {},
                "ReactionRoles": {},
                "Welcome": {},
                "Verification": {},
                "Chatbot": {},
                "Commands": {},
                "AutoMod": {},
                "Karma": {},
                "Fun": {},
                "Giveaway": {}
            })
            permissions_update.append(guild_id)
            print(f"✅{guild_id} - Permissions 🔨")
        # Bug fix: Guild_Plugins is None for guilds that had no document
        # before this run (the fetch happens before the insert above); the
        # freshly inserted document defaults Leveling to False, so skipping
        # those guilds preserves the intent while avoiding a TypeError.
        if Guild_Plugins is not None and Guild_Plugins["Leveling"]:
            if str(guild_id) not in await LEVELDATABASE.list_collection_names():
                GuildLevelDB = await LEVELDATABASE.create_collection(str(guild_id))
                await GuildLevelDB.insert_one({
                    "_id": 0,
                    "xprange": [15, 25],
                    "alertchannel": None,
                    "blacklistedchannels": [],
                    "alerts": True
                })
                leveling_update.append(guild_id)
                print(f"✅{guild_id} - Leveling 📊")
        # Only use this when working locally
        try:
            await PREFIXES.insert_one({
                "_id": guild_id,
                "prefix": "ax"
            })
            print(f"✅{guild_id} - Prefix ⚪")
        except Exception:  # narrowed from bare except; e.g. duplicate key
            print(f"❌{guild_id} - Prefix ⚪")
    print(f"Update results\n{len(plugins_update)} plugins\n{len(permissions_update)} permissions\n{len(leveling_update)} leveling")
# serveridlist = [843516084266729512, 751491708465840159]
# loop = asyncio.get_event_loop()
# loop.run_until_complete(updatedb(serveridlist))
#update_plugins_and_permissions("Giveaway") | 30.416149 | 131 | 0.566061 |
920cbe75be00713016fbf37939080542d048245b | 3,740 | py | Python | git_commenter/cli.py | fossabot/git-commenter | 5634e639b48368df3ffc47451fadb17adb20c300 | [
"MIT"
] | null | null | null | git_commenter/cli.py | fossabot/git-commenter | 5634e639b48368df3ffc47451fadb17adb20c300 | [
"MIT"
] | null | null | null | git_commenter/cli.py | fossabot/git-commenter | 5634e639b48368df3ffc47451fadb17adb20c300 | [
"MIT"
] | null | null | null | import argparse
import pyperclip
from termcolor import cprint
from .git import GitUtility
from .question import Question
class InvalidArgumentError(ValueError):
def __str__(self):
return "The multiple modes can not be specified."
class CLI:
def __init__(self, message):
"""
Args:
message (str, None): Message input as an argument
"""
self.git = GitUtility()
self.question = Question()
self.message = message
self.modes = []
def register_mode(self, args):
self.modes = [mode for mode, value in vars(args).items() if value]
if not self.modes or self.modes == ["clipboard"]:
self.modes.append("normal")
if "message" in self.modes and "template" in self.modes:
raise InvalidArgumentError()
def run(self):
if "clean" in self.modes:
if self.question.ask_clean_use_history():
self.question.data_loader.clean_use_history()
cprint("Use history cleaned.", "yellow")
else:
emoji, verb, object_, template = (None, None, None, None)
# ask
if "normal" in self.modes:
emoji = self.question.ask_emoji()
verb = self.question.ask_verb()
object_ = self.question.ask_object()
modifier = self.question.ask_modifier()
self.message = self.make_message(verb, object_, modifier)
commit_message = f"{emoji} : {self.message}"
elif "message" in self.modes:
emoji = self.question.ask_emoji()
commit_message = f"{emoji} : {self.message}"
elif "template" in self.modes:
template = self.question.ask_template()
commit_message = template
# commit
if self.question.ask_commit(commit_message):
self.question.data_loader.store_history(
emoji, verb, object_, template
)
if "clipboard" in self.modes:
pyperclip.copy(commit_message)
cprint("Commit message copied to clipboard.", "yellow")
else:
self.git.commit(commit_message)
@staticmethod
def make_message(verb, object_, modifier):
if verb is not None:
verb = verb.capitalize()
words = [
word for word in [verb, object_, modifier] if word is not None
]
message = " ".join(words)
return message
def main():
from . import __version__
cprint(
r""" ____ ____ ____ _
U /"___|uU /"___|U /"___||"| ___
\| | _ /\| | u \| | uU | | u |_"_|
| |_| | | |/__ | |/__\| |/__ | |
\____| \____| \____||_____| U/| |\u
_)(|_ _// \\ _// \\ // \\.-,_|___|_,-.
(__)__) (__)(__)(__)(__|_")("_)\_)-' '-(_/
""",
"magenta",
)
parser = argparse.ArgumentParser(description="Git Commenter")
parser.add_argument(
"-c", "--clipboard", action="store_true", help="copy to clipboard"
)
parser.add_argument(
"-m", "--message", help="commit with input message", type=str
)
parser.add_argument(
"-t",
"--template",
action="store_true",
help="commit with selected template",
)
parser.add_argument(
"--clean", action="store_true", help="clean up use history"
)
parser.add_argument(
"--version",
action="version",
version=__version__,
help="show version and exit",
)
args = parser.parse_args()
cli = CLI(message=args.message)
cli.register_mode(args)
cli.run()
| 28.549618 | 75 | 0.543048 |
d400aa1345560450bf3ec0cfc9543ba0a7b90a5a | 17,117 | bzl | Python | go/private/rules/test.bzl | alxn/rules_go | 678db7ff7269d3bb13af342617b8deddec6ad7ae | [
"Apache-2.0"
] | null | null | null | go/private/rules/test.bzl | alxn/rules_go | 678db7ff7269d3bb13af342617b8deddec6ad7ae | [
"Apache-2.0"
] | null | null | null | go/private/rules/test.bzl | alxn/rules_go | 678db7ff7269d3bb13af342617b8deddec6ad7ae | [
"Apache-2.0"
] | null | null | null | # Copyright 2014 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
load(
"//go/private:context.bzl",
"go_context",
)
load(
"//go/private:common.bzl",
"asm_exts",
"cgo_exts",
"go_exts",
"pkg_dir",
"split_srcs",
)
load(
"//go/private/rules:binary.bzl",
"gc_linkopts",
)
load(
"//go/private:providers.bzl",
"GoArchive",
"GoLibrary",
"GoSource",
"INFERRED_PATH",
"get_archive",
)
load(
"//go/private/rules:transition.bzl",
"go_transition_rule",
)
load(
"//go/private:mode.bzl",
"LINKMODE_NORMAL",
)
load(
"@bazel_skylib//lib:structs.bzl",
"structs",
)
def _testmain_library_to_source(go, attr, source, merge):
    # Presumably a library_to_source resolve callback — TODO confirm. It adds
    # the library under test (attr.library) to the generated testmain
    # package's deps; `go` and `merge` are part of the expected signature but
    # are unused here.
    source["deps"] = source["deps"] + [attr.library]
def _go_test_impl(ctx):
    """go_test_impl implements go testing.

    It emits an action to run the test generator, and then compiles the
    test into a binary.

    Like 'go test', the sources are split into an internal archive
    (white-box tests, 'package foo', with the library under test embedded)
    and an external archive (black-box tests, 'package foo_test'), plus a
    generated testmain package that drives both."""
    go = go_context(ctx)

    # Compile the library to test with internal white box tests
    internal_library = go.new_library(go, testfilter = "exclude")
    internal_source = go.library_to_source(go, ctx.attr, internal_library, ctx.coverage_instrumented())
    internal_archive = go.archive(go, internal_source)
    go_srcs = split_srcs(internal_source.srcs).go

    # Compile the library with the external black box tests
    external_library = go.new_library(
        go,
        name = internal_library.name + "_test",
        importpath = internal_library.importpath + "_test",
        testfilter = "only",
    )
    external_source = go.library_to_source(go, struct(
        srcs = [struct(files = go_srcs)],
        deps = internal_archive.direct + [internal_archive],
        x_defs = ctx.attr.x_defs,
    ), external_library, ctx.coverage_instrumented())
    # Avoid linking the library under test twice; see _recompile_external_deps.
    external_source, internal_archive = _recompile_external_deps(go, external_source, internal_archive, [t.label for t in ctx.attr.embed])
    external_archive = go.archive(go, external_source)
    external_srcs = split_srcs(external_source.srcs).go

    # now generate the main function
    if ctx.attr.rundir:
        if ctx.attr.rundir.startswith("/"):
            run_dir = ctx.attr.rundir
        else:
            run_dir = pkg_dir(ctx.label.workspace_root, ctx.attr.rundir)
    else:
        run_dir = pkg_dir(ctx.label.workspace_root, ctx.label.package)

    main_go = go.declare_file(go, path = "testmain.go")
    arguments = go.builder_args(go, "gentestmain")
    arguments.add("-output", main_go)
    if ctx.configuration.coverage_enabled:
        arguments.add("-coverage")
    arguments.add(
        # the l is the alias for the package under test, the l_test must be the
        # same with the test suffix
        "-import",
        "l=" + internal_source.library.importpath,
    )
    arguments.add(
        "-import",
        "l_test=" + external_source.library.importpath,
    )
    arguments.add("-pkgname", internal_source.library.importpath)
    arguments.add_all(go_srcs, before_each = "-src", format_each = "l=%s")
    ctx.actions.run(
        inputs = go_srcs,
        outputs = [main_go],
        mnemonic = "GoTestGenTest",
        executable = go.toolchain._builder,
        arguments = [arguments],
    )

    test_gc_linkopts = gc_linkopts(ctx)
    if not go.mode.debug:
        # Disable symbol table and DWARF generation for test binaries.
        test_gc_linkopts.extend(["-s", "-w"])

    # Link in the run_dir global for bzltestutil
    test_gc_linkopts.extend(["-X", "github.com/bazelbuild/rules_go/go/tools/bzltestutil.RunDir=" + run_dir])

    # Now compile the test binary itself
    test_library = GoLibrary(
        name = go._ctx.label.name + "~testmain",
        label = go._ctx.label,
        importpath = "testmain",
        importmap = "testmain",
        importpath_aliases = (),
        pathtype = INFERRED_PATH,
        is_main = True,
        resolve = None,
    )
    test_deps = external_archive.direct + [external_archive] + ctx.attr._testmain_additional_deps
    if ctx.configuration.coverage_enabled:
        test_deps.append(go.coverdata)
    test_source = go.library_to_source(go, struct(
        srcs = [struct(files = [main_go])],
        deps = test_deps,
    ), test_library, False)
    test_archive, executable, runfiles = go.binary(
        go,
        name = ctx.label.name,
        source = test_source,
        test_archives = [internal_archive.data],
        gc_linkopts = test_gc_linkopts,
        version_file = ctx.version_file,
        info_file = ctx.info_file,
    )

    # Bazel only looks for coverage data if the test target has an
    # InstrumentedFilesProvider. If the provider is found and at least one
    # source file is present, Bazel will set the COVERAGE_OUTPUT_FILE
    # environment variable during tests and will save that file to the build
    # events + test outputs.
    return [
        test_archive,
        DefaultInfo(
            files = depset([executable]),
            runfiles = runfiles,
            executable = executable,
        ),
        OutputGroupInfo(
            compilation_outputs = [internal_archive.data.file],
        ),
        coverage_common.instrumented_files_info(
            ctx,
            source_attributes = ["srcs"],
            dependency_attributes = ["deps", "embed"],
            extensions = ["go"],
        ),
    ]
# Shared rule definition: go_test and go_transition_test are built from the
# same keyword arguments and differ only in the rule wrapper applied below.
_go_test_kwargs = {
    "implementation": _go_test_impl,
    "attrs": {
        "data": attr.label_list(allow_files = True),
        "srcs": attr.label_list(allow_files = go_exts + asm_exts + cgo_exts),
        "deps": attr.label_list(providers = [GoLibrary]),
        "embed": attr.label_list(providers = [GoLibrary]),
        "importpath": attr.string(),
        "gc_goopts": attr.string_list(),
        "gc_linkopts": attr.string_list(),
        "rundir": attr.string(),
        "x_defs": attr.string_dict(),
        "linkmode": attr.string(default = LINKMODE_NORMAL),
        "cgo": attr.bool(),
        "cdeps": attr.label_list(),
        "cppopts": attr.string_list(),
        "copts": attr.string_list(),
        "cxxopts": attr.string_list(),
        "clinkopts": attr.string_list(),
        "_go_context_data": attr.label(default = "//:go_context_data"),
        # Implicit dependency linked into every generated test main.
        "_testmain_additional_deps": attr.label_list(
            providers = [GoLibrary],
            default = ["@io_bazel_rules_go//go/tools/bzltestutil"],
        ),
        # Workaround for bazelbuild/bazel#6293. See comment in lcov_merger.sh.
        "_lcov_merger": attr.label(
            executable = True,
            default = "@io_bazel_rules_go//go/tools/builders:lcov_merger",
            cfg = "target",
        ),
    },
    "executable": True,
    "test": True,
    "toolchains": ["@io_bazel_rules_go//go:toolchain"],
}

go_test = rule(**_go_test_kwargs)
# Presumably a variant that applies a configuration transition (see
# transition.bzl) — TODO confirm against go_transition_rule.
go_transition_test = go_transition_rule(**_go_test_kwargs)
def _recompile_external_deps(go, external_source, internal_archive, library_labels):
    """Recompiles some archives in order to split internal and external tests.
    go_test, like 'go test', splits tests into two separate archives: an
    internal archive ('package foo') and an external archive
    ('package foo_test'). The library under test is embedded into the internal
    archive. The external archive may import it and may depend on symbols
    defined in the internal test files.
    To avoid conflicts, the library under test must not be linked into the test
    binary, since the internal test archive embeds the same sources.
    Libraries imported by the external test that transitively import the
    library under test must be recompiled too, or the linker will complain that
    export data they were compiled with doesn't match the export data they
    are linked with.
    This function identifies which archives may need to be recompiled, then
    declares new output files and actions to recompile them. This is
    unfortunately an expensive process requiring O(V+E) time and space in the
    size of the test's dependency graph for each test.
    Args:
        go: go object returned by go_context.
        external_source: GoSource for the external archive.
        internal_archive: GoArchive for the internal archive.
        library_labels: labels for embedded libraries under test.
    Returns:
        external_source: recompiled GoSource for the external archive. If no
            recompilation is needed, the original GoSource is returned.
        internal_archive: recompiled GoArchive for the internal archive. If no
            recompilation is needed, the original GoArchive is returned.
    """
    # If no libraries are embedded in the internal archive, then nothing needs
    # to be recompiled.
    if not library_labels:
        return external_source, internal_archive
    # Build a map from labels to GoArchiveData.
    # If none of the libraries embedded in the internal archive are in the
    # dependency graph, then nothing needs to be recompiled.
    arc_data_list = depset(transitive = [get_archive(dep).transitive for dep in external_source.deps]).to_list()
    label_to_arc_data = {a.label: a for a in arc_data_list}
    if all([l not in label_to_arc_data for l in library_labels]):
        return external_source, internal_archive
    # Build a depth-first post-order list of dependencies starting with the
    # external archive. Each archive appears after its dependencies and before
    # its dependents.
    #
    # This is tricky because Starlark doesn't support recursion or while loops.
    # We simulate a while loop by iterating over a list of 2N elements where
    # N is the number of archives. Each archive is pushed onto the stack
    # twice: once before its dependencies are pushed, and once after.
    # dep_list is the post-order list of dependencies we're building.
    dep_list = []
    # stack is a stack of targets to process. We're done when it's empty.
    stack = [get_archive(dep).data.label for dep in external_source.deps]
    # deps_pushed tracks the status of each target.
    # DEPS_UNPROCESSED means the target is on the stack, but its dependencies
    # are not. (Targets discovered during the walk are marked with None below,
    # which serves the same purpose.)
    # ON_DEP_LIST means the target and its dependencies have been added to
    # dep_list.
    # Non-negative integers are the number of dependencies on the stack that
    # still need to be processed.
    # A target is on the stack if its status is DEPS_UNPROCESSED or 0.
    DEPS_UNPROCESSED = -1
    # NOTE(review): ON_DEP_LIST is documented above but never assigned in this
    # function; finished targets are appended directly to dep_list instead.
    ON_DEP_LIST = -2
    deps_pushed = {l: DEPS_UNPROCESSED for l in stack}
    # dependents maps labels to lists of known dependents. When a target is
    # processed, its dependents' deps_pushed counts are decremented.
    dependents = {l: [] for l in stack}
    # step is a list to iterate over to simulate a while loop. i tracks
    # iterations.
    step = [None] * (2 * len(arc_data_list))
    i = 0
    for _ in step:
        if len(stack) == 0:
            break
        i += 1
        label = stack.pop()
        if deps_pushed[label] == 0:
            # All deps have been added to dep_list. Append this target to the
            # list. If a dependent is not waiting for anything else, push
            # it back onto the stack.
            dep_list.append(label)
            for p in dependents.get(label, []):
                deps_pushed[p] -= 1
                if deps_pushed[p] == 0:
                    stack.append(p)
            continue
        # deps_pushed[label] is DEPS_UNPROCESSED (or None for targets found
        # mid-walk), indicating we don't know whether this target's
        # dependencies have been processed. Other targets processed
        # earlier may depend on them.
        deps_pushed[label] = 0
        arc_data = label_to_arc_data[label]
        for c in arc_data._dep_labels:
            if c not in deps_pushed:
                # Dependency not seen yet; push it.
                stack.append(c)
                deps_pushed[c] = None
                deps_pushed[label] += 1
                dependents[c] = [label]
            elif deps_pushed[c] != 0:
                # Dependency pushed, not processed; wait for it.
                deps_pushed[label] += 1
                dependents[c].append(label)
        if deps_pushed[label] == 0:
            # No dependencies to wait for; push self.
            stack.append(label)
    if i != len(step):
        fail("assertion failed: iterated %d times instead of %d" % (i, len(step)))
    # Determine which dependencies need to be recompiled because they depend
    # on embedded libraries. dep_list is post-order, so need_recompile[dep]
    # is always populated before it is read here.
    need_recompile = {}
    for label in dep_list:
        arc_data = label_to_arc_data[label]
        need_recompile[label] = any([
            dep in library_labels or need_recompile[dep]
            for dep in arc_data._dep_labels
        ])
    # Recompile the internal archive without dependencies that need
    # recompilation. This breaks a cycle which occurs because the deps list
    # is shared between the internal and external archive. The internal archive
    # can't import anything that imports itself.
    internal_source = internal_archive.source
    internal_deps = [dep for dep in internal_source.deps if not need_recompile[get_archive(dep).data.label]]
    attrs = structs.to_dict(internal_source)
    attrs["deps"] = internal_deps
    internal_source = GoSource(**attrs)
    internal_archive = go.archive(go, internal_source, _recompile_suffix = ".recompileinternal")
    # Build a map from labels to possibly recompiled GoArchives.
    label_to_archive = {}
    i = 0
    for label in dep_list:
        i += 1
        # NOTE(review): recompile_suffix is recomputed in the need_recompile
        # branch below; this assignment is redundant but harmless.
        recompile_suffix = ".recompile%d" % i
        # If this library is the internal archive, use the recompiled version.
        if label == internal_archive.data.label:
            label_to_archive[label] = internal_archive
            continue
        # If this is a library embedded into the internal test archive,
        # use the internal test archive instead.
        if label in library_labels:
            label_to_archive[label] = internal_archive
            continue
        # Create a stub GoLibrary and GoSource from the archive data.
        arc_data = label_to_arc_data[label]
        library = GoLibrary(
            name = arc_data.name,
            label = arc_data.label,
            importpath = arc_data.importpath,
            importmap = arc_data.importmap,
            importpath_aliases = arc_data.importpath_aliases,
            pathtype = arc_data.pathtype,
            resolve = None,
            testfilter = None,
            is_main = False,
        )
        deps = [label_to_archive[d] for d in arc_data._dep_labels]
        source = GoSource(
            library = library,
            mode = go.mode,
            srcs = arc_data.srcs,
            orig_srcs = arc_data.orig_srcs,
            orig_src_map = dict(zip(arc_data.srcs, arc_data._orig_src_map)),
            cover = arc_data._cover,
            x_defs = dict(arc_data._x_defs),
            deps = deps,
            gc_goopts = arc_data._gc_goopts,
            runfiles = go._ctx.runfiles(files = arc_data.data_files),
            cgo = arc_data._cgo,
            cdeps = arc_data._cdeps,
            cppopts = arc_data._cppopts,
            copts = arc_data._copts,
            cxxopts = arc_data._cxxopts,
            clinkopts = arc_data._clinkopts,
            cgo_exports = arc_data._cgo_exports,
        )
        # If this archive needs to be recompiled, use go.archive.
        # Otherwise, create a stub GoArchive, using the original file.
        if need_recompile[label]:
            recompile_suffix = ".recompile%d" % i
            archive = go.archive(go, source, _recompile_suffix = recompile_suffix)
        else:
            archive = GoArchive(
                source = source,
                data = arc_data,
                direct = deps,
                libs = depset(direct = [arc_data.file], transitive = [a.libs for a in deps]),
                transitive = depset(direct = [arc_data], transitive = [a.transitive for a in deps]),
                x_defs = source.x_defs,
                cgo_deps = depset(direct = arc_data._cgo_deps, transitive = [a.cgo_deps for a in deps]),
                cgo_exports = depset(direct = list(source.cgo_exports), transitive = [a.cgo_exports for a in deps]),
                runfiles = source.runfiles,
                mode = go.mode,
            )
        label_to_archive[label] = archive
    # Finally, we need to replace external_source.deps with the recompiled
    # archives.
    attrs = structs.to_dict(external_source)
    attrs["deps"] = [label_to_archive[get_archive(dep).data.label] for dep in external_source.deps]
    return GoSource(**attrs), internal_archive
| 39.259174 | 138 | 0.647543 |
c41f64a60e0266e67ca8d294c0123334e3e86f44 | 561 | py | Python | test_login.py | olga121/Selenium_Webdriver_2107 | 13e43aa5e4a2e04c6453ba634778fcbb5c793f40 | [
"Apache-2.0"
] | null | null | null | test_login.py | olga121/Selenium_Webdriver_2107 | 13e43aa5e4a2e04c6453ba634778fcbb5c793f40 | [
"Apache-2.0"
] | null | null | null | test_login.py | olga121/Selenium_Webdriver_2107 | 13e43aa5e4a2e04c6453ba634778fcbb5c793f40 | [
"Apache-2.0"
] | null | null | null | import pytest
from selenium import webdriver
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
@pytest.fixture
def driver(request):
    """Yield a Chrome WebDriver that is quit when the test finishes."""
    browser = webdriver.Chrome()
    # Register quit() as a finalizer so the browser closes even on failure.
    request.addfinalizer(browser.quit)
    return browser
def test_example(driver):
    """Search nasa.gov for 'Leonov' and verify the results page title."""
    driver.get("https://www.nasa.gov/")
    search_field = driver.find_element_by_name('query')
    search_field.send_keys('Leonov')
    submit_button = driver.find_element_by_xpath("//input[@value='Search']")
    submit_button.click()
    # Wait up to 10 seconds for the results page title to appear.
    waiter = WebDriverWait(driver, 10)
    waiter.until(EC.title_is("Leonov - NASA Search Results"))
| 29.526316 | 79 | 0.755793 |
0d05be740e09157b590f036be1d2972da65a7a29 | 270 | py | Python | ros2_draw_squares/launch/call_move_robot_in_square_service_server.launch.py | samialperen/ros2_move_robot | b80693589632a37b20a3cac26004a4a546f1717f | [
"MIT"
] | null | null | null | ros2_draw_squares/launch/call_move_robot_in_square_service_server.launch.py | samialperen/ros2_move_robot | b80693589632a37b20a3cac26004a4a546f1717f | [
"MIT"
] | 3 | 2022-03-30T15:22:36.000Z | 2022-03-30T18:23:13.000Z | ros2_draw_squares/launch/call_move_robot_in_square_service_server.launch.py | samialperen/ros2_move_robot | b80693589632a37b20a3cac26004a4a546f1717f | [
"MIT"
] | null | null | null | from launch import LaunchDescription
import launch_ros.actions
def generate_launch_description():
    """Build the launch description that starts the square-drawing client."""
    client_node = launch_ros.actions.Node(
        package='ros2_draw_squares',
        executable='move_robot_service_client',
        output='screen',
    )
    return LaunchDescription([client_node])
| 27 | 98 | 0.744444 |
74890957a4a2779f58b2ffb16f5cbbdb24b071e2 | 979 | py | Python | scripts/slave/recipe_modules/syzygy/gclient_config.py | bopopescu/build | 4e95fd33456e552bfaf7d94f7d04b19273d1c534 | [
"BSD-3-Clause"
] | null | null | null | scripts/slave/recipe_modules/syzygy/gclient_config.py | bopopescu/build | 4e95fd33456e552bfaf7d94f7d04b19273d1c534 | [
"BSD-3-Clause"
] | null | null | null | scripts/slave/recipe_modules/syzygy/gclient_config.py | bopopescu/build | 4e95fd33456e552bfaf7d94f7d04b19273d1c534 | [
"BSD-3-Clause"
] | 1 | 2020-07-23T10:57:32.000Z | 2020-07-23T10:57:32.000Z | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import DEPS
CONFIG_CTX = DEPS['gclient'].CONFIG_CTX
@CONFIG_CTX()
def syzygy(c):
  """Base gclient configuration for Syzygy checkouts."""
  # Map the 'src' solution onto the got_revision build property.
  c.got_revision_mapping['src'] = 'got_revision'
  c.delete_unversioned_trees = True
  # Public Syzygy repository checkout.
  public = c.solutions.add()
  public.name = 'src'
  public.url = ('https://chromium.googlesource.com/external/' +
                'github.com/google/syzygy.git/')
  public.deps_file = 'DEPS'
  public.managed = False
  # Internal-only DEPS checkout.
  internal = c.solutions.add()
  internal.name = 'src-internal'
  internal.url = ('https://chrome-internal.googlesource.com/chrome/syzygy/' +
                  'internal.DEPS.git')
  internal.managed = False
@CONFIG_CTX(includes=['syzygy'])
def syzygy_official(dummy_c):
  # Official-build variant; currently identical to the base 'syzygy' config.
  pass
@CONFIG_CTX(includes=['syzygy'])
def kasko_official(dummy_c):
  # Kasko official-build variant; currently identical to the base 'syzygy' config.
  pass
| 26.459459 | 72 | 0.702758 |
79c4e773afe00fe8c4655e5ccde6337f7e73d45f | 407 | py | Python | indoorair_front/indoorair_front/wsgi.py | ydang5/indoorair-front | 24264fbbd0da0ab007d9dc058311dd4914953d59 | [
"BSD-3-Clause"
] | null | null | null | indoorair_front/indoorair_front/wsgi.py | ydang5/indoorair-front | 24264fbbd0da0ab007d9dc058311dd4914953d59 | [
"BSD-3-Clause"
] | null | null | null | indoorair_front/indoorair_front/wsgi.py | ydang5/indoorair-front | 24264fbbd0da0ab007d9dc058311dd4914953d59 | [
"BSD-3-Clause"
] | null | null | null | """
WSGI config for indoorair_front project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings module before creating the application.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'indoorair_front.settings')
# Module-level WSGI callable that servers (gunicorn, mod_wsgi, ...) import.
application = get_wsgi_application()
| 23.941176 | 78 | 0.793612 |
26d3a0a46bdd79f94d4fec343460258a1465dd73 | 1,764 | py | Python | rabbitai/charts/commands/importers/dispatcher.py | psbsgic/rabbitai | 769e120ba605d56ac076f810a549c38dac410c8e | [
"Apache-2.0"
] | null | null | null | rabbitai/charts/commands/importers/dispatcher.py | psbsgic/rabbitai | 769e120ba605d56ac076f810a549c38dac410c8e | [
"Apache-2.0"
] | null | null | null | rabbitai/charts/commands/importers/dispatcher.py | psbsgic/rabbitai | 769e120ba605d56ac076f810a549c38dac410c8e | [
"Apache-2.0"
] | 1 | 2021-07-09T16:29:50.000Z | 2021-07-09T16:29:50.000Z | import logging
from typing import Any, Dict
from marshmallow.exceptions import ValidationError
from rabbitai.charts.commands.importers import v1
from rabbitai.commands.base import BaseCommand
from rabbitai.commands.exceptions import CommandInvalidError
from rabbitai.commands.importers.exceptions import IncorrectVersionError
logger = logging.getLogger(__name__)
# Known import-command versions, tried in order by ImportChartsCommand.run().
command_versions = [
    v1.ImportChartsCommand,
]
class ImportChartsCommand(BaseCommand):
    """
    Import charts.

    This command dispatches the import to different versions of the command
    until it finds one that matches.
    """

    # pylint: disable=unused-argument
    def __init__(self, contents: Dict[str, str], *args: Any, **kwargs: Any):
        # Store everything; the versioned commands are constructed lazily in run().
        self.contents = contents
        self.args = args
        self.kwargs = kwargs

    def run(self) -> None:
        # Try each known command version in order; the first one that can
        # handle the contents wins.
        for command_cls in command_versions:
            command = command_cls(self.contents, *self.args, **self.kwargs)
            try:
                command.run()
            except IncorrectVersionError:
                # Wrong version for this file; fall through to the next one.
                logger.debug("File not handled by command, skipping")
            except (CommandInvalidError, ValidationError) as exc:
                # found right version, but file is invalid
                logger.info("Command failed validation")
                raise exc
            except Exception as exc:
                # validation succeeded but something went wrong
                logger.exception("Error running import command")
                raise exc
            else:
                return
        raise CommandInvalidError("Could not find a valid command to import file")

    def validate(self) -> None:
        pass
| 32.072727 | 82 | 0.65873 |
ed34d314b62d11e9c93e8f9bab37da59ed218373 | 14,808 | py | Python | qiime2/core/type/signature.py | ebolyen/qiime2 | f8d53101035ab4618587fe608f21f6e467acc74a | [
"BSD-3-Clause"
] | null | null | null | qiime2/core/type/signature.py | ebolyen/qiime2 | f8d53101035ab4618587fe608f21f6e467acc74a | [
"BSD-3-Clause"
] | null | null | null | qiime2/core/type/signature.py | ebolyen/qiime2 | f8d53101035ab4618587fe608f21f6e467acc74a | [
"BSD-3-Clause"
] | null | null | null | # ----------------------------------------------------------------------------
# Copyright (c) 2016-2017, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import collections
import inspect
import copy
import qiime2.sdk
from .grammar import TypeExpression
from .primitive import is_primitive_type
from .semantic import is_semantic_type
from .visualization import Visualization
from ..util import ImmutableBase
class _NoValue:
    """Sentinel type meaning "no value supplied" (distinct from a legitimate None)."""
    def __repr__(self):
        return "NOVALUE"
class ParameterSpec(ImmutableBase):
    """Immutable record describing one input, parameter, or output.

    Each field defaults to the NOVALUE sentinel so that "not specified" can be
    told apart from an explicit None.
    """
    NOVALUE = _NoValue()

    def __init__(self, qiime_type=NOVALUE, view_type=NOVALUE, default=NOVALUE,
                 description=NOVALUE):
        self.qiime_type = qiime_type
        self.view_type = view_type
        self.default = default
        self.description = description
        # Inherited from ImmutableBase; makes the instance read-only from here on.
        self._freeze_()

    def has_qiime_type(self):
        """Whether an explicit qiime_type was provided."""
        return self.qiime_type is not ParameterSpec.NOVALUE

    def has_view_type(self):
        """Whether an explicit view_type was provided."""
        return self.view_type is not ParameterSpec.NOVALUE

    def has_default(self):
        """Whether an explicit default was provided."""
        return self.default is not ParameterSpec.NOVALUE

    def has_description(self):
        """Whether an explicit description was provided."""
        return self.description is not ParameterSpec.NOVALUE

    def _as_tuple(self):
        # Canonical field ordering used by __repr__ and __eq__.
        return (self.qiime_type, self.view_type, self.default,
                self.description)

    def __repr__(self):
        return ("ParameterSpec(qiime_type=%r, view_type=%r, default=%r, "
                "description=%r)" % self._as_tuple())

    def __eq__(self, other):
        return self._as_tuple() == other._as_tuple()

    def __ne__(self, other):
        return not self.__eq__(other)
# Note: Pipeline doesn't exist yet but it is expected to accept zero or more
# input semantic types, zero or more parameters, and produce one or more output
# semantic types or Visualization types.
class PipelineSignature:
    """Signature of a pipeline: inputs, parameters, and outputs, parsed from
    a callable's annotations plus registered QIIME types."""
    # Arguments that the framework supplies itself; they appear first in the
    # callable's signature and get no ParameterSpec.
    builtin_args = ('ctx',)
    def __init__(self, callable, inputs, parameters, outputs,
                 input_descriptions=None, parameter_descriptions=None,
                 output_descriptions=None):
        """
        Parameters
        ----------
        callable : callable
            Callable with view type annotations on parameters and return.
        inputs : dict
            Parameter name to semantic type.
        parameters : dict
            Parameter name to primitive type.
        outputs : list of tuple
            Each tuple contains the name of the output (str) and its QIIME
            type.
        input_descriptions : dict, optional
            Input name to description string.
        parameter_descriptions : dict, optional
            Parameter name to description string.
        output_descriptions : dict, optional
            Output name to description string.
        """
        inputs, parameters, outputs, signature_order = \
            self._parse_signature(callable, inputs, parameters, outputs,
                                  input_descriptions, parameter_descriptions,
                                  output_descriptions)
        self._assert_valid_inputs(inputs)
        self._assert_valid_parameters(parameters)
        self._assert_valid_outputs(outputs)
        self.inputs = inputs
        self.parameters = parameters
        self.outputs = outputs
        self.signature_order = signature_order
    def _parse_signature(self, callable, inputs, parameters, outputs,
                         input_descriptions=None, parameter_descriptions=None,
                         output_descriptions=None):
        """Cross-check the callable's Python signature against the registered
        QIIME types and build ParameterSpec mappings (ordered as declared)."""
        # Initialize dictionaries if non-existent.
        if input_descriptions is None:
            input_descriptions = {}
        if parameter_descriptions is None:
            parameter_descriptions = {}
        if output_descriptions is None:
            output_descriptions = {}
        # Copy so we can "exhaust" the collections and check for missing params
        inputs = copy.copy(inputs)
        parameters = copy.copy(parameters)
        input_descriptions = copy.copy(input_descriptions)
        parameter_descriptions = copy.copy(parameter_descriptions)
        output_descriptions = copy.copy(output_descriptions)
        builtin_args = list(self.builtin_args)
        annotated_inputs = collections.OrderedDict()
        annotated_parameters = collections.OrderedDict()
        annotated_outputs = collections.OrderedDict()
        signature_order = collections.OrderedDict()
        for name, parameter in inspect.signature(callable).parameters.items():
            if (parameter.kind == parameter.VAR_POSITIONAL or
                    parameter.kind == parameter.VAR_KEYWORD):
                raise TypeError("Variadic definitions are unsupported: %r" %
                                name)
            # Builtin args must come first and in the declared order.
            if builtin_args:
                if builtin_args[0] != name:
                    raise TypeError("Missing builtin argument %r, got %r" %
                                    (builtin_args[0], name))
                builtin_args = builtin_args[1:]
                continue
            # Pull the view type and default from the Python signature.
            view_type = ParameterSpec.NOVALUE
            if parameter.annotation is not parameter.empty:
                view_type = parameter.annotation
            default = ParameterSpec.NOVALUE
            if parameter.default is not parameter.empty:
                default = parameter.default
            if name in inputs:
                description = input_descriptions.pop(name,
                                                     ParameterSpec.NOVALUE)
                param_spec = ParameterSpec(
                    qiime_type=inputs.pop(name), view_type=view_type,
                    default=default, description=description)
                annotated_inputs[name] = param_spec
                signature_order[name] = param_spec
            elif name in parameters:
                description = parameter_descriptions.pop(name,
                                                         ParameterSpec.NOVALUE)
                param_spec = ParameterSpec(
                    qiime_type=parameters.pop(name), view_type=view_type,
                    default=default, description=description)
                annotated_parameters[name] = param_spec
                signature_order[name] = param_spec
            elif name not in self.builtin_args:
                raise TypeError("Parameter in callable without QIIME type:"
                                " %r" % name)
        # we should have popped both of these empty by this point
        if inputs or parameters:
            raise TypeError("Callable does not have parameter(s): %r"
                            % (list(inputs) + list(parameters)))
        if 'return' in callable.__annotations__:
            # The return annotation (possibly a tuple) supplies output view
            # types; it must match the number of registered outputs.
            output_views = qiime2.core.util.tuplize(
                callable.__annotations__['return'])
            if len(output_views) != len(outputs):
                raise TypeError("Number of registered outputs (%r) does not"
                                " match annotation (%r)" %
                                (len(outputs), len(output_views)))
            for (name, qiime_type), view_type in zip(outputs, output_views):
                description = output_descriptions.pop(name,
                                                      ParameterSpec.NOVALUE)
                annotated_outputs[name] = ParameterSpec(
                    qiime_type=qiime_type, view_type=view_type,
                    description=description)
        else:
            for name, qiime_type in outputs:
                description = output_descriptions.pop(name,
                                                      ParameterSpec.NOVALUE)
                annotated_outputs[name] = ParameterSpec(
                    qiime_type=qiime_type, description=description)
        # we should have popped the descriptions empty by this point
        if input_descriptions or parameter_descriptions or output_descriptions:
            raise TypeError(
                "Callable does not have parameter(s)/output(s) found in "
                "descriptions: %r" % [*input_descriptions,
                                      *parameter_descriptions,
                                      *output_descriptions])
        return (annotated_inputs, annotated_parameters, annotated_outputs,
                signature_order)
    def _assert_valid_inputs(self, inputs):
        """Validate that every input is a complete semantic type; only a
        default of None is allowed for inputs."""
        for input_name, spec in inputs.items():
            if not is_semantic_type(spec.qiime_type):
                raise TypeError(
                    "Input %r must be a semantic QIIME type, not %r"
                    % (input_name, spec.qiime_type))
            if not isinstance(spec.qiime_type, TypeExpression):
                raise TypeError(
                    "Input %r must be a complete semantic type expression, "
                    "not %r" % (input_name, spec.qiime_type))
            if spec.has_default() and spec.default is not None:
                raise ValueError(
                    "Input %r has a default value of %r. Only a default "
                    "value of `None` is supported for inputs."
                    % (input_name, spec.default))
    def _assert_valid_parameters(self, parameters):
        """Validate that every parameter is a complete primitive type and that
        any default is None or a member of that type."""
        for param_name, spec in parameters.items():
            if not is_primitive_type(spec.qiime_type):
                raise TypeError(
                    "Parameter %r must be a primitive QIIME type, not %r"
                    % (param_name, spec.qiime_type))
            if not isinstance(spec.qiime_type, TypeExpression):
                raise TypeError(
                    "Parameter %r must be a complete primitive type "
                    "expression, not %r" % (param_name, spec.qiime_type))
            if (spec.has_default() and
                    spec.default is not None and
                    spec.default not in spec.qiime_type):
                raise TypeError("Default value for parameter %r is not of "
                                "semantic QIIME type %r or `None`."
                                % (param_name, spec.qiime_type))
    def _assert_valid_outputs(self, outputs):
        """Validate there is at least one output and each is a complete
        semantic type or Visualization."""
        if len(outputs) == 0:
            raise TypeError("%s requires at least one output"
                            % self.__class__.__name__)
        for output_name, spec in outputs.items():
            if not (is_semantic_type(spec.qiime_type) or
                    spec.qiime_type == Visualization):
                raise TypeError(
                    "Output %r must be a semantic QIIME type or "
                    "Visualization, not %r"
                    % (output_name, spec.qiime_type))
            if not isinstance(spec.qiime_type, TypeExpression):
                raise TypeError(
                    "Output %r must be a complete type expression, not %r"
                    % (output_name, spec.qiime_type))
    def decode_parameters(self, **kwargs):
        """Decode string-encoded parameter values via their primitive types,
        passing through None for parameters whose default is None."""
        params = {}
        for key, spec in self.parameters.items():
            if (spec.has_default() and
                    spec.default is None and
                    kwargs[key] is None):
                params[key] = None
            else:
                params[key] = spec.qiime_type.decode(kwargs[key])
        return params
    def check_types(self, **kwargs):
        """Type-check the supplied arguments against the signature order."""
        for name, spec in self.signature_order.items():
            if kwargs[name] not in spec.qiime_type:
                # A type mismatch is unacceptable unless the value is None
                # and this parameter's default value is None.
                if not (spec.has_default() and
                        spec.default is None and
                        kwargs[name] is None):
                    raise TypeError("Argument to parameter %r is not a "
                                    "subtype of %r." % (name, spec.qiime_type))
    def solve_output(self, **input_types):
        """Return the (currently static) output specs; all must be concrete."""
        # TODO implement solving here. The check for concrete output types may
        # be unnecessary here if the signature's constructor can detect
        # unsolvable signatures and ensure that solving will always produce
        # concrete output types.
        solved_outputs = self.outputs
        for output_name, spec in solved_outputs.items():
            if not spec.qiime_type.is_concrete():
                raise TypeError(
                    "Solved output %r must be a concrete type, not %r" %
                    (output_name, spec.qiime_type))
        return solved_outputs
    def __repr__(self):
        lines = []
        for group in 'inputs', 'parameters', 'outputs':
            lookup = getattr(self, group)
            lines.append('%s:' % group)
            for name, spec in lookup.items():
                lines.append('  %s: %r' % (name, spec))
        return '\n'.join(lines)
    def __eq__(self, other):
        return (type(self) is type(other) and
                self.inputs == other.inputs and
                self.parameters == other.parameters and
                self.outputs == other.outputs and
                self.signature_order == other.signature_order)
    def __ne__(self, other):
        return not (self == other)
class MethodSignature(PipelineSignature):
    """Signature for methods: no builtin arguments, and every output must be
    a semantic type (visualizations are not allowed)."""
    builtin_args = ()

    def _assert_valid_outputs(self, outputs):
        super()._assert_valid_outputs(outputs)
        # Tighten the parent's check: the parent also accepts Visualization
        # outputs, which methods must reject.
        for name, spec in outputs.items():
            if is_semantic_type(spec.qiime_type):
                continue
            raise TypeError(
                "Output %r must be a semantic QIIME type, not %r" %
                (name, spec.qiime_type))
class VisualizerSignature(PipelineSignature):
    """Signature for visualizers: writes into `output_dir`, returns nothing,
    and always has exactly one Visualization output."""
    builtin_args = ('output_dir',)

    def __init__(self, callable, inputs, parameters, input_descriptions=None,
                 parameter_descriptions=None):
        # Visualizers have a single fixed, undescribed output.
        super().__init__(callable, inputs, parameters,
                         [('visualization', Visualization)],
                         input_descriptions, parameter_descriptions, None)

    def _assert_valid_outputs(self, outputs):
        super()._assert_valid_outputs(outputs)
        spec = outputs['visualization']
        # The callable must not declare a return view type other than None.
        if spec.has_view_type() and spec.view_type is not None:
            raise TypeError(
                "Visualizer callable cannot return anything. Its return "
                "annotation must be `None`, not %r. Write output to "
                "`output_dir`." % spec.view_type)
| 41.712676 | 79 | 0.577256 |
14acd9a44b7a5cc95962cb159185a8d4148f3999 | 13,519 | py | Python | tests/terraform/graph/graph_builder/test_graph_builder.py | jamesholland-uk/checkov | d73fd4bd7096d48ab3434a92a177bcc55605460a | [
"Apache-2.0"
] | null | null | null | tests/terraform/graph/graph_builder/test_graph_builder.py | jamesholland-uk/checkov | d73fd4bd7096d48ab3434a92a177bcc55605460a | [
"Apache-2.0"
] | null | null | null | tests/terraform/graph/graph_builder/test_graph_builder.py | jamesholland-uk/checkov | d73fd4bd7096d48ab3434a92a177bcc55605460a | [
"Apache-2.0"
] | null | null | null | import os
import shutil
from unittest import TestCase
from checkov.common.graph.db_connectors.networkx.networkx_db_connector import NetworkxConnector
from checkov.terraform.graph_builder.graph_components.block_types import BlockType
from checkov.terraform.graph_builder.graph_to_tf_definitions import convert_graph_vertices_to_tf_definitions
from checkov.terraform.graph_manager import TerraformGraphManager
from checkov.terraform.parser import external_modules_download_path
# Absolute directory of this test file; used to build fixture resource paths.
TEST_DIRNAME = os.path.dirname(os.path.realpath(__file__))
class TestGraphBuilder(TestCase):
    def test_build_graph(self):
        """Build a graph from the general_example fixture and verify node
        counts and the expected edges between resource/provider/locals/vars."""
        resources_dir = os.path.join(TEST_DIRNAME, '../resources/general_example')
        graph_manager = TerraformGraphManager(db_connector=NetworkxConnector())
        graph, tf_definitions = graph_manager.build_graph_from_source_directory(resources_dir)
        # Expected vertex counts for the fixture — TODO confirm these track
        # the fixture's .tf contents if the fixture changes.
        expected_num_of_var_nodes = 3
        expected_num_of_locals_nodes = 1
        expected_num_of_resources_nodes = 1
        expected_num_of_provider_nodes = 1
        vertices_by_block_type = graph.vertices_by_block_type
        self.assertEqual(expected_num_of_var_nodes, len(vertices_by_block_type[BlockType.VARIABLE]))
        self.assertEqual(expected_num_of_locals_nodes, len(vertices_by_block_type[BlockType.LOCALS]))
        self.assertEqual(expected_num_of_resources_nodes, len(vertices_by_block_type[BlockType.RESOURCE]))
        self.assertEqual(expected_num_of_provider_nodes, len(vertices_by_block_type[BlockType.PROVIDER]))
        provider_node = graph.vertices[vertices_by_block_type[BlockType.PROVIDER][0]]
        resource_node = graph.vertices[vertices_by_block_type[BlockType.RESOURCE][0]]
        local_node = graph.vertices[vertices_by_block_type[BlockType.LOCALS][0]]
        # Locate the three variable vertices by name.
        var_bucket_name_node = None
        var_region_node = None
        var_aws_profile_node = None
        for index in vertices_by_block_type[BlockType.VARIABLE]:
            var_node = graph.vertices[index]
            if var_node.name == 'aws_profile':
                var_aws_profile_node = var_node
            if var_node.name == 'bucket_name':
                var_bucket_name_node = var_node
            if var_node.name == 'region':
                var_region_node = var_node
        # Verify the reference edges the builder should have created.
        self.check_edge(graph, resource_node, local_node, 'bucket')
        self.check_edge(graph, resource_node, provider_node, 'provider')
        self.check_edge(graph, resource_node, var_region_node, 'region')
        self.check_edge(graph, provider_node, var_aws_profile_node, 'profile')
        self.check_edge(graph, local_node, var_bucket_name_node, 'bucket_name')
def check_edge(self, graph, node_from, node_to, expected_label):
hashed_from = node_from.get_hash()
hashed_to = node_to.get_hash()
matching_edges = []
for edge in graph.edges:
if graph.vertices[edge.origin].get_hash() == hashed_from and graph.vertices[edge.dest].get_hash() == hashed_to:
matching_edges.append(edge)
self.assertGreater(len(matching_edges), 0,
f'expected to find edge from [{node_from.block_type} {node_from.name}] to [{node_to.block_type} {node_to.name}] with label [{expected_label}]')
if not any(e.label == expected_label for e in matching_edges):
self.fail(
f'expected to find edge from [{node_from.block_type} {node_from.name}] to [{node_to.block_type} {node_to.name}] with label [{expected_label}], found edges: {[str(e) for e in matching_edges]}')
@staticmethod
def get_vertex_by_name_and_type(local_graph, block_type, name, multiple=False):
vertices = [local_graph.vertices[i] for i in local_graph.vertices_block_name_map[block_type][name]]
if multiple:
return vertices
return vertices[0]
def test_update_vertices_configs_deep_nesting(self):
resources_dir = os.path.join(TEST_DIRNAME, '../resources/variable_rendering/render_deep_nesting')
graph_manager = TerraformGraphManager(NetworkxConnector())
local_graph, _ = graph_manager.build_graph_from_source_directory(resources_dir, render_variables=True)
expected_config = {'aws_s3_bucket': {'default': {'server_side_encryption_configuration': [
{'rule': [{'apply_server_side_encryption_by_default': [
{'sse_algorithm': ['AES256'], 'kms_master_key_id': ['']}]}]}]}}}
actual_config = local_graph.vertices[local_graph.vertices_by_block_type.get(BlockType.RESOURCE)[0]].config
self.assertDictEqual(expected_config, actual_config)
print('')
    def test_build_graph_with_linked_modules(self):
        """Verify vertex counts and output->resource edges for a fixture with
        two linked local modules (lambda + s3)."""
        # see the image to view the expected graph in tests/resources/modules/linked_modules/expected_graph.png
        resources_dir = os.path.realpath(os.path.join(TEST_DIRNAME, '../resources/modules/linked_modules'))
        graph_manager = TerraformGraphManager(NetworkxConnector())
        # render_variables=False: edges are checked against the raw graph.
        local_graph, tf_definitions = graph_manager.build_graph_from_source_directory(resources_dir, render_variables=False)
        vertices_by_block_type = local_graph.vertices_by_block_type
        expected_vertices_num_by_type = {
            BlockType.VARIABLE: 5,
            BlockType.RESOURCE: 5,
            BlockType.OUTPUT: 3,
            BlockType.MODULE: 2,
            BlockType.DATA: 1,
        }
        for block_type, count in expected_vertices_num_by_type.items():
            self.assertEqual(count, len(vertices_by_block_type[block_type]))
        # Resolve the vertices participating in the expected edges.
        output_this_lambda_func_arn = self.get_vertex_by_name_and_type(local_graph, BlockType.OUTPUT,
                                                                       'this_lambda_function_arn')
        output_this_lambda_func_name = self.get_vertex_by_name_and_type(local_graph, BlockType.OUTPUT,
                                                                        'this_lambda_function_name')
        output_this_s3_bucket_id = self.get_vertex_by_name_and_type(local_graph, BlockType.OUTPUT, 'this_s3_bucket_id')
        resource_aws_lambda_function = self.get_vertex_by_name_and_type(local_graph, BlockType.RESOURCE,
                                                                        'aws_lambda_function.this')
        resource_aws_s3_bucket_policy = self.get_vertex_by_name_and_type(local_graph, BlockType.RESOURCE,
                                                                         'aws_s3_bucket_policy.this')
        resource_aws_s3_bucket = self.get_vertex_by_name_and_type(local_graph, BlockType.RESOURCE, 'aws_s3_bucket.this')
        # Each module output's 'value' attribute should reference a resource.
        self.check_edge(local_graph, node_from=output_this_lambda_func_arn, node_to=resource_aws_lambda_function,
                        expected_label='value')
        self.check_edge(local_graph, node_from=output_this_lambda_func_name, node_to=resource_aws_lambda_function,
                        expected_label='value')
        self.check_edge(local_graph, node_from=output_this_s3_bucket_id, node_to=resource_aws_s3_bucket_policy,
                        expected_label='value')
        self.check_edge(local_graph, node_from=output_this_s3_bucket_id, node_to=resource_aws_s3_bucket,
                        expected_label='value')
def test_build_graph_with_linked_registry_modules(self):
    """Build a rendered graph that downloads an external registry module and
    verify vpc_id edges across the inner/outer module boundary."""
    resources_dir = os.path.realpath(
        os.path.join(TEST_DIRNAME, '../resources/modules/registry_security_group_inner_module'))
    graph_manager = TerraformGraphManager(NetworkxConnector())
    local_graph, tf_definitions = graph_manager.build_graph_from_source_directory(resources_dir,
                                                                                  render_variables=True,
                                                                                  download_external_modules=True)
    # Both the inner and the outer module expose an output with this name.
    outputs_vpcs = self.get_vertex_by_name_and_type(local_graph, BlockType.OUTPUT, 'security_group_vpc_id',
                                                    multiple=True)
    resource_flow_log = self.get_vertex_by_name_and_type(local_graph, BlockType.RESOURCE,
                                                         'aws_flow_log.related_flow_log')
    resource_security_group_this = self.get_vertex_by_name_and_type(local_graph, BlockType.RESOURCE,
                                                                    'aws_security_group.this')
    resource_security_group_this_name_prefix = self.get_vertex_by_name_and_type(local_graph, BlockType.RESOURCE,
                                                                                'aws_security_group.this_name_prefix')
    # 'http-80' appears only in the inner module's path -- presumably how the
    # downloaded module is laid out on disk; TODO confirm against fixture.
    output_this_security_group_vpc_id_inner = [o for o in outputs_vpcs if 'http-80' in o.path][0]
    output_this_security_group_vpc_id_outer = [o for o in outputs_vpcs if 'http-80' not in o.path][0]
    self.check_edge(local_graph, node_from=resource_flow_log, node_to=output_this_security_group_vpc_id_inner,
                    expected_label='vpc_id')
    self.check_edge(local_graph, node_from=output_this_security_group_vpc_id_outer,
                    node_to=resource_security_group_this, expected_label='value')
    self.check_edge(local_graph, node_from=output_this_security_group_vpc_id_outer,
                    node_to=resource_security_group_this_name_prefix, expected_label='value')
    # cleanup
    # Remove the modules downloaded by download_external_modules=True.
    if os.path.exists(os.path.join(resources_dir, external_modules_download_path)):
        shutil.rmtree(os.path.join(resources_dir, external_modules_download_path))
def test_build_graph_with_deep_nested_edges(self):
    """Edges created from deeply nested attributes (k8s deployment fixture)."""
    resources_dir = os.path.realpath(os.path.join(TEST_DIRNAME, '../resources/k8_service'))
    manager = TerraformGraphManager(NetworkxConnector())
    local_graph, _ = manager.build_graph_from_source_directory(resources_dir, render_variables=True)
    deployment = self.get_vertex_by_name_and_type(local_graph, BlockType.RESOURCE,
                                                  'kubernetes_deployment.bazel_remote_cache')
    name_local = self.get_vertex_by_name_and_type(local_graph, BlockType.LOCALS, 'name')
    labels_local = self.get_vertex_by_name_and_type(local_graph, BlockType.LOCALS, 'labels')
    # locals.labels references locals.name through a nested map key.
    self.check_edge(local_graph, node_from=labels_local, node_to=name_local,
                    expected_label="labels.app.kubernetes.io/name")
    # The deployment references locals.name from several nested attributes.
    nested_labels = (
        "metadata.name",
        "spec.template.metadata.name",
        "spec.template.spec.container.name",
        "spec.template.spec.volume.1.config_map.name",
    )
    for expected in nested_labels:
        self.check_edge(local_graph, node_from=deployment, node_to=name_local,
                        expected_label=expected)
def test_blocks_from_local_graph_module(self):
    """Converting graph vertices back to tf definitions keeps per-stack
    rendered values (versioning on for stage/prod, off for test)."""
    resources_dir = os.path.realpath(os.path.join(TEST_DIRNAME, '../resources/modules/stacks'))
    manager = TerraformGraphManager(NetworkxConnector())
    local_graph, _ = manager.build_graph_from_source_directory(resources_dir, render_variables=True)
    definitions, _ = convert_graph_vertices_to_tf_definitions(local_graph.vertices, resources_dir)
    inner_main_prefix = os.path.join(os.path.dirname(resources_dir), 's3_inner_modules', 'inner', 'main.tf')
    checked = 0
    for path, definition in definitions.items():
        if not path.startswith(inner_main_prefix):
            continue
        bucket_conf = definition['resource'][0]['aws_s3_bucket']['inner_s3']
        if 'stage/main' in path or 'prod/main' in path:
            self.assertTrue(bucket_conf['versioning'][0]['enabled'][0])
            checked += 1
        elif 'test/main' in path:
            self.assertFalse(bucket_conf['versioning'][0]['enabled'][0])
            checked += 1
    self.assertEqual(checked, 3)
def test_build_graph_with_dynamic_blocks(self):
    """A `dynamic` block should be materialized as a vertex attribute."""
    resources_dir = os.path.realpath(os.path.join(TEST_DIRNAME, '../resources/dynamic_lambda_function'))
    manager = TerraformGraphManager(NetworkxConnector())
    local_graph, _ = manager.build_graph_from_source_directory(resources_dir, render_variables=True)
    attribute_names = local_graph.vertices[0].attributes.keys()
    self.assertIn("dead_letter_config", attribute_names)
def test_get_attribute_dict_with_list_value(self):
    """get_attribute_dict() keeps list-typed attributes (s3 grants) intact."""
    # given
    resources_dir = os.path.join(TEST_DIRNAME, "../resources/s3_bucket_grant")
    manager = TerraformGraphManager(NetworkxConnector())
    local_graph, _ = manager.build_graph_from_source_directory(resources_dir, render_variables=True)
    # when
    first_resource_index = local_graph.vertices_by_block_type.get(BlockType.RESOURCE)[0]
    attributes = local_graph.vertices[first_resource_index].get_attribute_dict()
    # then
    expected_grants = [
        {"permissions": ["READ_ACP"], "type": "Group", "uri": "http://acs.amazonaws.com/groups/global/AllUsers"},
        {"id": "1234567890", "permissions": ["FULL_CONTROL"], "type": "CanonicalUser"},
    ]
    self.assertCountEqual(expected_grants, attributes["grant"])
| 61.171946 | 208 | 0.684074 |
bb68d08f2f5d89eca636785c1c25060b28d7ac52 | 2,222 | py | Python | filter.py | pitpig/rozowoo | 79ec617978252deed688aaf33c67fa1b0cebe56a | [
"MIT"
] | 1 | 2019-01-09T19:15:30.000Z | 2019-01-09T19:15:30.000Z | filter.py | pitpig/rozowoo | 79ec617978252deed688aaf33c67fa1b0cebe56a | [
"MIT"
] | null | null | null | filter.py | pitpig/rozowoo | 79ec617978252deed688aaf33c67fa1b0cebe56a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import logging
from django import template
from model import *
import django.template.defaultfilters as defaultfilters
import urllib
# Django template library; all filters/tags below register themselves on it.
register = template.Library()
from datetime import *
@register.filter
def datetz(date, format):
    """Shift *date* by the blog's configured hour offset, then format it."""
    offset = timedelta(seconds=3600 * g_blog.timedelta)
    return defaultfilters.date(date + offset, format)
@register.filter
def TimestampISO8601(t):
    """Seconds since epoch (1970-01-01) --> ISO 8601 UTC time string."""
    # NOTE(review): relies on a module-level ``time`` name that is not
    # imported here directly -- presumably via ``from model import *``.
    iso_format = '%Y-%m-%dT%H:%M:%SZ'
    return time.strftime(iso_format, time.gmtime(t))
@register.filter
def urlencode(value):
    """Percent-encode a unicode value for use in a URL (as UTF-8 bytes)."""
    utf8_bytes = value.encode('utf8')
    return urllib.quote(utf8_bytes)
@register.filter
def check_current(v1, v2):
    """Return the CSS class "current" when the two values match, else ""."""
    return "current" if v1 == v2 else ""
@register.filter
def excerpt_more(entry,value='..more'):
    # Delegates to the entry model; *value* is the "read more" link text
    # (decoded from the template's byte string under Python 2).
    return entry.get_content_excerpt(value.decode('utf8'))
@register.filter
def dict_value(v1,v2):
    # Template filter: look up key/index *v2* inside mapping/sequence *v1*.
    return v1[v2]
from app.html_filter import html_filter

# Module-wide HTML sanitizer: whitelist of tags (mapped to their allowed
# attributes) that survive filtering of user-supplied content.
plog_filter = html_filter()
plog_filter.allowed = {
    'a': ('href', 'target', 'name'),
    'b': (),
    'blockquote': (),
    'pre': (),
    'em': (),
    'i': (),
    'img': ('src', 'width', 'height', 'alt', 'title'),
    'strong': (),
    'u': (),
    'font': ('color', 'size'),
    'p': (),
    'h1': (),
    'h2': (),
    'h3': (),
    'h4': (),
    'h5': (),
    'h6': (),
    'table': (),
    'tr': (),
    'th': (),
    'td': (),
    'ul': (),
    'ol': (),
    'li': (),
    'br': (),
    'hr': (),
}
# <br> is a void element and takes no closing tag.
plog_filter.no_close += ('br',)
# Extra HTML entities allowed beyond the filter's defaults.
plog_filter.allowed_entities += ('nbsp','ldquo', 'rdquo', 'hellip',)
plog_filter.make_clickable_urls = False # enable this will get a bug about a and img
@register.filter
def do_filter(data):
    # Run *data* through the module-wide HTML whitelist filter above.
    return plog_filter.go(data)
@register.tag("mf")
def do_mf(parser, token):
    """Compile the {% mf <filter> %} ... {% endmf %} block tag.

    Usage: {%mf header%}xxx xxx{%endmf%}
    """
    nodelist = parser.parse(('endmf',))
    parser.delete_first_token()
    return MfNode(nodelist, token)
class MfNode(template.Node):
    """Node compiled by the {% mf %} tag: renders its body, then pipes the
    output through the blog filter named by the tag's first argument."""

    def __init__(self, nodelist, token):
        self.nodelist = nodelist
        self.token = token

    def render(self, context):
        tokens = self.token.split_contents()
        if len(tokens) < 2:
            # BUG FIX: the original ``raise TemplateSyntaxError, "..."`` used a
            # name that is never imported (NameError at runtime) and Python-2-only
            # raise syntax; use the exception from the imported template module.
            raise template.TemplateSyntaxError(
                "'mf' tag takes one argument: the filter name is needed")
        fname = tokens[1]
        output = self.nodelist.render(context)
        return g_blog.tigger_filter(fname, output)
0daf571315d900b1297ea876d815cc733e1908fe | 10,367 | py | Python | genmakefiles.py | mtk-watch/android_external_v8 | 29eb30806a59123b1f9faf9083a12d26fa418fad | [
"BSD-3-Clause"
] | null | null | null | genmakefiles.py | mtk-watch/android_external_v8 | 29eb30806a59123b1f9faf9083a12d26fa418fad | [
"BSD-3-Clause"
] | null | null | null | genmakefiles.py | mtk-watch/android_external_v8 | 29eb30806a59123b1f9faf9083a12d26fa418fad | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
import os
import subprocess
import sys
from antlr4 import *
from gnparser.gnLexer import gnLexer
from gnparser.gnParser import gnParser
from gnparser.gnListener import gnListener
from string import Template
# Toggle verbose tracing of the gn parse/extraction below.
DBG = False
# Reformat the specified Android.bp file
def _bpFmt(filename):
    ## NOTE: bpfmt does not set error code even when the bp file is illegal.
    # Runs "bpfmt -w" in place and echoes its (normally empty) stdout.
    print subprocess.check_output(["bpfmt", "-w", filename])
def _bpList(entries):
return '[' + ",".join(['"' + x + '"' for x in entries]) + ']'
# Write an Android.bp in the simpler format used by v8_libplatform
# and v8_libsampler
def _writeBP(filename, module_name, sources):
    # An empty source list would indicate a generator bug upstream.
    if not sources:
        raise ValueError('No sources for ' + filename)

    # Sources are sorted for stable output; bpfmt canonicalizes formatting
    # afterwards, so the template's own layout does not need to be pretty.
    with open(filename, 'w') as out:
        out.write(Template('''
// GENERATED, do not edit
// for changes, see genmakefiles.py
cc_library_static {
name: "$module_name",
defaults: ["v8_defaults"],
srcs: $srcs,
local_include_dirs: ["src", "include"],
}
''').substitute({'module_name': module_name, 'srcs' : _bpList(sorted(sources))}))
    _bpFmt(filename)
def _writeV8SrcBP(getSourcesFunc):
    """Write Android.v8.bp: common sources plus per-architecture source lists.

    getSourcesFunc(arch) returns the set of sources for the given arch
    (None = architecture-independent baseline).
    """
    sources = getSourcesFunc(None)
    if not sources:
        raise ValueError('Must specify v8_base target properties')
    sources.add('src/setup-isolate-full.cc')
    # sources.add('src/builtins/setup-builtins-internal.cc')
    # sources.add('src/interpreter/setup-interpreter-internal.cc')
    # Per-arch lists contain only files NOT already in the common set.
    arm_src = list(getSourcesFunc('arm') - sources)
    arm64_src = list(getSourcesFunc('arm64') - sources)
    x86_src = list(getSourcesFunc('x86') - sources)
    x86_64_src = list(getSourcesFunc('x64') - sources)
    mips_src = list(getSourcesFunc('mips') - sources)
    mips64_src = list(getSourcesFunc('mips64') - sources)
    filename = 'Android.v8.bp'
    # bpfmt canonicalizes the file afterwards, so template layout is loose.
    with open(filename, 'w') as out:
        out.write(Template('''
// GENERATED, do not edit
// for changes, see genmakefiles.py
cc_library_static {
name: "libv8src",
defaults: ["v8_defaults"],
srcs: $srcs,
arch: {
arm: {
srcs: $arm_src,
},
arm64: {
srcs: $arm64_src,
},
mips: {
srcs: $mips_src,
},
mips64: {
srcs: $mips64_src,
},
x86: {
srcs: $x86_src,
},
x86_64: {
srcs: $x86_64_src,
},
},
target: {
android: {
cflags: ["-DANDROID_LINK_SHARED_ICU4C"],
},
},
local_include_dirs: ["src"],
header_libs: ["libicuuc_headers", "libicui18n_headers"],
generated_headers: ["v8_torque_file"],
generated_sources: ["v8_torque_file_cc"],
}
''').substitute({'srcs': _bpList(sorted(sources)),
                 'arm_src': _bpList(sorted(arm_src)),
                 'arm64_src': _bpList(sorted(arm64_src)),
                 'mips_src': _bpList(sorted(mips_src)),
                 'mips64_src': _bpList(sorted(mips64_src)),
                 'x86_src': _bpList(sorted(x86_src)),
                 'x86_64_src': _bpList(sorted(x86_64_src)),
                 }))
    _bpFmt(filename)
def _writeGeneratedFilesBP(sources):
    """Write the filegroup listing the JS library files consumed by js2c."""
    if not sources:
        raise ValueError('Must specify j2sc target properties')
    filename = 'Android.v8gen.bp'
    with open(filename, 'w') as out:
        out.write(Template('''
// GENERATED, do not edit
// for changes, see genmakefiles.py
filegroup {
name: "v8_js_lib_files",
srcs: $srcs,
}
''').substitute({'srcs' : _bpList(sources)})) ## Not sorted intentionally
    _bpFmt(filename)
def _writeLibBaseBP(sources):
    """Write Android.base.bp for libv8base, with per-target platform sources."""
    if not sources:
        raise ValueError('Must specify v8_libbase target properties')
    filename = 'Android.base.bp'
    # bpfmt canonicalizes the file afterwards, so template layout is loose.
    with open(filename, 'w') as out:
        out.write(Template('''
// GENERATED, do not edit
// for changes, see genmakefiles.py
cc_library_static {
name: "libv8base",
defaults: ["v8_defaults"],
host_supported: true,
srcs: $srcs,
local_include_dirs: ["src"],
target: {
android: {
srcs: ["src/base/debug/stack_trace_android.cc"],
},
linux: {
srcs: ["src/base/platform/platform-linux.cc"],
},
host: {
srcs: ["src/base/debug/stack_trace_posix.cc"],
cflags: ["-UANDROID"],
},
darwin: {
srcs: ["src/base/platform/platform-macos.cc"],
},
},
}
''').substitute({'srcs' : _bpList(sorted(sources))}))
    _bpFmt(filename)
def _expr_to_str(expr):
    """Reduce a gn expression node to its string/identifier text, else None."""
    primary = expr.unaryexpr().primaryexpr()
    if primary.String():
        # Strip the quotation marks surrounding the string token.
        return primary.String().getText()[1:-1]
    if primary.Identifier():
        return primary.Identifier().getText()
    if DBG: print('WARN: unhandled primary expression')
    return None
class V8GnListener(gnListener):
    """ANTLR listener that walks a parsed BUILD.gn tree and collects the
    ``sources`` assignments of one named target, evaluating the gn
    conditionals against a fixed build configuration."""

    def __init__(self, target, arch, only_cc_files):
        # NOTE(review): super(gnListener, ...) skips gnListener's own
        # __init__ -- looks unintended (usually super(V8GnListener, self));
        # harmless only if gnListener has no state. TODO confirm.
        super(gnListener, self).__init__()
        self._match = False          # inside the call block of the target?
        self._depth = 0              # statement nesting depth in the tree
        self._target = target        # gn target name to extract
        self._arch = arch            # arch used to evaluate cpu conditions
        self._sources = []           # collected source paths (in file order)
        # Pre-decided truth values for the gn conditions we expect to see;
        # unknown conditions are assumed False (with a warning) on first use.
        self._fixed_conditions = {
            'use_jumbo_build' : True,
            'use_jumbo_build==true' : True,
            'is_win' : False,
            'is_linux' : False,
            'v8_postmortem_support' : False,
            'v8_enable_i18n_support': True,
            '!v8_enable_i18n_support': False,
            'current_os!="aix"' : True,
            'is_posix||is_fuchsia' : True,
            'v8_current_cpu=="arm"' : arch == 'arm',
            'v8_current_cpu=="arm64"' : arch == 'arm64',
            'v8_current_cpu=="x86"' : arch == 'x86',
            'v8_current_cpu=="x64"' : arch == 'x64',
            'v8_current_cpu=="mips"||v8_current_cpu=="mipsel"' : arch == 'mips',
            'v8_current_cpu=="mips64"||v8_current_cpu=="mips64el"' : arch == 'mips64',
            'v8_current_cpu=="ppc"||v8_current_cpu=="ppc64"' : False,
            'v8_current_cpu=="s390"||v8_current_cpu=="s390x"' : False,
        }
        self._only_cc_files = only_cc_files

    def _match_call_target(self, ctx):
        # True when *ctx* is a target-defining call whose first argument
        # equals the target we are extracting.
        call_type = ctx.Identifier().getText()
        if not call_type in ['v8_source_set', 'v8_component', 'action']: return False
        call_name = _expr_to_str(ctx.exprlist().expr(0))
        return call_name == self._target

    def enterCall(self, ctx):
        # Only top-level (depth 1) calls can open the target's block.
        if self._depth == 1 and self._match_call_target(ctx):
            self._match = True
            self._conditions = [] ## [(value, condition), ...]
            if DBG: print 'Found call', str(ctx.Identifier()), ctx.exprlist().getText()

    def exitCall(self, ctx):
        if self._match and self._match_call_target(ctx):
            self._match = False
            self._conditions = []
            if DBG: print 'Left call'

    def _extract_sources(self, ctx):
        # Handle ``sources = [...]`` / ``sources += [...]`` assignments.
        op = ctx.AssignOp().getText()
        if not ctx.expr().unaryexpr().primaryexpr().exprlist():
            ## sources += check_header_includes_sources
            return
        srcs = map(_expr_to_str, ctx.expr().unaryexpr().primaryexpr().exprlist().expr())
        if self._only_cc_files:
            srcs = [x for x in srcs if x.endswith('.cc')]
        if DBG: print '_extract_sources: ', len(srcs), "condition:", self._conditions
        if op == '=':
            if self._sources:
                print "WARN: override sources"
            self._sources = srcs
        elif op == '+=':
            self._sources.extend(srcs)

    def _compute_condition(self, ctx):
        # Evaluate a gn condition via the fixed table; unknown -> False.
        condition = ctx.expr().getText()
        if DBG: print '_extract_condition', condition
        if condition in self._fixed_conditions:
            result = self._fixed_conditions[condition]
        else:
            print 'WARN: unknown condition, assume False', condition
            self._fixed_conditions[condition] = False
            result = False
        if DBG: print 'Add condition:', condition
        self._conditions.append((result, condition))

    def enterCondition(self, ctx):
        if not self._match: return
        self._compute_condition(ctx)

    def enterElsec(self, ctx):
        # Entering the else-branch negates the innermost condition.
        if not self._match: return
        c = self._conditions[-1]
        self._conditions[-1] = (not c[0], c[1])
        if DBG: print 'Negate condition:', self._conditions[-1]

    def exitCondition(self, ctx):
        if not self._match: return
        if DBG: print 'Remove conditions: ', self._conditions[-1]
        del self._conditions[-1]

    def _flatten_conditions(self):
        # All enclosing conditions must evaluate True for sources to count.
        if DBG: print '_flatten_conditions: ', self._conditions
        for condition, _ in self._conditions:
            if not condition:
                return False
        return True

    def enterAssignment(self, ctx):
        if not self._match: return
        if ctx.lvalue().Identifier().getText() == "sources":
            if self._flatten_conditions():
                self._extract_sources(ctx)

    def enterStatement(self, ctx):
        self._depth += 1

    def exitStatement(self, ctx):
        self._depth -= 1

    def get_sources(self):
        """Return the collected sources, de-duplicated, keeping first-seen order."""
        seen = set()
        result = []
        ## Deduplicate list while maintaining ordering. needed for js2c files
        for s in self._sources:
            if not s in seen:
                result.append(s)
                seen.add(s)
        return result
def parseSources(tree, target, arch=None, only_cc_files=True):
    """Walk the parsed BUILD.gn *tree* and collect sources for *target*."""
    collector = V8GnListener(target, arch, only_cc_files)
    ParseTreeWalker().walk(collector, tree)
    return collector.get_sources()
def GenerateMakefiles():
    """Parse ./BUILD.gn with the gn grammar and regenerate every Android.bp
    fragment (platform, sampler, v8 sources, generated JS files, libbase)."""
    f = FileStream(os.path.join(os.getcwd(), './BUILD.gn'))
    lexer = gnLexer(f)
    stream = CommonTokenStream(lexer)
    parser = gnParser(stream)
    tree = parser.r()
    _writeBP('Android.platform.bp', 'libv8platform', parseSources(tree, "v8_libplatform"))
    _writeBP('Android.sampler.bp', 'libv8sampler', parseSources(tree, "v8_libsampler"))
    # v8_base and v8_initializers are merged into one static library.
    _writeV8SrcBP(lambda arch: set(parseSources(tree, "v8_base", arch) + parseSources(tree, "v8_initializers", arch)))
    # js2c file order matters, and non-.cc files must be kept.
    _writeGeneratedFilesBP(parseSources(tree, "js2c", None, False))
    _writeLibBaseBP(parseSources(tree, "v8_libbase"))
if __name__ == '__main__':
    # Entry point: regenerate all Android .bp fragments from BUILD.gn.
    GenerateMakefiles()
| 33.334405 | 118 | 0.593904 |
f3e1beb5f7bd251084f124c79eb225d34be1c904 | 3,735 | py | Python | abtools/utils/decorators.py | menis/abtools | bfc7c6c508b174bb3b74d8f152156242ddd2ee77 | [
"MIT"
] | 9 | 2016-06-13T20:00:04.000Z | 2022-03-19T19:07:23.000Z | abtools/utils/decorators.py | menis/abtools | bfc7c6c508b174bb3b74d8f152156242ddd2ee77 | [
"MIT"
] | null | null | null | abtools/utils/decorators.py | menis/abtools | bfc7c6c508b174bb3b74d8f152156242ddd2ee77 | [
"MIT"
] | 4 | 2018-04-10T09:05:21.000Z | 2022-01-27T21:23:06.000Z | #!/usr/bin/env python
# filename: decorators.py
#
# Copyright (c) 2015 Bryan Briney
# License: The MIT license (http://opensource.org/licenses/MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software
# and associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute,
# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
def lazy_property(func):
    """Property decorator that computes its value once and caches it.

    The wrapped function runs on first attribute access; its result is
    stored on the instance under ``_lazy_<name>`` and returned directly on
    subsequent reads. Assigning to the property overwrites the cached
    value, and deleting it discards the cache so the next access
    recomputes from (possibly updated) instance state::

        class MyClass():
            @lazy_property
            def compute(self):
                # computationally intense stuff
                return result

        c = MyClass()
        c.compute      # computed now
        c.compute      # cached
        del c.compute  # force recomputation on next access
    """
    cache_name = '_lazy_' + func.__name__

    def getter(self):
        # Compute and memoize only on the first access.
        if not hasattr(self, cache_name):
            setattr(self, cache_name, func(self))
        return getattr(self, cache_name)

    def setter(self, value):
        setattr(self, cache_name, value)

    def deleter(self):
        # Dropping the cached attribute forces recomputation next time.
        if hasattr(self, cache_name):
            delattr(self, cache_name)

    return property(getter, setter, deleter)
def coroutine(func):
    '''
    Initializes a coroutine -- takes a generator function, creates the
    generator and advances it to the first ``yield`` so it is immediately
    ready to receive values via ``send()``.
    '''
    def start(*args, **kwargs):
        cr = func(*args, **kwargs)
        # BUG FIX: the original ``cr.next()`` is Python-2-only; the built-in
        # next() works on Python 2.6+ and Python 3.
        next(cr)
        return cr
    return start
| 31.923077 | 99 | 0.624096 |
51819166b8d9013f41886275a6f2467e807ce7e0 | 7,744 | py | Python | qiskit/aqua/algorithms/adaptive/vq_algorithm.py | aaita92/qiskit-aqua | 8681045790123eefc347dfd05dee547bddc3d2df | [
"Apache-2.0"
] | 2 | 2020-01-22T23:45:59.000Z | 2020-01-22T23:46:02.000Z | qiskit/aqua/algorithms/adaptive/vq_algorithm.py | aaita92/qiskit-aqua | 8681045790123eefc347dfd05dee547bddc3d2df | [
"Apache-2.0"
] | null | null | null | qiskit/aqua/algorithms/adaptive/vq_algorithm.py | aaita92/qiskit-aqua | 8681045790123eefc347dfd05dee547bddc3d2df | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
The Variational Quantum Algorithm Base Class. This class can be used an
interface for working with Variation Quantum
Algorithms, such as VQE, QAOA, or VSVM, and also provides helper utilities
for implementing new variational algorithms.
Writing a new variational algorithm is a simple as extending this class,
implementing a cost function for the new
algorithm to pass to the optimizer, and running the find_minimum()
function below to begin the optimization.
Alternatively, all of the functions below can be overridden to opt-out of
this infrastructure but still meet the
interface requirements.
"""
from typing import Optional, Callable
import time
import logging
from abc import abstractmethod
import numpy as np
from qiskit.aqua import AquaError
from qiskit.aqua.algorithms import QuantumAlgorithm
from qiskit.aqua.components.optimizers import Optimizer
from qiskit.aqua.components.variational_forms import VariationalForm
# Module-level logger used by find_minimum below.
logger = logging.getLogger(__name__)
# pylint: disable=invalid-name
class VQAlgorithm(QuantumAlgorithm):
    """
    The Variational Quantum Algorithm Base Class.

    Subclasses provide a cost function (and the abstract ``get_optimal_*``
    accessors) and drive the classical optimization via ``find_minimum``.
    """

    def __init__(self,
                 var_form: VariationalForm,
                 optimizer: Optimizer,
                 cost_fn: Optional[Callable] = None,
                 initial_point: Optional[np.ndarray] = None) -> None:
        """
        Args:
            var_form: parameterized variational form (the ansatz).
            optimizer: classical optimizer minimizing the cost function.
            cost_fn: objective evaluated at each parameter point; may also
                be supplied later through ``find_minimum``.
            initial_point: optional starting values for the parameters.

        Raises:
            AquaError: if ``var_form`` or ``optimizer`` is None.
        """
        super().__init__()
        if var_form is None:
            raise AquaError('Missing variational form.')
        self._var_form = var_form
        if optimizer is None:
            raise AquaError('Missing optimizer.')
        self._optimizer = optimizer
        self._cost_fn = cost_fn
        self._initial_point = initial_point
        self._parameterized_circuits = None

    @abstractmethod
    def get_optimal_cost(self):
        """ get optimal cost """
        raise NotImplementedError()

    @abstractmethod
    def get_optimal_circuit(self):
        """ get optimal circuit """
        raise NotImplementedError()

    @abstractmethod
    def get_optimal_vector(self):
        """ get optimal vector """
        raise NotImplementedError()

    def find_minimum(self, initial_point=None, var_form=None,
                     cost_fn=None, optimizer=None, gradient_fn=None):
        """Optimize to find the minimum cost value.

        Any argument left as None falls back to the value supplied at
        construction time.

        Returns:
            dict: Optimized variational parameters, and corresponding minimum cost value.

        Raises:
            ValueError: invalid input
        """
        initial_point = initial_point if initial_point is not None else self._initial_point
        var_form = var_form if var_form is not None else self._var_form
        cost_fn = cost_fn if cost_fn is not None else self._cost_fn
        optimizer = optimizer if optimizer is not None else self._optimizer

        nparms = var_form.num_parameters
        bounds = var_form.parameter_bounds

        if initial_point is not None and len(initial_point) != nparms:
            raise ValueError(
                'Initial point size {} and parameter size {} mismatch'.format(
                    len(initial_point), nparms))
        if len(bounds) != nparms:
            raise ValueError('Variational form bounds size does not match parameter size')
        # If *any* value is *equal* in bounds array to None then the problem does *not* have bounds
        problem_has_bounds = not np.any(np.equal(bounds, None))
        # Check capabilities of the optimizer
        if problem_has_bounds:
            if not optimizer.is_bounds_supported:
                raise ValueError('Problem has bounds but optimizer does not support bounds')
        else:
            if optimizer.is_bounds_required:
                raise ValueError('Problem does not have bounds but optimizer requires bounds')
        if initial_point is not None:
            if not optimizer.is_initial_point_supported:
                raise ValueError('Optimizer does not support initial point')
        else:
            if optimizer.is_initial_point_required:
                # Draw a random start inside the bounds (defaulting
                # unbounded dimensions to +/- 2*pi).
                low = [(l if l is not None else -2 * np.pi) for (l, u) in bounds]
                high = [(u if u is not None else 2 * np.pi) for (l, u) in bounds]
                initial_point = self.random.uniform(low, high)

        start = time.time()
        if not optimizer.is_gradient_supported:  # ignore the passed gradient function
            gradient_fn = None

        logger.info('Starting optimizer.\nbounds=%s\ninitial point=%s', bounds, initial_point)
        opt_params, opt_val, num_optimizer_evals = optimizer.optimize(var_form.num_parameters,
                                                                      cost_fn,
                                                                      variable_bounds=bounds,
                                                                      initial_point=initial_point,
                                                                      gradient_function=gradient_fn)
        eval_time = time.time() - start
        ret = {}
        ret['num_optimizer_evals'] = num_optimizer_evals
        ret['min_val'] = opt_val
        ret['opt_params'] = opt_params
        ret['eval_time'] = eval_time
        return ret

    def get_prob_vector_for_params(self, construct_circuit_fn, params_s,
                                   quantum_instance, construct_circuit_args=None):
        """ Helper function to get probability vectors for a set of params """
        # BUG FIX: the None default used to be expanded with ``**`` below,
        # raising TypeError whenever the argument was omitted; use {} instead.
        if construct_circuit_args is None:
            construct_circuit_args = {}
        circuits = []
        for params in params_s:
            circuit = construct_circuit_fn(params, **construct_circuit_args)
            circuits.append(circuit)
        results = quantum_instance.execute(circuits)

        probs_s = []
        for circuit in circuits:
            if quantum_instance.is_statevector:
                # Statevector backend: probabilities are |amplitude|^2.
                sv = results.get_statevector(circuit)
                probs = np.real(sv * np.conj(sv))
                probs_s.append(probs)
            else:
                counts = results.get_counts(circuit)
                probs_s.append(self.get_probabilities_for_counts(counts))
        return np.array(probs_s)

    def get_probabilities_for_counts(self, counts):
        """Convert a measurement-counts dict into a full probability vector."""
        shots = sum(counts.values())
        # Vector length is 2**n where n is the bitstring length of the keys.
        states = int(2 ** len(list(counts.keys())[0]))
        probs = np.zeros(states)
        for k, v in counts.items():
            probs[int(k, 2)] = v / shots
        return probs

    @property
    def initial_point(self):
        """ returns initial point """
        return self._initial_point

    @initial_point.setter
    def initial_point(self, new_value):
        """ set initial point """
        self._initial_point = new_value

    @property
    @abstractmethod
    def optimal_params(self):
        """ returns optimal parameters """
        raise NotImplementedError()

    @property
    def var_form(self):
        """ returns var forms """
        return self._var_form

    @var_form.setter
    def var_form(self, new_value):
        """ sets var forms """
        self._var_form = new_value

    @property
    def optimizer(self):
        """ returns optimizer """
        return self._optimizer

    def cleanup_parameterized_circuits(self):
        """ set parameterized circuits to None """
        self._parameterized_circuits = None
| 37.230769 | 100 | 0.634168 |
7247f929364d2de8f8be14e48728efc4d36406ae | 5,508 | py | Python | service/paginator.py | getcircle/python-soa | 590b53691ff959713e331c25353d8c9280e10100 | [
"MIT"
] | null | null | null | service/paginator.py | getcircle/python-soa | 590b53691ff959713e331c25353d8c9280e10100 | [
"MIT"
] | null | null | null | service/paginator.py | getcircle/python-soa | 590b53691ff959713e331c25353d8c9280e10100 | [
"MIT"
] | null | null | null | # Copy of Django's paginator: https://github.com/django/django/blob/master/django/core/paginator.py
import collections

try:
    # Python 3: ABCs live in collections.abc (the collections.* aliases
    # were removed in Python 3.10).
    from collections.abc import Sequence
except ImportError:  # Python 2 fallback
    from collections import Sequence

from math import ceil
class InvalidPage(Exception):
    """Base error raised for any invalid page request."""
    pass
class PageNotAnInteger(InvalidPage):
    """Raised when the requested page number cannot be coerced to int."""
    pass
class EmptyPage(InvalidPage):
    """Raised when the requested page contains no results."""
    pass
class Paginator(object):
    """Splits ``object_list`` into pages of ``per_page`` items.

    ``orphans`` lets a short trailing page be absorbed into the previous
    one, ``count`` can pre-supply the total (marking the list as already
    paginated), and ``disabled`` collapses everything onto a single page.
    """

    def __init__(self, object_list, per_page, orphans=0,
                 allow_empty_first_page=True, count=None, disabled=False):
        self.object_list = object_list
        self.per_page = int(per_page)
        self.orphans = int(orphans)
        self.allow_empty_first_page = allow_empty_first_page
        self._num_pages = None
        self._count = None if count is None else int(count)
        self._pre_calculated = count is not None
        if disabled:
            # One page holds everything; keep the requested size for an
            # empty list so per_page never becomes zero.
            self.per_page = len(object_list) or self.per_page

    def validate_number(self, number):
        """Validate the given 1-based page number and return it as an int."""
        try:
            number = int(number)
        except (TypeError, ValueError):
            raise PageNotAnInteger('That page number is not an integer')
        if number < 1:
            raise EmptyPage('That page number is less than 1')
        if number > self.num_pages and not (
                number == 1 and self.allow_empty_first_page):
            raise EmptyPage('That page contains no results')
        return number

    def get_page_bottom_top(self, number):
        """Return the (bottom, top) slice bounds for the given page number."""
        number = self.validate_number(number)
        bottom = (number - 1) * self.per_page
        top = bottom + self.per_page
        if top + self.orphans >= self.count:
            # Fold any trailing orphans into this (final) page.
            top = self.count
        return bottom, top

    def page(self, number):
        """Return a Page object for the given 1-based page number."""
        bottom, top = self.get_page_bottom_top(number)
        if self._pre_calculated:
            # The caller already paginated the list externally.
            page_object_list = self.object_list
        else:
            page_object_list = self.object_list[bottom:top]
        return self._get_page(page_object_list, number, self)

    def _get_page(self, *args, **kwargs):
        """Build a single page.

        Hook for subclasses that want an alternative to the standard
        :cls:`Page` object.
        """
        return Page(*args, **kwargs)

    def _get_count(self):
        """Total number of objects across all pages (computed lazily)."""
        if self._count is None:
            try:
                self._count = self.object_list.count()
            except (AttributeError, TypeError):
                # Plain sequences either lack count() (AttributeError) or
                # require an argument for it (TypeError); fall back to len().
                self._count = len(self.object_list)
        return self._count
    count = property(_get_count)

    def _get_num_pages(self):
        """Total number of pages (computed lazily)."""
        if self._num_pages is not None:
            return self._num_pages
        if self.count == 0 and not self.allow_empty_first_page:
            self._num_pages = 0
        else:
            hits = max(1, self.count - self.orphans)
            self._num_pages = int(ceil(hits / float(self.per_page)))
        return self._num_pages
    num_pages = property(_get_num_pages)

    def _get_page_range(self):
        """1-based list of page numbers, handy for template for-loops."""
        return list(range(1, self.num_pages + 1))
    page_range = property(_get_page_range)
# NOTE: the base class changed from ``collections.Sequence`` -- an alias
# removed in Python 3.10 -- to the Sequence ABC imported at the top of the
# file (collections.abc on Python 3, collections on Python 2).
class Page(Sequence):
    """One page of results; behaves as a read-only sequence of its objects."""

    def __init__(self, object_list, number, paginator):
        self.object_list = object_list
        self.number = number  # 1-based page number
        self.paginator = paginator

    def __repr__(self):
        return '<Page %s of %s>' % (self.number, self.paginator.num_pages)

    def __len__(self):
        return len(self.object_list)

    def __getitem__(self, index):
        # The object_list is converted to a list so that if it was a QuerySet
        # it won't be a database hit per __getitem__.
        if not isinstance(self.object_list, list):
            self.object_list = list(self.object_list)
        return self.object_list[index]

    def has_next(self):
        return self.number < self.paginator.num_pages

    def has_previous(self):
        return self.number > 1

    def has_other_pages(self):
        return self.has_previous() or self.has_next()

    def next_page_number(self):
        return self.paginator.validate_number(self.number + 1)

    def previous_page_number(self):
        return self.paginator.validate_number(self.number - 1)

    def start_index(self):
        """
        Returns the 1-based index of the first object on this page,
        relative to total objects in the paginator.
        """
        # Special case, return zero if no items.
        if self.paginator.count == 0:
            return 0
        return (self.paginator.per_page * (self.number - 1)) + 1

    def end_index(self):
        """
        Returns the 1-based index of the last object on this page,
        relative to total objects found (hits).
        """
        # Special case for the last page because there can be orphans.
        if self.number == self.paginator.num_pages:
            return self.paginator.count
        return self.number * self.paginator.per_page
| 32.210526 | 99 | 0.611837 |
741cfd4513df7d3ca1fdbcaf14b96a5a5972210e | 1,511 | py | Python | Pypractice/test.py | JaydenYL/Projects | b51c0476f7be80f0b0d6aa84592966ecb4343d76 | [
"MIT"
] | 5 | 2021-09-06T04:27:56.000Z | 2021-12-14T14:50:27.000Z | Pypractice/test.py | JaydenYL/Projects | b51c0476f7be80f0b0d6aa84592966ecb4343d76 | [
"MIT"
] | null | null | null | Pypractice/test.py | JaydenYL/Projects | b51c0476f7be80f0b0d6aa84592966ecb4343d76 | [
"MIT"
] | null | null | null | import unittest
import random
import string
import subprocess
def rand_string(length=15):
    """Return a random string of *length* printable characters.

    The alphabet is ``string.printable`` minus its five trailing
    non-space whitespace characters (tab, newline, CR, VT, FF).
    """
    alphabet = string.printable[:-5]
    last = len(alphabet) - 1
    return ''.join(alphabet[random.randint(0, last)] for _ in range(length))
# Module-level random key; regenerated on every import, shared by the tests.
global_key = rand_string()
class SampleTest(unittest.TestCase):
    """Drives ``pybreak.py`` as a subprocess and checks its key guessing.

    The child process is expected to print a guess per line on stdout and
    read feedback lines from stdin; it gets two attempts per key.
    """

    def __init__(self, *args, **kwargs):
        # Per-instance random key, distinct from the module-level global_key.
        self.class_key = rand_string()
        super().__init__(*args, **kwargs)

    def guess(self, process, key):
        """Read one guess from the child and reply with feedback on stdin."""
        guess = process.stdout.readline().decode('utf-8').strip('\n')
        # Double braces leave a literal "{message}" placeholder for the
        # second .format() call below.
        response = "{guess} {{message}}".format(guess=guess)
        if guess == key:
            response = response.format(message="is correct!")
        else:
            # `self` is stringified via __repr__ below, i.e. "Is not the key!".
            response = response.format(message=self)
        process.stdin.write((response + '\n').encode())
        process.stdin.flush()
        return guess

    def key_check(self, key):
        """Launch the guesser and assert it finds `key` within two tries."""
        process = subprocess.Popen('python pybreak.py'.split(),
                                   stdin=subprocess.PIPE,
                                   stdout=subprocess.PIPE
                                   )
        first_guess = self.guess(process, key)
        print(first_guess)
        second_guess = self.guess(process, key)
        print(second_guess)
        process.kill()
        if (key not in first_guess) and (key not in second_guess):
            # Deliberately failing assertion, used to surface the bad guess.
            self.assertIn(key, second_guess)
        return

    def test_global_key(self):
        global global_key
        key = global_key
        self.key_check(key)
        return

    def test_class_key(self):
        key = self.class_key
        self.key_check(key)
        return

    def __repr__(self):
        # Feedback message sent to the child process on a wrong guess.
        return "Is not the key!"
if __name__ == '__main__':
unittest.main() | 21.898551 | 63 | 0.684977 |
a61d581f084c3f6bf19a46821e4d2b2362444ce1 | 1,239 | py | Python | setup.py | afeinstein20/tess_infos | c546f2c775fea203f2dfce1610305cae184a9b69 | [
"MIT"
] | null | null | null | setup.py | afeinstein20/tess_infos | c546f2c775fea203f2dfce1610305cae184a9b69 | [
"MIT"
] | null | null | null | setup.py | afeinstein20/tess_infos | c546f2c775fea203f2dfce1610305cae184a9b69 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 16 13:37:42 2020
@author:
Maximilian N. Günther
MIT Kavli Institute for Astrophysics and Space Research,
Massachusetts Institute of Technology,
77 Massachusetts Avenue,
Cambridge, MA 02109,
USA
Email: maxgue@mit.edu
Web: www.mnguenther.com
"""
from setuptools import setup, find_packages
# Packaging metadata for the `tess_infos` distribution.
setup(
    name = 'tess_infos',
    packages = find_packages(),
    version = '0.1.0',
    description = 'Extremely fast Pythonic access to the full TICv8, GAIA DR2 and Banyan Sigma parameters for all TESS short-cadence targets',
    author = 'Maximilian N. Günther',
    author_email = 'maxgue@mit.edu',
    url = 'https://github.com/MNGuenther/tess_infos',
    download_url = 'https://github.com/MNGuenther/tess_infos',
    license='MIT',
    classifiers=['Development Status :: 4 - Beta', #3 - Alpha / 4 - Beta / 5 - Production/Stable
                 'Intended Audience :: Science/Research',
                 'License :: OSI Approved :: MIT License',
                 'Programming Language :: Python'],
    # Runtime dependencies; feather/pyarrow provide the fast columnar reads.
    install_requires=['feather-format>=0.4.1', 'pyarrow', 'pandas',
                      'numpy', 'tqdm', 'matplotlib'],
    include_package_data = False
)
| 28.159091 | 142 | 0.653753 |
3ac4349d4f7370291c657a23307f34ddd86fd394 | 8,095 | py | Python | sgk/sparse/sparse_matrix.py | egonrian/google-research | 8177adbe9ca0d7e5a9463b54581fe6dd27be0974 | [
"Apache-2.0"
] | 3 | 2021-01-18T04:46:49.000Z | 2021-03-05T09:21:40.000Z | sgk/sparse/sparse_matrix.py | Alfaxad/google-research | 2c0043ecd507e75e2df9973a3015daf9253e1467 | [
"Apache-2.0"
] | 7 | 2021-11-10T19:44:38.000Z | 2022-02-10T06:48:39.000Z | sgk/sparse/sparse_matrix.py | Alfaxad/google-research | 2c0043ecd507e75e2df9973a3015daf9253e1467 | [
"Apache-2.0"
] | 4 | 2021-02-08T10:25:45.000Z | 2021-04-17T14:46:26.000Z | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines primitive sparse matrix type for use with sparse ops."""
import numpy as np
import tensorflow.compat.v1 as tf
from sgk.sparse import connectors
from sgk.sparse import initializers
SPARSE_MATRIX_COLLECTION = "sparse_matrices"
def get_trainable_sparse_matrices():
  """Return every SparseMatrix registered in the trainable collection."""
  matrices = tf.get_collection(SPARSE_MATRIX_COLLECTION)
  return matrices
def track_trainable_sparse_matrix(sm):
  """Register a trainable SparseMatrix in the global collection (idempotent)."""
  assert sm.trainable
  tracked = tf.get_collection_ref(SPARSE_MATRIX_COLLECTION)
  if sm not in tracked:
    tf.add_to_collection(SPARSE_MATRIX_COLLECTION, sm)
def _dense_to_sparse(matrix):
"""Converts dense numpy matrix to a csr sparse matrix."""
assert len(matrix.shape) == 2
# Extract the nonzero values.
values = matrix.compress((matrix != 0).flatten())
# Calculate the offset of each row.
mask = (matrix != 0).astype(np.int32)
row_offsets = np.concatenate(([0], np.cumsum(np.add.reduce(mask, axis=1))),
axis=0)
# Create the row indices and sort them.
row_indices = np.argsort(-1 * np.diff(row_offsets))
# Extract the column indices for the nonzero values.
x = mask * (np.arange(matrix.shape[1]) + 1)
column_indices = x.compress((x != 0).flatten())
column_indices = column_indices - 1
# Cast the desired precision.
values = values.astype(np.float32)
row_indices, row_offsets, column_indices = [
x.astype(np.uint32) for x in
[row_indices, row_offsets, column_indices]
]
return values, row_indices, row_offsets, column_indices
class SparseTopology(object):
  """Describes a sparse matrix, with no values.

  Holds only the sparsity pattern (row indices/offsets, column indices) as
  non-trainable TF variables, plus shape/dtype bookkeeping.
  """

  def __init__(self,
               name,
               shape=None,
               mask=None,
               connector=connectors.Uniform(0.8),
               dtype=tf.float32):
    """Builds the topology.

    Args:
      name: Prefix for the TF variables created here.
      shape: [rows, cols]; required iff `mask` is None.
      mask: Optional dense numpy array whose nonzeros define the pattern.
      connector: Callable that sparsifies a dense all-ones matrix; only
        used when `mask` is None.
      dtype: Element dtype reported by this topology.
    """
    if mask is None:
      assert shape is not None and len(shape) == 2
      mask = connector(np.ones(shape))
      self._shape = shape
    else:
      assert shape is None
      assert len(mask.shape) == 2
      self._shape = mask.shape
    self._name = name
    self._dtype = dtype
    self._sparsity = 1.0 - np.count_nonzero(mask) / mask.size
    # Create a numpy version of the sparse mask.
    _, row_indices_, row_offsets_, column_indices_ = _dense_to_sparse(mask)
    # Create tensors for the mask shape on the host. These are for internal
    # use and should generally not be used by end-user. Use the normal python
    # 'shape' property instead.
    with tf.device("cpu"):
      self._rows = tf.get_variable(
          initializer=self._shape[0],
          trainable=False,
          name=self._name + "_rows",
          dtype=tf.int32)
      self._columns = tf.get_variable(
          initializer=self._shape[1],
          trainable=False,
          name=self._name + "_columns",
          dtype=tf.int32)
    # Convert the sparse mask to TensorFlow variables.
    self._row_indices = tf.get_variable(
        initializer=row_indices_,
        trainable=False,
        name=self._name + "_row_indices",
        dtype=tf.uint32)
    self._row_offsets = tf.get_variable(
        initializer=row_offsets_,
        trainable=False,
        name=self._name + "_row_offsets",
        dtype=tf.uint32)
    self._column_indices = tf.get_variable(
        initializer=column_indices_,
        trainable=False,
        name=self._name + "_column_indices",
        dtype=tf.uint32)

  @property
  def name(self):
    return self._name

  @property
  def shape(self):
    # Python-side shape; prefer this over the internal host tensors.
    return self._shape

  @property
  def size(self):
    # Total number of entries (dense), not just nonzeros.
    return np.prod(self._shape)

  @property
  def dtype(self):
    return self._dtype

  @property
  def sparsity(self):
    # Fraction of zero entries, in [0, 1].
    return self._sparsity

  @property
  def row_indices(self):
    return self._row_indices

  @property
  def row_offsets(self):
    return self._row_offsets

  @property
  def column_indices(self):
    return self._column_indices
class SparseMatrix(object):
  """Compressed sparse row matrix type.

  Stores nonzero values as a (possibly trainable) TF variable and the CSR
  topology (row indices/offsets, column indices) as non-trainable variables.
  """

  def __init__(self,
               name,
               shape=None,
               matrix=None,
               initializer=initializers.Uniform(),
               connector=connectors.Uniform(0.8),
               trainable=True,
               dtype=tf.float32):
    """Builds the sparse matrix.

    Args:
      name: Prefix for the TF variables created here.
      shape: [rows, cols]; required iff `matrix` is None.
      matrix: Optional dense numpy array providing values and pattern.
      initializer: Produces the dense values when `matrix` is None.
      connector: Sparsifies the initialized dense values.
      trainable: Whether the values variable is trainable.
      dtype: Dtype of the values variable.
    """
    if matrix is None:
      assert shape is not None and len(shape) == 2
      matrix = connector(initializer(shape))
      self._shape = shape
    else:
      assert shape is None
      assert len(matrix.shape) == 2
      self._shape = matrix.shape
    self._name = name
    self._trainable = trainable
    self._dtype = dtype
    self._sparsity = 1.0 - np.count_nonzero(matrix) / matrix.size
    # Create a numpy version of the sparse matrix.
    values_, row_indices_, row_offsets_, column_indices_ = _dense_to_sparse(
        matrix)
    # Create tensors for the matrix shape on the host. These are for internal
    # use and should generally not be used by end-user. Use the normal python
    # 'shape' property instead.
    with tf.device("cpu"):
      self._rows = tf.get_variable(
          initializer=self._shape[0],
          trainable=False,
          name=self._name + "_rows",
          dtype=tf.int32)
      self._columns = tf.get_variable(
          initializer=self._shape[1],
          trainable=False,
          name=self._name + "_columns",
          dtype=tf.int32)
    # Convert the sparse matrix to TensorFlow variables.
    self._values = tf.get_variable(
        initializer=values_,
        trainable=self.trainable,
        name=self._name + "_values",
        dtype=self._dtype)
    self._row_indices = tf.get_variable(
        initializer=row_indices_,
        trainable=False,
        name=self._name + "_row_indices",
        dtype=tf.uint32)
    self._row_offsets = tf.get_variable(
        initializer=row_offsets_,
        trainable=False,
        name=self._name + "_row_offsets",
        dtype=tf.uint32)
    self._column_indices = tf.get_variable(
        initializer=column_indices_,
        trainable=False,
        name=self._name + "_column_indices",
        dtype=tf.uint32)
    # Add this matrix to the collection of trainable matrices.
    # NOTE(review): track_trainable_sparse_matrix asserts sm.trainable, yet
    # this is called unconditionally — confirm behavior for trainable=False.
    track_trainable_sparse_matrix(self)

  @classmethod
  def _wrap_existing(cls, shape, rows, columns, values, row_indices,
                     row_offsets, column_indices):
    """Helper to wrap existing tensors in a SparseMatrix object.

    Bypasses __init__ (no new variables are created); the result is
    always non-trainable.
    """
    matrix = cls.__new__(cls)
    # Set the members appropriately.
    #
    # pylint: disable=protected-access
    matrix._shape = shape
    matrix._rows = rows
    matrix._columns = columns
    matrix._values = values
    matrix._row_indices = row_indices
    matrix._row_offsets = row_offsets
    matrix._column_indices = column_indices
    matrix._name = values.name
    matrix._trainable = False
    matrix._dtype = values.dtype
    # pylint: enable=protected-access
    return matrix

  @property
  def name(self):
    return self._name

  @property
  def shape(self):
    # Python-side dense shape.
    return self._shape

  @property
  def size(self):
    # Total number of dense entries, not just nonzeros.
    return np.prod(self._shape)

  @property
  def trainable(self):
    return self._trainable

  @property
  def dtype(self):
    return self._dtype

  @property
  def sparsity(self):
    # Fraction of zero entries, in [0, 1].
    return self._sparsity

  @property
  def values(self):
    return self._values

  @property
  def row_indices(self):
    return self._row_indices

  @property
  def row_offsets(self):
    return self._row_offsets

  @property
  def column_indices(self):
    return self._column_indices
| 28.403509 | 77 | 0.665596 |
0d037cc912676edd5833bb17061c211595ca2116 | 6,714 | py | Python | jolly-jellyfish/src/page_maker/templatetags/render_thumbnail.py | Vthechamp22/summer-code-jam-2021 | 0a8bf1f22f6c73300891fd779da36efd8e1304c1 | [
"MIT"
] | 40 | 2020-08-02T07:38:22.000Z | 2021-07-26T01:46:50.000Z | jolly-jellyfish/src/page_maker/templatetags/render_thumbnail.py | Vthechamp22/summer-code-jam-2021 | 0a8bf1f22f6c73300891fd779da36efd8e1304c1 | [
"MIT"
] | 134 | 2020-07-31T12:15:45.000Z | 2020-12-13T04:42:19.000Z | jolly-jellyfish/src/page_maker/templatetags/render_thumbnail.py | Artemis21/summer-code-jam-2020 | 1323288cb1e75b3aa4141c2c6e378f9219cf73d0 | [
"MIT"
] | 101 | 2020-07-31T12:00:47.000Z | 2021-11-01T09:06:58.000Z | import os
from pathlib import Path
from typing import Union
from PIL import Image
from django import template
from django.conf import settings
from django.http import HttpRequest
from django.utils import timezone
from selenium import webdriver
from selenium.webdriver.chrome.options import Options as ChromeOptions
from selenium.webdriver.firefox.options import Options as FireOptions
from ..models import Webpage, Template
register = template.Library()  # registers function in this file as template tags for use within Django templates

# Resolve the selenium driver binary once at import time so a missing
# configuration fails fast with an actionable message.
try:
    DRIVER_PATH = os.environ['SELENIUM_DRIVER']
except KeyError:
    raise KeyError('SELENIUM_DRIVER system environment variable not found.\n'
                   'Check the README for details on setting it.\n'
                   'If you have already done so and this error persists, try restarting your PC.')
def add_options(
        options_object: Union[ChromeOptions, FireOptions], window_size: str) -> Union[ChromeOptions, FireOptions]:
    """
    Used to quickly add options to options_object - since these are shared by both ChromeOptions & FireOptions
    :param options_object: Options object imported from selenium.webdriver.BROWSER.options
    :param window_size: string in format 'height, width'
    :return Options object with common arguments added
    """
    # Headless, no scrollbars, fixed window: suitable for screenshotting.
    options_object.add_argument('--headless')
    options_object.add_argument('--hide-scrollbars')
    options_object.add_argument(f'--window-size={window_size}')
    return options_object
@register.simple_tag()
def render_thumbnail(request: HttpRequest, url_to_render: str, page_obj: Union[Webpage, Template]) -> None:
    """
    This function needs to be called from within django html templates using:
    {% load render_thumbnail %}
    {% render_thumbnail request url_to_render page_obj as none %}
    It saves a thumbnail/screenshot of the page from within which it is called to page_obj's thumbnail attribute.
    This is accomplished using a selenium browser driver running in a headless configuration.
    Since, when running a local server, this results in the server being pinged twice (once within itself),
    a ConnectionResetError may be output to the console.
    This is safe to ignore (but can't be escaped, with except, for some reason).
    :param request: A Django HttpRequest object, sourced from the page which is calling this function.
    This is needed in order to access additional GET info and to build a full url (including domain).
    :param url_to_render: The relative url of the webpage to render (e.g. '/pages/page_name').
    Accessed within a template using something like:
    {% url 'webpage-view' pagename=webpage.name as url_to_render %}.
    Also needed to build full url.
    :param page_obj: Either a 'Webpage' or 'Template' Django object. Accessed in order to save thumbnail path.
    """
    is_template = isinstance(page_obj, Template)  # True if object is an instance of models.Template
    is_outdated = False
    if not is_template:
        # only Webpages have the thumbnail_edit_date attr (since they can be edited by the user)
        # True if the thumbnail is outdated or does not exist yet (thumbnail_edit_date datetime set to 1, 1, 1)
        is_outdated = page_obj.thumbnail_edit_date < page_obj.last_edit_date
    if is_template:
        # str(page_obj.thumbnail) returns rel path to thumbnail in str form (not comparable otherwise)
        if 'placeholder_img.png' not in str(page_obj.thumbnail):  # checks if the template actually needs a thumbnail
            is_template = False
    # To avoid infinite loops as the page calls itself to render,
    # ensures function only runs if url doesn't end with '?rendering=true' (added below)
    is_rendering = bool(request.GET.get('rendering', ''))
    if (is_template or is_outdated) and not is_rendering:  # if a thumbnail is needed
        base_dir = Path(settings.BASE_DIR)
        media_dir = Path(settings.MEDIA_ROOT)
        # 'template_' prefix for Template Objects; 'pg_' for Webpages.
        image_prefix = 'template_' if is_template else 'pg_'
        # Can't use only the obj's name as this may contain characters not permitted by file system
        image_path = Path('thumbnails') / f'{image_prefix}thumb-{page_obj.id}.png'
        full_os_path = str(base_dir / media_dir / image_path)
        if is_template:
            window_size = '500,500'  # thumbnails for Templates, due to less content, can be much smaller
        else:
            window_size = '1980,1080'
        # Pick the browser driver that matches the configured binary name.
        if 'chromedriver' in DRIVER_PATH:
            options = add_options(ChromeOptions(), window_size)
            driver = webdriver.Chrome(executable_path=DRIVER_PATH, options=options)
        elif 'geckodriver' in DRIVER_PATH:
            options = add_options(FireOptions(), window_size)
            driver = webdriver.Firefox(executable_path=DRIVER_PATH, options=options)
        else:
            raise Exception('The driver specified in SELENIUM_DRIVER is not supported.\n'
                            "Currently, only Chrome/Chromium ('chromedriver') and "
                            "Firefox ('geckodriver') are supported.\n"
                            'Please install one of these browsers and the associated driver.')
        url = request.build_absolute_uri(url_to_render) + '?rendering=true'
        # NB: This function raises: "ConnectionResetError: [WinError 10054] ...
        # An existing connection was forcibly closed by the remote host" in console.
        # This has no functional effect on the program and
        # is likely just a side-effect of the server being pinged from within itself essentially.
        #
        # Celery (for asynchronous tasks) no longer supports Windows - same with Redis.
        # This means that, in this current dev environment where there are multiple OSs at play,
        # having a functioning webpage, while this function executes in the background,
        # doesn't seem possible 😭 TODO: possible using Docker?
        driver.get(url)
        driver.save_screenshot(full_os_path)
        driver.close()
        img = Image.open(full_os_path)
        # image shrunk (aspect ration maintained) to reduce storage space and reduce footprint used on actual webpage
        img.thumbnail(size=(500, 500))
        img.save(full_os_path)
        page_obj.thumbnail = f'{image_path}'  # saves relative path to thumbnail to ImageField attribute
        # only Webpage objects can be edited by user so only they have the thumbnail_edit_date attribute
        if not is_template:
            page_obj.thumbnail_edit_date = timezone.now()
        page_obj.save()  # actually commit changes to model database
| 50.481203 | 117 | 0.70703 |
defb4a5b23287240ed28ec1695d2fdfbec9ac122 | 3,189 | py | Python | voicecontrol/pinyin_typing.py | thautwarm/voicecontrol | dc5565d114fe80f0f06a0e7c541ee447fb7712f3 | [
"MIT"
] | 2 | 2021-06-05T08:27:44.000Z | 2021-06-05T13:46:27.000Z | voicecontrol/pinyin_typing.py | thautwarm/voicecontrol | dc5565d114fe80f0f06a0e7c541ee447fb7712f3 | [
"MIT"
] | null | null | null | voicecontrol/pinyin_typing.py | thautwarm/voicecontrol | dc5565d114fe80f0f06a0e7c541ee447fb7712f3 | [
"MIT"
] | null | null | null | import os
from pynput.keyboard import Key, Controller
import psutil
from voicecontrol.st import *
keyboard = Controller()
voice_input_exe = "iFlyVoice.exe"
def get_process(name: str):
    """Return the first running process named *name*, or None."""
    matches = (proc for proc in psutil.process_iter() if proc.name() == name)
    return next(matches, None)
def pressed(k):
    """Tap key *k*: press then immediately release on the global keyboard."""
    keyboard.press(k)
    keyboard.release(k)
def get_en_US_model_path():
    """Locate the pocketsphinx en-US model bundled with speech_recognition."""
    import os.path
    import speech_recognition as sr
    package_dir = os.path.dirname(sr.__file__)
    return os.path.join(package_dir, "pocketsphinx-data", "en-US")
class 休息模式(DSModel, CommandsFromMethods):
    """'Rest mode': ignore everything except the wake word ("okay")."""
    path = get_en_US_model_path()          # en-US acoustic model
    decider = staticmethod(just_hyp)

    def okay(self, st: StateMachine):
        # Wake word: return to control mode.
        st.model = 控制模式()
class 输入模式(DSModel, CommandsFromMethods):
    """'Input mode': dictation is active until "okay" is heard."""
    path = get_en_US_model_path()          # en-US acoustic model
    decider = staticmethod(just_hyp)

    def okay(self, st: StateMachine):
        # Toggle the voice-input hotkey (F8) off and return to control mode.
        pressed(Key.f8)
        st.model = 控制模式()
class 控制模式(DSModel, CommandsFromMethods):
    """'Control mode': maps recognized voice commands to keystrokes.

    ``st.state`` is a 1-based counter: the digit commands (一 .. 五) add to
    it, and most action commands consume it and reset it to 1.
    """
    path = os.path.join(os.path.dirname(__file__), "models", "control")
    decider = staticmethod(vote_most_common_word)

    def 符(self, st: StateMachine):
        # Punctuation table, selected by the accumulated counter (1-based).
        # Fix: the original assigned this identical tuple twice in a row;
        # the dead duplicate assignment has been removed.
        tb = (',', " 。", "“”", "!", "?", "【】", "「」", "()")
        i = st.state
        if i <= len(tb):
            for k in tb[i - 1]:
                pressed(k)
        st.state = 1

    def 五(self, st: StateMachine):
        # "five": add 5 to the counter.
        st.state += 5

    def 四(self, st: StateMachine):
        st.state += 4

    def 三(self, st: StateMachine):
        st.state += 3

    def 二(self, st: StateMachine):
        st.state += 2

    def 一(self, st: StateMachine):
        st.state += 1

    def 上(self, st: StateMachine):
        # "up": press Up `state` times, then reset the counter.
        for _ in range(st.state):
            pressed(Key.up)
        st.state = 1

    def 下(self, st: StateMachine):
        # "down".
        for _ in range(st.state):
            pressed(Key.down)
        st.state = 1

    def 左(self, st: StateMachine):
        # "left".
        for _ in range(st.state):
            pressed(Key.left)
        st.state = 1

    def 右(self, st: StateMachine):
        # "right".
        for _ in range(st.state):
            pressed(Key.right)
        st.state = 1

    def 撤(self, st: StateMachine):
        # "undo": Ctrl+Z.
        with keyboard.pressed(Key.ctrl):
            pressed("z")

    def 重(self, st: StateMachine):
        # "redo": Ctrl+Shift+Z.
        with keyboard.pressed(Key.ctrl):
            with keyboard.pressed(Key.shift):
                pressed("z")

    def 删除(self, st: StateMachine):
        # "delete": Backspace `state` times.
        for _ in range(st.state):
            pressed(Key.backspace)
        st.state = 1

    def 首(self, st: StateMachine):
        # "line start": Ctrl+A.
        with keyboard.pressed(Key.ctrl):
            pressed("a")

    def 末(self, st: StateMachine):
        # "line end": Ctrl+E.
        with keyboard.pressed(Key.ctrl):
            pressed("e")

    def 行(self, st: StateMachine):
        # "newline": Enter.
        pressed(Key.enter)

    def 休息(self, st: StateMachine):
        # "rest": stop acting on commands until woken up.
        st.model = 休息模式()

    def 输入(self, st: StateMachine):
        # "input": switch to dictation mode and toggle the F8 hotkey on.
        st.model = 输入模式()
        pressed(Key.f8)
def main():
    """Start the voice-control state machine with its indicator window."""
    from voicecontrol.indicator import create_root
    st = StateMachine(1, 控制模式())  # counter starts at 1, control mode active
    st.add_event(create_root())
    st.start_loop()
| 23.622222 | 72 | 0.541549 |
886d9901ecbf2c8cd2bbab9ed49548abcb8f9bdc | 2,059 | py | Python | tests/step16_tests.py | svaningelgem/advent_of_code_2021 | 80351508d6d6953392bc57af20e1fac05ab3ec2a | [
"MIT"
] | null | null | null | tests/step16_tests.py | svaningelgem/advent_of_code_2021 | 80351508d6d6953392bc57af20e1fac05ab3ec2a | [
"MIT"
] | null | null | null | tests/step16_tests.py | svaningelgem/advent_of_code_2021 | 80351508d6d6953392bc57af20e1fac05ab3ec2a | [
"MIT"
] | null | null | null | from pathlib import Path
from step16 import decode_hex, Packet, get_version_sum
TEST_INPUT = Path(__file__).parent / 'step16.txt'
REAL_INPUT = Path(__file__).parent.parent / 'src/step16.txt'
def test_step16():
    """Part-1 worked examples from the puzzle statement."""
    assert decode_hex('D2FE28') == '110100101111111000101000'
    assert decode_hex('38006F45291200') == '00111000000000000110111101000101001010010001001000000000'
    assert decode_hex('EE00D40C823060') == '11101110000000001101010000001100100000100011000001100000'

    # A literal-value packet.
    literal = Packet('D2FE28')
    assert (literal.version, literal.packet_id) == (6, 4)
    assert literal.is_literal
    assert literal._value == 2021

    # An operator packet using the total-length length type.
    length_op = Packet('38006F45291200')
    assert (length_op.version, length_op.packet_id) == (1, 6)
    assert not length_op.is_literal
    assert length_op._value is None
    assert [p._value for p in length_op.sub_packets] == [10, 20]

    # An operator packet using the sub-packet-count length type.
    count_op = Packet('EE00D40C823060')
    assert (count_op.version, count_op.packet_id) == (7, 3)
    assert not count_op.is_literal
    assert count_op._value is None
    assert [p._value for p in count_op.sub_packets] == [1, 2, 3]

    for hex_str, total in (('8A004A801A8002F478', 16),
                           ('620080001611562C8802118E34', 12),
                           ('C0015000016115A2E0802F182340', 23),
                           ('A0016C880162017C3686B18A3D4780', 31)):
        assert get_version_sum(hex_str) == total
def test_step16_real_data():
    """Part 1 against the real puzzle input."""
    puzzle_input = REAL_INPUT.read_text()
    assert get_version_sum(puzzle_input) == 879
def test_step16_part2():
    """Part-2 worked examples: packet value evaluation."""
    cases = (('C200B40A82', 3),
             ('04005AC33890', 54),
             ('880086C3E88112', 7),
             ('CE00C43D881120', 9),
             ('D8005AC2A8F0', 1),
             ('F600BC2D8F', 0),
             ('9C005AC2F8F0', 0),
             ('9C0141080250320F1802104A08', 1))
    for hex_str, expected in cases:
        assert Packet(hex_str).value == expected
def test_step16_part2_real_data():
    """Part 2 against the real puzzle input."""
    packet = Packet(REAL_INPUT.read_text())
    assert packet.value == 539051801941
| 33.209677 | 101 | 0.718796 |
932afd4ea007227b4e807dcdbb7690b3100d8a9f | 1,817 | py | Python | scapy/error.py | kosciolek/J-Tracert | 89ed802f700e02600138ad7132e6a856463620dd | [
"MIT"
] | 3 | 2019-04-09T22:59:33.000Z | 2019-06-14T09:23:24.000Z | scapy/error.py | kosciolek/J-Tracert | 89ed802f700e02600138ad7132e6a856463620dd | [
"MIT"
] | null | null | null | scapy/error.py | kosciolek/J-Tracert | 89ed802f700e02600138ad7132e6a856463620dd | [
"MIT"
] | 1 | 2018-11-15T12:37:04.000Z | 2018-11-15T12:37:04.000Z | ## This file is part of Scapy
## See http://www.secdev.org/projects/scapy for more informations
## Copyright (C) Philippe Biondi <phil@secdev.org>
## This program is published under a GPLv2 license
"""
Logging subsystem and basic exception class.
"""
#############################
##### Logging subsystem #####
#############################
class Scapy_Exception(Exception):
    """Base class for exceptions raised by Scapy."""
    pass
import logging, traceback, time
class ScapyFreqFilter(logging.Filter):
    """Logging filter that rate-limits repeated warnings per call site.

    Within a window of ``conf.warning_threshold`` seconds, at most two
    messages per caller line are emitted; the second is prefixed with
    "more " and further repeats are dropped until the window expires.
    A threshold of 0 disables filtering entirely.
    """
    def __init__(self):
        logging.Filter.__init__(self)
        # Maps caller line number -> (window start time, messages in window).
        self.warning_table = {}
    def filter(self, record):
        # Imported lazily to avoid a circular import at module load time.
        from scapy.config import conf
        wt = conf.warning_threshold
        if wt > 0:
            # Identify the caller: the stack frame just below warning().
            stk = traceback.extract_stack()
            caller=None
            for f,l,n,c in stk:
                if n == 'warning':
                    break
                caller = l
            tm,nb = self.warning_table.get(caller, (0,0))
            ltm = time.time()
            if ltm-tm > wt:
                # Window expired: start a fresh one.
                tm = ltm
                nb = 0
            else:
                if nb < 2:
                    nb += 1
                    if nb == 2:
                        # Second hit in the window: flag that more follow.
                        record.msg = "more "+record.msg
                else:
                    # Third or later hit: drop the record.
                    return 0
            self.warning_table[caller] = (tm,nb)
        return 1
# Root scapy logger; NullHandler avoids "no handler" warnings for library users.
log_scapy = logging.getLogger("scapy")
log_scapy.addHandler(logging.NullHandler())
log_runtime = logging.getLogger("scapy.runtime")          # logs at runtime
log_runtime.addFilter(ScapyFreqFilter())                  # rate-limits repeated warnings
log_interactive = logging.getLogger("scapy.interactive")  # logs in interactive functions
log_loading = logging.getLogger("scapy.loading")          # logs when loading Scapy
def warning(x, *args, **kargs):
    """
    Prints a warning during runtime.

    Delegates to the ``scapy.runtime`` logger, so the rate-limiting
    ``ScapyFreqFilter`` attached above applies.
    """
    log_runtime.warning(x, *args, **kargs)
| 29.786885 | 89 | 0.549257 |
c1e575cc633f6fc2fb98acfe03f36e3654118c5a | 30,336 | py | Python | pretorched/gans/biggan.py | schwettmann/pretorched-x | ce8c3712434b3cd5d85dcbe8582ff51ddfa7d4ed | [
"MIT"
] | null | null | null | pretorched/gans/biggan.py | schwettmann/pretorched-x | ce8c3712434b3cd5d85dcbe8582ff51ddfa7d4ed | [
"MIT"
] | null | null | null | pretorched/gans/biggan.py | schwettmann/pretorched-x | ce8c3712434b3cd5d85dcbe8582ff51ddfa7d4ed | [
"MIT"
] | null | null | null | """biggan.py
Implements BigGAN architecture from
Large Scale GAN Training for High Fidelity Natural Image Synthesis by Andrew Brock, Jeff Donahue, and Karen Simonyan.
Adapted from https://github.com/ajbrock/BigGAN-PyTorch.
"""
import functools
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.nn import init
import pretorched.layers as layers
class GBlock(nn.Module):
    """Residual block for generator.

    Note that this class assumes the kernel size and padding (and any other
    settings) have been selected in the main generator module and passed in
    through the conv_func arg. Similar rules apply with bn_func (the input
    size [which is actually the number of channels of the conditional info] must
    be preselected)
    """

    def __init__(self, in_channels, out_channels,
                 conv_func=nn.Conv2d,
                 bn_func=layers.bn,
                 activation=None,
                 upsample=None):
        super().__init__()
        self.in_channels, self.out_channels = in_channels, out_channels
        self.conv_func, self.bn_func = conv_func, bn_func
        self.activation = activation
        self.upsample = upsample
        # Conv layers
        self.conv1 = self.conv_func(self.in_channels, self.out_channels)
        self.conv2 = self.conv_func(self.out_channels, self.out_channels)
        # A 1x1 shortcut conv is only needed when the channel count changes
        # or the block upsamples.
        self.learnable_sc = in_channels != out_channels or upsample
        if self.learnable_sc:
            self.conv_sc = self.conv_func(in_channels, out_channels,
                                          kernel_size=1, padding=0)
        # Batchnorm layers; both take the conditioning vector `y` at forward time.
        self.bn1 = self.bn_func(in_channels)
        self.bn2 = self.bn_func(out_channels)

    def forward(self, x, y):
        # BN -> activation -> (upsample) -> conv, twice; `y` conditions the BNs.
        h = self.activation(self.bn1(x, y))
        if self.upsample:
            h = self.upsample(h)
            x = self.upsample(x)
        h = self.conv1(h)
        h = self.activation(self.bn2(h, y))
        h = self.conv2(h)
        if self.learnable_sc:
            x = self.conv_sc(x)
        return h + x
class DBlock(nn.Module):
    """ Residual block for discriminator."""

    def __init__(self, in_channels, out_channels, conv_func=layers.SNConv2d, wide=True,
                 preactivation=False, activation=None, downsample=None,):
        super().__init__()
        self.in_channels, self.out_channels = in_channels, out_channels
        # If using wide D (as in SA-GAN and BigGAN), change the channel pattern
        self.hidden_channels = self.out_channels if wide else self.in_channels
        self.conv_func = conv_func
        self.preactivation = preactivation
        self.activation = activation
        self.downsample = downsample
        # Conv layers
        self.conv1 = self.conv_func(self.in_channels, self.hidden_channels)
        self.conv2 = self.conv_func(self.hidden_channels, self.out_channels)
        self.learnable_sc = True if (in_channels != out_channels) or downsample else False
        if self.learnable_sc:
            self.conv_sc = self.conv_func(in_channels, out_channels,
                                          kernel_size=1, padding=0)

    def shortcut(self, x):
        # Pre-activation blocks convolve before downsampling;
        # post-activation blocks downsample first.
        if self.preactivation:
            if self.learnable_sc:
                x = self.conv_sc(x)
            if self.downsample:
                x = self.downsample(x)
        else:
            if self.downsample:
                x = self.downsample(x)
            if self.learnable_sc:
                x = self.conv_sc(x)
        return x

    def forward(self, x):
        if self.preactivation:
            h = F.relu(x)  # This line *must* be an out-of-place ReLU or it will negatively affect the shortcut connection.
        else:
            h = x
        h = self.conv1(h)
        h = self.conv2(self.activation(h))
        if self.downsample:
            h = self.downsample(h)
        return h + self.shortcut(x)
def G_arch(ch=64, attention='64', ksize='333333', dilation='111111'):
    """Architectures for G.

    Attention is passed in in the format '32_64' to mean applying an attention
    block at both resolution 32x32 and 64x64. Just '64' will apply at 64x64.
    `ksize` and `dilation` are accepted for interface compatibility but unused.
    """
    attn_resolutions = {int(item) for item in attention.split('_')}

    def _config(in_mults, out_mults, resolutions, attn_exponents):
        # One per-resolution architecture dict; channel counts are `ch`
        # times the given multipliers, every stage upsamples.
        return {'in_channels': [ch * m for m in in_mults],
                'out_channels': [ch * m for m in out_mults],
                'upsample': [True] * len(in_mults),
                'resolution': resolutions,
                'attention': {2 ** i: (2 ** i in attn_resolutions)
                              for i in attn_exponents}}

    arch = {}
    arch[512] = _config([16, 16, 8, 8, 4, 2, 1], [16, 8, 8, 4, 2, 1, 1],
                        [8, 16, 32, 64, 128, 256, 512], range(3, 10))
    arch[256] = _config([16, 16, 8, 8, 4, 2], [16, 8, 8, 4, 2, 1],
                        [8, 16, 32, 64, 128, 256], range(3, 9))
    arch[128] = _config([16, 16, 8, 4, 2], [16, 8, 4, 2, 1],
                        [8, 16, 32, 64, 128], range(3, 8))
    arch[64] = _config([16, 16, 8, 4], [16, 8, 4, 2],
                       [8, 16, 32, 64], range(3, 7))
    arch[32] = _config([4, 4, 4], [4, 4, 4],
                       [8, 16, 32], range(3, 6))
    return arch
class Generator(nn.Module):
def __init__(self, G_ch=64, dim_z=128, bottom_width=4, resolution=128,
G_kernel_size=3, G_attn='64', n_classes=1000,
num_G_SVs=1, num_G_SV_itrs=1,
G_shared=True, shared_dim=0, hier=False,
cross_replica=False, mybn=False,
G_activation=nn.ReLU(inplace=False),
G_lr=5e-5, G_B1=0.0, G_B2=0.999, adam_eps=1e-8,
BN_eps=1e-5, SN_eps=1e-12, G_mixed_precision=False, G_fp16=False,
G_init='ortho', skip_init=False, no_optim=False,
G_param='SN', norm_style='bn', verbose=False,
**kwargs):
super(Generator, self).__init__()
# Channel width mulitplier
self.ch = G_ch
# Dimensionality of the latent space
self.dim_z = dim_z
# The initial spatial dimensions
self.bottom_width = bottom_width
# Resolution of the output
self.resolution = resolution
# Kernel size?
self.kernel_size = G_kernel_size
# Attention?
self.attention = G_attn
# number of classes, for use in categorical conditional generation
self.n_classes = n_classes
# Use shared embeddings?
self.G_shared = G_shared
# Dimensionality of the shared embedding? Unused if not using G_shared
self.shared_dim = shared_dim if shared_dim > 0 else dim_z
# Hierarchical latent space?
self.hier = hier
# Cross replica batchnorm?
self.cross_replica = cross_replica
# Use my batchnorm?
self.mybn = mybn
# nonlinearity for residual blocks
self.activation = G_activation
# Initialization style
self.init = G_init
# Parameterization style
self.G_param = G_param
# Normalization style
self.norm_style = norm_style
# Epsilon for BatchNorm?
self.BN_eps = BN_eps
# Epsilon for Spectral Norm?
self.SN_eps = SN_eps
# fp16?
self.fp16 = G_fp16
# Print out model information during init?
self.verbose = verbose
# Architecture dict
self.arch = G_arch(self.ch, self.attention)[resolution]
# If using hierarchical latents, adjust z
if self.hier:
# Number of places z slots into
self.num_slots = len(self.arch['in_channels']) + 1
self.z_chunk_size = (self.dim_z // self.num_slots)
# Recalculate latent dimensionality for even splitting into chunks
self.dim_z = self.z_chunk_size * self.num_slots
else:
self.num_slots = 1
self.z_chunk_size = 0
# Which convs, batchnorms, and linear layers to use
if self.G_param == 'SN':
self.conv_func = functools.partial(layers.SNConv2d,
kernel_size=3, padding=1,
num_svs=num_G_SVs, num_itrs=num_G_SV_itrs,
eps=self.SN_eps)
self.linear_func = functools.partial(layers.SNLinear,
num_svs=num_G_SVs, num_itrs=num_G_SV_itrs,
eps=self.SN_eps)
else:
self.conv_func = functools.partial(nn.Conv2d, kernel_size=3, padding=1)
self.linear_func = nn.Linear
# We use a non-spectral-normed embedding here regardless;
# For some reason applying SN to G's embedding seems to randomly cripple G
self.embedding_func = nn.Embedding
bn_linear = (functools.partial(self.linear_func, bias=False) if self.G_shared
else self.embedding_func)
self.bn_func = functools.partial(layers.ccbn,
linear_func=bn_linear,
cross_replica=self.cross_replica,
mybn=self.mybn,
input_size=(self.shared_dim + self.z_chunk_size if self.G_shared
else self.n_classes),
norm_style=self.norm_style,
eps=self.BN_eps)
# Prepare model
# If not using shared embeddings, self.shared is just a passthrough
self.shared = (self.embedding_func(n_classes, self.shared_dim) if G_shared
else nn.Identity())
# First linear layer
self.linear = self.linear_func(self.dim_z // self.num_slots,
self.arch['in_channels'][0] * (self.bottom_width ** 2))
# self.blocks is a doubly-nested list of modules, the outer loop intended
# to be over blocks at a given resolution (resblocks and/or self-attention)
# while the inner loop is over a given block
self.blocks = []
for index in range(len(self.arch['out_channels'])):
self.blocks += [[GBlock(in_channels=self.arch['in_channels'][index],
out_channels=self.arch['out_channels'][index],
conv_func=self.conv_func,
bn_func=self.bn_func,
activation=self.activation,
upsample=(functools.partial(F.interpolate, scale_factor=2)
if self.arch['upsample'][index] else None))]]
# If attention on this block, attach it to the end
if self.arch['attention'][self.arch['resolution'][index]]:
if self.verbose:
print('Adding attention layer in G at resolution {}'.format(self.arch['resolution'][index]))
self.blocks[-1] += [layers.Attention(self.arch['out_channels'][index], self.conv_func)]
# Turn self.blocks into a ModuleList so that it's all properly registered.
self.blocks = nn.ModuleList([nn.ModuleList(block) for block in self.blocks])
# output layer: batchnorm-relu-conv.
# Consider using a non-spectral conv here
self.output_layer = nn.Sequential(layers.bn(self.arch['out_channels'][-1],
cross_replica=self.cross_replica,
mybn=self.mybn),
self.activation,
self.conv_func(self.arch['out_channels'][-1], 3))
# Initialize weights. Optionally skip init for testing.
if not skip_init:
self.init_weights()
# Set up optimizer
# If this is an EMA copy, no need for an optim, so just return now
if no_optim:
return
self.lr, self.B1, self.B2, self.adam_eps = G_lr, G_B1, G_B2, adam_eps
if G_mixed_precision:
if self.verbose:
print('Using fp16 adam in G...')
from . import utils
self.optim = utils.Adam16(params=self.parameters(), lr=self.lr,
betas=(self.B1, self.B2), weight_decay=0,
eps=self.adam_eps)
else:
self.optim = optim.Adam(params=self.parameters(), lr=self.lr,
betas=(self.B1, self.B2), weight_decay=0,
eps=self.adam_eps)
# LR scheduling, left here for forward compatibility
# self.lr_sched = {'itr' : 0}# if self.progressive else {}
# self.j = 0
# Initialize
def init_weights(self):
self.param_count = 0
for module in self.modules():
if (isinstance(module, nn.Conv2d)
or isinstance(module, nn.Linear)
or isinstance(module, nn.Embedding)):
if self.init == 'ortho':
init.orthogonal_(module.weight)
elif self.init == 'N02':
init.normal_(module.weight, 0, 0.02)
elif self.init in ['glorot', 'xavier']:
init.xavier_uniform_(module.weight)
else:
print('Init style not recognized...')
self.param_count += sum([p.data.nelement() for p in module.parameters()])
if self.verbose:
print('Param count for G''s initialized parameters: {}'.format(self.param_count))
# Note on this forward function: we pass in a y vector which has
# already been passed through G.shared to enable easy class-wise
# interpolation later. If we passed in the one-hot and then ran it through
# G.shared in this forward function, it would be harder to handle.
    def forward(self, z, y, embed=False, layer=None):
        """Generate images from noise ``z`` conditioned on ``y``.

        ``y`` is expected to already be the shared-embedding vector; pass
        ``embed=True`` to run a raw class vector through ``self.shared``
        here instead.  If ``layer`` is an int, the activations entering
        block ``layer`` are returned early instead of an image.
        """
        if embed:
            y = self.shared(y)
        # If hierarchical, concatenate zs and ys
        if self.hier:
            # Chunk 0 of z feeds the first linear layer; each later chunk is
            # concatenated with y and handed to the corresponding block.
            zs = torch.split(z, self.z_chunk_size, 1)
            z = zs[0]
            ys = [torch.cat([y, item], 1) for item in zs[1:]]
        else:
            ys = [y] * len(self.blocks)
        # First linear layer
        h = self.linear(z)
        # Reshape
        h = h.view(h.size(0), -1, self.bottom_width, self.bottom_width)
        # Loop over blocks
        for index, blocklist in enumerate(self.blocks):
            if index == layer: return h
            # Second inner loop in case block has multiple layers
            for block in blocklist:
                h = block(h, ys[index])
        # Apply batchnorm-relu-conv-tanh at output
        return torch.tanh(self.output_layer(h))
    @property
    def z_dim(self):
        """Dimensionality of the latent noise vector ``z``."""
        return self.dim_z
def D_arch(ch=64, attention='64', ksize='333333', dilation='111111'):
    """Discriminator architecture, same paradigm as G's above.

    Returns a dict keyed by input resolution; each entry describes the
    per-block channel counts, downsampling flags, feature-map resolutions
    and at which resolutions self-attention is enabled.  ``ksize`` and
    ``dilation`` are accepted for API compatibility but unused.
    """
    # Resolutions (as ints) at which a self-attention block was requested.
    attn_resolutions = [int(tok) for tok in attention.split('_')]

    def attention_flags(max_power):
        # Map power-of-two resolution -> whether attention is enabled there.
        return {2 ** p: (2 ** p in attn_resolutions) for p in range(2, max_power)}

    arch = {}
    arch[256] = {'in_channels': [3] + [ch * mult for mult in [1, 2, 4, 8, 8, 16]],
                 'out_channels': [ch * mult for mult in [1, 2, 4, 8, 8, 16, 16]],
                 'downsample': [True] * 6 + [False],
                 'resolution': [128, 64, 32, 16, 8, 4, 4],
                 'attention': attention_flags(8)}
    arch[128] = {'in_channels': [3] + [ch * mult for mult in [1, 2, 4, 8, 16]],
                 'out_channels': [ch * mult for mult in [1, 2, 4, 8, 16, 16]],
                 'downsample': [True] * 5 + [False],
                 'resolution': [64, 32, 16, 8, 4, 4],
                 'attention': attention_flags(8)}
    arch[64] = {'in_channels': [3] + [ch * mult for mult in [1, 2, 4, 8]],
                'out_channels': [ch * mult for mult in [1, 2, 4, 8, 16]],
                'downsample': [True] * 4 + [False],
                'resolution': [32, 16, 8, 4, 4],
                'attention': attention_flags(7)}
    arch[32] = {'in_channels': [3] + [ch * mult for mult in [4, 4, 4]],
                'out_channels': [ch * mult for mult in [4, 4, 4, 4]],
                'downsample': [True, True, False, False],
                'resolution': [16, 16, 16, 16],
                'attention': attention_flags(6)}
    return arch
class Discriminator(nn.Module):
    """BigGAN discriminator.

    A stack of residual ``DBlock``s (optionally with self-attention at the
    resolutions requested via ``D_attn``), followed by global sum pooling,
    a linear "realness" head, and a class-embedding projection term added
    to the output (projection discrimination).
    """

    def __init__(self, D_ch=64, D_wide=True, resolution=128,
                 D_kernel_size=3, D_attn='64', n_classes=1000,
                 num_D_SVs=1, num_D_SV_itrs=1, D_activation=nn.ReLU(inplace=False),
                 D_lr=2e-4, D_B1=0.0, D_B2=0.999, adam_eps=1e-8,
                 SN_eps=1e-12, output_dim=1, D_mixed_precision=False, D_fp16=False,
                 D_init='ortho', skip_init=False, D_param='SN', verbose=False, **kwargs):
        super().__init__()
        # Width multiplier
        self.ch = D_ch
        # Use Wide D as in BigGAN and SA-GAN or skinny D as in SN-GAN?
        self.D_wide = D_wide
        # Resolution
        self.resolution = resolution
        # Kernel size
        self.kernel_size = D_kernel_size
        # Attention?
        self.attention = D_attn
        # Number of classes
        self.n_classes = n_classes
        # Activation
        self.activation = D_activation
        # Initialization style
        self.init = D_init
        # Parameterization style
        self.D_param = D_param
        # Epsilon for Spectral Norm?
        self.SN_eps = SN_eps
        # Fp16?
        self.fp16 = D_fp16
        # Print model info during init?
        self.verbose = verbose
        # Architecture
        self.arch = D_arch(self.ch, self.attention)[resolution]
        # Which convs, batchnorms, and linear layers to use
        # No option to turn off SN in D right now
        if self.D_param == 'SN':
            self.conv_func = functools.partial(layers.SNConv2d,
                                               kernel_size=3, padding=1,
                                               num_svs=num_D_SVs, num_itrs=num_D_SV_itrs,
                                               eps=self.SN_eps)
            self.linear_func = functools.partial(layers.SNLinear,
                                                 num_svs=num_D_SVs, num_itrs=num_D_SV_itrs,
                                                 eps=self.SN_eps)
            self.embedding_func = functools.partial(layers.SNEmbedding,
                                                    num_svs=num_D_SVs, num_itrs=num_D_SV_itrs,
                                                    eps=self.SN_eps)
        # Prepare model
        # self.blocks is a doubly-nested list of modules, the outer loop intended
        # to be over blocks at a given resolution (resblocks and/or self-attention)
        self.blocks = []
        for index in range(len(self.arch['out_channels'])):
            self.blocks += [[DBlock(in_channels=self.arch['in_channels'][index],
                                    out_channels=self.arch['out_channels'][index],
                                    conv_func=self.conv_func,
                                    wide=self.D_wide,
                                    activation=self.activation,
                                    preactivation=(index > 0),
                                    downsample=(nn.AvgPool2d(2) if self.arch['downsample'][index] else None))]]
            # If attention on this block, attach it to the end
            if self.arch['attention'][self.arch['resolution'][index]]:
                if self.verbose:
                    print('Adding attention layer in D at resolution %d' % self.arch['resolution'][index])
                self.blocks[-1] += [layers.Attention(self.arch['out_channels'][index],
                                                     self.conv_func)]
        # Turn self.blocks into a ModuleList so that it's all properly registered.
        self.blocks = nn.ModuleList([nn.ModuleList(block) for block in self.blocks])
        # Linear output layer. The output dimension is typically 1, but may be
        # larger if we're e.g. turning this into a VAE with an inference output
        self.linear = self.linear_func(self.arch['out_channels'][-1], output_dim)
        # Embedding for projection discrimination
        self.embed = self.embedding_func(self.n_classes, self.arch['out_channels'][-1])
        # Initialize weights
        if not skip_init:
            self.init_weights()
        # Set up optimizer
        self.lr, self.B1, self.B2, self.adam_eps = D_lr, D_B1, D_B2, adam_eps
        if D_mixed_precision:
            if self.verbose:
                print('Using fp16 adam in D...')
            from . import utils
            self.optim = utils.Adam16(params=self.parameters(), lr=self.lr,
                                      betas=(self.B1, self.B2), weight_decay=0, eps=self.adam_eps)
        else:
            self.optim = optim.Adam(params=self.parameters(), lr=self.lr,
                                    betas=(self.B1, self.B2), weight_decay=0, eps=self.adam_eps)
        # LR scheduling, left here for forward compatibility
        # self.lr_sched = {'itr' : 0}# if self.progressive else {}
        # self.j = 0

    def init_weights(self):
        """Initialize conv/linear/embedding weights per ``self.init`` style,
        counting initialized parameters in ``self.param_count``."""
        self.param_count = 0
        for module in self.modules():
            if (isinstance(module, nn.Conv2d)
                    or isinstance(module, nn.Linear)
                    or isinstance(module, nn.Embedding)):
                if self.init == 'ortho':
                    init.orthogonal_(module.weight)
                elif self.init == 'N02':
                    init.normal_(module.weight, 0, 0.02)
                elif self.init in ['glorot', 'xavier']:
                    init.xavier_uniform_(module.weight)
                else:
                    print('Init style not recognized...')
                self.param_count += sum([p.data.nelement() for p in module.parameters()])
        if self.verbose:
            # Bug fix: the original 'D''s' is implicit string concatenation in
            # Python and printed "Ds"; use a double-quoted literal instead.
            print("Param count for D's initialized parameters: %d" % self.param_count)

    def forward(self, x, y=None):
        """Score images ``x`` given class labels ``y``.

        NOTE(review): despite the ``y=None`` default, ``self.embed(y)`` below
        requires a label tensor — confirm callers always pass ``y``.
        """
        # Stick x into h for cleaner for loops without flow control
        h = x
        # Loop over blocks
        for index, blocklist in enumerate(self.blocks):
            for block in blocklist:
                h = block(h)
        # Apply global sum pooling as in SN-GAN
        h = torch.sum(self.activation(h), [2, 3])
        # Get initial class-unconditional output
        out = self.linear(h)
        # Get projection of final featureset onto class vectors and add to evidence
        out = out + torch.sum(self.embed(y) * h, 1, keepdim=True)
        return out
class G_D(nn.Module):
    """Parallelized G_D to minimize cross-gpu communication.
    Without this, Generator outputs would get all-gathered and then rebroadcast.
    """
    def __init__(self, G, D):
        """Wrap a Generator ``G`` and Discriminator ``D`` as one module."""
        super().__init__()
        self.G = G
        self.D = D
    def forward(self, z, gy, x=None, dy=None, train_G=False, return_G_z=False,
                split_D=False):
        """Run G on ``(z, gy)`` and score the fakes (and optionally reals ``x``
        with labels ``dy``) with D.

        Returns ``(D_fake, D_real)`` when ``x`` is given; otherwise ``D_fake``
        (plus G's output when ``return_G_z`` is set).  G's graph is recorded
        only when ``train_G`` is True.
        """
        # If training G, enable grad tape
        with torch.set_grad_enabled(train_G):
            # Get Generator output given noise
            G_z = self.G(z, self.G.shared(gy))
            # Cast as necessary
            if self.G.fp16 and not self.D.fp16:
                G_z = G_z.float()
            if self.D.fp16 and not self.G.fp16:
                G_z = G_z.half()
        # Split_D means to run D once with real data and once with fake,
        # rather than concatenating along the batch dimension.
        if split_D:
            D_fake = self.D(G_z, gy)
            if x is not None:
                D_real = self.D(x, dy)
                return D_fake, D_real
            else:
                if return_G_z:
                    return D_fake, G_z
                else:
                    return D_fake
        # If real data is provided, concatenate it with the Generator's output
        # along the batch dimension for improved efficiency.
        else:
            D_input = torch.cat([G_z, x], 0) if x is not None else G_z
            D_class = torch.cat([gy, dy], 0) if dy is not None else gy
            # Get Discriminator output
            D_out = self.D(D_input, D_class)
            if x is not None:
                return torch.split(D_out, [G_z.shape[0], x.shape[0]]) # D_fake, D_real
            else:
                if return_G_z:
                    return D_out, G_z
                else:
                    return D_out
# Directory of locally-stored checkpoints, resolved relative to this file.
root = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'weights')
# Local checkpoint paths: dataset -> resolution -> component ('G', 'G_ema', 'state_dict').
model_weights = {
    'imagenet': {
        128: {
            'G': os.path.join(root, '100k', 'G.pth'),
            'G_ema': os.path.join(root, '100k', 'G_ema.pth'),
            'state_dict': os.path.join(root, '100k', 'state_dict.pth'),
        }
    }
}
# Remote hosts serving pretrained checkpoints.
root_url = 'http://pretorched-x.csail.mit.edu/gans/BigGAN'
ganocracy_root_url = 'http://ganocracy.csail.mit.edu/models'
# Checkpoints converted from the TF-Hub BigGAN release, keyed by resolution.
tfhub_urls = {
    'imagenet': {
        128: os.path.join(ganocracy_root_url, 'tfbiggan_128-13f17ff2.pth'),
        256: os.path.join(ganocracy_root_url, 'tfbiggan_256-a4cf3382.pth'),
        512: os.path.join(ganocracy_root_url, 'tfbiggan_512-447bfb81.pth'),
    }
}
# Natively-trained checkpoints: dataset -> resolution -> channel width -> component.
model_urls = {
    'places365': {
        128: {
            96: {
                'D': os.path.join(root_url, 'biggan128_D_places365-8afb2a4d.pth'),
                'G': os.path.join(root_url, 'biggan128_G_places365-43cd58c0.pth'),
                'G_ema': os.path.join(root_url, 'biggan128_G_ema_places365-78c78abe.pth'),
                'state_dict': os.path.join(root_url, 'biggan128_state_dict_places365-3d39f6bb.pth')
            }},
        256: {
            96: {
                'D': os.path.join(root_url, 'biggan256_D_ch96_places365-44bf2902.pth'),
                'G': os.path.join(root_url, 'biggan256_G_ch96_places365-5adac787.pth'),
                'G_ema': os.path.join(root_url, 'biggan256_G_ema_ch96_places365-ac277771.pth'),
                'state_dict': os.path.join(root_url, 'biggan256_state_dict_ch96_places365-b4f6daf6.pth'),
            },
            128: {
                'D': os.path.join(root_url, 'biggan256_D_ch128_places365-a6f7d3b6.pth'),
                'G': os.path.join(root_url, 'biggan256_G_ch128_places365-47f6e48c.pth'),
                'G_ema': os.path.join(root_url, 'biggan256_G_ema_ch128_places365-6fb66feb.pth'),
                'state_dict': os.path.join(root_url, 'biggan256_state_dict_ch128_places365-7594847d.pth'),
            }},
    },
    'imagenet': {
        128: {
            96: {
                'D': os.path.join(root_url, 'biggan128_D_imagenet-9fd72e50.pth'),
                'G': os.path.join(root_url, 'biggan128_G_imagenet-94e0b761.pth'),
                'G_ema': os.path.join(root_url, 'biggan128_G_ema_imagenet-c9706dfb.pth'),
                'state_dict': os.path.join(root_url, 'biggan128_state_dict_imagenet-4aad5089.pth'),
            }},
    }
}
def BigGAN(resolution=256, pretrained='imagenet', load_ema=True, tfhub=True, ch=128):
    """Construct a BigGAN Generator, optionally loading pretrained weights.

    Parameters
    ----------
    resolution : int
        Output image resolution; used as a key into the URL tables.
    pretrained : str or None
        Dataset key ('imagenet', 'places365'); None builds an uninitialized model.
    load_ema : bool
        Use the 'G_ema' (exponential moving average) weights rather than 'G'
        for the non-tfhub checkpoints.
    tfhub : bool
        Prefer the TF-Hub-converted imagenet checkpoints.
    ch : int
        Channel-width key used to select among non-tfhub checkpoints.
        NOTE(review): the locally-built ``config`` hard-codes 'G_ch': 96
        regardless of ``ch`` — confirm whether the uninitialized fallback
        at the end should honor ``ch``.
    """
    if resolution == 128:
        # Set default for now.
        ch = 96
    attn = {128: '64', 256: '128', 512: '64'}
    dim_z = {128: 120, 256: 140, 512: 128}
    config = {
        'G_param': 'SN', 'D_param': 'SN',
        'G_ch': 96, 'D_ch': 96,
        'D_wide': True, 'G_shared': True,
        'shared_dim': 128, 'dim_z': dim_z[resolution],
        'hier': True, 'cross_replica': False,
        'mybn': False, 'G_activation': nn.ReLU(inplace=True),
        'G_attn': attn[resolution],
        'norm_style': 'bn',
        'G_init': 'ortho', 'skip_init': True, 'no_optim': True,
        'G_fp16': False, 'G_mixed_precision': False,
        'accumulate_stats': False, 'num_standing_accumulations': 16,
        'G_eval_mode': True,
        'BN_eps': 1e-04, 'SN_eps': 1e-04,
        'num_G_SVs': 1, 'num_G_SV_itrs': 1, 'resolution': resolution,
        'n_classes': 1000
    }
    version = 'G_ema' if load_ema else 'G'
    if tfhub and pretrained == 'imagenet':
        # TF-Hub-converted checkpoint: build from the local config.
        url = tfhub_urls[pretrained][resolution]
        weights = torch.hub.load_state_dict_from_url(url)
        G = Generator(**config)
        G.load_state_dict(weights, strict=False)
        G.eval()
        return G
    elif pretrained is not None:
        # Natively-trained checkpoint: the config is read from the stored
        # state_dict rather than the local one above.
        url = model_urls[pretrained][resolution][ch][version]
        sd_url = model_urls[pretrained][resolution][ch]['state_dict']
        weights = torch.hub.load_state_dict_from_url(url)
        state_dict = torch.hub.load_state_dict_from_url(sd_url)
        G = Generator(**state_dict['config'])
        G.load_state_dict(weights, strict=False)
        G.eval()
        return G
    # Fallback: uninitialized model with the local config.
    G = Generator(**config)
    return G
def fix_class(G, y):
    """Patch ``G`` so that ``G.forward(z)`` always generates class ``y``.

    The replacement forward builds a constant class-label batch matching the
    noise batch and routes through the original forward with ``embed=True``.

    Bug fix: the original assigned the wrapper to ``G.__class__``, silently
    changing the behavior of *every* instance of that Generator class.  The
    wrapper is now bound to this instance only.

    Parameters
    ----------
    G : Generator
        Model whose ``forward`` is replaced in place.
    y : int
        Class index to generate.

    Returns
    -------
    Generator
        The same (mutated) ``G``, for chaining.
    """
    f = G.forward

    def forward(z):
        bs = z.size(0)
        c = y * torch.ones(bs, device=z.device).long()
        return f(z, c, embed=True)

    # Instance-level attribute shadows the class method for this G only.
    G.forward = forward
    return G
| 43.901592 | 123 | 0.548688 |
06c38ca5a3d50d9e9dfbe617e18a755385e83173 | 3,267 | py | Python | src/primaires/pnj/commandes/pspawn/__init__.py | vlegoff/tsunami | 36b3b974f6eefbf15cd5d5f099fc14630e66570b | [
"BSD-3-Clause"
] | 14 | 2015-08-21T19:15:21.000Z | 2017-11-26T13:59:17.000Z | src/primaires/pnj/commandes/pspawn/__init__.py | vincent-lg/tsunami | 36b3b974f6eefbf15cd5d5f099fc14630e66570b | [
"BSD-3-Clause"
] | 20 | 2015-09-29T20:50:45.000Z | 2018-06-21T12:58:30.000Z | src/primaires/pnj/commandes/pspawn/__init__.py | vlegoff/tsunami | 36b3b974f6eefbf15cd5d5f099fc14630e66570b | [
"BSD-3-Clause"
] | 3 | 2015-05-02T19:42:03.000Z | 2018-09-06T10:55:00.000Z | # -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Package contenant la commande 'pspawn'.
"""
from primaires.interpreteur.commande.commande import Commande
class CmdPspawn(Commande):

    """The 'pspawn' command: make an NPC appear in the current room."""

    def __init__(self):
        """Configure the command metadata (group, category, schema, help)."""
        Commande.__init__(self, "pspawn", "pspawn")
        self.groupe = "administrateur"
        self.nom_categorie = "batisseur"
        self.schema = "(<nombre>) <ident_prototype_pnj>"
        self.aide_courte = "fait apparaître un PNJ dans la salle"
        self.aide_longue = (
            "Cette commande permet de faire apparaître un PNJ dans "
            "la salle où vous vous trouvez. Elle prend en paramètre "
            "obligatoire le prototype depuis lequel créer le PNJ.")

    def interpreter(self, personnage, dic_masques):
        """Spawn one or more NPCs from the requested prototype."""
        prototype = dic_masques["ident_prototype_pnj"].prototype
        salle = personnage.salle
        # A prototype without a skeleton cannot be instantiated.
        if prototype.squelette is None:
            personnage << "|err|Ce prototype n'a aucun squelette défini.|ff|"
            return
        nombre = dic_masques["nombre"]
        if nombre is not None:
            # Explicit count given: spawn that many and announce in plural.
            nb_pnj = nombre.nombre
            for _ in range(nb_pnj):
                pnj = type(self).importeur.pnj.creer_PNJ(prototype)
                pnj.salle = salle
            salle.envoyer("{} apparaissent du néant.".format(
                    prototype.get_nom(nb_pnj)))
        else:
            # No count: spawn a single NPC and announce in singular.
            pnj = type(self).importeur.pnj.creer_PNJ(prototype)
            pnj.salle = salle
            salle.envoyer("{} apparaît du néant.".format(
                    pnj.nom_singulier))
| 41.884615 | 79 | 0.676155 |
1f15f325c10419b6635c94e464dad42538b22998 | 87 | py | Python | ComRISB/pygtool/gs_ml.py | comscope/comsuite | d51c43cad0d15dc3b4d1f45e7df777cdddaa9d6c | [
"BSD-3-Clause"
] | 18 | 2019-06-15T18:08:21.000Z | 2022-01-30T05:01:29.000Z | ComRISB/pygtool/gs_ml.py | comscope/Comsuite | b80ca9f34c519757d337487c489fb655f7598cc2 | [
"BSD-3-Clause"
] | null | null | null | ComRISB/pygtool/gs_ml.py | comscope/Comsuite | b80ca9f34c519757d337487c489fb655f7598cc2 | [
"BSD-3-Clause"
] | 11 | 2019-06-05T02:57:55.000Z | 2021-12-29T02:54:25.000Z | #!/usr/bin/env python
from pyglib.gsolver.gs_ml_v2 import driver_gs_ml
# Thin launcher script: immediately run the ground-state ML driver imported above.
driver_gs_ml()
| 17.4 | 48 | 0.804598 |
b9eb6dc2a8a3d52943c35a49f2065c09bd0cd92d | 2,298 | py | Python | networks/carn_arch.py | xyzhu1/EMASRN | a0f1dc9b78ba814a8b7e3291a0625598cf6c7609 | [
"MIT"
] | 19 | 2020-09-18T08:49:19.000Z | 2022-03-02T01:56:48.000Z | networks/carn_arch.py | zhuxyme/EMASRN | d92c12d37797514aaa8024168d5b96fb21d26f15 | [
"MIT"
] | 1 | 2021-12-06T12:59:47.000Z | 2021-12-06T12:59:47.000Z | networks/carn_arch.py | zhuxyme/EMASRN | d92c12d37797514aaa8024168d5b96fb21d26f15 | [
"MIT"
] | 8 | 2020-09-23T02:32:40.000Z | 2022-01-18T15:18:40.000Z | import torch
import torch.nn as nn
import networks.carn_ops as ops
class Block(nn.Module):
    """CARN cascading block: three residual units whose outputs are
    progressively concatenated onto a running feature stack and compressed
    back to 64 channels by 1x1 BasicBlocks (local cascading connections)."""

    def __init__(self,
                 in_channels, out_channels,
                 group=1):
        # NOTE(review): in_channels/out_channels/group are accepted for API
        # compatibility but the widths are fixed at 64, as in the original.
        super(Block, self).__init__()
        self.b1 = ops.ResidualBlock(64, 64)
        self.b2 = ops.ResidualBlock(64, 64)
        self.b3 = ops.ResidualBlock(64, 64)
        self.c1 = ops.BasicBlock(64 * 2, 64, 1, 1, 0)
        self.c2 = ops.BasicBlock(64 * 3, 64, 1, 1, 0)
        self.c3 = ops.BasicBlock(64 * 4, 64, 1, 1, 0)

    def forward(self, x):
        """Run the local cascade and return a 64-channel feature map."""
        cascade = x  # running concatenation of all features produced so far
        out = x      # output of the previous stage
        stages = ((self.b1, self.c1), (self.b2, self.c2), (self.b3, self.c3))
        for residual, compress in stages:
            cascade = torch.cat([cascade, residual(out)], dim=1)
            out = compress(cascade)
        return out
class CARN(nn.Module):
    """CARN x2 super-resolution network: mean-shift normalization, an entry
    conv, a global cascade of three :class:`Block`s with 1x1 compression
    layers, then upsampling and an exit conv back to RGB."""

    def __init__(self):
        super(CARN, self).__init__()
        scale = 2
        multi_scale = False
        group = 1
        # Subtract / re-add the dataset channel means around the network.
        self.sub_mean = ops.MeanShift((0.4488, 0.4371, 0.4040), sub=True)
        self.add_mean = ops.MeanShift((0.4488, 0.4371, 0.4040), sub=False)
        self.entry = nn.Conv2d(3, 64, 3, 1, 1)
        self.b1 = Block(64, 64)
        self.b2 = Block(64, 64)
        self.b3 = Block(64, 64)
        self.c1 = ops.BasicBlock(64 * 2, 64, 1, 1, 0)
        self.c2 = ops.BasicBlock(64 * 3, 64, 1, 1, 0)
        self.c3 = ops.BasicBlock(64 * 4, 64, 1, 1, 0)
        self.upsample = ops.UpsampleBlock(64, scale=scale,
                                          multi_scale=multi_scale,
                                          group=group)
        self.exit = nn.Conv2d(64, 3, 3, 1, 1)

    def forward(self, x):
        """Super-resolve ``x`` (RGB) by a factor of 2."""
        feat = self.entry(self.sub_mean(x))
        cascade = feat  # running concatenation of block outputs
        out = feat      # output of the previous stage
        stages = ((self.b1, self.c1), (self.b2, self.c2), (self.b3, self.c3))
        for block, compress in stages:
            cascade = torch.cat([cascade, block(out)], dim=1)
            out = compress(cascade)
        out = self.upsample(out, scale=2)
        return self.add_mean(self.exit(out))
0527809f637e7960647774d14659f7c40f425019 | 12,360 | py | Python | skrf/media/circularWaveguide.py | dxxx9/scikit-rf | 8bbb23ff5ef25ae054cfd45b4558cbdfce3378a7 | [
"BSD-3-Clause"
] | 379 | 2015-01-25T12:19:19.000Z | 2022-03-29T14:01:07.000Z | skrf/media/circularWaveguide.py | jhillairet/scikit-rf | 060257568f3c0bdcc817e89742ec29445df81a09 | [
"BSD-3-Clause"
] | 456 | 2015-01-06T19:15:55.000Z | 2022-03-31T06:42:57.000Z | skrf/media/circularWaveguide.py | jhillairet/scikit-rf | 060257568f3c0bdcc817e89742ec29445df81a09 | [
"BSD-3-Clause"
] | 211 | 2015-01-06T17:14:06.000Z | 2022-03-31T01:36:00.000Z | """
circularWaveguide (:mod:`skrf.media.circularWaveguide`)
================================================================
.. autosummary::
:toctree: generated/
CircularWaveguide
"""
from scipy.constants import epsilon_0, mu_0, pi
from scipy.special import jn_zeros, jnp_zeros
from numpy import sqrt, where
import numpy as npy
from .media import Media
from ..data import materials
from .freespace import Freespace
from ..constants import NumberLike
from typing import Union, TYPE_CHECKING
if TYPE_CHECKING:
from .. frequency import Frequency
class CircularWaveguide(Media):
    r"""
    A single mode of a homogeneously filled Circular Waveguide

    Represents a single mode of a homogeneously filled circular
    waveguide of cross-section `r^2 pi`. The mode is determined by
    `mode-type` (`'te'` or `'tm'`) and mode indices ( `m` and `n` ).

    Corrugated circular waveguides, which also support HE modes, are not
    supported.

    ==================================== ============= ===============
    Quantity                             Symbol        Variable
    ==================================== ============= ===============
    Characteristic Wave Number           :math:`k_0`   :attr:`k0`
    Cut-off Wave Number                  :math:`k_c`   :attr:`kc`
    Longitudinal Wave Number             :math:`k_z`   :attr:`gamma`
    Transverse Wave Number (a)           :math:`k_x`   :attr:`kx`
    Transverse Wave Number (b)           :math:`k_y`   :attr:`ky`
    Characteristic Impedance             :math:`z_0`   :attr:`z0`
    ==================================== ============= ===============

    Parameters
    ----------
    frequency : :class:`~skrf.frequency.Frequency` object
        frequency band of this transmission line medium
    z0 : number, array-like, or None
        the port impedance for media. Only needed if it's different
        from the characteristic impedance of the transmission
        line. if z0 is None then will default to Z0
    r : number
        radius of the waveguide, in meters.
    mode_type : ['te','tm']
        mode type, transverse electric (te) or transverse magnetic
        (tm) to-z. where z is direction of propagation
    m : int
        mode index in 'phi'-direction, the azimuthal index
    n : int
        mode index in 'r'-direction, the radial index
    ep_r : number, array-like,
        filling material's relative permittivity
    mu_r : number, array-like
        filling material's relative permeability
    rho : number, array-like, string
        resistivity (ohm-m) of the conductor walls. If array-like
        must be same length as frequency. if str, it must be a key in
        `skrf.data.materials`.
    \*args, \*\*kwargs : arguments, keyword arguments
        passed to :class:`~skrf.media.media.Media`'s constructor
        (:func:`~skrf.media.media.Media.__init__`

    Examples
    --------
    In the following example an ideal waveguide of 2.39 mm diameter is
    constructed for the high W band, operated in the fundamental TE11 mode.
    If no conductivity is provided the walls are treated as perfect
    electric conductors.

    >>> freq = rf.Frequency(88, 110, 101, 'ghz')
    >>> rf.CircularWaveguide(freq, r=0.5 * 2.39e-3)
    """
    def __init__(self, frequency: Union['Frequency', None] = None,
                 z0: Union[NumberLike, None] = None,
                 r: NumberLike = 1,
                 mode_type: str = 'te', m: int = 1, n: int = 1,
                 ep_r: NumberLike = 1, mu_r: NumberLike = 1,
                 rho: Union[NumberLike, str, None] = None,
                 *args, **kwargs):
        Media.__init__(self, frequency=frequency,z0=z0)
        if mode_type.lower() not in ['te','tm']:
            raise ValueError('mode_type must be either \'te\' or \'tm\'')

        self.r = r
        self.mode_type = mode_type.lower()
        self.m = m
        self.n = n
        self.ep_r = ep_r
        self.mu_r = mu_r
        self.rho = rho

    def __str__(self) -> str:
        f=self.frequency
        output = \
                'Circular Waveguide Media.  %i-%i %s.  %i points'%\
                (f.f_scaled[0], f.f_scaled[-1], f.unit, f.npoints) + \
                '\n r= %.2em'% \
                (self.r)
        return output

    def __repr__(self) -> str:
        return self.__str__()

    @classmethod
    def from_Z0(cls, frequency: 'Frequency', Z0: NumberLike,
                f: NumberLike, ep_r: NumberLike = 1, mu_r: NumberLike = 1,
                **kwargs):
        r"""
        Initialize from specified impedance at a given frequency, assuming the
        fundamental TE11 mode.

        Parameters
        ----------
        frequency : Frequency Object
        Z0 : number /array
            characteristic impedance to create at `f`
        f : number
            frequency (in Hz) at which the resultant waveguide has the
            characteristic impedance Z0
        ep_r : number, array-like,
            filling material's relative permittivity
        mu_r : number, array-like
            filling material's relative permeability
        \*\*kwargs : arguments, keyword arguments
            passed to :class:`~skrf.media.media.Media`'s constructor
            (:func:`~skrf.media.media.Media.__init__`
        """
        mu = mu_0*mu_r
        ep = epsilon_0*ep_r
        w = 2*pi*f
        # First root of J1' gives the TE11 cut-off; solve for the radius
        # that yields the requested impedance at f.
        u = jnp_zeros(1, 1)[-1]
        r =u/(w*mu) * 1./sqrt((1/(Z0*1j)**2+ep/mu))
        kwargs.update(dict(frequency=frequency, r=r, m=1, n=1, ep_r=ep_r, mu_r=mu_r))
        return cls(**kwargs)

    @property
    def ep(self) -> NumberLike:
        """
        The permittivity of the filling material.

        Returns
        -------
        ep : number
            filling material's absolute permittivity
        """
        return self.ep_r * epsilon_0

    @property
    def mu(self) -> NumberLike:
        """
        The permeability of the filling material.

        Returns
        -------
        mu : number
            filling material's absolute permeability
        """
        return self.mu_r * mu_0

    @property
    def k0(self) -> NumberLike:
        r"""
        Characteristic wave number.

        .. math::

            k_0 = \omega \sqrt{\varepsilon \mu}

        Returns
        -------
        k0 : number
            characteristic wave number
        """
        return 2*pi*self.frequency.f*sqrt(self.ep * self.mu)

    @property
    def kc(self) -> NumberLike:
        r"""
        Cut-off wave number.

        Defined as

        .. math::

            k_c = \frac{u_{mn}}{R}

        where R is the radius of the waveguide, and u_mn is:

        * the n-th root of the m-th Bessel function for 'tm' mode
        * the n-th root of the Derivative of the m-th Bessel function for 'te' mode.

        Returns
        -------
        kc : number
            cut-off wavenumber
        """
        if self.mode_type =="te":
            u = jnp_zeros(self.m, self.n)[-1]
        elif self.mode_type =="tm":
            u = jn_zeros(self.m,self.n)[-1]
        return u/self.r

    @property
    def f_cutoff(self) -> NumberLike:
        r"""
        cutoff frequency for this mode

        .. math::

            f_c = \frac{v}{2 \pi} \frac{u_{mn}}{R}

        where R is the radius of the waveguide, and u_mn is:

        * the n-th root of the m-th Bessel function for 'tm' mode
        * the n-th root of the Derivative of the m-th Bessel function for 'te' mode.

        and v= 1/sqrt(ep*mu) is the bulk velocity inside the filling material.
        """
        v = 1/sqrt(self.ep*self.mu)
        return v* self.kc/(2*npy.pi)

    @property
    def f_norm(self) -> NumberLike:
        """
        frequency vector normalized to cutoff
        """
        return self.frequency.f/self.f_cutoff

    @property
    def rho(self) -> NumberLike:
        """
        Resistivity of the sidewalls in ohm*m.

        (The original docstring called this "conductivity"; the value stored
        and used throughout, e.g. in :attr:`alpha_c`, is a resistivity.)

        Parameters
        --------------
        val : float, array-like or str
            the resistivity in ohm*m. If array-like must be same length
            as self.frequency. if str, it must be a key in
            `skrf.data.materials`.

        Examples
        ---------
        >>> wg.rho = 2.8e-8
        >>> wg.rho = 2.8e-8 * ones(len(wg.frequency))
        >>> wg.rho = 'al'
        >>> wg.rho = 'aluminum'
        """
        # if self.roughness != None:
        #     delta = skin_depth(self.frequency.f, self._rho, self.mu_r)
        #     k_w = 1. +exp(-(delta/(2*self.roughness))**1.6)
        #     return self._rho*k_w**2
        return self._rho

    @rho.setter
    def rho(self, val):
        if isinstance(val, str):
            # Look up a named material's resistivity.
            self._rho = materials[val.lower()]['resistivity(ohm*m)']
        else:
            self._rho=val

    @property
    def lambda_guide(self) -> NumberLike:
        r"""
        Guide wavelength.

        .. math::

            \lambda_g = 2\pi/\beta

        the distance in which the phase of the field increases by 2 pi.
        """
        return 2*pi/self.beta

    @property
    def lambda_cutoff(self) -> NumberLike:
        r"""
        Cutoff wavelength.

        .. math::

            \lambda_c = v/f_c

        where v= 1/sqrt(ep*mu)
        """
        v = 1/sqrt(self.ep*self.mu)
        return v/self.f_cutoff

    @property
    def gamma(self) -> NumberLike:
        r"""
        The propagation constant (aka Longitudinal wave number)

        Defined as

        .. math::

            k_z = \pm j \sqrt {k_0^2 - k_c^2}

        This is:

        * IMAGINARY for propagating modes
        * REAL  for non-propagating modes,

        Returns
        -------
        gamma :  number
            The propagation constant
        """
        # An unreachable `if False:` branch (a Harrington-form Freespace
        # expression) has been removed here; it could never execute.
        # TODO: fix this for lossy ep/mu (remove abs?)
        k0, kc = self.k0, self.kc
        g = 1j*sqrt(abs(k0**2 - kc**2)) * (k0>kc) +\
            sqrt(abs(kc**2- k0**2))*(k0<kc) + \
            0*(kc==k0)
        # Add finite-conductivity attenuation when wall resistivity is set.
        g = g + self.alpha_c *(self.rho is not None)
        return g

    @property
    def alpha_c(self) -> NumberLike:
        """
        Loss due to finite conductivity of the sidewalls for the fundamental mode TE11. Higher order
        modes are not implemented, as well as effects due to surface roughness.

        In units of Np/m
        See property `rho` for setting resistivity.

        Effects of finite conductivity are taken from [#]_, but expressed in the same terms as in [#]_.

        References
        ----------
        .. [#] Eq. (3.133), Chapter 3.4, Microwave Engineering, Pozar David, 2011
        .. [#] Eq. (9.8.1), Chapter 9, Electromagnetic Waves and Antennas by Sophocles J. Orfanidis
            http://eceweb1.rutgers.edu/~orfanidi/ewa/

        See Also
        --------
        rho
        """
        # TODO: Generalize to higher order modes
        if (self.mode_type != "te") or (self.m != 1) or (self.n != 1):
            raise NotImplementedError

        if self.rho is None:
            return 0

        r, w, ep, rho, f_n = self.r, self.frequency.w, self.ep, \
            self.rho, self.f_norm
        u= self.kc*r
        return 1./r * sqrt( (w*ep)/(2./rho) ) * ( (1/f_n)**2 + 1/(u**2 - 1) ) \
            /sqrt(1-(1/f_n)**2)

    @property
    def Z0(self) -> NumberLike:
        """
        The characteristic impedance of the given mode.
        """
        omega = self.frequency.w
        impedance_dict = {'te':   1j*omega*self.mu/(self.gamma),
                          'tm':   -1j*self.gamma/(omega*self.ep),\
                          }

        return impedance_dict[self.mode_type]
d51da784719b9971a212649528c060503f9b8225 | 1,145 | py | Python | Easy/Que100.py | HuangZengPei/LeetCode | d2b8a1dfe986d71d02d2568b55bad6e5b1c81492 | [
"MIT"
] | 2 | 2019-11-20T14:05:27.000Z | 2019-11-20T14:05:28.000Z | Easy/Que100.py | HuangZengPei/LeetCode | d2b8a1dfe986d71d02d2568b55bad6e5b1c81492 | [
"MIT"
] | null | null | null | Easy/Que100.py | HuangZengPei/LeetCode | d2b8a1dfe986d71d02d2568b55bad6e5b1c81492 | [
"MIT"
] | null | null | null | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    """LeetCode 100 - Same Tree."""

    def preOrder(self, p, list):
        """Append the pre-order traversal of ``p`` into ``list`` in place.

        Kept as a standalone utility.  NOTE: a pre-order value sequence
        alone cannot decide tree equality — [1,2] and [1,null,2] yield the
        same sequence — which is why the traversal-comparison variant of
        ``isSameTree`` was removed (it was also shadowed by the recursive
        definition below and thus dead code).
        """
        if p == None:
            return
        else:
            list.append(p.val)
            self.preOrder(p.left, list)
            self.preOrder(p.right, list)

    def isSameTree(self, p, q):
        """Return True iff ``p`` and ``q`` are structurally identical trees
        with equal node values.

        :type p: TreeNode
        :type q: TreeNode
        :rtype: bool
        """
        # Both empty -> equal; exactly one empty -> shapes differ.
        if not p and not q:
            return True
        if not p or not q:
            return False
        if p.val != q.val:
            return False
        # Values match here; recurse on both subtrees.
        return self.isSameTree(p.left, q.left) and self.isSameTree(p.right, q.right)
ef72a06ada490805daac90b4aa87656bee7afee1 | 650 | py | Python | pysbolgraph/S2ProvUsage.py | zhfanrui/pysbolgraph | c4914705bd9b22a2b69db0fc4d43049fcb07ad17 | [
"BSD-2-Clause"
] | 4 | 2018-06-29T10:43:08.000Z | 2019-03-27T22:33:33.000Z | pysbolgraph/S2ProvUsage.py | zhfanrui/pysbolgraph | c4914705bd9b22a2b69db0fc4d43049fcb07ad17 | [
"BSD-2-Clause"
] | 14 | 2019-01-22T16:03:12.000Z | 2019-11-11T19:05:32.000Z | pysbolgraph/S2ProvUsage.py | zhfanrui/pysbolgraph | c4914705bd9b22a2b69db0fc4d43049fcb07ad17 | [
"BSD-2-Clause"
] | 12 | 2018-07-01T10:59:37.000Z | 2021-03-01T08:48:20.000Z |
from .S2Identified import S2Identified
from .terms import Prov
from .terms import SBOL2
from rdflib import URIRef
from rdflib.namespace import RDF
class S2ProvUsage(S2Identified):
    """Wrapper for a PROV-O ``Usage`` node in an SBOL2 graph: exposes the
    used entity (``prov:entity``) and the role it played (``prov:hadRole``)
    as Python properties backed by the base-class RDF accessors."""
    def __init__(self, g, uri):
        """Bind to graph ``g`` at node ``uri``."""
        super(S2ProvUsage, self).__init__(g, uri)
    @property
    def entity(self):
        """The used entity, wrapped as an identified object."""
        return self.get_identified_property(Prov.entity)
    @entity.setter
    def entity(self, entity):
        self.set_identified_property(Prov.entity, entity)
    @property
    def role(self):
        """The role URI the entity played in the activity."""
        return self.get_uri_property(Prov.hadRole)
    @role.setter
    def role(self, role):
        self.set_uri_property(Prov.hadRole, role)
| 20.967742 | 57 | 0.701538 |
031a3d34cb4e54b14cb038a2d0f88bb00a191756 | 305 | py | Python | src/xdist/__init__.py | kianmeng/pytest-xdist | 290b322a5d48290397ad698fc1dcb729cbe62e07 | [
"MIT"
] | 883 | 2015-09-01T22:41:20.000Z | 2022-03-30T22:32:43.000Z | src/xdist/__init__.py | kianmeng/pytest-xdist | 290b322a5d48290397ad698fc1dcb729cbe62e07 | [
"MIT"
] | 623 | 2015-09-02T00:06:07.000Z | 2022-03-31T11:40:44.000Z | src/xdist/__init__.py | kianmeng/pytest-xdist | 290b322a5d48290397ad698fc1dcb729cbe62e07 | [
"MIT"
] | 190 | 2015-09-01T18:56:08.000Z | 2022-03-25T17:50:56.000Z | from xdist.plugin import (
is_xdist_worker,
is_xdist_master,
get_xdist_worker_id,
is_xdist_controller,
)
from xdist._version import version as __version__
# Names re-exported as the package's public API.
__all__ = [
    "__version__",
    "is_xdist_worker",
    "is_xdist_master",
    "is_xdist_controller",
    "get_xdist_worker_id",
]
| 19.0625 | 49 | 0.714754 |
4c45351b800bd72bfa76522864c7dd66725bab61 | 2,954 | py | Python | keras_transformer/keras_transformer/core/encoder_decoder/DecoderBlock.py | erelcan/keras-transformer | ae88985dd4f1b5f91737e80c7e9c3157b60b4c4f | [
"Apache-2.0"
] | 3 | 2021-02-14T17:10:59.000Z | 2021-02-14T18:09:17.000Z | keras_transformer/keras_transformer/core/encoder_decoder/DecoderBlock.py | erelcan/keras-transformer | ae88985dd4f1b5f91737e80c7e9c3157b60b4c4f | [
"Apache-2.0"
] | null | null | null | keras_transformer/keras_transformer/core/encoder_decoder/DecoderBlock.py | erelcan/keras-transformer | ae88985dd4f1b5f91737e80c7e9c3157b60b4c4f | [
"Apache-2.0"
] | null | null | null | from keras.layers import Layer
from keras_transformer.core.encoder_decoder.sub_layers.SelfAttentionSublayer import SelfAttentionSublayer
from keras_transformer.core.encoder_decoder.sub_layers.PositionWiseFeedForwardSublayer import PositionWiseFeedForwardSublayer
class DecoderBlock(Layer):
    """One transformer decoder block.

    Applies masked (causal) self-attention over the decoder input, then
    encoder-decoder attention against the encoder output, then a
    position-wise feed-forward sublayer. Keras masking is propagated
    throughout (``supports_masking`` is True).

    Args:
        d_model: model (feature) dimensionality of the block's output.
        attention_info: dict with keys "head_num" and "dropout_rate".
        pff_info: dict with keys "inner_length" and "dropout_rate".
    """
    def __init__(self, d_model, attention_info, pff_info, **kwargs):
        super().__init__(**kwargs)
        self.supports_masking = True
        self._d_model = d_model
        self._attention_info = attention_info
        self._pff_info = pff_info
        # The masked self-attention hides the future for each query position,
        # hence the "left_context_mask".
        self._masked_self_attention = SelfAttentionSublayer(self._attention_info["head_num"], "left_context_mask", self._attention_info["dropout_rate"])
        self._self_attention = SelfAttentionSublayer(self._attention_info["head_num"], None, self._attention_info["dropout_rate"])
        self._position_wise_feed_forward = PositionWiseFeedForwardSublayer(self._d_model, self._pff_info["inner_length"], self._pff_info["dropout_rate"])
    def call(self, inputs, mask=None, **kwargs):
        # Assuming that inputs is a tensor which will be copied to Q, K and V in multi-head attention.
        # We may customize this for taking a list of tensors representing Q, K and V which will be directly mapped
        # in multi-head attention.
        # input: [encoder_output, decoder_input]
        # mask: [encoder_output_mask, decoder_mask]
        encoder_output, decoder_input = inputs
        encoder_output_mask, decoder_mask = mask
        # In sublayer2 the encoder output supplies the keys and values
        # (positions 2 and 3 of the [Q, K, V] argument list below).
        sublayer1_output = self._masked_self_attention(decoder_input, mask=decoder_mask)
        sublayer2_output = self._self_attention([sublayer1_output, encoder_output, encoder_output], [decoder_mask, encoder_output_mask, encoder_output_mask])
        output = self._position_wise_feed_forward(sublayer2_output, mask=decoder_mask)
        return output
    def compute_output_shape(self, input_shape):
        # Keep the decoder input's leading two dimensions; features become d_model.
        return input_shape[1][0], input_shape[1][1], self._d_model
    def compute_mask(self, inputs, mask=None):
        # The input mask is [encoder_output_mask, decoder_mask]
        # This part is tricky:
        # If we want to explicitly manage the encoder and decoder masks outside; this method should have no effect.
        # Depending on the usage, we may need to return both masks or just the one on the decoder output/sequence.
        # For now, we return the decoder_mask only; for next decoder in decoder layer, mask will be prepared by
        # the DecoderStack.
        return mask[1]
    def get_config(self):
        """Serialize the constructor arguments for Keras (de)serialization."""
        config = {
            "d_model": self._d_model,
            "attention_info": self._attention_info,
            "pff_info": self._pff_info
        }
        base_config = super().get_config()
        config.update(base_config)
        return config
| 48.42623 | 157 | 0.721733 |
2dd69fb5fb52e08a04fd1621a2a947048539e632 | 2,730 | py | Python | tools/lint.py | mvaisakh/art-testing | 89dab669dca6d768a79101b1e480794fa46012d5 | [
"Apache-2.0"
] | null | null | null | tools/lint.py | mvaisakh/art-testing | 89dab669dca6d768a79101b1e480794fa46012d5 | [
"Apache-2.0"
] | null | null | null | tools/lint.py | mvaisakh/art-testing | 89dab669dca6d768a79101b1e480794fa46012d5 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# Copyright 2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import multiprocessing
import os
import subprocess
import sys
import utils
def BuildOptions():
    """Build the command-line parser and return the parsed arguments.

    Only one option is exposed: the number of parallel lint jobs, which
    defaults to the machine's CPU count.
    """
    # ArgumentDefaultsHelpFormatter makes `--help` display default values.
    arg_parser = argparse.ArgumentParser(
        description="Lint the java code in the repository.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    arg_parser.add_argument(
        '--jobs', '-j', metavar='N', type=int, nargs='?',
        default=multiprocessing.cpu_count(), help='Lint using N jobs')
    return arg_parser.parse_args()
def GetJavaFiles():
    """Collect every .java file under the framework and benchmark trees.

    Returns a sorted list of file paths.
    """
    java_files = []
    for source_root in [utils.dir_framework, utils.dir_benchmarks]:
        for base, _, names in os.walk(source_root):
            java_files.extend(
                os.path.join(base, name) for name in names
                if name.endswith('.java'))
    return sorted(java_files)
def Lint(filename):
    """Run checkstyle on `filename`.

    Prints the command being executed and, when the check fails, the
    checkstyle output. Returns the checkstyle process' exit code
    (0 means the file is clean).
    """
    command = \
        [os.path.join(utils.dir_tools, 'checkstyle', 'checkstyle'), filename]
    print(' '.join(command))
    process = subprocess.Popen(command,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
    out, err = process.communicate()
    rc = process.wait()
    if rc != 0:
        # `communicate()` returns bytes; decode so failures print readable
        # text rather than a bytes repr (b'...') under Python 3.
        print(out.decode(errors='replace'))
    return rc
def EnsureCheckstyleAvailable():
    # Run the checkstyle script once to ensure the checkstyle jar file is
    # available. The output itself is irrelevant (the unused binding was
    # dropped); check_output raises CalledProcessError if the run fails.
    subprocess.check_output([os.path.join(utils.dir_tools, 'checkstyle', 'checkstyle')])
def LintFiles(files, jobs = 1):
    """Lint `files` in parallel using a pool of `jobs` worker processes.

    Returns the number of files that failed the lint run (the sum of the
    per-file return codes; assumes checkstyle exits with 0 or 1 per file --
    TODO confirm).
    """
    EnsureCheckstyleAvailable()
    pool = multiprocessing.Pool(jobs)
    # The '.get(9999999)' is workaround to allow killing the test script with
    # ctrl+C from the shell. This bug is documented at
    # http://bugs.python.org/issue8296.
    try:
        results = pool.map_async(Lint, files).get(9999999)
        pool.close()
        pool.join()
    except KeyboardInterrupt:
        # Stop the workers immediately and propagate the interruption as a
        # non-zero exit.
        pool.terminate()
        sys.exit(1)
    n_incorrectly_formatted_files = sum(results)
    return n_incorrectly_formatted_files
if __name__ == "__main__":
    args = BuildOptions()
    # The process exit code is the number of files that failed linting
    # (0 == everything clean), as returned by LintFiles.
    rc = LintFiles(GetJavaFiles(), args.jobs)
    sys.exit(rc)
| 30.674157 | 90 | 0.675824 |
565c3fc7bbc6b0d7e26084a7a3226b3a2e8db8f9 | 1,534 | py | Python | build_site.py | devinbrady/openanc | 5e69dceb08120dd80a902fdfcaaf1a2a9947afaa | [
"CC0-1.0"
] | 4 | 2020-08-04T20:55:18.000Z | 2022-03-14T23:18:47.000Z | build_site.py | devinbrady/openanc | 5e69dceb08120dd80a902fdfcaaf1a2a9947afaa | [
"CC0-1.0"
] | 12 | 2020-07-16T17:46:12.000Z | 2022-01-24T22:02:07.000Z | build_site.py | devinbrady/openanc | 5e69dceb08120dd80a902fdfcaaf1a2a9947afaa | [
"CC0-1.0"
] | 1 | 2020-08-04T02:59:24.000Z | 2020-08-04T02:59:24.000Z | """
All steps necessary to build OpenANC pages
"""
import argparse
from datetime import datetime
from scripts.refresh_data import RefreshData
from scripts.index import BuildIndex
from scripts.districts import BuildDistricts
from scripts.ancs import BuildANCs
from scripts.wards import BuildWards
# Record the start time so the total build duration can be reported below.
start_time = datetime.now()
parser = argparse.ArgumentParser()
parser.add_argument('-r', '--refresh-data', action='store_true', help='Refresh local CSVs from Google Sheets')
parser.add_argument('-i', '--build-index', action='store_true', help='Build index and other top-level pages')
parser.add_argument('-w', '--build-wards', action='store_true', help='Build page for each Ward')
parser.add_argument('-a', '--build-ancs', action='store_true', help='Build page for each ANC')
parser.add_argument('-d', '--build-districts', action='store_true', help='Build page for each SMD')
args = parser.parse_args()
# Each flag triggers one independent build step; any combination may be given.
if args.refresh_data:
    r = RefreshData()
    r.run()
if args.build_index:
    bi = BuildIndex()
    bi.run()
if args.build_wards:
    bw = BuildWards()
    bw.run()
if args.build_ancs:
    ba = BuildANCs()
    ba.run()
if args.build_districts:
    bd = BuildDistricts()
    bd.run()
# With no flags at all, do nothing but tell the user why.
if not any([args.refresh_data, args.build_index, args.build_wards, args.build_ancs, args.build_districts]):
    print('No arguments provided to build_site script, exiting.')
end_time = datetime.now()
time_elapsed = end_time - start_time
seconds_elapsed = time_elapsed.total_seconds()
print(f'build_site: {seconds_elapsed:.1f} seconds') | 28.943396 | 110 | 0.736636 |
89c567a4b46e20dafe52b34e4a3eb8118180fb68 | 1,646 | py | Python | promoterz/representation/oldschool.py | emillj/gekkoJaponicus | d77c8c7a303b97a3643eb3f3c8b995b8b393f3f7 | [
"MIT"
] | null | null | null | promoterz/representation/oldschool.py | emillj/gekkoJaponicus | d77c8c7a303b97a3643eb3f3c8b995b8b393f3f7 | [
"MIT"
] | null | null | null | promoterz/representation/oldschool.py | emillj/gekkoJaponicus | d77c8c7a303b97a3643eb3f3c8b995b8b393f3f7 | [
"MIT"
] | 1 | 2021-11-29T20:18:25.000Z | 2021-11-29T20:18:25.000Z | #!/bin/python
import random
import json
import os
from copy import deepcopy
from deap import base
from deap import creator
from deap import tools
from deap import algorithms
import numpy as np
from .. import functions
def constructPhenotype(stratSettings, individue):
    """Decode an individual's genes into concrete strategy parameter values.

    Each gene is an integer in [0, 100) (see createRandomVarList); it is
    rescaled linearly into the (low, high) limits that `stratSettings`
    declares for the corresponding attribute. Attributes are matched to
    genes in sorted-name order, so the gene->attribute mapping is
    deterministic.

    Returns the phenotype dict, post-processed by
    functions.expandNestedParameters. (The previously unused
    `Strategy = individue.Strategy` binding was removed.)
    """
    # Rescale a 0-100 gene value into the [low, high] interval.
    rescale = lambda gene, lim: ((lim[1] - lim[0]) / 100) * gene + lim[0]
    attribute_names = sorted(stratSettings.keys())
    phenotype = {}
    for index, name in enumerate(attribute_names):
        phenotype[name] = rescale(individue[index], stratSettings[name])
    return functions.expandNestedParameters(phenotype)
def createRandomVarList(IndSize):
    """Return a list of IndSize genes, each drawn uniformly from [0, 100)."""
    return [random.randrange(0, 100) for _ in range(IndSize)]
def initInd(Criterion, Attributes):
    """Instantiate `Criterion` and fill it with one random gene per attribute.

    `Criterion` is expected to behave like a list (slice assignment), e.g.
    a DEAP Individual class; `Attributes` is the strategy-settings mapping.
    """
    individual = Criterion()
    individual[:] = createRandomVarList(len(Attributes))
    return individual
def getToolbox(Strategy, genconf, Attributes):
    """Assemble the DEAP toolbox for this representation: individual/population
    factories, crossover, mutation and the phenotype decoder.

    NOTE(review): `genconf` is accepted but never used here -- confirm it is
    intentional.
    """
    toolbox = base.Toolbox()
    creator.create("FitnessMax", base.Fitness, weights=(1.0,))
    # Individuals are lists of genes carrying the Strategy as a class attribute.
    creator.create("Individual", list,
                   fitness=creator.FitnessMax, Strategy=Strategy)
    toolbox.register("newind", initInd, creator.Individual, Attributes)
    toolbox.register("population", tools.initRepeat, list, toolbox.newind)
    toolbox.register("mate", tools.cxTwoPoint)
    # NOTE(review): with low=10, up=10 every mutated gene is presumably forced
    # to the value 10, while fresh genes are drawn from [0, 100) -- confirm
    # this is intended and not a typo for e.g. low=0, up=100.
    toolbox.register("mutate", tools.mutUniformInt, low=10, up=10, indpb=0.2)
    toolbox.register("constructPhenotype", constructPhenotype, Attributes)
    return toolbox
| 27.433333 | 77 | 0.709599 |
924fba0647ef5146df4f41163e8bc74a3044652e | 2,054 | py | Python | mm2d/core/backend/pymunk/body.py | adamheins/mm2d | 9a7b6ef63998757172c3100b916621a25d165d65 | [
"MIT"
] | null | null | null | mm2d/core/backend/pymunk/body.py | adamheins/mm2d | 9a7b6ef63998757172c3100b916621a25d165d65 | [
"MIT"
] | null | null | null | mm2d/core/backend/pymunk/body.py | adamheins/mm2d | 9a7b6ef63998757172c3100b916621a25d165d65 | [
"MIT"
] | null | null | null | import pymunk
from mm2d.shapes import Circle, Segment, Polygon
from mm2d.body import Fixture, Body
from mm2d.spatial import Transform, Twist, State
from .util import transform_to_pymunk
def shape_to_pymunk(shape) -> pymunk.Shape:
    """Convert one of our shape types (Circle, Segment, Polygon) into the
    corresponding pymunk shape.

    Raises:
        TypeError: if `shape` is not one of the supported shape types.
    """
    if isinstance(shape, Circle):
        pymunk_shape = pymunk.Circle(radius=shape.radius, offset=shape.position)
    elif isinstance(shape, Segment):
        pymunk_shape = pymunk.Segment(a=shape.a, b=shape.b, radius=shape.radius)
    elif isinstance(shape, Polygon):
        # A polygon may carry an optional local transform; convert it to
        # pymunk's representation only when present.
        pymunk_transform = None
        if shape.transform is not None:
            pymunk_transform = transform_to_pymunk(shape.transform)
        pymunk_shape = pymunk.Poly(
            vertices=shape.vertices, radius=shape.radius, transform=pymunk_transform
        )
    else:
        raise TypeError(f"{shape} is not a valid shape.")
    return pymunk_shape
def fixture_to_pymunk(fixture: Fixture) -> pymunk.Shape:
    """Convert a Fixture into a pymunk shape, copying over its physical
    properties (mass, friction, elasticity)."""
    pymunk_shape = shape_to_pymunk(fixture.shape)
    pymunk_shape.mass = fixture.mass
    pymunk_shape.friction = fixture.friction
    pymunk_shape.elasticity = fixture.elasticity
    return pymunk_shape
def body_to_pymunk(body: Body) -> "tuple":
    """Convert a Body into a pymunk body plus its attached shapes.

    Returns a ``(pymunk_body, pymunk_shapes)`` tuple; each shape is already
    bound to the body via its ``.body`` attribute. (The return annotation
    previously said ``pymunk.Body`` even though a tuple is returned.)
    """
    pymunk_body = pymunk.Body()
    pymunk_shapes = []
    for fixture in body.fixtures:
        pymunk_shape = fixture_to_pymunk(fixture)
        pymunk_shape.body = pymunk_body
        pymunk_shapes.append(pymunk_shape)
    # TODO not sure if I'm missing something with setting position and angle -
    # should I only do this after adding shapes or something?
    pymunk_body.position = body.pose.position
    pymunk_body.angle = body.pose.angle
    return pymunk_body, pymunk_shapes
class PymunkBodyBackend:
    """Rigid body backend using pymunk."""
    def __init__(self, body):
        # Mirror the abstract body into pymunk objects once, up front.
        self.pymunk_body, self.pymunk_shapes = body_to_pymunk(body)
    def state(self):
        """Return the body's current State (pose + twist) read from pymunk."""
        pose = Transform(self.pymunk_body.position, self.pymunk_body.angle)
        twist = Twist(self.pymunk_body.velocity, self.pymunk_body.angular_velocity)
        return State(pose, twist)
| 33.672131 | 84 | 0.717137 |
77e965477a37e700e6be14defbffca1b870271f9 | 2,341 | py | Python | binary_search_tree.py | ValtersJZ/LearningDataStructuresAndAlgorithms | d608494cc6a3bba4b3a91a9469c59de077fcf36f | [
"MIT"
] | 1 | 2019-07-10T10:44:09.000Z | 2019-07-10T10:44:09.000Z | binary_search_tree.py | ValtersJZ/LearningDataStructuresAndAlgorithms | d608494cc6a3bba4b3a91a9469c59de077fcf36f | [
"MIT"
] | null | null | null | binary_search_tree.py | ValtersJZ/LearningDataStructuresAndAlgorithms | d608494cc6a3bba4b3a91a9469c59de077fcf36f | [
"MIT"
] | null | null | null | class Node(object):
    def __init__(self, value):
        """Create a leaf node holding `value`, with no children yet."""
        self.value = value
        self.left = None
        self.right = None
class BST(object):
    """Binary search tree holding unique, mutually comparable values.

    The tree is created with a single root value; further values are added
    with insert(). Inserting a duplicate raises an Exception.
    """
    def __init__(self, root):
        self.root = Node(root)
    def insert(self, new_val):
        """Add `new_val` to the tree, walking down iteratively."""
        node = self.root
        if not node:
            # Mirrors the original behaviour when the tree has no root.
            self.root = Node(new_val)
            return
        while True:
            if new_val < node.value:
                if node.left:
                    node = node.left
                else:
                    node.left = Node(new_val)
                    return
            elif new_val > node.value:
                if node.right:
                    node = node.right
                else:
                    node.right = Node(new_val)
                    return
            else:
                # This practice implementation assumes no duplicate values
                raise Exception("Error: tried to insert that is already present in the binary search tree")
    def search(self, find_val):
        """Return True if `find_val` is stored in the tree, else False."""
        node = self.root
        while node:
            if find_val == node.value:
                return True
            node = node.left if find_val < node.value else node.right
        return False
    def print_tree(self):
        """Return the pre-order traversal as values joined by '-'."""
        pieces = []
        self._collect_preorder(self.root, pieces)
        return "-".join(pieces)
    def _collect_preorder(self, node, pieces):
        # Pre-order: visit the node, then its left and right subtrees.
        if node:
            pieces.append("{}".format(node.value))
            self._collect_preorder(node.left, pieces)
            self._collect_preorder(node.right, pieces)
if __name__ == '__main__':
    # Small demo: build the tree pictured in the string below, then
    # exercise search() and print_tree().
    """
    4
    / \
    2   5
    / \
    1   3
    """
    tree = BST(4)
    # Insert elements
    tree.insert(2)
    tree.insert(1)
    tree.insert(3)
    tree.insert(5)
    # Check search
    # Should be True
    print(tree.search(4))
    # Should be False
    print(tree.search(6))
    # Pre order depth first search print
print("pre-order DFS traverse:", tree.print_tree()) | 28.54878 | 107 | 0.554464 |
e6a178b62334da0eca930230515c01df69a7c81c | 1,530 | py | Python | ivy/functional/backends/tensorflow/data_type.py | saurbhc/ivy | 20b327b4fab543b26ad5a18acf4deddd6e3c804b | [
"Apache-2.0"
] | null | null | null | ivy/functional/backends/tensorflow/data_type.py | saurbhc/ivy | 20b327b4fab543b26ad5a18acf4deddd6e3c804b | [
"Apache-2.0"
] | 1 | 2022-03-08T13:29:20.000Z | 2022-03-08T13:29:20.000Z | ivy/functional/backends/tensorflow/data_type.py | saurbhc/ivy | 20b327b4fab543b26ad5a18acf4deddd6e3c804b | [
"Apache-2.0"
] | null | null | null | # global
import numpy as np
import tensorflow as tf
from typing import Union, Tuple
from tensorflow.python.types.core import Tensor
from tensorflow.python.framework.dtypes import DType
# local
import ivy
# noinspection PyShadowingBuiltins
def iinfo(type: Union[DType, str, Tensor])\
    -> np.iinfo:
    # Integer machine limits for `type` (a dtype, its string name, or a
    # tensor); shadowing the builtin `type` is intentional here.
    return tf.experimental.numpy.iinfo(ivy.dtype_to_str(type))
class Finfo:
    """Float machine-limits wrapper.

    Exposes the attributes of an underlying ``finfo``-like object
    (``bits``, ``eps``, ``max``, ``min``, ``tiny``), coercing the numeric
    ones to plain Python floats.
    """
    def __init__(self, tf_finfo):
        self._machine_limits = tf_finfo
    @property
    def bits(self):
        """Number of bits occupied by the type."""
        return self._machine_limits.bits
    @property
    def eps(self):
        """Smallest representable increment at 1.0, as a Python float."""
        return float(self._machine_limits.eps)
    @property
    def max(self):
        """Largest representable finite value, as a Python float."""
        return float(self._machine_limits.max)
    @property
    def min(self):
        """Most negative representable finite value, as a Python float."""
        return float(self._machine_limits.min)
    @property
    def smallest_normal(self):
        """Smallest positive normal value (``tiny``), as a Python float."""
        return float(self._machine_limits.tiny)
# noinspection PyShadowingBuiltins
def finfo(type: Union[DType, str, Tensor])\
    -> Finfo:
    # NOTE(review): iinfo above converts with ivy.dtype_to_str while this
    # uses ivy.dtype_from_str -- confirm the asymmetry is intentional.
    return Finfo(tf.experimental.numpy.finfo(ivy.dtype_from_str(type)))
def result_type(*arrays_and_dtypes: Union[Tensor, tf.DType]) -> tf.DType:
    """Return the dtype obtained by applying type-promotion rules to the
    given tensors and/or dtypes, reducing pairwise left to right.

    Bug fix: the single-operand branch previously passed the *tuple*
    ``arrays_and_dtypes`` itself instead of unpacking it, promoting a tuple
    rather than the operand.
    """
    if len(arrays_and_dtypes) <= 1:
        # Unpack so a single operand is promoted directly (and zero operands
        # raise, as numpy's result_type does).
        return tf.experimental.numpy.result_type(*arrays_and_dtypes)
    result = tf.experimental.numpy.result_type(arrays_and_dtypes[0], arrays_and_dtypes[1])
    for i in range(2, len(arrays_and_dtypes)):
        result = tf.experimental.numpy.result_type(result, arrays_and_dtypes[i])
    return result
def broadcast_to (x: Tensor, shape: Tuple[int, ...])-> Tensor:
    """Broadcast `x` to the given shape via tf.broadcast_to."""
    return tf.broadcast_to(x, shape)
| 24.677419 | 90 | 0.701307 |
c3a2875d3476b47c8f992f6d3b1cc47576384c26 | 1,722 | py | Python | src/tuplenet/tools/rebuild-etcd.py | vipshop/Tuplenet | 25701765b4dfd11ff1da1a126109cce04d987a4b | [
"Apache-2.0"
] | null | null | null | src/tuplenet/tools/rebuild-etcd.py | vipshop/Tuplenet | 25701765b4dfd11ff1da1a126109cce04d987a4b | [
"Apache-2.0"
] | null | null | null | src/tuplenet/tools/rebuild-etcd.py | vipshop/Tuplenet | 25701765b4dfd11ff1da1a126109cce04d987a4b | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import sys
import os
def redefine_sys_path():
    """Prefer the bundled third-party packages over system site-packages.

    Removes the stock python2.7 site-packages entries from sys.path, puts
    the 'py_third' directory (three path components above this file) first,
    and appends this file's grandparent directory so sibling packages can
    be imported.
    """
    site_packages_dirs = (
        '/usr/lib/python2.7/site-packages',
        '/usr/lib64/python2.7/site-packages',
    )
    for site_dir in site_packages_dirs:
        if site_dir in sys.path:
            sys.path.remove(site_dir)
    # py_third lives three path components above this script.
    script_path = os.path.realpath(__file__)
    third_party = '/'.join(script_path.split('/')[0:-3]) + '/py_third'
    sys.path = [third_party] + sys.path
    parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    sys.path.append(parent_dir)
# Must run before `import etcd3` below so the bundled copy is found first.
redefine_sys_path()
import etcd3
from optparse import OptionParser
if __name__ == "__main__":
    parser = OptionParser("")
    parser.add_option("-a", "--host", dest = "host",
                      action = "store", type = "string",
                      default = "localhost:2379", help = "etcd host address")
    parser.add_option("-f", "--file", dest = "file",
                      action = "store", type = "string",
                      default = "data.txt",
                      help = "data file to construct tuplenet env")
    (options, args) = parser.parse_args()
    # The host option is given as "ip:port".
    host_ip = options.host.split(':')[0]
    host_port = options.host.split(':')[1]
    etcd = etcd3.client(host_ip, host_port)
    # The dump file alternates key lines and value lines; pair them up.
    fd = open(options.file, 'r')
    etcd_data = fd.readlines()
    fd.close()
    kv_array = []
    # NOTE: xrange means this script targets Python 2.
    for i in xrange(0, len(etcd_data), 2):
        key = etcd_data[i].rstrip('\n')
        value = etcd_data[i+1].rstrip('\n')
        kv_array.append((key, value))
    # Replay every key/value pair into etcd.
    for key,value in kv_array:
        etcd.put(key,value)
| 33.764706 | 77 | 0.618467 |
1f265ae9c13617d5af6ebaebb69f456c03cd9ba9 | 28,788 | py | Python | tests/test_caching.py | AtilaSaraiva/devito | 629181c89a43054b9710920e76af0eb14138c97c | [
"MIT"
] | 199 | 2016-08-18T23:33:05.000Z | 2019-12-24T07:08:48.000Z | tests/test_caching.py | AtilaSaraiva/devito | 629181c89a43054b9710920e76af0eb14138c97c | [
"MIT"
] | 949 | 2016-04-25T11:41:34.000Z | 2019-12-27T10:43:40.000Z | tests/test_caching.py | AtilaSaraiva/devito | 629181c89a43054b9710920e76af0eb14138c97c | [
"MIT"
] | 78 | 2016-08-30T07:42:34.000Z | 2019-12-13T20:34:45.000Z | from ctypes import byref, c_void_p
import weakref
import numpy as np
import pytest
from devito import (Grid, Function, TimeFunction, SparseFunction, SparseTimeFunction,
ConditionalDimension, SubDimension, Constant, Operator, Eq, Dimension,
DefaultDimension, _SymbolCache, clear_cache, solve, VectorFunction,
TensorFunction, TensorTimeFunction, VectorTimeFunction)
from devito.types import (DeviceID, NThreadsBase, NPThreads, Object, Scalar, Symbol,
ThreadID)
@pytest.fixture
def operate_on_empty_cache():
    """
    To be used by tests that assert against the cache size. There are two
    reasons this is required:
        * Most symbolic objects embed further symbolic objects. For example,
          Function embeds Dimension, DerivedDimension embed a parent Dimension,
          and so on. The embedded objects require more than one call to
          `clear_cache` to be evicted (typically two -- the first call
          evicts the main object, then the children become unreferenced and so
          they are evicted upon the second call). So, depending on what tests
          were executed before, it is possible that one `clear_cache()` evicts
          more than expected, making it impossible to assert against cache sizes.
        * Due to some global symbols in `conftest.py`, it is possible that when
          for example a SparseFunction is instantiated, fewer symbolic object than
          expected are created, since some of them are available from the cache
          already.
    """
    # Run the test against a pristine cache, then restore the previous
    # contents so later tests are unaffected.
    old_cache = _SymbolCache.copy()
    _SymbolCache.clear()
    yield
    _SymbolCache.update(old_cache)
class TestHashing(object):
    """
    Test hashing of symbolic objects.
    """
    def test_constant(self):
        """Test that different Constants have different hash value."""
        c0 = Constant(name='c')
        c1 = Constant(name='c')
        assert c0 is not c1
        assert hash(c0) != hash(c1)
    def test_dimension(self):
        """Test that different Dimensions have different hash value."""
        d0 = Dimension(name='d')
        s0 = Scalar(name='s')
        d1 = Dimension(name='d', spacing=s0)
        assert hash(d0) != hash(d1)
        s1 = Scalar(name='s', dtype=np.int32)
        d2 = Dimension(name='d', spacing=s1)
        assert hash(d1) != hash(d2)
        d3 = Dimension(name='d', spacing=Constant(name='s1'))
        assert hash(d3) != hash(d0)
        assert hash(d3) != hash(d1)
    def test_sub_dimension(self):
        """Test that different SubDimensions have different hash value."""
        d0 = Dimension(name='d')
        d1 = Dimension(name='d', spacing=Scalar(name='s'))
        di0 = SubDimension.middle('di', d0, 1, 1)
        di1 = SubDimension.middle('di', d1, 1, 1)
        assert hash(di0) != hash(d0)
        assert hash(di0) != hash(di1)
        dl0 = SubDimension.left('dl', d0, 2)
        assert hash(dl0) != hash(di0)
    def test_conditional_dimension(self):
        """Test that different ConditionalDimensions have different hash value."""
        d0 = Dimension(name='d')
        s0 = Scalar(name='s')
        d1 = Dimension(name='d', spacing=s0)
        cd0 = ConditionalDimension(name='cd', parent=d0, factor=4)
        cd1 = ConditionalDimension(name='cd', parent=d0, factor=5)
        assert cd0 is not cd1
        assert hash(cd0) != hash(cd1)
        cd2 = ConditionalDimension(name='cd', parent=d0, factor=4, indirect=True)
        assert hash(cd0) != hash(cd2)
        cd3 = ConditionalDimension(name='cd', parent=d1, factor=4)
        assert hash(cd0) != hash(cd3)
        s1 = Scalar(name='s', dtype=np.int32)
        cd4 = ConditionalDimension(name='cd', parent=d0, factor=4, condition=s0 > 3)
        assert hash(cd0) != hash(cd4)
        cd5 = ConditionalDimension(name='cd', parent=d0, factor=4, condition=s1 > 3)
        assert hash(cd0) != hash(cd5)
        assert hash(cd4) != hash(cd5)
    def test_default_dimension(self):
        """Test that different DefaultDimensions have different hash value."""
        dd0 = DefaultDimension(name='dd')
        dd1 = DefaultDimension(name='dd')
        assert hash(dd0) != hash(dd1)
    @pytest.mark.parametrize('FunctionType', [Function, TimeFunction])
    def test_function(self, FunctionType):
        """Test that different Functions have different hash value."""
        grid0 = Grid(shape=(3, 3))
        u0 = FunctionType(name='u', grid=grid0)
        grid1 = Grid(shape=(4, 4))
        u1 = FunctionType(name='u', grid=grid1)
        assert u0 is not u1
        assert hash(u0) != hash(u1)
        # Now with the same grid
        u2 = FunctionType(name='u', grid=grid0)
        assert u0 is not u2
        assert hash(u0) != hash(u2)
    @pytest.mark.parametrize('FunctionType', [SparseFunction, SparseTimeFunction])
    def test_sparse_function(self, FunctionType):
        """Test that different SparseFunctions have different hash value."""
        grid0 = Grid(shape=(3, 3))
        u0 = FunctionType(name='u', grid=grid0, npoint=1, nt=10)
        grid1 = Grid(shape=(4, 4))
        u1 = FunctionType(name='u', grid=grid1, npoint=1, nt=10)
        assert u0 is not u1
        assert hash(u0) != hash(u1)
        # Now with the same grid
        u2 = FunctionType(name='u', grid=grid0, npoint=1, nt=10)
        assert u0 is not u2
        assert hash(u0) != hash(u2)
        # Now with different number of sparse points
        u3 = FunctionType(name='u', grid=grid0, npoint=2, nt=10)
        assert u0 is not u3
        assert hash(u0) != hash(u3)
        # Now with different number of timesteps stored
        u4 = FunctionType(name='u', grid=grid0, npoint=1, nt=14)
        assert u0 is not u4
        assert hash(u0) != hash(u4)
    @pytest.mark.parametrize('FunctionType', [TensorFunction, TensorTimeFunction,
                                              VectorTimeFunction, VectorFunction])
    def test_tensor_hash(self, FunctionType):
        """Test that different Functions have different hash value."""
        grid0 = Grid(shape=(3, 3))
        u0 = FunctionType(name='u', grid=grid0)
        grid1 = Grid(shape=(4, 4))
        u1 = FunctionType(name='u', grid=grid1)
        assert u0 is not u1
        assert hash(u0) != hash(u1)
        # Now with the same grid
        u2 = FunctionType(name='u', grid=grid0)
        assert u0 is not u2
        assert hash(u0) != hash(u2)
    def test_bound_symbol(self):
        """Test that the `_C_symbol`s of distinct Functions differ and hash
        differently."""
        grid = Grid(shape=(4, 4))
        u0 = TimeFunction(name='u', grid=grid)
        u1 = TimeFunction(name='u', grid=grid)
        assert u0._C_symbol is not u1._C_symbol  # Obviously
        assert hash(u0._C_symbol) != hash(u1._C_symbol)
        assert u0._C_symbol != u1._C_symbol
    def test_objects(self):
        """Test that Objects are always unique, regardless of the carried
        value."""
        v0 = byref(c_void_p(3))
        v1 = byref(c_void_p(4))
        dtype = type('Bar', (c_void_p,), {})
        foo0 = Object('foo', dtype, v0)
        foo1 = Object('foo', dtype, v0)
        foo2 = Object('foo', dtype, v1)
        # Obviously:
        assert foo0 is not foo1
        assert foo0 is not foo2
        assert foo1 is not foo2
        # Carried value doesn't matter -- an Object is always unique
        assert hash(foo0) != hash(foo1)
        # And obviously:
        assert hash(foo0) != hash(foo2)
        assert hash(foo1) != hash(foo2)
class TestCaching(object):
"""
Test the symbol cache infrastructure.
"""
@pytest.mark.parametrize('FunctionType', [Function, TimeFunction])
    def test_function(self, FunctionType):
        """Test that new u[x, y] instances don't cache"""
        grid = Grid(shape=(3, 4))
        u0 = FunctionType(name='u', grid=grid)
        u0.data[:] = 6.
        # Same name and grid, but a distinct object with its own buffer.
        u1 = FunctionType(name='u', grid=grid)
        u1.data[:] = 2.
        assert np.allclose(u0.data, 6.)
        assert np.allclose(u1.data, 2.)
@pytest.mark.parametrize('FunctionType', [Function, TimeFunction])
    def test_function_same_indices(self, FunctionType):
        """Test caching of derived u[x, y] instance from derivative"""
        grid = Grid(shape=(3, 4))
        u0 = FunctionType(name='u', grid=grid)
        u0.data[:] = 6.
        # Pick u(x, y) and u(x + h_x, y) from derivative
        u1 = u0.dx.evaluate.args[1].args[2]
        u2 = u0.dx.evaluate.args[0].args[1]
        # Both derivative-derived views must alias u0's data.
        assert np.allclose(u1.data, 6.)
        assert np.allclose(u2.data, 6.)
@pytest.mark.parametrize('FunctionType', [Function, TimeFunction])
    def test_function_different_indices(self, FunctionType):
        """Test caching of u[x + h, y] instance from derivative"""
        grid = Grid(shape=(3, 4))
        u0 = FunctionType(name='u', grid=grid)
        u0.data[:] = 6.
        # Pick u[x + h, y] (different indices) from derivative
        u = u0.dx.evaluate.args[0].args[1]
        # The shifted symbol still shares u0's underlying data.
        assert np.allclose(u.data, u0.data)
@pytest.mark.parametrize('FunctionType', [Function, TimeFunction])
    def test_function_duplicates(self, FunctionType):
        """Test that shifting indices forth and back reuses cached objects
        rather than creating duplicates."""
        grid = Grid(shape=(3, 4))
        _cache_size = len(_SymbolCache)
        x = grid.dimensions[0]
        u0 = FunctionType(name='u', grid=grid)
        # u[x + h_x]
        uf = u0.subs({x: x + x.spacing})
        # u[x] shifting back from u[x + h_x]
        ub = uf.subs({x: x - x.spacing})
        # Make sure ub is u0
        assert ub is u0
        assert hash(ub) == hash(u0)
        # Three new cache entries: u, u(t,x,y), u(t, x+h_x, y)
        ncreated = 3
        assert len(_SymbolCache) == _cache_size + ncreated
        # shift again, no new entry should be created
        uf2 = ub.subs({x: x + x.spacing})
        assert uf is uf2
        assert len(_SymbolCache) == _cache_size + ncreated
    def test_symbols(self):
        """
        Test that ``Symbol(name='s') != Scalar(name='s') != Dimension(name='s')``.
        They all:
            * rely on the same caching mechanism
            * boil down to creating a sympy.Symbol
            * created with the same args/kwargs (``name='s'``)
        """
        # Same name, three different types -> three distinct cached objects.
        sy = Symbol(name='s')
        sc = Scalar(name='s')
        d = Dimension(name='s')
        assert sy is not sc
        assert sc is not d
        assert sy is not d
        assert isinstance(sy, Symbol)
        assert isinstance(sc, Scalar)
        assert isinstance(d, Dimension)
    def test_symbols_args_vs_kwargs(self):
        """
        Unlike Functions, Symbols don't require the use of a kwarg to specify the name.
        This test basically checks that `Symbol('s') is Symbol(name='s')`, i.e. that we
        don't make any silly mistakes when it gets to compute the cache key.
        """
        # Positional and keyword name must produce the same cache key.
        v_arg = Symbol('v')
        v_kwarg = Symbol(name='v')
        assert v_arg is v_kwarg
        d_arg = Dimension('d100')
        d_kwarg = Dimension(name='d100')
        assert d_arg is d_kwarg
    def test_scalar(self):
        """
        Test that Scalars with same name but different attributes do not alias to
        the same Scalar. Conversely, if the name and the attributes are the same,
        they must alias to the same Scalar.
        """
        s0 = Scalar(name='s0')
        s1 = Scalar(name='s0')
        assert s0 is s1
        # Both dtype and is_const participate in the cache key.
        s2 = Scalar(name='s0', dtype=np.int32)
        assert s2 is not s1
        s3 = Scalar(name='s0', is_const=True)
        assert s3 is not s1
    def test_dimension(self):
        """
        Test that Dimensions with same name but different attributes do not alias to
        the same Dimension. Conversely, if the name and the attributes are the same,
        they must alias to the same Dimension.
        """
        d0 = Dimension(name='d')
        d1 = Dimension(name='d')
        assert d0 is d1
        s0 = Scalar(name='s0')
        s1 = Scalar(name='s1')
        # A different spacing Symbol (by name or by kind) changes the key.
        d2 = Dimension(name='d', spacing=s0)
        d3 = Dimension(name='d', spacing=s1)
        assert d2 is not d3
        d4 = Dimension(name='d', spacing=s1)
        assert d3 is d4
        d5 = Dimension(name='d', spacing=Constant(name='s1'))
        assert d2 is not d5
    def test_conditional_dimension(self):
        """
        Test that ConditionalDimensions with same name but different attributes do not
        alias to the same ConditionalDimension. Conversely, if the name and the attributes
        are the same, they must alias to the same ConditionalDimension.
        """
        i = Dimension(name='i')
        ci0 = ConditionalDimension(name='ci', parent=i, factor=4)
        ci1 = ConditionalDimension(name='ci', parent=i, factor=4)
        assert ci0 is ci1
        ci2 = ConditionalDimension(name='ci', parent=i, factor=8)
        assert ci2 is not ci1
        ci3 = ConditionalDimension(name='ci', parent=i, factor=4, indirect=True)
        assert ci3 is not ci1
        s = Scalar(name='s')
        ci4 = ConditionalDimension(name='ci', parent=i, factor=4, condition=s > 3)
        assert ci4 is not ci1
        # Identical condition expression -> same cached object.
        ci5 = ConditionalDimension(name='ci', parent=i, factor=4, condition=s > 3)
        assert ci5 is ci4
    def test_sub_dimension(self):
        """
        Test that SubDimensions with same name but different attributes do not
        alias to the same SubDimension. Conversely, if the name and the attributes
        are the same, they must alias to the same SubDimension.
        """
        x = Dimension('x')
        xi0 = SubDimension.middle('xi', x, 1, 1)
        xi1 = SubDimension.middle('xi', x, 1, 1)
        assert xi0 is xi1
        xl0 = SubDimension.left('xl', x, 2)
        xl1 = SubDimension.left('xl', x, 2)
        assert xl0 is xl1
        # Reusing the name 'xi' with different parameters must not alias.
        xl2asxi = SubDimension.left('xi', x, 2)
        assert xl2asxi is not xl1
        assert xl2asxi is not xi1
        xr0 = SubDimension.right('xr', x, 1)
        xr1 = SubDimension.right('xr', x, 1)
        assert xr0 is xr1
    def test_default_dimension(self):
        """Test that DefaultDimensions never alias, even with identical names."""
        d = Dimension(name='d')
        dd0 = DefaultDimension(name='d')
        assert d is not dd0
        dd1 = DefaultDimension(name='d')
        assert dd0 is not dd1
    def test_constant_new(self):
        """Test that new Constant instances don't cache."""
        u0 = Constant(name='u')
        u0.data = 6.
        # Each Constant owns its value; no aliasing through the cache.
        u1 = Constant(name='u')
        u1.data = 2.
        assert u0.data == 6.
        assert u1.data == 2.
    def test_grid_objs(self):
        """
        Test that two different Grids use the same Symbols/Dimensions if possible
        (i.e., if already in cache). This is because objects such as spacing and origin
        are Scalars, which carry no value.
        """
        grid0 = Grid(shape=(4, 4))
        x0, y0 = grid0.dimensions
        ox0, oy0 = grid0.origin
        grid1 = Grid(shape=(8, 8))
        x1, y1 = grid1.dimensions
        ox1, oy1 = grid1.origin
        # Dimensions, spacings and origin symbols all alias across Grids.
        assert x0 is x1
        assert y0 is y1
        assert x0.spacing is x1.spacing
        assert y0.spacing is y1.spacing
        assert ox0 is ox1
        assert oy0 is oy1
def test_special_symbols(self):
    """
    This test checks the singletonization, through the caching infrastructure,
    of the special symbols that an Operator may generate (e.g., `nthreads`).
    """
    grid = Grid(shape=(4, 4, 4))
    f = TimeFunction(name='f', grid=grid)
    sf = SparseTimeFunction(name='sf', grid=grid, npoint=1, nt=10)

    eqns = [Eq(f.forward, f + 1.)] + sf.inject(field=f.forward, expr=sf)

    opt = ('advanced', {'par-nested': 0, 'openmp': True})

    # Two Operators compiled from the same input; the thread-related symbols
    # they generate must be the very same (cached) objects
    op0 = Operator(eqns, opt=opt)
    op1 = Operator(eqns, opt=opt)

    nthreads0, nthreads_nested0, nthreads_nonaffine0 =\
        [i for i in op0.input if isinstance(i, NThreadsBase)]
    nthreads1, nthreads_nested1, nthreads_nonaffine1 =\
        [i for i in op1.input if isinstance(i, NThreadsBase)]

    assert nthreads0 is nthreads1
    assert nthreads_nested0 is nthreads_nested1
    assert nthreads_nonaffine0 is nthreads_nonaffine1

    # ThreadID built twice from the same `nthreads` aliases too
    tid0 = ThreadID(op0.nthreads)
    tid1 = ThreadID(op0.nthreads)
    assert tid0 is tid1

    # DeviceID behaves as a singleton
    did0 = DeviceID()
    did1 = DeviceID()
    assert did0 is did1

    # NPThreads caches on (name, size): same size aliases, different size doesn't
    npt0 = NPThreads(name='npt', size=3)
    npt1 = NPThreads(name='npt', size=3)
    npt2 = NPThreads(name='npt', size=4)
    assert npt0 is npt1
    assert npt0 is not npt2
def test_symbol_aliasing(self):
    """Test to assert that our aliasing cache isn't defeated by sympys
    non-aliasing symbol cache.

    For further explanation consider the symbol u[x, y] and it's first
    derivative in x, which includes the symbols u[x, y] and u[x + h, y].
    The two functions are aliased in devito's caching mechanism to allow
    multiple stencil indices pointing at the same data object u, but
    SymPy treats these two instances as separate functions and thus is
    allowed to delete one or the other when the cache is cleared.

    The test below asserts that u[x + h, y] is deleted, the data on u
    is still intact through our own caching mechanism."""
    # Ensure a clean cache to start with
    clear_cache()
    # FIXME: Currently not working, presumably due to our
    # failure to cache new instances?
    # assert(len(_SymbolCache) == 0)

    # Create first instance of u and fill its data
    grid = Grid(shape=(3, 4))
    u = Function(name='u', grid=grid)
    u.data[:] = 6.
    # Weakref to the data lets us check deallocation without keeping it alive
    u_ref = weakref.ref(u.data)

    # Create u[x + h, y] and delete it again
    dx = u.dx  # Contains two u symbols: u[x, y] and u[x + h, y]
    del dx
    clear_cache()
    # FIXME: Unreliable cache sizes
    # assert len(_SymbolCache) == 1  # We still have a reference to u
    assert np.allclose(u.data, 6.)  # u.data is alive and well

    # Remove the final instance and ensure u.data got deallocated
    del u
    clear_cache()
    assert u_ref() is None
def test_symbol_aliasing_reverse(self):
    """Test to assert that removing the original u[x, y] instance does
    not impede our aliasing cache or leaks memory.
    """
    # Ensure a clean cache to start with
    clear_cache()
    # FIXME: Currently not working, presumably due to our
    # failure to cache new instances?
    # assert(len(_SymbolCache) == 0)

    # Create first instance of u and fill its data
    grid = Grid(shape=(3, 4))
    u = Function(name='u', grid=grid)
    u.data[:] = 6.
    u_ref = weakref.ref(u.data)

    # Create derivative and delete original u[x, y]
    dx = u.dx
    del u
    clear_cache()
    # We still have a references to u
    # FIXME: Unreliable cache sizes
    # assert len(_SymbolCache) == 1
    # Ensure u[x + h, y] still holds valid data
    assert np.allclose(dx.evaluate.args[0].args[1].data, 6.)

    # Dropping the derivative releases the last reference to the data
    del dx
    clear_cache()
    # FIXME: Unreliable cache sizes
    # assert len(_SymbolCache) == 0  # We still have a reference to u_h
    assert u_ref() is None
def test_clear_cache(self, operate_on_empty_cache, nx=1000, ny=1000):
    # Each iteration creates a transient Function, which adds exactly two
    # cache entries; clear_cache() must bring the count back to the baseline
    grid = Grid(shape=(nx, ny), dtype=np.float64)
    cache_size = len(_SymbolCache)

    for i in range(10):
        assert(len(_SymbolCache) == cache_size)

        Function(name='u', grid=grid, space_order=2)

        # Both u and u(inds) added to cache
        assert(len(_SymbolCache) == cache_size + 2)

        clear_cache()
def test_clear_cache_with_Csymbol(self, operate_on_empty_cache, nx=1000, ny=1000):
    grid = Grid(shape=(nx, ny), dtype=np.float64)
    cache_size = len(_SymbolCache)

    u = Function(name='u', grid=grid, space_order=2)
    # Both u and u(inds) added to cache
    assert(len(_SymbolCache) == cache_size + 2)

    # Accessing the C-level symbol must be side-effect free w.r.t. the cache
    u._C_symbol
    # Cache size won't change since _C_symbol isn't cached by devito to
    # avoid circular references in the cache
    assert(len(_SymbolCache) == cache_size + 2)
def test_clear_cache_with_alive_symbols(self, operate_on_empty_cache,
                                        nx=1000, ny=1000):
    """
    Test that `clear_cache` doesn't affect caching if an object is still alive.
    """
    grid = Grid(shape=(nx, ny), dtype=np.float64)

    f0 = Function(name='f', grid=grid, space_order=2)
    f1 = Function(name='f', grid=grid, space_order=2)

    # Obviously:
    assert f0 is not f1

    # And clearly, both still alive after a `clear_cache`
    clear_cache()
    assert f0 is not f1
    assert f0.grid.dimensions[0] is grid.dimensions[0]

    # Now we try with symbols
    s0 = Scalar(name='s')
    s1 = Scalar(name='s')

    # Clearly:
    assert s1 is s0

    clear_cache()
    s2 = Scalar(name='s')

    # s2 must still be s1/s0, even after a clear_cache, as s0/s1 are both alive!
    assert s2 is s1

    del s0
    del s1
    s3 = Scalar(name='s')

    # And obviously, still:
    assert s3 is s2

    cache_size = len(_SymbolCache)
    del s2
    del s3
    clear_cache()
    # Only the now-dead Scalar entry disappears from the cache
    assert len(_SymbolCache) == cache_size - 1
def test_sparse_function(self, operate_on_empty_cache):
    """Test caching of SparseFunctions and children objects."""
    grid = Grid(shape=(3, 3))

    init_cache_size = len(_SymbolCache)
    cur_cache_size = len(_SymbolCache)

    u = SparseFunction(name='u', grid=grid, npoint=1, nt=10)

    # created: u, u(inds), p_u, h_p_u, u_coords, u_coords(inds), d, h_d
    ncreated = 8
    assert len(_SymbolCache) == cur_cache_size + ncreated

    cur_cache_size = len(_SymbolCache)

    i = u.inject(expr=u, field=u)

    # created: ii_u_0*2 (Symbol and ConditionalDimension), ii_u_1*2, ii_u_2*2,
    # ii_u_3*2, px, py, posx, posy, u_coords (as indexified),
    ncreated = 2+2+2+2+2+1+1+1
    # Note that injection is now lazy so no new symbols should be created
    assert len(_SymbolCache) == cur_cache_size
    # Forcing evaluation materializes the injection symbols
    i.evaluate

    assert len(_SymbolCache) == cur_cache_size + ncreated

    # No new symbolic objects are created
    u.inject(expr=u, field=u)
    assert len(_SymbolCache) == cur_cache_size + ncreated

    # Let's look at clear_cache now
    del u
    del i
    clear_cache()
    # At this point, not all children objects have been cleared. In particular, the
    # ii_u_* Symbols are still alive, as well as p_u and h_p_u. This is because
    # in the first clear_cache they were still referenced by their "parent" objects
    # (e.g., ii_u_* by ConditionalDimensions, through `condition`)
    assert len(_SymbolCache) == init_cache_size + 8
    clear_cache()
    # Now we should be back to the original state
    assert len(_SymbolCache) == init_cache_size
def test_after_indexification(self):
    """
    Test to assert that the SymPy cache retrieves the right Devito data object
    after indexification.
    """
    grid = Grid(shape=(4, 4, 4))
    functions = [Function(name='u', grid=grid, space_order=so)
                 for so in (0, 1, 2)]

    for f in functions:
        indexed = f.indexify()
        # Round-tripping `indexed` through a SymPy expression must hand back
        # an object still carrying this function's space_order
        assert indexed.base.function.space_order == \
            (indexed + 1.).args[1].base.function.space_order
def test_reinsertion_after_deletion(self, operate_on_empty_cache):
    """
    Test that dead weakrefs in the symbol cache do not cause any issues when
    objects with the same key/hash are reinserted.
    """
    d = Dimension(name='d')

    del d

    # `d` has just been deleted, but a weakref pointing to a dead object is still
    # in the symbol cache at this point; `h_d` is still in the cache too, dead too
    assert len(_SymbolCache) == 2
    assert all(i() is None for i in _SymbolCache.values())

    # Re-creating `d` must simply overwrite the dead weakrefs in place
    d = Dimension(name='d')  # noqa
    assert len(_SymbolCache) == 2
    assert all(i() is not None for i in _SymbolCache.values())
@pytest.mark.parametrize('FunctionType', [VectorFunction, TensorFunction,
                                          VectorTimeFunction, TensorTimeFunction])
def test_tensor_different_indices(self, FunctionType):
    """Test caching of u[x + h, y] instance from derivative"""
    grid = Grid(shape=(3, 4))
    tensor = FunctionType(name='u', grid=grid)
    for component in tensor:
        component.data[:] = 6.

    # Dig the shifted-index instance u[x + h, y] out of the evaluated
    # derivative; it must see the same data as the original components
    shifted = tensor.dx.evaluate[0].args[0].args[1]
    assert np.allclose(shifted.data, tensor[0].data)
@pytest.mark.parametrize('FunctionType', [VectorFunction, TensorFunction,
                                          VectorTimeFunction, TensorTimeFunction])
def test_tensor_same_indices(self, FunctionType):
    """Test caching of derived u[x, y] instance from derivative"""
    grid = Grid(shape=(3, 4))
    tensor = FunctionType(name='u', grid=grid)
    for component in tensor:
        component.data[:] = 6.

    # u(x, y) and u(x + h_x, y) as retrieved from the derivative expression;
    # both must still hold the data written above
    unshifted = tensor.dx.evaluate[0].args[1].args[2]
    shifted = tensor.dx.evaluate[1].args[0].args[1]
    assert np.allclose(unshifted.data, 6.)
    assert np.allclose(shifted.data, 6.)
@pytest.mark.parametrize('FunctionType', [VectorFunction, TensorFunction,
                                          VectorTimeFunction, TensorTimeFunction])
def test_tensor_new(self, FunctionType):
    """Test that new u[x, y] instances don't cache"""
    grid = Grid(shape=(3, 4))
    u0 = FunctionType(name='u', grid=grid)
    for s in u0:
        s.data[:] = 6.
    u1 = FunctionType(name='u', grid=grid)
    for s in u1:
        s.data[:] = 2.
    # Bug fix: `np.all` applied to a generator expression wraps it into a 0-d
    # object array, which is always truthy, so the original assertion could
    # never fail. The builtin `all` actually consumes the generator.
    assert all(np.allclose(s.data, 6.) for s in u0)
class TestMemoryLeaks(object):

    """
    Tests ensuring there are no memory leaks.
    """

    def test_operator_leakage_function(self):
        """
        Test to ensure that Operator creation does not cause memory leaks for
        (Time)Functions.
        """
        grid = Grid(shape=(5, 6))
        f = Function(name='f', grid=grid)
        g = TimeFunction(name='g', grid=grid)

        # Take weakrefs to test whether symbols are dead or alive
        w_f = weakref.ref(f)
        w_g = weakref.ref(g)

        # Create operator and delete everything again
        op = Operator(Eq(f, 2 * g))
        w_op = weakref.ref(op)
        del op
        del f
        del g
        clear_cache()

        # Test whether things are still hanging around
        assert w_f() is None
        assert w_g() is None
        assert w_op() is None

    def test_operator_leakage_sparse(self):
        """
        Test to ensure that Operator creation does not cause memory leaks for
        SparseTimeFunctions.
        """
        grid = Grid(shape=(5, 6))
        a = Function(name='a', grid=grid)
        s = SparseTimeFunction(name='s', grid=grid, npoint=1, nt=1)

        # Weakrefs reveal whether the objects were truly deallocated
        w_a = weakref.ref(a)
        w_s = weakref.ref(s)

        # Create operator and delete everything again
        op = Operator(s.interpolate(a))
        w_op = weakref.ref(op)
        del op
        del s
        del a
        clear_cache()

        # Test whether things are still hanging around
        assert w_a() is None
        assert w_s() is None
        assert w_op() is None

    def test_solve(self, operate_on_empty_cache):
        """
        Test to ensure clear_cache wipes out *all of* sympy caches. ``sympy.solve``,
        in particular, relies on a series of private caches that must be purged too
        (calling sympy's clear_cache() API function isn't enough).
        """
        grid = Grid(shape=(4,))

        u = TimeFunction(name='u', grid=grid, time_order=1, space_order=2)

        eqn = Eq(u.dt, u.dx2)
        solve(eqn, u.forward)

        del u
        del eqn
        del grid

        # We only deleted `u`, however we also cache shifted version created by the
        # finite difference (u.dt, u.dx2). In this case we have three extra references
        # to u(t + dt), u(x - h_x) and u(x + h_x) that have to be cleared.
        # Then `u` points to the various Dimensions, the Dimensions point to the various
        # spacing symbols, hence, we need four sweeps to clear up the cache.
        assert len(_SymbolCache) == 14
        clear_cache()
        assert len(_SymbolCache) == 9
        clear_cache()
        assert len(_SymbolCache) == 3
        clear_cache()
        assert len(_SymbolCache) == 1
        clear_cache()
        assert len(_SymbolCache) == 0
| 36.211321 | 90 | 0.596811 |
4f48626ac19168d4fab97edd1ec3c65e3f412a79 | 1,124 | py | Python | src/odcprovider/constants.py | 52North/pygeoapi-odc-provider | 81d6b6aa35836a4b6efa634153557b6db667a099 | [
"Apache-2.0"
] | 5 | 2021-11-09T13:36:41.000Z | 2022-02-11T15:54:27.000Z | src/odcprovider/constants.py | MartinPontius/pygeoapi-odc-provider | 81d6b6aa35836a4b6efa634153557b6db667a099 | [
"Apache-2.0"
] | 5 | 2021-07-07T15:45:04.000Z | 2021-09-08T13:04:05.000Z | src/odcprovider/constants.py | 52North/pygeoapi-odc-provider | 81d6b6aa35836a4b6efa634153557b6db667a099 | [
"Apache-2.0"
] | 1 | 2021-07-21T22:23:09.000Z | 2021-07-21T22:23:09.000Z | # =================================================================
# Copyright (C) 2021-2021 52°North Spatial Information Research GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =================================================================
# Fallback application name passed when initialising a datacube object;
# triggers the datacube.conf auto-discovery feature.
DEFAULT_APP = "pygeoapi_provider"
"""
Used for datacube object initialization to trigger datacube.conf find feature
"""

# str.format template applied to bounding-box coordinates.
BBOX_COORD_PRECISION = "{:.4f}"
"""
Format string for BoundBox coordinates. Limits float output to four decimal places
"""

# Relative path of the pickled ODC metadata cache file.
CACHE_PICKLE = 'ogc-tb-17/DATA/odc_cache.pickle'
"""
Path to ODC cache pickle file
Defaults to /ogc-tb-17/DATA/odc_cache.pickle
"""
d5e20bf27f2c4773e7cc28400446dc7b91e03aac | 1,676 | py | Python | {{cookiecutter.out_dir}}/src/{{cookiecutter.django_project_name}}/settings/dev.py | corpusops/cookiecutter-django | 71df2fc679535554a503aed8c170db42c5a02b29 | [
"BSD-3-Clause"
] | 1 | 2020-11-30T08:21:17.000Z | 2020-11-30T08:21:17.000Z | {{cookiecutter.out_dir}}/src/{{cookiecutter.django_project_name}}/settings/dev.py | corpusops/cookiecutter-django | 71df2fc679535554a503aed8c170db42c5a02b29 | [
"BSD-3-Clause"
] | 4 | 2019-08-19T15:39:45.000Z | 2021-02-16T10:20:28.000Z | {{cookiecutter.out_dir}}/src/{{cookiecutter.django_project_name}}/settings/dev.py | corpusops/cookiecutter-django | 71df2fc679535554a503aed8c170db42c5a02b29 | [
"BSD-3-Clause"
] | 2 | 2019-08-19T14:52:06.000Z | 2020-07-31T12:34:21.000Z | # -*- coding: utf-8 -*-
"""Development Django settings (cookiecutter template).

Rendered at project-generation time: the ``{% ... %}`` / ``{%- ... %}``
markers are Jinja2 template tags, not Python.
"""
from __future__ import absolute_import, division, print_function
import logging
import os
try:
    # six was bundled with Django < 3.0; fall back to the standalone package
    from django.utils import six
except ImportError:
    import six
from .base import *  # noqa
os.environ['RELATIVE_SETTINGS_MODULE'] = '.dev'
INSTALLED_APPS += tuple([  # noqa
    {%- if cookiecutter.with_toolbar %}
    'debug_toolbar',
    {%- endif%}
    {%- if cookiecutter.with_djextensions %}
    'django_extensions',
    {%- endif%}
])
SECRET_KEY = os.environ.get('SECRET_KEY', 'secretkey-superhot-12345678')
ALLOWED_HOSTS = ['*']
CORS_ORIGIN_ALLOW_ALL = True
# INTERNAL_IPS = ('127.0.0.1',) # Used by app debug_toolbar
DEBUG = True
# Force every loggers to use console handler only. Note that using 'root'
# logger is not enough if children don't propagate.
for logger in six.itervalues(LOGGING['loggers']):  # noqa
    logger['handlers'] = ['console']
# Log every level.
LOGGING['handlers']['console']['level'] = logging.NOTSET  # noqa
MIDDLEWARE += tuple([
    {% if cookiecutter.with_toolbar %}'debug_toolbar.middleware.DebugToolbarMiddleware',{%endif %}
])
DEBUG_TOOLBAR_CONFIG = {
    # Always show the toolbar in dev, regardless of INTERNAL_IPS
    'SHOW_TOOLBAR_CALLBACK': lambda x: True,
}
{% if cookiecutter.cache_only_in_prod %}
# deactivate cache except if we have set either:
# - ENABLE_CACHE_IN_DEV
# - DJANGO__ENABLE_CACHE_IN_DEV=true (envvar)
try:
    ENABLE_CACHE_IN_DEV  # noqa
except NameError:
    CACHES = {
        'default': {
            'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
        }
    }
{% endif %}
USE_DJANGO_EXTENSIONS = True
locs_, globs_, env = post_process_settings(locals())
globals().update(globs_)
try:
    from .local import *  # noqa
except ImportError:
    pass
| 25.784615 | 98 | 0.701074 |
4b174e8bd238f7beeda0c733c705f96991215ad0 | 1,199 | py | Python | dynamic_linear_nets.py | Srujan35007/TensorFlow | 1fdacd1b0190fd08e7946a15210bbc553e001fea | [
"MIT"
] | null | null | null | dynamic_linear_nets.py | Srujan35007/TensorFlow | 1fdacd1b0190fd08e7946a15210bbc553e001fea | [
"MIT"
] | null | null | null | dynamic_linear_nets.py | Srujan35007/TensorFlow | 1fdacd1b0190fd08e7946a15210bbc553e001fea | [
"MIT"
] | null | null | null | import time
b = time.time()  # stopwatch start: measure how long the imports take
import os
# Silence TensorFlow's C++ log output; set *before* importing tensorflow
# so the environment variable takes effect
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
from tensorflow.keras import layers, models, datasets
a = time.time()
print(f'Imports complete in {a-b} seconds')
def get_flat_shape(foo_shape):
    """Return the number of elements in a tensor of shape ``foo_shape``.

    Works for any iterable of ints (e.g. ``(28, 28, 1)`` -> 784); an empty
    shape yields 1, matching the original manual-product behaviour.
    """
    import math  # local import keeps the script's top-level import block untouched
    return math.prod(foo_shape)
# __________TWEAKABLES____________
raw_input_shape = (28, 28, 1)  # Height Width Channels
flat_input_shape = get_flat_shape(raw_input_shape)
hidden_layer_units = [200, 100, 30]
n_out_classes = 10
classification = True
# ________________________________

# Build an MLP: flatten -> one dense layer as wide as the flat input ->
# configurable hidden stack -> output head
model = models.Sequential()
model.add(layers.Flatten(input_shape=raw_input_shape))
model.add(layers.Dense(flat_input_shape, activation='relu'))
for units in hidden_layer_units:
    model.add(layers.Dense(units, activation='relu'))
# Output head: softmax over classes for classification, sigmoid otherwise
if classification:
    model.add(layers.Dense(n_out_classes, activation='softmax'))
else:
    model.add(layers.Dense(n_out_classes, activation='sigmoid'))
model.summary()

loss_fn = 'sparse_categorical_crossentropy' if classification else 'mse'
# Bug fix: the metric name was misspelled 'acciracy', which makes
# model.compile raise ValueError (unknown metric identifier).
model.compile(loss=loss_fn, optimizer='adam', metrics=['accuracy'])
print('Model compiled')
2307e329bbc9e0e7669f2a0a7640081c32809989 | 2,395 | py | Python | mangaloid_instance/sync/utils.py | zhet1c/instance | 301e28c47bb4d18b26329dea81b7f45705d18670 | [
"MIT"
] | null | null | null | mangaloid_instance/sync/utils.py | zhet1c/instance | 301e28c47bb4d18b26329dea81b7f45705d18670 | [
"MIT"
] | null | null | null | mangaloid_instance/sync/utils.py | zhet1c/instance | 301e28c47bb4d18b26329dea81b7f45705d18670 | [
"MIT"
] | null | null | null | from json import dumps
from asyncio import get_event_loop
from base64 import b64encode, b64decode
from concurrent.futures.process import ProcessPoolExecutor
from json import loads

from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization, hashes
from cryptography.hazmat.primitives.asymmetric import padding
from cryptography.hazmat.primitives.asymmetric import rsa
async def _run_async(call, *args):
    """Run blocking ``call(*args)`` in a worker process without blocking the
    event loop, returning its result.

    NOTE(review): a fresh ProcessPoolExecutor is spawned and torn down on
    every call — fine for occasional key generation, wasteful at high call
    rates; consider a shared pool if this becomes hot.
    """
    with ProcessPoolExecutor() as pool:
        return await get_event_loop().run_in_executor(pool, call, *args)
def _get_key_pair():
    """Generate a fresh 4096-bit RSA key pair.

    Returns a ``(private_pem, public_pem)`` tuple of PEM-encoded strings;
    the private key is serialized unencrypted in PKCS#8 format.
    """
    private_key = rsa.generate_private_key(
        public_exponent=65537,
        key_size=4096,
        backend=default_backend())

    private_pem = private_key.private_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PrivateFormat.PKCS8,
        encryption_algorithm=serialization.NoEncryption()).decode("utf-8")

    public_pem = private_key.public_key().public_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PublicFormat.SubjectPublicKeyInfo).decode("utf-8")

    return private_pem, public_pem
def _create_sync_payload(dc, key):
    """Serialize dict ``dc`` to JSON, RSA-OAEP(SHA-256)-encrypt it with the
    PEM public key ``key``, and return the ciphertext Base64-encoded as an
    ASCII string.

    NOTE(review): plain RSA caps the plaintext size well below the key size;
    large dicts will presumably raise — confirm callers keep payloads small.
    """
    string = dumps(dc).encode("utf-8")
    pkey = serialization.load_pem_public_key(key.encode("utf-8"), backend=default_backend())
    return b64encode(pkey.encrypt(string, padding.OAEP(
        mgf=padding.MGF1(algorithm=hashes.SHA256()),
        algorithm=hashes.SHA256(),
        label=None
    ))).decode("ascii")
def _get_sync_payload(b64, key):
    """Decrypt a Base64-encoded RSA-OAEP(SHA-256) payload with the PEM
    private key ``key`` and parse the decrypted JSON into a dictionary.

    Bug fix: the original body called ``loads`` although only ``dumps`` was
    ever imported from ``json``, so every call raised NameError.
    """
    ciphertext = b64decode(b64)
    pkey = serialization.load_pem_private_key(
        key.encode("utf-8"), None, backend=default_backend())
    plaintext = pkey.decrypt(ciphertext, padding.OAEP(
        mgf=padding.MGF1(algorithm=hashes.SHA256()),
        algorithm=hashes.SHA256(),
        label=None
    ))
    return loads(plaintext.decode("utf-8"))
async def get_key_pair():
    """Generate an RSA key pair off the event loop.

    Returns a ``(private_pem, public_pem)`` tuple of PEM strings.
    """
    pem_pair = await _run_async(_get_key_pair)
    return pem_pair
async def create_sync_payload(dc, key):
    """Encrypt dictionary ``dc`` for the holder of public key ``key`` (PEM).

    The dict is JSON-serialized, RSA-encrypted in a worker process, and
    returned as a Base64-encoded string.
    """
    payload = await _run_async(_create_sync_payload, dc, key)
    return payload
async def get_sync_payload(b64, key):
    """Inverse of :func:`create_sync_payload`: decrypt the Base64 string
    ``b64`` with private key ``key`` (PEM) and return the decoded dictionary."""
    payload = await _run_async(_get_sync_payload, b64, key)
    return payload
eb0831f7d6d54bb15d2aa59ba4d21808afbd62d5 | 4,606 | py | Python | Lib/test/test_importlib/import_/test_packages.py | oleksandr-pavlyk/cpython | eb002dbe0da9622245a355db5f0cd5aa2fc70b40 | [
"0BSD"
] | 5 | 2021-12-03T23:11:53.000Z | 2022-01-08T21:02:50.000Z | Lib/test/test_importlib/import_/test_packages.py | dalakatt/cpython | 2f49b97cc5426087b46515254b9a97a22ee8c807 | [
"0BSD"
] | 8 | 2022-01-07T11:31:11.000Z | 2022-03-04T00:07:16.000Z | Lib/test/test_importlib/import_/test_packages.py | dalakatt/cpython | 2f49b97cc5426087b46515254b9a97a22ee8c807 | [
"0BSD"
] | 3 | 2017-10-18T09:35:14.000Z | 2018-09-09T16:40:13.000Z | from test.test_importlib import util
import sys
import unittest
from test import support
from test.support import import_helper
class ParentModuleTests:

    """Importing a submodule should import the parent modules.

    Mixin: ``self.__import__`` is bound by ``util.test_both`` at the bottom
    of the module, once per import-machinery variant.
    """

    def test_import_parent(self):
        # Importing 'pkg.module' must leave 'pkg' in sys.modules too
        with util.mock_spec('pkg.__init__', 'pkg.module') as mock:
            with util.import_state(meta_path=[mock]):
                module = self.__import__('pkg.module')
                self.assertIn('pkg', sys.modules)

    def test_bad_parent(self):
        # A missing parent package surfaces as ImportError naming the parent
        with util.mock_spec('pkg.module') as mock:
            with util.import_state(meta_path=[mock]):
                with self.assertRaises(ImportError) as cm:
                    self.__import__('pkg.module')
                self.assertEqual(cm.exception.name, 'pkg')

    def test_raising_parent_after_importing_child(self):
        def __init__():
            import pkg.module
            1/0
        mock = util.mock_spec('pkg.__init__', 'pkg.module',
                              module_code={'pkg': __init__})
        with mock:
            with util.import_state(meta_path=[mock]):
                with self.assertRaises(ZeroDivisionError):
                    self.__import__('pkg')
                # The failing parent is removed, but the successfully
                # imported child stays in sys.modules
                self.assertNotIn('pkg', sys.modules)
                self.assertIn('pkg.module', sys.modules)
                with self.assertRaises(ZeroDivisionError):
                    self.__import__('pkg.module')
                self.assertNotIn('pkg', sys.modules)
                self.assertIn('pkg.module', sys.modules)

    def test_raising_parent_after_relative_importing_child(self):
        def __init__():
            from . import module
            1/0
        mock = util.mock_spec('pkg.__init__', 'pkg.module',
                              module_code={'pkg': __init__})
        with mock:
            with util.import_state(meta_path=[mock]):
                with self.assertRaises((ZeroDivisionError, ImportError)):
                    # This raises ImportError on the "from . import module"
                    # line, not sure why.
                    self.__import__('pkg')
                self.assertNotIn('pkg', sys.modules)
                with self.assertRaises((ZeroDivisionError, ImportError)):
                    self.__import__('pkg.module')
                self.assertNotIn('pkg', sys.modules)
                # XXX False
                #self.assertIn('pkg.module', sys.modules)

    def test_raising_parent_after_double_relative_importing_child(self):
        def __init__():
            from ..subpkg import module
            1/0
        mock = util.mock_spec('pkg.__init__', 'pkg.subpkg.__init__',
                              'pkg.subpkg.module',
                              module_code={'pkg.subpkg': __init__})
        with mock:
            with util.import_state(meta_path=[mock]):
                with self.assertRaises((ZeroDivisionError, ImportError)):
                    # This raises ImportError on the "from ..subpkg import module"
                    # line, not sure why.
                    self.__import__('pkg.subpkg')
                self.assertNotIn('pkg.subpkg', sys.modules)
                with self.assertRaises((ZeroDivisionError, ImportError)):
                    self.__import__('pkg.subpkg.module')
                self.assertNotIn('pkg.subpkg', sys.modules)
                # XXX False
                #self.assertIn('pkg.subpkg.module', sys.modules)

    def test_module_not_package(self):
        # Try to import a submodule from a non-package should raise ImportError.
        assert not hasattr(sys, '__path__')
        with self.assertRaises(ImportError) as cm:
            self.__import__('sys.no_submodules_here')
        self.assertEqual(cm.exception.name, 'sys.no_submodules_here')

    def test_module_not_package_but_side_effects(self):
        # If a module injects something into sys.modules as a side-effect, then
        # pick up on that fact.
        name = 'mod'
        subname = name + '.b'
        def module_injection():
            sys.modules[subname] = 'total bunk'
        mock_spec = util.mock_spec('mod',
                                   module_code={'mod': module_injection})
        with mock_spec as mock:
            with util.import_state(meta_path=[mock]):
                try:
                    submodule = self.__import__(subname)
                finally:
                    import_helper.unload(subname)
# util.test_both stamps out two concrete TestCase classes from the mixin,
# one per import implementation, both driven through util.__import__
(Frozen_ParentTests,
 Source_ParentTests
 ) = util.test_both(ParentModuleTests, __import__=util.__import__)


if __name__ == '__main__':
    unittest.main()
| 41.125 | 82 | 0.576639 |
75a06865c335831bfde26f0e43be77d0d61f7a3b | 4,845 | py | Python | Kaggle/Playgroud/RiskPrediction/Home-Credit-Default-Risk-master/py_prev/trash/903_for_f704.py | hehuanlin123/DeepLearning | 6b7feabbbde9ac9489f76da4c06eeb6703fb165a | [
"MIT"
] | 1 | 2020-02-28T12:03:39.000Z | 2020-02-28T12:03:39.000Z | Kaggle/Playgroud/RiskPrediction/Home-Credit-Default-Risk-master/py_prev/trash/903_for_f704.py | hehuanlin123/DeepLearning | 6b7feabbbde9ac9489f76da4c06eeb6703fb165a | [
"MIT"
] | null | null | null | Kaggle/Playgroud/RiskPrediction/Home-Credit-Default-Risk-master/py_prev/trash/903_for_f704.py | hehuanlin123/DeepLearning | 6b7feabbbde9ac9489f76da4c06eeb6703fb165a | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 28 22:20:21 2018

@author: Kazuki

Trains a GroupKFold (grouped on SK_ID_CURR) LightGBM binary classifier on the
top-HEAD previous-application features restricted to a one-year
DAYS_DECISION window, and saves the out-of-fold / fold-averaged predictions
as feather files for feature f704.
"""
import gc, os
from tqdm import tqdm
import pandas as pd
import sys
sys.path.append(f'/home/{os.environ.get("USER")}/PythonLibrary')
import lgbextension as ex
import lightgbm as lgb
from multiprocessing import cpu_count
#from glob import glob
from sklearn.model_selection import GroupKFold
import count
import utils_cat
import utils
utils.start(__file__)
#==============================================================================

HEAD = 300  # number of top-importance features to load
NFOLD = 5
SEED = 71

# DAYS_DECISION window (days relative to application date) to train on
day_start = -365*1 # min: -2922
day_end = -365*0 # min: -2922

param = {
    'objective': 'binary',
    'metric': 'auc',

    'learning_rate': 0.02,
    'max_depth': 6,
    'num_leaves': 63,
    'max_bin': 255,

    'min_child_weight': 10,
    'min_data_in_leaf': 150,
    'reg_lambda': 0.5, # L2 regularization term on weights.
    'reg_alpha': 0.5, # L1 regularization term on weights.

    'colsample_bytree': 0.9,
    'subsample': 0.9,
    'nthread': 32,
#    'nthread': cpu_count(),
    'bagging_freq': 1,
    'verbose':-1,
    'seed': SEED
}

# Feature importance ranking from a previous run; drop the f101_app_ family
imp = pd.read_csv('LOG/imp_0728.py.csv').sort_values('total', ascending=False)
imp = imp[~imp.feature.str.startswith('f101_app_')].reset_index(drop=True)

# Row indices falling inside the DAYS_DECISION window, for train and test
val = utils.read_pickles('../data/prev_train', ['DAYS_DECISION'])
tr_ind = val[val['DAYS_DECISION'].between(day_start, day_end)].index

val = utils.read_pickles('../data/prev_test', ['DAYS_DECISION'])
te_ind = val[val['DAYS_DECISION'].between(day_start, day_end)].index

# =============================================================================
# load train
# =============================================================================
files = ('../feature_prev/train_' + imp.head(HEAD).feature + '.f').tolist()

X_train = pd.concat([
                pd.read_feather(f) for f in tqdm(files, mininterval=60)
                ], axis=1).iloc[tr_ind]
y_train = utils.read_pickles('../data/prev_label').iloc[tr_ind].TARGET

if X_train.columns.duplicated().sum()>0:
    raise Exception(f'duplicated!: { X_train.columns[X_train.columns.duplicated()] }')
print('no dup :) ')
print(f'X_train.shape {X_train.shape}')

gc.collect()

# Bookkeeping frame: target, per-customer row count, weight and fold group
sub_train = utils.read_pickles('../data/prev_train', ['SK_ID_CURR', 'SK_ID_PREV']).set_index('SK_ID_CURR').iloc[tr_ind]
sub_train['y'] = y_train.values
sub_train['cnt'] = sub_train.index.value_counts()
sub_train['w'] = 1 / sub_train.cnt.values

group_kfold = GroupKFold(n_splits=NFOLD)

# Fold id derived from SK_ID_CURR so all rows of a customer share a fold
sub_train['g'] = sub_train.index % NFOLD

CAT = list( set(X_train.columns)&set(utils_cat.ALL))

# =============================================================================
# load test
# =============================================================================
files = ('../feature_prev/test_' + imp.head(HEAD).feature + '.f').tolist()

X_test = pd.concat([
                pd.read_feather(f) for f in tqdm(files, mininterval=60)
                ], axis=1).iloc[te_ind]

sub_test = utils.read_pickles('../data/prev_test', ['SK_ID_CURR', 'SK_ID_PREV']).set_index('SK_ID_CURR').iloc[te_ind]

# =============================================================================
# predict with early stopping
# =============================================================================
sub_train['y_pred'] = 0
sub_test['y_pred'] = 0

sub_train.reset_index(inplace=True)
sub_test.reset_index(inplace=True)

for train_index, valid_index in group_kfold.split(X_train, sub_train.y, sub_train.g):
    dtrain = lgb.Dataset(X_train.iloc[train_index], sub_train.iloc[train_index].y,
                         categorical_feature=CAT)
    dvalid = lgb.Dataset(X_train.iloc[valid_index], sub_train.iloc[valid_index].y,
                         categorical_feature=CAT)
    model = lgb.train(params=param, train_set=dtrain, num_boost_round=9999,
                      valid_sets=[dtrain, dvalid],
                      valid_names=['train','valid'],
                      early_stopping_rounds=100,
                      #evals_result=evals_result,
                      verbose_eval=50
                      )
    # Out-of-fold predictions into the last column ('y_pred') of sub_train;
    # test predictions accumulated for fold averaging
    sub_train.iloc[valid_index, -1] = model.predict(X_train.iloc[valid_index])
    sub_test['y_pred'] += model.predict(X_test)

sub_test['y_pred'] /= NFOLD

print('train:', sub_train.y_pred.describe())
print('test:', sub_test.y_pred.describe())

# =============================================================================
# save
# =============================================================================
sub_train.to_feather('../data/prev_train_imputation_f704.f')
sub_test.to_feather('../data/prev_test_imputation_f704.f')

#==============================================================================
utils.end(__file__)
| 32.086093 | 119 | 0.544272 |
b2238d16f3b3d4036d9a6a373e4d9f8995654082 | 5,172 | py | Python | models/5-Deepflash2/Labels.py | cns-iu/HuBMAP---Hacking-the-Kidney | 1a41c887f8edb0b52f5afade384a17dc3d3efec4 | [
"MIT"
] | null | null | null | models/5-Deepflash2/Labels.py | cns-iu/HuBMAP---Hacking-the-Kidney | 1a41c887f8edb0b52f5afade384a17dc3d3efec4 | [
"MIT"
] | null | null | null | models/5-Deepflash2/Labels.py | cns-iu/HuBMAP---Hacking-the-Kidney | 1a41c887f8edb0b52f5afade384a17dc3d3efec4 | [
"MIT"
] | null | null | null | # <h1> HubMap - Hacking the Kidney </h1>
# <h3> Goal - Mapping the human body at functional tissue unit level - detect crypt FTUs in colon </h3>
#
# Implementation of Kaggle Notebook - Innovation Prize Winner - Deep Flash2 <br>
# Description - Create segmentation masks from RLE and a probability density function (PDF) for efficient sampling from mask and anatomical structure. <br>
# Input - train.csv (csv file containing rle format mask), HuBMAP-20-dataset_information.csv (csv containing meta data about the images), downscaled images <br>
# - Sampling weight for cortex regions (`cortex_value`): 0.5
# - Sampling weight for medulla regions (`medulla_value`): 0.25
# - Sampling weight for other regions (`fbr`, foreground-background-ratio): 0.01
#
# Output - downscaled masks, roi-stats.csv (csv containing pdfs for each image) <br>
#
# <b>How to use?</b><br>
# Change the basepath to where your data lives and you're good to go. <br>
#
# Link to the original notebook - https://www.kaggle.com/matjes/hubmap-efficient-sampling-ii-deepflash2#HuBMAP-masks-and-probability-density-function <hr>
#
# <h6> Step 1 - Import useful libraries<h6>
import cv2, zarr, gc
import matplotlib.pyplot as plt, numpy as np, pandas as pd
from pathlib import Path
gc.enable()

import time
start = time.time()  # wall-clock reference used with elapsed_time()
def elapsed_time(start_time):
    """Return the number of seconds elapsed since ``start_time``.

    ``start_time`` is a reference timestamp as produced by ``time.time()``.
    """
    now = time.time()
    return now - start_time
# from https://www.kaggle.com/paulorzp/rle-functions-run-lenght-encode-decode
def rle2mask(mask_rle, shape):
    '''
    Decode a run-length-encoded mask string into a binary numpy array.

    mask_rle: run-length as string formatted "start length start length ...",
              with 1-based, column-major pixel positions
    shape: (width, height); the returned array is the transpose, i.e.
           (height, width)
    Returns a uint8 numpy array: 1 - mask, 0 - background
    '''
    tokens = np.asarray(mask_rle.split(), dtype=int)
    run_starts = tokens[0::2] - 1   # RLE positions are 1-based
    run_lengths = tokens[1::2]

    flat = np.zeros(shape[0] * shape[1], dtype=np.uint8)
    for begin, length in zip(run_starts, run_lengths):
        flat[begin:begin + length] = 1

    # Runs are column-major, hence the reshape followed by a transpose
    return flat.reshape(shape).T
# <h6> Step 2 - Set paths and configuration </h6>
BASE_PATH = r'/N/slate/soodn/'
dataset = "colon"
# dataset = "kidney"
INPUT_PATH = BASE_PATH+'hubmap-'+dataset+'-segmentation'

# Ground-truth RLE masks (one row per image id)
df_train = pd.read_csv(INPUT_PATH+"/train.csv")
# Per-image metadata, incl. width_pixels/height_pixels used for decoding
df_info = pd.read_csv(INPUT_PATH+"/HuBMAP-20-dataset_information.csv")
# Zarr group with per-image region maps at scale 2 (read below as grp_pdf[idx])
grp_pdf = zarr.open_group(f'output_{dataset}/images_scale2/')
class CONFIG:
    """Static configuration for mask downscaling and sampling probabilities."""
    # NOTE(review): the file header above quotes weights 0.5/0.25/0.01 which
    # differ from the values here — confirm which set is current.
    scale = 2 # Downscale final mask by factor 2
    cdf_size = 512 # Downscale CDF for memory efficient loading during training
    bg_p = 0.1 # Background Probability
    cortex_p = 0.7 # Cortex Probability
    medulla_p = 0.2 # Medulla Probability
# Single shared configuration instance used by the rest of the script
cfg = CONFIG()
# Output
root = zarr.group(f'output_{dataset}/masks_scale{cfg.scale}')
# Saving cdf in 'pdfs' due to naming conventions for sampling during training in deepflash2
g_msk, g_pdf, g_cdf = root.create_groups('labels', 'pdfs', 'cdfs', overwrite=True)
if dataset == "colon":
    # Colon csv names the RLE column 'predicted'; normalise it, and drop
    # one excluded image (reason not recorded here).
    df_train = df_train.rename(columns={"predicted":"encoding"})
    df_train = df_train[df_train.id != 'HandE_B005_CL_b_RGB_topright']
# Index by image id so the loop below can address rows by filename
df_train.index = df_train.id
# <h6> Step 3 - Resize the masks and find the pdf for each image </h6>
df_list = []
for idx, row in df_train.iterrows():
    # Get image info
    filename = idx
    img_info = df_info[df_info.image_file==filename]
    shape = (img_info.height_pixels.values[0], img_info.width_pixels.values[0])
    # rle2mask expects (width, height) and returns (height, width)
    msk = rle2mask(row.encoding, (shape[1], shape[0])).astype('uint8')
    # Plot
    fig, ax = plt.subplots(ncols=2, figsize=(15,15))
    # Preserve aspect ratio when downscaling for display/CDF storage
    resize_w = int((msk.shape[1]/msk.shape[0])*cfg.cdf_size)
    ax[0].imshow(cv2.resize(msk, dsize=(resize_w, cfg.cdf_size)))
    ax[0].set_title('Mask')
    ax[0].set_axis_off()
    # Load the pre-computed per-image array from the zarr group into memory
    pdf = grp_pdf[idx][:]
    if cfg.scale!=1:
        new_size = (msk.shape[1] // cfg.scale, msk.shape[0] // cfg.scale)
        print('Scaling to', new_size)
        msk = cv2.resize(msk, new_size)
        pdf = cv2.resize(pdf, new_size)
    # Turn the region-label array into sampling probabilities.
    # NOTE(review): background weights are assigned before mask pixels are
    # zeroed, so masked pixels never receive probability — presumably
    # intentional (they are excluded from sampling); confirm.
    pdf = pdf.astype('float32')
    pdf[pdf==0] = cfg.bg_p/np.sum(pdf==0)
    pdf[msk>0] = 0
    pdf[pdf==1] = cfg.cortex_p/np.sum(pdf==1)
    pdf[pdf==2] = cfg.medulla_p/np.sum(pdf==2)
    print('Getting glomeruli stats')
    # Connected components of the binary mask give per-ROI statistics;
    # component 0 is the background and is dropped below.
    nb_components, output, stats, centroids = cv2.connectedComponentsWithStats(msk, connectivity=4)
    print(f'Found {nb_components} glomeruli')
    df_centroids = pd.DataFrame(centroids[1:], columns=['cy', 'cx'])
    df_centroids = df_centroids.join(pd.DataFrame(stats[1:], columns=['left', 'top', 'width', 'height', 'area']))
    df_centroids['idx'] = idx
    df_centroids.reset_index(inplace=True)
    df_centroids.set_index(['idx', 'index'], inplace=True)
    df_list.append(df_centroids)
    # Saving
    g_msk[idx] = msk
    g_pdf[idx] = pdf
    # Saving cdf
    pdf = cv2.resize(pdf, dsize=(resize_w, cfg.cdf_size))
    # np.cumsum without an axis flattens the array; the stored CDF is 1-D
    g_cdf[idx] = np.cumsum(pdf/np.sum(pdf))
    ax[1].imshow(pdf)
    ax[1].set_title('Probability density function for sampling')
    ax[1].set_axis_off()
    plt.show()
# Concatenate all per-image ROI statistics and persist them
df_stats = pd.concat(df_list)
df_stats.to_csv(f'output_{dataset}/masks_scale{cfg.scale}/roi_stats.csv')
print ("Run time = ", elapsed_time(start))
| 37.751825 | 160 | 0.671694 |
2855488f7315098aa90afd9884695e5338ec6899 | 1,940 | py | Python | mac/google-cloud-sdk/lib/surface/dataproc/autoscaling_policies/delete.py | bopopescu/cndw | ee432efef88a4351b355f3d6d5350defc7f4246b | [
"Apache-2.0"
] | 2 | 2019-11-10T09:17:07.000Z | 2019-12-18T13:44:08.000Z | mac/google-cloud-sdk/lib/surface/dataproc/autoscaling_policies/delete.py | bopopescu/cndw | ee432efef88a4351b355f3d6d5350defc7f4246b | [
"Apache-2.0"
] | 4 | 2020-07-21T12:51:46.000Z | 2022-01-22T10:29:25.000Z | mac/google-cloud-sdk/lib/surface/dataproc/autoscaling_policies/delete.py | bopopescu/cndw | ee432efef88a4351b355f3d6d5350defc7f4246b | [
"Apache-2.0"
] | 1 | 2020-07-25T18:17:57.000Z | 2020-07-25T18:17:57.000Z | # -*- coding: utf-8 -*- #
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Delete autoscaling policy command."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.dataproc import dataproc as dp
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.dataproc import flags
from googlecloudsdk.core.console import console_io
class Delete(base.DeleteCommand):
  """Delete an autoscaling policy.
  ## EXAMPLES
  The following command deletes the autoscaling policy
  `example-autoscaling-policy`:
  $ {command} example-autoscaling-policy
  """

  @classmethod
  def Args(cls, parser):
    # Register the autoscaling policy resource argument for this release track.
    api_version = dp.Dataproc(cls.ReleaseTrack()).api_version
    flags.AddAutoscalingPolicyResourceArg(parser, 'delete', api_version)

  def Run(self, args):
    dataproc = dp.Dataproc(self.ReleaseTrack())
    # Resolve the policy reference from the parsed concept arguments.
    policy_ref = args.CONCEPTS.autoscaling_policy.Parse()
    delete_request = (
        dataproc.messages.DataprocProjectsRegionsAutoscalingPoliciesDeleteRequest(
            name=policy_ref.RelativeName()))
    # Confirm with the user before performing the irreversible delete.
    console_io.PromptContinue(
        message="The autoscaling policy '[{0}]' will be deleted.".format(
            policy_ref.Name()),
        cancel_on_no=True)
    dataproc.client.projects_regions_autoscalingPolicies.Delete(delete_request)
| 32.881356 | 79 | 0.742268 |
db947203a27e69a6e56668dc63431f0e4f5599a2 | 5,639 | py | Python | initialize.py | T-002/pyproject | 06112f3f4b5c54d4789da3cb1eb007a33f00960b | [
"MIT"
] | null | null | null | initialize.py | T-002/pyproject | 06112f3f4b5c54d4789da3cb1eb007a33f00960b | [
"MIT"
] | null | null | null | initialize.py | T-002/pyproject | 06112f3f4b5c54d4789da3cb1eb007a33f00960b | [
"MIT"
] | null | null | null | # !/usr/bin/env python
# -*- coding: UTF-8 -*-
# Copyright (c) 2016-2017 Christian Schwarz
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""This module can be used to adapt the template to your project needs."""
import os
def delete_dummy_files():
    """Delete dummy test files."""
    for dummy in ("package/dummy.py", "tests/dummy_test.py"):
        os.remove(dummy)
def rename_project(project):
    """Rename the package directory.

    :param project: new name for the template's top-level ``package`` directory
    """
    os.rename("package", project)
def update_test_config(project):
    """Update the test configuration.

    Rewrites tests/__init__.py, replacing the template package name
    with the chosen project name.
    """
    config_path = "tests/__init__.py"
    with open(config_path, "r") as source:
        contents = source.read()
    with open(config_path, "w") as target:
        target.write(contents.replace("package", project))
def update_linter_test(project):
    """Update the linter test.

    Points the PROJECT_NAME constant in tests/pylint_test.py at the
    renamed package.
    """
    test_path = "tests/pylint_test.py"
    with open(test_path, "r") as source:
        contents = source.read()
    with open(test_path, "w") as target:
        target.write(contents.replace(
            'PROJECT_NAME="package"', 'PROJECT_NAME="%s"' % project))
def update_service_test(project):
    """Update the tests for the service submodule.

    Fixes the import statements in tests/service_test.py to reference
    the renamed package.
    """
    test_path = "tests/service_test.py"
    with open(test_path, "r") as source:
        contents = source.read()
    with open(test_path, "w") as target:
        target.write(contents.replace(
            "from package import", "from %s import" % project))
def update_builder_test(project):
    """Update the tests for the builder submodule.

    Fixes the import statements in tests/builder_test.py to reference
    the renamed package.
    """
    test_path = "tests/builder_test.py"
    with open(test_path, "r") as source:
        contents = source.read()
    with open(test_path, "w") as target:
        target.write(contents.replace(
            "from package import", "from %s import" % project))
def update_noseconfig(project):
    """Update the test configuration to match with the projects package name.

    Rewrites the cover-package entry in nose.cfg.
    """
    with open("nose.cfg", "r") as source:
        contents = source.read()
    with open("nose.cfg", "w") as target:
        target.write(contents.replace(
            "cover-package=package,tests", "cover-package=%s,tests" % project))
def update_pylintrc(project):
    """Update the init-hook for pylint.

    NOTE(review): this reads and writes ``nose.cfg`` even though the
    docstring says it updates the pylint init-hook — it looks like a
    copy/paste from ``update_noseconfig`` and probably should target the
    pylintrc file instead; confirm the intended filename before changing.
    """
    original = open("nose.cfg", "r").read()
    open("nose.cfg", "w").write(original.replace(
        """init-hook='import sys, os; sys.path.insert[0]("."); sys.path.insert[0]("./package");'""",
        """init-hook='import sys, os; sys.path.insert[0]("."); sys.path.insert[0]("./%s");'"""
        % project))
def update_main(project, is_flask_service):
    """Remove the not required code from __main__.py.

    :param project: name of the renamed package directory
    :param is_flask_service: keep (True) or strip (False) the Flask sections
    """
    original = open("%s/__main__.py" % project, "r").read()
    original = original.replace("package", project)
    # Sentinel comments delimiting the optional Flask microservice sections
    start_ms = "#### START MICROSERVICE CODE"
    end_ms = "#### END MICROSERVICE CODE"
    start_creation = "#### START MICROSERVICE INSTANCE CREATION"
    end_creation = "#### END MICROSERVICE INSTANCE CREATION"
    if not is_flask_service:
        # Cut out both delimited sections (markers included) by stitching
        # together the text before, between, and after them.
        current_index = 0
        new = original[current_index : original.find(start_ms)]
        current_index = original.find(end_ms) + len(end_ms)
        new += original[current_index : original.find(start_creation)]
        current_index = original.find(end_creation) + len(end_creation)
        new += original[current_index:]
        original = new
    else:
        # Keep the sections; point the app factory at the real package name.
        # Note "package" was already globally replaced above, so the target
        # string here uses the project name.
        original = original.replace(
            'make_app("package")',
            'make_app("%s")' % project)
    # Drop any remaining sentinel markers in either case
    original = original.replace(start_ms, "").replace(end_ms, "")
    original = original.replace(start_creation, "").replace(end_creation, "")
    open("%s/__main__.py" % project, "w").write(original)
def delete_flask_service_files(project):
    """Delete flask related files."""
    for flask_file in (
            "%s/service.py" % project,
            "tests/service_test.py",
            "%s/builder.py" % project,
            "tests/builder_test.py"):
        os.remove(flask_file)
def get_user_config():
    """Reads the project configuration from the user.

    Returns:
        tuple: Returns a tuple, containing (project_name, is_flask_service)
        where is_flask_service is always a bool.
    """
    project = str(input("""Please give your project name: """))
    flask_service = str(input(
        """Should "%s" contain a Flask service? (y/n) """ % project)
    ).lower().strip()
    # Fix: previously an empty answer left flask_service as "" (a str)
    # instead of a bool; startswith() yields False for empty input too,
    # so the falsy behaviour is preserved while the type is now consistent.
    is_flask_service = flask_service.startswith("y")
    return project, is_flask_service
def main():
    """Run the initialization and execute all steps to
    transform the template into a usable project.
    """
    project, is_flask_service = get_user_config()
    delete_dummy_files()
    rename_project(project)
    update_test_config(project)
    update_linter_test(project)
    update_noseconfig(project)
    update_pylintrc(project)
    update_builder_test(project)
    update_service_test(project)
    update_main(project, is_flask_service)
    # Flask scaffolding is only kept when the user asked for a service
    if not is_flask_service:
        delete_flask_service_files(project)
if __name__ == "__main__":
    main()
    print("You can now delete initialize.py")
| 36.380645 | 100 | 0.679021 |
6b94256496fbe4578b3f7da35a7b64fddc31a658 | 493 | py | Python | 42.py | Bascil/python-data-structures-code-challenge | d3ec548b828746ec1dfab0236666be543d27c35c | [
"MIT"
] | null | null | null | 42.py | Bascil/python-data-structures-code-challenge | d3ec548b828746ec1dfab0236666be543d27c35c | [
"MIT"
] | null | null | null | 42.py | Bascil/python-data-structures-code-challenge | d3ec548b828746ec1dfab0236666be543d27c35c | [
"MIT"
] | null | null | null | # Classes - Vertex and Graph classes
'''
Vertex class
Has a constructor that sets the name of the vertex
and creates a new empty set to store neighbours
The add neighbour method adds the name of a neighbouring
vertex to the neighbours set. This set automatically eliminates
duplicates. If a neighbour has already been added, adding it again has no effect
'''
class Vertex:
    """A named graph vertex holding a set of neighbouring vertex names."""

    def __init__(self, n):
        """Store the vertex name and start with an empty neighbour set."""
        self.name = n
        self.neighbours = set()

    def add_neighbours(self, v):
        """Record *v* as a neighbour; the set silently ignores duplicates."""
        self.neighbours.add(v)
| 24.65 | 65 | 0.742394 |
e76ca647bed087802a782c174f2d9ccbe0445d5a | 5,534 | py | Python | jmetal/util/density_estimator.py | badchild0912/jMetalPy | 46ec02d143e1db97d97cc6b5826136558cb382c1 | [
"MIT"
] | null | null | null | jmetal/util/density_estimator.py | badchild0912/jMetalPy | 46ec02d143e1db97d97cc6b5826136558cb382c1 | [
"MIT"
] | null | null | null | jmetal/util/density_estimator.py | badchild0912/jMetalPy | 46ec02d143e1db97d97cc6b5826136558cb382c1 | [
"MIT"
] | null | null | null | import logging
import numpy
from abc import ABCMeta, abstractmethod
from typing import TypeVar, List
from functools import cmp_to_key
from scipy.spatial.distance import euclidean
from jmetal.util.solutions.comparator import SolutionAttributeComparator, Comparator
LOGGER = logging.getLogger('jmetal')
S = TypeVar('S')
"""
.. module:: crowding_distance
:platform: Unix, Windows
:synopsis: Crowding distance implementation.
.. moduleauthor:: Antonio J. Nebro <ajnebro@uma.es>
"""
class DensityEstimator(List[S]):
    """This is the interface of any density estimator algorithm.

    NOTE(review): ``__metaclass__`` is the Python 2 idiom and has no effect
    on Python 3, so the abstract methods below are not actually enforced at
    instantiation time — confirm whether ABC enforcement is intended.
    """
    __metaclass__ = ABCMeta
    @abstractmethod
    def compute_density_estimator(self, solution_list):
        # Assign a density value to each solution (stored in its attributes)
        pass
    @abstractmethod
    def sort(self, solution_list):
        # Order solution_list in place according to the density value
        pass
    @classmethod
    def get_comparator(cls):
        # Subclasses return a Comparator over their density attribute
        pass
class CrowdingDistance(DensityEstimator[List[S]]):
    """This class implements a DensityEstimator based on the crowding distance of algorithm NSGA-II.
    """
    def compute_density_estimator(self, front):
        """This function performs the computation of the crowding density estimation over the solution list.

        .. note::
           This method assigns the distance in the inner elements of the solution list
           (stored under the 'crowding_distance' attribute of each solution).

        :param front: The list of solutions.
        """
        size = len(front)
        # Fix: the original compared sizes with `is`, which relies on
        # CPython's small-integer caching and raises SyntaxWarning on 3.8+;
        # use value equality instead.
        if size == 0:
            return
        elif size == 1:
            front[0].attributes['crowding_distance'] = float("inf")
            return
        elif size == 2:
            # With two solutions both are boundary points
            front[0].attributes['crowding_distance'] = float("inf")
            front[1].attributes['crowding_distance'] = float("inf")
            return
        for i in range(len(front)):
            front[i].attributes['crowding_distance'] = 0.0
        number_of_objectives = front[0].number_of_objectives
        for i in range(number_of_objectives):
            # Sort the population by objective i
            front = sorted(front, key=lambda x: x.objectives[i])
            objective_minn = front[0].objectives[i]
            objective_maxn = front[len(front) - 1].objectives[i]
            # Boundary solutions always receive infinite crowding distance
            front[0].attributes['crowding_distance'] = float('inf')
            front[size - 1].attributes['crowding_distance'] = float('inf')
            for j in range(1, size - 1):
                distance = front[j + 1].objectives[i] - front[j - 1].objectives[i]
                # Check if minimum and maximum are the same (degenerate
                # objective: skip normalisation; distance is 0 anyway since
                # all values are equal)
                if objective_maxn - objective_minn == 0:
                    pass
                else:
                    distance = distance / (objective_maxn - objective_minn)
                distance += front[j].attributes['crowding_distance']
                front[j].attributes['crowding_distance'] = distance
    def sort(self, solutions):
        """Sort solutions in place, largest crowding distance first."""
        solutions.sort(key=cmp_to_key(self.get_comparator().compare))
    @classmethod
    def get_comparator(cls):
        # Higher crowding distance is better (less crowded region)
        return SolutionAttributeComparator("crowding_distance", lowest_is_best=False)
class KNearestNeighborDensityEstimator(DensityEstimator[List[S]]):
    """This class implements a density estimator based on the distance to the k-th nearest solution.
    """
    def __init__(self, k=1):
        # k: which nearest neighbour (in objective space) defines the density
        self.k = k
        self.distance_matrix = []
    def compute_density_estimator(self, solutions):
        """Assign each solution a 'knn_density' attribute: the Euclidean
        distance in objective space to its k-th nearest neighbour.

        :param solutions: The list of solutions (left untouched if len <= k).
        """
        solutions_size = len(solutions)
        if solutions_size <= self.k:
            return
        # Compute distance matrix. Fix: the original computed every (i, j)
        # pair twice (and an unused `points` list); filling only the upper
        # triangle halves the work with an identical result, and the zeroed
        # diagonal matches euclidean(x, x) == 0.
        self.distance_matrix = numpy.zeros(shape=(solutions_size, solutions_size))
        for i in range(solutions_size):
            for j in range(i + 1, solutions_size):
                distance = euclidean(solutions[i].objectives,
                                     solutions[j].objectives)
                self.distance_matrix[i, j] = self.distance_matrix[j, i] = distance
        # Gets the k-nearest distance of all the solutions (index 0 of the
        # sorted row is the zero self-distance, so index k is the k-th
        # nearest neighbour)
        for i in range(solutions_size):
            distances = []
            for j in range(solutions_size):
                distances.append(self.distance_matrix[i, j])
            distances.sort()
            solutions[i].attributes['knn_density'] = distances[self.k]
    def sort(self, solutions):
        """Sort solutions in place by descending k-th nearest distance,
        breaking ties by walking outwards to increasingly distant neighbours."""
        def compare(solution1, solution2):
            distances1 = solution1.attributes["distances_"]
            distances2 = solution2.attributes["distances_"]
            tmp_k = self.k
            if distances1[tmp_k] > distances2[tmp_k]:
                return -1
            elif distances1[tmp_k] < distances2[tmp_k]:
                return 1
            else:
                # Tie on the k-th neighbour: compare the (k+1)-th, (k+2)-th, ...
                while tmp_k < (len(distances1) - 1):
                    tmp_k += 1
                    if distances1[tmp_k] > distances2[tmp_k]:
                        return -1
                    elif distances1[tmp_k] < distances2[tmp_k]:
                        return 1
            return 0
        # Attach each solution's full sorted distance row for the comparator
        for i in range(len(solutions)):
            distances = []
            for j in range(len(solutions)):
                distances.append(self.distance_matrix[i, j])
            distances.sort()
            solutions[i].attributes["distances_"] = distances
        solutions.sort(key=cmp_to_key(compare))
    @classmethod
    def get_comparator(cls):
        # Higher k-th neighbour distance is better (less dense region)
        return SolutionAttributeComparator("knn_density", lowest_is_best=False)
37fd0cc0f2537388644cfc17fa309b5d56f1ece5 | 556 | py | Python | python-leetcode/100.py | MDGSF/interviews | 9faa9aacdb0cfbb777d4d3d4d1b14b55ca2c9f76 | [
"MIT"
] | 12 | 2020-01-16T08:55:27.000Z | 2021-12-02T14:52:39.000Z | python-leetcode/100.py | MDGSF/interviews | 9faa9aacdb0cfbb777d4d3d4d1b14b55ca2c9f76 | [
"MIT"
] | null | null | null | python-leetcode/100.py | MDGSF/interviews | 9faa9aacdb0cfbb777d4d3d4d1b14b55ca2c9f76 | [
"MIT"
] | 1 | 2019-12-11T12:00:38.000Z | 2019-12-11T12:00:38.000Z | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def isSameTree(self, p: TreeNode, q: TreeNode) -> bool:
        """Return True when the trees rooted at p and q have identical
        structure and node values."""
        # Two empty trees are equal
        if p is None and q is None:
            return True
        # Exactly one empty tree means a structural mismatch
        if p is None or q is None:
            return False
        # Values must match here and in both subtrees
        if p.val != q.val:
            return False
        return self.isSameTree(p.left, q.left) and self.isSameTree(p.right, q.right)
fa49397c56fd2663d9742f9d402375d944661917 | 22,046 | py | Python | tests/master/test_pypi.py | bennuttall/piwheels | 4331b6491ea661f9bc387ee6c0802e623dff6fe4 | [
"BSD-3-Clause"
] | 100 | 2017-06-15T21:04:07.000Z | 2019-07-10T09:21:41.000Z | tests/master/test_pypi.py | bennuttall/piwheels | 4331b6491ea661f9bc387ee6c0802e623dff6fe4 | [
"BSD-3-Clause"
] | 172 | 2017-04-13T18:50:36.000Z | 2019-07-13T11:38:41.000Z | tests/master/test_pypi.py | bennuttall/piwheels | 4331b6491ea661f9bc387ee6c0802e623dff6fe4 | [
"BSD-3-Clause"
] | 12 | 2017-07-13T12:54:35.000Z | 2019-03-02T12:28:45.000Z | # The piwheels project
# Copyright (c) 2017 Ben Nuttall <https://github.com/bennuttall>
# Copyright (c) 2017 Dave Jones <dave@waveform.org.uk>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from time import sleep
from unittest import mock
from random import randint
from datetime import datetime, timezone
from threading import Thread
from socketserver import ThreadingMixIn
from http.server import HTTPServer, BaseHTTPRequestHandler
from xmlrpc.server import SimpleXMLRPCServer, SimpleXMLRPCRequestHandler
from xmlrpc.client import ProtocolError
from queue import Queue
import pytest
import requests
import http.client
import xmlrpc.client
from piwheels.pypi import *
UTC = timezone.utc

def dt(s):
    """Parse a 'YYYY-MM-DD HH:MM:SS' string into a UTC-aware datetime."""
    parsed = datetime.strptime(s, '%Y-%m-%d %H:%M:%S')
    return parsed.replace(tzinfo=UTC)
@pytest.fixture()
def xml_server(request):
    """Threaded XML-RPC server on 127.0.0.1:8000 exposing
    changelog_since_serial(); test events are fed in via the yielded Queue.

    Yields (url, queue); the server is shut down on fixture teardown.
    """
    q = Queue()
    def changelog_since_serial(n):
        # Number the queued (pkg, ver, ts, msg) rows serially starting at n + 1
        return [
            (pkg, ver, ts, msg, index)
            for index, (pkg, ver, ts, msg) in enumerate(q.get(), start=n + 1)
        ]
    class ThreadedXMLRPCServer(ThreadingMixIn, SimpleXMLRPCServer):
        pass
    xml_server = ThreadedXMLRPCServer(("127.0.0.1", 8000))
    xml_server.register_introspection_functions()
    xml_server.register_function(changelog_since_serial)
    # Daemon thread so a failing test can't hang the run on join
    xml_server_thread = Thread(target=xml_server.serve_forever)
    xml_server_thread.daemon = True
    xml_server_thread.start()
    yield "http://127.0.0.1:8000/", q
    xml_server.shutdown()
    xml_server.server_close()
@pytest.fixture()
def mock_buffer(request):
    """Patch piwheels.pypi.PyPIBuffer so iterating it yields the rows the
    test appends to the yielded list, instead of contacting PyPI."""
    with mock.patch('piwheels.pypi.PyPIBuffer') as buffer_proxy:
        events = []
        buffer_proxy().__iter__.return_value = events
        yield events
def test_pypi_buf_talks_to_servers(xml_server):
    """PyPIBuffer reads the XML-RPC changelog and yields events with their
    serials, holding back events younger than the 5-minute maturity window."""
    xml_url, xml_queue = xml_server
    # NOTE: Must use a serial after PYPI_EPOCH here to permit events thru,
    # and we must include at least 5 minutes worth of events
    buf = PyPIBuffer(pypi_xmlrpc=xml_url, serial=PYPI_EPOCH + 1000)
    buf._transport.use_https = False
    xml_queue.put([
        ('bla', '0.0', 1531320000, 'create'),
    ] * PYPI_MARGIN + [
        ('foo', '0.1', 1531327388, 'create'),
        ('foo', '0.1', 1531327389, 'add source file foo-0.1.tar.gz'),
        ('bar', '1.0', 1531328389, 'create'),
        ('bar', '1.0', 1531328390, 'add py2.py3 file bar-1.0-py2.py3-none-any.whl'),
        ('baz', '2.0', 1531329389, 'create'),
        ('baz', '2.0', 1531329390, 'add py2.py3 file baz-1.0-py2.py3-none-any.whl'),
    ])
    # baz events aren't included in output because they've not "aged" for
    # 5 minutes
    assert list(buf) == [
        ('bla', '0.0', 1531320000, 'create', PYPI_EPOCH + 1000),
        ('foo', '0.1', 1531327388, 'create', PYPI_EPOCH + 1001),
        ('foo', '0.1', 1531327389, 'add source file foo-0.1.tar.gz', PYPI_EPOCH + 1002),
        ('bar', '1.0', 1531328389, 'create', PYPI_EPOCH + 1003),
        ('bar', '1.0', 1531328390, 'add py2.py3 file bar-1.0-py2.py3-none-any.whl', PYPI_EPOCH + 1004),
    ]
def test_pypi_buf_returns_empty_before_epoch(xml_server):
    """Events whose serials precede PYPI_EPOCH are discarded entirely."""
    # See notes in prior test
    xml_url, xml_queue = xml_server
    buf = PyPIBuffer(pypi_xmlrpc=xml_url, serial=0)
    buf._transport.use_https = False
    xml_queue.put([
        ('bla', '0.0', ts, 'create')
        for ts in range(1531320000, 1531320000 + 1000)
    ])
    # Nothing returned because it's all before the PYPI_EPOCH
    assert list(buf) == []
def test_pypi_buf_returns_empty_before_serial(xml_server):
    """PyPIBuffer re-reads PYPI_MARGIN events behind the requested serial
    but only yields events at or after that serial."""
    xml_url, xml_queue = xml_server
    # Make sure we're beyond the epoch, even accounting for the amount
    # PyPIBuffer jumps back by (the margin)
    i = PYPI_EPOCH + PYPI_MARGIN + 1000
    buf = PyPIBuffer(pypi_xmlrpc=xml_url, serial=i)
    buf._transport.use_https = False
    xml_queue.put([
        ('bla', '0.0', 1531320000, 'create'),
    ] * (PYPI_MARGIN - 1))
    # Nothing returned yet because PyPIBuffer has jumped backwards PYPI_MARGIN
    # events
    assert list(buf) == []
    xml_queue.put([
        ('foo', '0.1', 1531327388, 'create'),
        ('foo', '0.1', 1531327389, 'add source file foo-0.1.tar.gz'),
        ('bar', '1.0', 1531328389, 'create'),
        ('bar', '1.0', 1531328390, 'add py2.py3 file bar-1.0-py2.py3-none-any.whl'),
        ('baz', '2.0', 1531329389, 'create'),
        ('baz', '2.0', 1531329390, 'add py2.py3 file baz-1.0-py2.py3-none-any.whl'),
    ])
    assert list(buf) == [
        ('foo', '0.1', 1531327388, 'create', i),
        ('foo', '0.1', 1531327389, 'add source file foo-0.1.tar.gz', i + 1),
        ('bar', '1.0', 1531328389, 'create', i + 2),
        ('bar', '1.0', 1531328390, 'add py2.py3 file bar-1.0-py2.py3-none-any.whl', i + 3),
    ]
def test_pypi_buf_waits_for_more_events(xml_server):
    """Events are only released once later events prove they are at least
    5 minutes old; earlier iterations return nothing."""
    xml_url, xml_queue = xml_server
    # Make sure we're beyond the epoch, even accounting for the amount
    # PyPIBuffer jumps back by (the margin)
    i = PYPI_EPOCH + PYPI_MARGIN + 1000
    buf = PyPIBuffer(pypi_xmlrpc=xml_url, serial=i)
    buf._transport.use_https = False
    xml_queue.put([
        ('bla', '0.0', 1531320000, 'create'),
    ] * (PYPI_MARGIN - 1))
    # Nothing yet because of PYPI_MARGIN (see prior test)
    assert list(buf) == []
    xml_queue.put([
        ('foo', '0.1', 1531327388, 'create'),
        ('foo', '0.1', 1531327389, 'add source file foo-0.1.tar.gz'),
    ])
    # Nothing yet because even though we've pushed the event it's waiting for,
    # it's not 5 minutes "old" yet
    assert list(buf) == []
    xml_queue.put([
        ('bar', '1.0', 1531328389, 'create'),
        ('bar', '1.0', 1531328390, 'add py2.py3 file bar-1.0-py2.py3-none-any.whl'),
        ('baz', '2.0', 1531329389, 'create'),
        ('baz', '2.0', 1531329390, 'add py2.py3 file baz-1.0-py2.py3-none-any.whl'),
    ])
    assert list(buf) == [
        ('foo', '0.1', 1531327388, 'create', i),
        ('foo', '0.1', 1531327389, 'add source file foo-0.1.tar.gz', i + 1),
        ('bar', '1.0', 1531328389, 'create', i + 2),
        ('bar', '1.0', 1531328390, 'add py2.py3 file bar-1.0-py2.py3-none-any.whl', i + 3),
    ]
def test_pypi_buf_raises_errors():
    """An HTTP error from the XML-RPC endpoint propagates as ProtocolError."""
    class BadXMLHandler(BaseHTTPRequestHandler):
        def do_POST(self):
            # Always fail so the XML-RPC client sees a protocol error
            self.send_error(404, 'Function not found')
    class BadXMLRPCServer(ThreadingMixIn, HTTPServer):
        pass
    server = BadXMLRPCServer(("127.0.0.1", 8000), BadXMLHandler)
    server_thread = Thread(target=server.serve_forever)
    server_thread.daemon = True
    server_thread.start()
    try:
        buf = PyPIBuffer(pypi_xmlrpc='http://127.0.0.1:8000/')
        buf._transport.use_https = False
        with pytest.raises(ProtocolError):
            list(buf)
    finally:
        # Always release the port, even if the assertion fails
        server.shutdown()
        server.server_close()
def test_pypi_read_normal(mock_buffer, mock_json_server):
    """PyPIEvents converts raw changelog rows into (package, version,
    timestamp, action, description) tuples, with a missing JSON entry
    yielding an empty description."""
    mock_buffer[:] = [
        ('foo', '0.1', 1531327388, 'create', 0),
        ('foo', '0.1', 1531327388, 'add source file foo-0.1.tar.gz', 1),
        ('bar', '1.0', 1531327389, 'create', 2),
        ('bar', '1.0', 1531327389, 'add py2.py3 file bar-1.0-py2.py3-none-any.whl', 3),
        ('baz', '1.0', 1531327390, 'create', 4),
        ('baz', '1.0', 1531327390, 'add py2.py3 file baz-1.0-py2.py3-none-any.whl', 5),
    ]
    mock_json_server['foo'] = 'package foo'
    mock_json_server['bar'] = 'package bar'
    mock_json_server['baz'] = None
    events = PyPIEvents()
    assert list(events) == [
        ('foo', None, dt('2018-07-11 16:43:08'), 'create', 'package foo'),
        ('foo', '0.1', dt('2018-07-11 16:43:08'), 'source', 'package foo'),
        ('bar', None, dt('2018-07-11 16:43:09'), 'create', 'package bar'),
        ('bar', '1.0', dt('2018-07-11 16:43:09'), 'create', 'package bar'),
        ('baz', None, dt('2018-07-11 16:43:10'), 'create', ''),
        ('baz', '1.0', dt('2018-07-11 16:43:10'), 'create', ''),
    ]
def test_pypi_read_json_err(mock_buffer, mock_json_server):
    """An HTTP error from the PyPI JSON API results in a None description
    rather than aborting event processing."""
    mock_buffer[:] = [
        ('foo', '0.1', 1531327388, 'create', 0),
        ('foo', '0.1', 1531327388, 'add source file foo-0.1.tar.gz', 1),
        ('pypi-http-err-503', '1.0', 1531327389, 'create', 2),
        ('pypi-http-err-503', '1.0', 1531327389, 'add py2.py3 file bar-1.0-py2.py3-none-any.whl', 3),
    ]
    mock_json_server['foo'] = 'package foo'
    events = PyPIEvents()
    assert list(events) == [
        ('foo', None, dt('2018-07-11 16:43:08'), 'create', 'package foo'),
        ('foo', '0.1', dt('2018-07-11 16:43:08'), 'source', 'package foo'),
        ('pypi-http-err-503', None, dt('2018-07-11 16:43:09'), 'create', None),
        ('pypi-http-err-503', '1.0', dt('2018-07-11 16:43:09'), 'create', None),
    ]
def test_pypi_read_json_bad(mock_buffer, mock_json_server):
    """Malformed JSON from the PyPI API results in a None description
    rather than aborting event processing."""
    mock_buffer[:] = [
        ('foo', '0.1', 1531327388, 'create', 0),
        ('foo', '0.1', 1531327388, 'add source file foo-0.1.tar.gz', 1),
        ('pypi-bad', '1.0', 1531327389, 'create', 2),
        ('pypi-bad', '1.0', 1531327389, 'add py2.py3 file bar-1.0-py2.py3-none-any.whl', 3),
    ]
    mock_json_server['foo'] = 'package foo'
    mock_json_server['pypi-bad'] = 'pypi broke'
    events = PyPIEvents()
    assert list(events) == [
        ('foo', None, dt('2018-07-11 16:43:08'), 'create', 'package foo'),
        ('foo', '0.1', dt('2018-07-11 16:43:08'), 'source', 'package foo'),
        ('pypi-bad', None, dt('2018-07-11 16:43:09'), 'create', None),
        ('pypi-bad', '1.0', dt('2018-07-11 16:43:09'), 'create', None),
    ]
def test_pypi_read_missing_description(mock_buffer, mock_json_server):
    """A package absent from the JSON server yields a None description."""
    mock_buffer[:] = [
        ('foo', '0.1', 1531327388, 'create', 0),
        ('foo', '0.1', 1531327388, 'add source file foo-0.1.tar.gz', 1),
        ('bar', '1.0', 1531327389, 'create', 2),
        ('bar', '1.0', 1531327389, 'add py2.py3 file bar-1.0-py2.py3-none-any.whl', 3),
    ]
    mock_json_server['foo'] = 'package foo'
    events = PyPIEvents()
    assert list(events) == [
        ('foo', None, dt('2018-07-11 16:43:08'), 'create', 'package foo'),
        ('foo', '0.1', dt('2018-07-11 16:43:08'), 'source', 'package foo'),
        ('bar', None, dt('2018-07-11 16:43:09'), 'create', None),
        ('bar', '1.0', dt('2018-07-11 16:43:09'), 'create', None),
    ]
def test_pypi_read_huge_description(mock_buffer, mock_json_server):
    """Over-long descriptions are truncated to 200 chars with an ellipsis."""
    mock_buffer[:] = [
        ('foo', '0.1', 1531327388, 'create', 0),
        ('foo', '0.1', 1531327388, 'add source file foo-0.1.tar.gz', 1),
        ('bar', '1.0', 1531327389, 'create', 2),
        ('bar', '1.0', 1531327389, 'add py2.py3 file bar-1.0-py2.py3-none-any.whl', 3),
    ]
    mock_json_server['foo'] = 'package foo'
    mock_json_server['bar'] = 'bar' * 1000
    # 199 characters of the original description plus a single ellipsis
    expected = ('bar' * 1000)[:199] + '…'
    events = PyPIEvents()
    assert list(events) == [
        ('foo', None, dt('2018-07-11 16:43:08'), 'create', 'package foo'),
        ('foo', '0.1', dt('2018-07-11 16:43:08'), 'source', 'package foo'),
        ('bar', None, dt('2018-07-11 16:43:09'), 'create', expected),
        ('bar', '1.0', dt('2018-07-11 16:43:09'), 'create', expected),
    ]
def test_pypi_ignore_other_events(mock_buffer, mock_json_server):
    """Changelog actions that aren't recognised release events are skipped."""
    mock_buffer[:] = [
        ('foo', '0.1', 1531327388, 'create', 0),
        ('foo', '0.1', 1531327388, 'add source file foo-0.1.tar.gz', 1),
        ('bar', '1.0', 1531327389, 'create', 2),
        ('bar', '1.0', 1531327389, 'add py2.py3 file bar-1.0-py2.py3-none-any.whl', 3),
        ('bar', '1.0', 1531327392, 'foo', 4),
        ('bar', '1.0', 1531327392, 'foo bar baz', 5),
    ]
    mock_json_server['foo'] = 'package foo'
    mock_json_server['bar'] = 'package bar'
    events = PyPIEvents()
    assert list(events) == [
        ('foo', None, dt('2018-07-11 16:43:08'), 'create', 'package foo'),
        ('foo', '0.1', dt('2018-07-11 16:43:08'), 'source', 'package foo'),
        ('bar', None, dt('2018-07-11 16:43:09'), 'create', 'package bar'),
        ('bar', '1.0', dt('2018-07-11 16:43:09'), 'create', 'package bar'),
    ]
def test_pypi_cache_expunge(mock_buffer, mock_json_server):
    """With cache_size=1 the internal versions cache only retains the most
    recently seen (package, version) entry."""
    mock_buffer[:] = [
        ('foo', '0.1', 1531327388, 'create', 0),
        ('foo', '0.1', 1531327388, 'add source file foo-0.1.tar.gz', 1),
        ('bar', '1.0', 1531327389, 'create', 2),
        ('bar', '1.0', 1531327389, 'add py2.py3 file bar-1.0-py2.py3-none-any.whl', 3),
    ]
    mock_json_server['foo'] = 'package foo'
    mock_json_server['bar'] = 'package bar'
    events = PyPIEvents(cache_size=1)
    assert list(events) == [
        ('foo', None, dt('2018-07-11 16:43:08'), 'create', 'package foo'),
        ('foo', '0.1', dt('2018-07-11 16:43:08'), 'source', 'package foo'),
        ('bar', None, dt('2018-07-11 16:43:09'), 'create', 'package bar'),
        ('bar', '1.0', dt('2018-07-11 16:43:09'), 'create', 'package bar'),
    ]
    # The older entry has been expunged; only the latest survives
    assert ('foo', '0.1') not in events._versions
    assert ('bar', '1.0') in events._versions
def test_pypi_ignore_dupes(mock_buffer, mock_json_server):
    """Multiple file uploads for the same (package, version) collapse into
    a single event; the first source upload determines the 'source' action."""
    mock_buffer[:] = [
        ('foo', '0.1', 1531327388, 'create', 0),
        ('foo', '0.1', 1531327388, 'add source file foo-0.1.tar.gz', 1),
        ('bar', '1.0', 1531327389, 'create', 2),
        ('bar', '1.0', 1531327389, 'add source file bar-1.0.tar.gz', 3),
        ('bar', '1.0', 1531327389, 'add source file bar-1.0.zip', 4),
        ('bar', '1.0', 1531327392, 'add cp34 file bar-0.1-cp34-cp34-manylinux1_x86_64.whl', 5),
        ('bar', '1.0', 1531327392, 'add cp35 file bar-0.1-cp35-cp35-manylinux1_x86_64.whl', 6),
    ]
    mock_json_server['foo'] = 'package foo'
    mock_json_server['bar'] = 'package bar'
    events = PyPIEvents()
    assert list(events) == [
        ('foo', None, dt('2018-07-11 16:43:08'), 'create', 'package foo'),
        ('foo', '0.1', dt('2018-07-11 16:43:08'), 'source', 'package foo'),
        ('bar', None, dt('2018-07-11 16:43:09'), 'create', 'package bar'),
        ('bar', '1.0', dt('2018-07-11 16:43:09'), 'source', 'package bar'),
    ]
def test_pypi_promote_binary_to_source(mock_buffer, mock_json_server):
    """A release first seen as binary-only ('create') is later promoted to
    'source' when a source file is uploaded, keeping the original
    (earliest) release timestamp."""
    mock_buffer[:] = [
        ('foo', '0.1', 1531327388, 'create', 0),
        ('foo', '0.1', 1531327388, 'add source file foo-0.1.tar.gz', 1),
        ('bar', '1.0', 1531327389, 'create', 2),
        ('bar', '1.0', 1531327390, 'add cp34 file bar-0.1-cp34-cp34-manylinux1_x86_64.whl', 3),
        ('bar', '1.0', 1531327390, 'add cp35 file bar-0.1-cp35-cp35-manylinux1_x86_64.whl', 4),
        ('bar', '1.0', 1531327392, 'add source file bar-1.0.tar.gz', 5),
        ('bar', '1.0', 1531327392, 'add source file bar-1.0.zip', 6),
    ]
    mock_json_server['foo'] = 'package foo'
    mock_json_server['bar'] = ''
    events = PyPIEvents()
    assert list(events) == [
        ('foo', None, dt('2018-07-11 16:43:08'), 'create', 'package foo'),
        ('foo', '0.1', dt('2018-07-11 16:43:08'), 'source', 'package foo'),
        ('bar', None, dt('2018-07-11 16:43:09'), 'create', ''),
        ('bar', '1.0', dt('2018-07-11 16:43:10'), 'create', ''),
        # Note the timestamp doesn't alter as the release time is the
        # earliest release
        ('bar', '1.0', dt('2018-07-11 16:43:10'), 'source', ''),
    ]
def test_pypi_ignore_removes(mock_buffer, mock_json_server):
    """Non-project removal actions (e.g. 'remove Owner ...') are ignored."""
    mock_buffer[:] = [
        ('foo', '0.1', 1531327388, 'create', 0),
        ('foo', '0.1', 1531327388, 'add source file foo-0.1.tar.gz', 1),
        ('foo', '0.1', 1531327388, 'remove Owner foo', 2),
    ]
    mock_json_server['foo'] = 'package foo'
    events = PyPIEvents()
    # no 'remove' event is emitted for the owner removal
    assert list(events) == [
        ('foo', None, dt('2018-07-11 16:43:08'), 'create', 'package foo'),
        ('foo', '0.1', dt('2018-07-11 16:43:08'), 'source', 'package foo'),
    ]


# NOTE(review): this test feeds a 'remove project' action while
# test_pypi_remove_package below feeds 'remove release'; the action strings
# look swapped relative to the test names (PyPI's changelog uses
# 'remove release' for a version and 'remove project' for a whole package).
# Confirm against PyPIEvents' handling before renaming anything.
def test_pypi_remove_version(mock_buffer, mock_json_server):
    """A removal entry carrying a version yields a versioned 'remove' event."""
    mock_buffer[:] = [
        ('foo', '0.1', 1531327388, 'create', 0),
        ('foo', '0.1', 1531327388, 'add source file foo-0.1.tar.gz', 1),
        ('foo', '0.1', 1531327388, 'remove project', 2),
    ]
    mock_json_server['foo'] = 'package foo'
    events = PyPIEvents()
    assert list(events) == [
        ('foo', None, dt('2018-07-11 16:43:08'), 'create', 'package foo'),
        ('foo', '0.1', dt('2018-07-11 16:43:08'), 'source', 'package foo'),
        ('foo', '0.1', dt('2018-07-11 16:43:08'), 'remove', None),
    ]


def test_pypi_remove_package(mock_buffer, mock_json_server):
    """A removal entry with version None yields a package-level 'remove'
    event (see NOTE(review) above about the action-string naming)."""
    mock_buffer[:] = [
        ('foo', '0.1', 1531327388, 'create', 0),
        ('foo', '0.1', 1531327388, 'add source file foo-0.1.tar.gz', 1),
        ('foo', None, 1531327388, 'remove release', 2),
    ]
    mock_json_server['foo'] = 'package foo'
    events = PyPIEvents()
    assert list(events) == [
        ('foo', None, dt('2018-07-11 16:43:08'), 'create', 'package foo'),
        ('foo', '0.1', dt('2018-07-11 16:43:08'), 'source', 'package foo'),
        ('foo', None, dt('2018-07-11 16:43:08'), 'remove', None),
    ]
def test_pypi_yank_version(mock_buffer, mock_json_server):
    """A 'yank release' changelog entry surfaces as a 'yank' event."""
    mock_buffer[:] = [('foo', '0.1', 1531327388, 'yank release', 0)]
    expected = [('foo', '0.1', dt('2018-07-11 16:43:08'), 'yank', None)]
    assert list(PyPIEvents()) == expected


def test_pypi_unyank_version(mock_buffer, mock_json_server):
    """An 'unyank release' changelog entry surfaces as an 'unyank' event."""
    mock_buffer[:] = [('foo', '0.1', 1531327388, 'unyank release', 0)]
    expected = [('foo', '0.1', dt('2018-07-11 16:43:08'), 'unyank', None)]
    assert list(PyPIEvents()) == expected
def test_pypi_backoff(mock_buffer, mock_json_server):
    """After a read, the reader backs off: new changelog entries are not
    polled again until the back-off interval has elapsed."""
    mock_buffer[:] = [
        ('foo', '0.1', 1531327388, 'create', 0),
        ('foo', '0.1', 1531327388, 'add source file foo-0.1.tar.gz', 1),
        ('bar', '1.0', 1531327389, 'create', 2),
        ('bar', '1.0', 1531327389, 'add py2.py3 file bar-1.0-py2.py3-none-any.whl', 3),
    ]
    mock_json_server['foo'] = 'package foo'
    mock_json_server['bar'] = ''
    events = PyPIEvents()
    assert list(events) == [
        ('foo', None, dt('2018-07-11 16:43:08'), 'create', 'package foo'),
        ('foo', '0.1', dt('2018-07-11 16:43:08'), 'source', 'package foo'),
        ('bar', None, dt('2018-07-11 16:43:09'), 'create', ''),
        ('bar', '1.0', dt('2018-07-11 16:43:09'), 'create', ''),
    ]
    # an empty changelog yields no events
    mock_buffer[:] = []
    assert list(events) == []
    mock_buffer[:] = [
        ('bar', '1.1', 1531327392, 'create', 4),
        ('bar', '1.1', 1531327393, 'add source file bar-1.1.tar.gz', 5),
    ]
    # Because 10 seconds haven't elapsed...
    assert list(events) == []
def test_pypi_read_improper_state():
    """Transient HTTP connection-state errors are swallowed (empty read)."""
    with mock.patch('xmlrpc.client.ServerProxy') as proxy:
        proxy().changelog_since_serial.side_effect = (
            http.client.ImproperConnectionState('Something went horribly wrong')
        )
        events = PyPIEvents()
        assert list(events) == []


def test_pypi_read_server_protocol_error():
    """Server-side (5xx) protocol errors are swallowed (empty read)."""
    with mock.patch('xmlrpc.client.ServerProxy') as proxy:
        proxy().changelog_since_serial.side_effect = (
            xmlrpc.client.ProtocolError('Something else went wrong',
                                        500, '', '')
        )
        events = PyPIEvents()
        assert list(events) == []


def test_pypi_read_server_other_fault():
    """Unrecognized XML-RPC faults propagate to the caller."""
    with mock.patch('xmlrpc.client.ServerProxy') as proxy:
        proxy().changelog_since_serial.side_effect = (
            xmlrpc.client.Fault(-1000, 'Something went horribly wrong!')
        )
        events = PyPIEvents()
        with pytest.raises(xmlrpc.client.Fault):
            list(events)


def test_pypi_read_server_backoff():
    """The -32500 'too many requests' fault is swallowed (empty read)."""
    with mock.patch('xmlrpc.client.ServerProxy') as proxy:
        proxy().changelog_since_serial.side_effect = (
            xmlrpc.client.Fault(-32500, 'Too many requests')
        )
        events = PyPIEvents()
        assert list(events) == []


def test_pypi_read_client_error():
    """Client-side (4xx) protocol errors propagate to the caller."""
    with mock.patch('xmlrpc.client.ServerProxy') as proxy:
        proxy().changelog_since_serial.side_effect = (
            xmlrpc.client.ProtocolError('Client did something stupid',
                                        400, '', '')
        )
        events = PyPIEvents()
        with pytest.raises(xmlrpc.client.ProtocolError):
            list(events)
def test_pypi_json_timeouts(mock_json_server):
    """Timeouts and HTTP 408 from the JSON API return None (best-effort)."""
    assert pypi_package_description('pypi-timeout-err') is None
    assert pypi_package_description('pypi-http-err-408') is None


def test_pypi_json_connection_error(mock_json_server):
    """Connection failures against the JSON API return None."""
    assert pypi_package_description('pypi-connect-err') is None


def test_pypi_json_client_error(mock_json_server):
    """Other client-side HTTP errors propagate as requests.HTTPError."""
    with pytest.raises(requests.HTTPError):
        pypi_package_description('pypi-http-err-418')  # I'm a teapot
| 41.054004 | 103 | 0.599383 |
91ab73d4ad4c5fbffceb1820e99aec81013fde36 | 10,256 | py | Python | src/sentry/api/endpoints/organization_health.py | withrocks/commonlims | d8a925c917aa26e8205fefb3966a9f49f8f2e2f8 | [
"BSD-3-Clause"
] | 4 | 2019-05-27T13:55:07.000Z | 2021-03-30T07:05:09.000Z | src/sentry/api/endpoints/organization_health.py | withrocks/commonlims | d8a925c917aa26e8205fefb3966a9f49f8f2e2f8 | [
"BSD-3-Clause"
] | 99 | 2019-05-20T14:16:33.000Z | 2021-01-19T09:25:15.000Z | src/sentry/api/endpoints/organization_health.py | withrocks/commonlims | d8a925c917aa26e8205fefb3966a9f49f8f2e2f8 | [
"BSD-3-Clause"
] | 1 | 2020-08-10T07:55:40.000Z | 2020-08-10T07:55:40.000Z | from __future__ import absolute_import
import six
from collections import namedtuple, defaultdict
from datetime import timedelta
from rest_framework.exceptions import PermissionDenied
from rest_framework.response import Response
from django.utils import timezone
from sentry.api.bases import OrganizationEndpoint, EnvironmentMixin
from sentry.api.utils import get_date_range_from_params, InvalidParams
from sentry.api.exceptions import ResourceDoesNotExist
from sentry.models import (
Project, ProjectStatus, OrganizationMemberTeam,
Environment,
)
from sentry.api.serializers.snuba import (
SnubaResultSerializer, SnubaTSResultSerializer, value_from_row,
SnubaLookup,
)
from sentry.utils import snuba
from sentry.utils.dates import parse_stats_period
SnubaResultSet = namedtuple('SnubaResultSet', ('current', 'previous'))
SnubaTSResult = namedtuple('SnubaTSResult', ('data', 'start', 'end', 'rollup'))
def query(**kwargs):
kwargs['referrer'] = 'health'
kwargs['totals'] = True
return snuba.raw_query(**kwargs)
class OrganizationHealthEndpointBase(OrganizationEndpoint, EnvironmentMixin):
def empty(self):
return Response({'data': []})
def get_project_ids(self, request, organization):
project_ids = set(map(int, request.GET.getlist('project')))
before = project_ids.copy()
if request.user.is_superuser:
# Superusers can query any projects within the organization
qs = Project.objects.filter(
organization=organization,
status=ProjectStatus.VISIBLE,
)
else:
# Anyone else needs membership of the project
qs = Project.objects.filter(
organization=organization,
teams__in=OrganizationMemberTeam.objects.filter(
organizationmember__user=request.user,
organizationmember__organization=organization,
).values_list('team'),
status=ProjectStatus.VISIBLE,
)
# If no project's are passed through querystring, we want to
# return all projects, otherwise, limit to the passed in ones
if project_ids:
qs = qs.filter(id__in=project_ids)
project_ids = set(qs.values_list('id', flat=True))
if before and project_ids != before:
raise PermissionDenied
if not project_ids:
return
# Make sure project_ids is now a list, otherwise
# snuba isn't happy with it being a set
return list(project_ids)
def get_environment(self, request, organization):
try:
environment = self._get_environment_from_request(
request,
organization.id,
)
except Environment.DoesNotExist:
raise ResourceDoesNotExist
if environment is None:
return []
if environment.name == '':
return [['tags[environment]', 'IS NULL', None]]
return [['tags[environment]', '=', environment.name]]
def get_query_condition(self, request, organization):
qs = request.GET.getlist('q')
if not qs:
return [[]]
conditions = defaultdict(list)
for q in qs:
try:
tag, value = q.split(':', 1)
except ValueError:
# Malformed query
continue
try:
lookup = SnubaLookup.get(tag)
except KeyError:
# Not a valid lookup tag
continue
conditions[lookup.filter_key].append(value)
return [[k, 'IN', v] for k, v in conditions.items()]
class OrganizationHealthTopEndpoint(OrganizationHealthEndpointBase):
    """Top-N values for a tag over a stats period, with the previous
    period queried for the same values so the serializer can show deltas."""

    # bounds validated against the `?statsPeriod=` parameter
    MIN_STATS_PERIOD = timedelta(hours=1)
    MAX_STATS_PERIOD = timedelta(days=45)
    MAX_LIMIT = 50

    def get(self, request, organization):
        """
        Returns a top-N view based on queryset over time period, as well as previous
        period.
        """
        try:
            lookup = SnubaLookup.get(request.GET['tag'])
        except KeyError:
            raise ResourceDoesNotExist
        stats_period = parse_stats_period(request.GET.get('statsPeriod', '24h'))
        if stats_period is None or stats_period < self.MIN_STATS_PERIOD or stats_period >= self.MAX_STATS_PERIOD:
            return Response({'detail': 'Invalid statsPeriod'}, status=400)
        try:
            limit = int(request.GET.get('limit', '5'))
        except ValueError:
            return Response({'detail': 'Invalid limit'}, status=400)
        if limit > self.MAX_LIMIT:
            return Response({'detail': 'Invalid limit: max %d' % self.MAX_LIMIT}, status=400)
        if limit <= 0:
            return self.empty()
        try:
            project_ids = self.get_project_ids(request, organization)
        except ValueError:
            return Response({'detail': 'Invalid project ids'}, status=400)
        if not project_ids:
            return self.empty()
        environment = self.get_environment(request, organization)
        query_condition = self.get_query_condition(request, organization)
        aggregations = [('count()', '', 'count')]
        # If we pass `?topk` this means we also are
        # layering on top_projects and total_projects for each value.
        if 'topk' in request.GET:
            try:
                topk = int(request.GET['topk'])
            except ValueError:
                return Response({'detail': 'Invalid topk'}, status=400)
            aggregations += [
                ('topK(%d)' % topk, 'project_id', 'top_projects'),
                ('uniq', 'project_id', 'total_projects'),
            ]
        now = timezone.now()
        # current-period query: top `limit` values by count
        data = query(
            end=now,
            start=now - stats_period,
            selected_columns=lookup.selected_columns,
            aggregations=aggregations,
            filter_keys={
                'project_id': project_ids,
            },
            conditions=lookup.conditions + query_condition + environment,
            groupby=lookup.columns,
            orderby='-count',
            limit=limit,
        )
        if not data['data']:
            return self.empty()
        # Convert our results from current period into a condition
        # to be used in the next query for the previous period.
        # This way our values overlap to be able to deduce a delta.
        values = []
        is_null = False
        for row in data['data']:
            value = lookup.encoder(value_from_row(row, lookup.columns))
            if value is None:
                # a NULL tag value needs an IS NULL condition, not IN
                is_null = True
            else:
                values.append(value)
        previous = query(
            end=now - stats_period,
            start=now - (stats_period * 2),
            selected_columns=lookup.selected_columns,
            aggregations=[
                ('count()', '', 'count'),
            ],
            filter_keys={
                'project_id': project_ids,
            },
            conditions=lookup.conditions + query_condition + environment + [
                [lookup.filter_key, 'IN', values] if values else [],
                [lookup.tagkey, 'IS NULL', None] if is_null else [],
            ],
            groupby=lookup.columns,
        )
        serializer = SnubaResultSerializer(organization, lookup, request.user)
        return Response(
            serializer.serialize(
                SnubaResultSet(data, previous),
            ),
            status=200,
        )
class OrganizationHealthGraphEndpoint(OrganizationHealthEndpointBase):
    """Time-series counts for a tag, bucketed by `?interval=` over the
    requested date range."""

    def get_environments(self, request, organization):
        """Build snuba conditions for the `?environment=` list.

        Unlike the base class's single-environment helper, this accepts
        multiple names; raises ResourceDoesNotExist if any requested name
        does not exist in the organization.
        """
        requested_environments = set(request.GET.getlist('environment'))
        if not requested_environments:
            return []
        environments = set(
            Environment.objects.filter(
                organization_id=organization.id,
                name__in=requested_environments,
            ).values_list('name', flat=True),
        )
        if requested_environments != environments:
            raise ResourceDoesNotExist
        conditions = []
        # the "no environment" environment is null in snuba
        if '' in environments:
            environments.remove('')
            conditions.append(['tags[environment]', 'IS NULL', None])
        if environments:
            conditions.append(['tags[environment]', 'IN', list(environments)])
        return [conditions]

    def get(self, request, organization):
        """
        Returns a time series view over statsPeriod over interval.
        """
        try:
            lookup = SnubaLookup.get(request.GET['tag'])
        except KeyError:
            raise ResourceDoesNotExist
        try:
            start, end = get_date_range_from_params(request.GET)
        except InvalidParams as exc:
            return Response({'detail': six.text_type(exc)}, status=400)
        interval = parse_stats_period(request.GET.get('interval', '1h'))
        if interval is None:
            # fall back to hourly buckets on an unparseable interval
            interval = timedelta(hours=1)
        try:
            project_ids = self.get_project_ids(request, organization)
        except ValueError:
            return Response({'detail': 'Invalid project ids'}, status=400)
        if not project_ids:
            return self.empty()
        environment_conditions = self.get_environments(request, organization)
        query_condition = self.get_query_condition(request, organization)
        # rollup is the bucket width in seconds for the time series
        rollup = int(interval.total_seconds())
        data = query(
            end=end,
            start=start,
            rollup=rollup,
            selected_columns=lookup.selected_columns,
            aggregations=[
                ('count()', '', 'count'),
            ],
            filter_keys={'project_id': project_ids},
            conditions=lookup.conditions + query_condition + environment_conditions,
            groupby=['time'] + lookup.columns,
            orderby='time',
        )
        serializer = SnubaTSResultSerializer(organization, lookup, request.user)
        return Response(
            serializer.serialize(
                SnubaTSResult(data, start, end, rollup),
            ),
            status=200,
        )
| 33.298701 | 113 | 0.593799 |
50b4d8e6aca744c590e2a5e46c011c1b68bdcc93 | 14,839 | py | Python | code/constituents_utils.py | nateGeorge/beat_market_analysis | 6c54b4aba4049d4e4a12b2d064d5d5d537ad8520 | [
"Apache-2.0"
] | null | null | null | code/constituents_utils.py | nateGeorge/beat_market_analysis | 6c54b4aba4049d4e4a12b2d064d5d5d537ad8520 | [
"Apache-2.0"
] | null | null | null | code/constituents_utils.py | nateGeorge/beat_market_analysis | 6c54b4aba4049d4e4a12b2d064d5d5d537ad8520 | [
"Apache-2.0"
] | null | null | null | import os
import glob
import datetime
from collections import OrderedDict
import pandas as pd
import numpy as np
import pandas_market_calendars as mcal
import matplotlib.pyplot as plt
# Local data directories for scraped S&P 600 files, barchart.com constituent
# lists, and WRDS/Compustat exports. Machine-specific absolute paths.
FILEPATH = '/home/nate/Dropbox/data/sp600/'
CONSTITUENT_FILEPATH = '/home/nate/Dropbox/data/barchart.com/'
WRDS_FILEPATH = '/home/nate/Dropbox/data/wrds/compustat_north_america/tsv/'
def get_home_dir(repo_name='beat_market_analysis'):
    """Return the absolute path (with trailing slash) of the repo root.

    Walks the current working directory's path components looking for
    *repo_name*. Returns None (with a message) when the name appears more
    than once or not at all.

    BUG FIX: the original indexed ``repo_position[0]`` without checking
    for an empty match list, raising IndexError when the cwd was outside
    the repo; that case now returns None. Also fixed the 'intance' typo
    in the duplicate-name message.
    """
    cwd = os.getcwd()
    cwd_list = cwd.split('/')
    repo_position = [i for i, s in enumerate(cwd_list) if s == repo_name]
    if len(repo_position) > 1:
        print("error! more than one instance of repo name in path")
        return None
    if not repo_position:
        print("error! repo name not found in path")
        return None
    home_dir = '/'.join(cwd_list[:repo_position[0] + 1]) + '/'
    return home_dir
def get_historical_constituents_wrds():
    """
    gets historical constituents from WRDS file

    Reads the WRDS index-constituents export, restricts it to the
    S&P Smallcap 600, and builds per-trading-day membership lists.

    Returns a 3-tuple:
        constituent_companies: OrderedDict of date string -> company names
        constituent_tickers:   OrderedDict of date string -> tickers
        unique_dates:          set of dates on which membership changed
    """
    # TODO: get latest file
    df = pd.read_csv(WRDS_FILEPATH + 'index_constituents_9-12-2018.txt', parse_dates=['from', 'thru'], infer_datetime_format=True, sep='\t')
    # localize naive timestamps to US/Eastern so comparisons below work
    if df['from'][0].tzinfo is None:
        df['from'] = df['from'].dt.tz_localize('US/Eastern')
    if df['thru'][0].tzinfo is None:
        df['thru'] = df['thru'].dt.tz_localize('US/Eastern')
    # only use s&p600 for now
    sp600_df = df[df['conm'] == 'S&P Smallcap 600 Index']
    # create dataframe with list of constituents for each day
    start = sp600_df['from'].min()
    # get todays date and reset hour, min, sec to 0s
    end = pd.Timestamp.today(tz='US/Eastern').replace(hour=0, minute=0, second=0, microsecond=0)
    # replace NaT with tomorrow's date
    # gives copy warning but can't get rid of it...
    sp600_df['thru'].fillna(end + pd.DateOffset(days=1), inplace=True)
    nyse = mcal.get_calendar('NYSE')
    # gets all dates
    # date_range = mcal.date_range(start=start, end=end)
    # gets only dates valid for NYSE
    date_range = nyse.valid_days(start_date=start.date(), end_date=end.date())
    constituent_companies = OrderedDict()
    constituent_tickers = OrderedDict()
    lengths = []
    # TODO: multiprocessing to speed up
    for d in date_range:
        # if date is within stock's from and thru, add to list
        # stocks were removed on 'thru', so if it is the 'thru' date, then shouldn't be included
        # but stocks were added on 'from' date, so include stocks on 'from' date
        # use dataframe masking
        date_string = d.strftime('%Y-%m-%d')
        current_stocks = sp600_df[(sp600_df['from'] <= d) & (sp600_df['thru'] > d)]
        current_companies = current_stocks['co_conm']  # company names
        current_tickers = current_stocks['co_tic']  # company tickers
        constituent_companies[date_string] = current_companies
        constituent_tickers[date_string] = current_tickers
        lengths.append(current_tickers.shape[0])
    # look at number of constituents as a histogram; mostly 600 but a few above and below
    # pd.value_counts(lengths)
    # plt.hist(lengths)
    # plt.show()
    # TODO:
    # need to check that no tickers are used for multiple companies
    # get unique dates where changes were made
    unique_dates = set(sp600_df['from'].unique()) | set(sp600_df['thru'].unique())
    return constituent_companies, constituent_tickers, unique_dates
def get_latest_daily_date(source='barchart.com'):
    """Return the date of the most recent daily scrape file, or None."""
    # file names end in _YYYY-MM-DD.csv; pull the date from each
    files = glob.glob(FILEPATH + '{}/*.csv'.format(source))
    if not files:
        return None
    dates = [pd.to_datetime(f.split('/')[-1].split('_')[-1].split('.')[0])
             for f in files]
    return max(dates)
def get_latest_daily_date_constituents(index='QQQ'):
    """Return the date of the newest constituents scrape for *index*, or None."""
    files = glob.glob(CONSTITUENT_FILEPATH + '{}/*.csv'.format(index))
    if not files:
        return None
    return max(pd.to_datetime(f.split('/')[-1].split('_')[-1].split('.')[0])
               for f in files)
def get_latest_index_date(ticker='IJR'):
    """Return the date of the newest holdings file for an index fund, or None."""
    # SLY holdings are distributed as .xls rather than .csv
    extension = 'xls' if ticker == 'SLY' else 'csv'
    files = glob.glob(FILEPATH + 'index_funds/{}/*.{}'.format(ticker, extension))
    if not files:
        return None
    return max(pd.to_datetime(f.split('/')[-1].split('_')[-1].split('.')[0])
               for f in files)
def load_sp600_files(date='latest', source='barchart.com'):
    """
    loads data from files from investing.com
    https://www.investing.com/indices/s-p-600-components
    date should be a string, either 'latest' to use the latest available date, or
    a specific date like YYYY-mm-dd

    Loads the four per-day CSVs (price/performance/technical/fundamental),
    joins them on Symbol, and cleans percentage/abbreviated-number columns.
    """
    # TODO: deal with 0 bytes files
    folder = FILEPATH + '{}/'.format(source)
    dfs = []
    labels = ['price', 'performance', 'technical', 'fundamental']
    if date == 'latest':
        # NOTE(review): if no files exist this raises AttributeError on
        # .strftime(None) before the None check below can fire — confirm
        # and hoist the check if that path matters.
        file_date = get_latest_daily_date(source=source).strftime('%Y-%m-%d')
        if file_date is None:
            print('no files to load!')
            return None
    else:
        file_date = date
    for l in labels:
        filename = 'sp600_{}_{}.csv'.format(l, file_date)
        print(filename)
        if source == 'barchart.com':
            # catch errors with 0 bytes filesize
            if os.path.getsize(folder + filename) == 0:
                print('filesize is 0 for', filename, 'returning None')
                return None
            dfs.append(pd.read_csv(folder + filename, skipfooter=1))
        elif source == 'investing.com':
            dfs.append(pd.read_csv(folder + filename))
    # ensure the names and symbols are identical
    eq01 = dfs[0][['Name', 'Symbol']].equals(dfs[1][['Name', 'Symbol']])
    eq12 = dfs[1][['Name', 'Symbol']].equals(dfs[2][['Name', 'Symbol']])
    eq23 = dfs[2][['Name', 'Symbol']].equals(dfs[3][['Name', 'Symbol']])
    if eq01 and eq12 and eq23:
        print('all names/symbols are equal')
    else:
        print('WARNING: some names/symbols not equal')
    for d in dfs:
        d.set_index('Symbol', inplace=True)
        if source == 'barchart.com':
            # NOTE(review): `d = d[:-2]` rebinds the loop variable only and
            # does NOT modify the frame stored in dfs — confirm whether the
            # trailing rows were meant to be dropped (read_csv already uses
            # skipfooter=1 above).
            d = d[:-2]  # the last row has some info about barchart.com
    # remove 'Name' column from all but first df
    for d in dfs[1:]:
        d.drop('Name', axis=1, inplace=True)
        if source == 'barchart.com':
            if 'Last' in d.columns:
                d.drop('Last', axis=1, inplace=True)
    if source == 'investing.com':
        # add prefixes so 'Daily' is different for performance and technical dfs
        dfs[1].columns = ['perf ' + c for c in dfs[1].columns]
        dfs[2].columns = ['tech ' + c for c in dfs[2].columns]
    df = pd.concat(dfs, axis=1)
    # 'Time' column seems to be just year/month
    df.drop('Time', axis=1, inplace=True)
    # convert k to 1000, M to 1e6, and B to 1.9
    if source == 'barchart.com':
        # just need to rename the column, the data is not $K, just $
        df['Market Cap'] = df['Market Cap, $K']
        df.drop('Market Cap, $K', axis=1, inplace=True)
    elif source == 'investing.com':
        for c in ['Vol.', 'Average Vol. (3m)', 'Market Cap', 'Revenue']:
            df[c] = df[c].apply(lambda x: clean_abbreviations(x))
    # clean up % columns
    if source == 'barchart.com':
        # the dividend-yield column name has varied between scrape vintages
        if 'Div Yield(a)' in df.columns:
            cols = ['%Chg', 'Wtd Alpha', 'YTD %Chg', '1M %Chg', '3M %Chg', '52W %Chg', '20D Rel Str', '20D His Vol', 'Div Yield(a)']
        elif 'Div Yield(ttm)' in df.columns:
            cols = ['%Chg', 'Wtd Alpha', 'YTD %Chg', '1M %Chg', '3M %Chg', '52W %Chg', '20D Rel Str', '20D His Vol', 'Div Yield(ttm)']
        else:
            yield_col = [c for c in df.columns if 'Div Yield' in c]
            cols = ['%Chg', 'Wtd Alpha', 'YTD %Chg', '1M %Chg', '3M %Chg', '52W %Chg', '20D Rel Str', '20D His Vol'] + yield_col
    elif source == 'investing.com':
        cols = ['Chg. %', 'perf Daily', 'perf 1 Week', 'perf 1 Month', 'perf YTD', 'perf 1 Year', 'perf 3 Years']
    for c in cols:
        df[c] = df[c].apply(lambda x: clean_pcts(x))
    if source == 'investing.com':
        # maps technical indicators to numbers for sorting
        conversion_dict = {'Strong Buy': 2,
                           'Buy': 1,
                           'Neutral': 0,
                           'Sell': -1,
                           'Strong Sell': -2}
        for k, v in conversion_dict.items():
            for c in dfs[2].columns:
                df.at[df[c] == k, c] = v
    return df
def clean_pcts(x):
    """Convert a percent string like '+1.24%' to a fraction (0.0124).

    Handles investing.com sentinels: '-' (not enough data) and NaN map to
    np.nan; 'unch' (unchanged) maps to 0.0; float inputs pass through
    unchanged.
    """
    # if not enough data, will be '-' with investing.com
    if x == '-' or pd.isnull(x):
        return np.nan
    if x == 'unch':
        return 0.0
    # BUG FIX: was `type(x) == float`, which rejected float subclasses such
    # as np.float64 and then crashed below on .replace; isinstance accepts
    # them and preserves the original pass-through behavior for floats.
    if isinstance(x, float):
        return x
    return float(x.replace('+', '').replace('%', '')) / 100
def clean_abbreviations(x):
    """Convert abbreviated numbers ('1.5K', '2M', '3B') to plain ints.

    NaN inputs (a few Revenue entries) pass through as np.nan; strings
    without a suffix are converted directly with int().
    """
    # a few entries in Revenue were nan
    if pd.isnull(x):
        return np.nan
    for suffix, multiplier in (('K', 1e3), ('M', 1e6), ('B', 1e9)):
        if suffix in x:
            return int(float(x[:-1]) * multiplier)
    return int(x)
def get_current_smallest_mkt_cap(df, n=20):
    """Return the index labels of the *n* smallest stocks by 'Market Cap'.

    *df* must contain a 'Market Cap' column (e.g. from load_sp600_files,
    with barchart.com or WRDS as the constituent source).
    """
    by_cap = df.sort_values(by='Market Cap')
    smallest = by_cap.iloc[:n]
    return smallest.index
def load_ijr_holdings():
    """Load the latest IJR (iShares S&P 600 ETF) holdings CSV.

    Returns a DataFrame indexed by Ticker, restricted to equities, with
    numeric Shares/Market Value/Notional Value/Price columns; returns None
    implicitly when no files are present.
    """
    latest_date = get_latest_index_date(ticker='IJR')
    if latest_date is None:
        print('no files')
        return
    filename = FILEPATH + 'index_funds/IJR/IJR_holdings_' + latest_date.strftime('%Y-%m-%d') + '.csv'
    # the iShares export has a 10-line preamble before the header
    df = pd.read_csv(filename, skiprows=10)
    df = df[df['Asset Class'] == 'Equity']
    # strip thousands separators before casting to float
    for c in ['Shares', 'Market Value', 'Notional Value']:
        df[c] = df[c].apply(lambda x: x.replace(',', '')).astype(float)
    df['Price'] = df['Price'].astype(float)
    df.set_index('Ticker', inplace=True)
    return df
def load_sly_holdings():
    """Load the latest SLY (SPDR S&P 600 ETF) holdings spreadsheet.

    Returns a DataFrame indexed by Identifier with non-equity rows
    removed; returns None implicitly when no files are present.
    """
    latest_date = get_latest_index_date(ticker='SLY')
    if latest_date is None:
        print('no files')
        return
    filename = FILEPATH + 'index_funds/SLY/SLY_holdings_' + latest_date.strftime('%Y-%m-%d') + '.xls'
    # the SPDR export carries 3 header rows and an 11-row footer
    df = pd.read_excel(filename, skiprows=3, skipfooter=11)
    # remove non-equities
    df = df[df['Identifier'] != 'Unassigned']
    df.set_index('Identifier', inplace=True)
    return df
def load_vioo_holdings():
    """Load the latest VIOO (Vanguard S&P 600 ETF) holdings CSV.

    The Vanguard export has a 4-line preamble, stray unnamed columns, and
    a blank separator row before footnotes; all are stripped. Returns a
    DataFrame indexed by Ticker, or None implicitly when no files exist.
    """
    latest_date = get_latest_index_date(ticker='VIOO')
    if latest_date is None:
        print('no files')
        return
    filename = FILEPATH + 'index_funds/VIOO/VIOO_holdings_' + latest_date.strftime('%Y-%m-%d') + '.csv'
    df = pd.read_csv(filename, skiprows=4)
    df.drop(['Unnamed: 0', 'Unnamed: 10'], axis=1, inplace=True)
    # truncate at the first fully-empty row (everything after is footnotes)
    missing_row_idx = np.where(df.isna().sum(axis=1) == df.shape[1])[0][0]
    df = df.iloc[:missing_row_idx]
    df.drop('Security depository receipt type', axis=1, inplace=True)
    df['Shares'] = df['Shares'].apply(lambda x: x.replace(',', '')).astype(float)
    df['Market value'] = df['Market value'].apply(lambda x: x.replace('$', '').replace(',', '')).astype(float)
    df['% of fund*'] = df['% of fund*'].astype(float)
    # SEDOLs are exported as ="XXXXXXX"; strip the Excel guard characters
    df['SEDOL'] = df['SEDOL'].apply(lambda x: x.replace('=', '').replace('"', ''))
    df['Ticker'] = df['Ticker'].apply(lambda x: x.strip())
    df.set_index('Ticker', inplace=True)
    return df
# import sys
# sys.path.append('../stock_prediction/code')
# import dl_quandl_EOD as dlq
#
# all_stocks_dfs = dlq.load_stocks()
#
#
# # get market cap of each stock in index for each unique date
# # need to get more historical data from wrds
# market_caps = OrderedDict()
# unique_dates = sorted(pd.concat([sp600_df['from'], sp600_df['thru']]).unique())[:-1]
# for d in unique_dates:
# mcaps = []
# for ticker in current_tickers[d]:
# mcaps.append(all_stocks_dfs[ticker][''])
# market_caps[d] =
def load_barchart_constituents(date='latest', index='QQQ'):
    """Load a constituent list scraped from barchart.com for *index*.

    date: 'latest' to use the newest available scrape, or 'YYYY-mm-dd'.
    Returns a DataFrame indexed by Symbol with '% Holding' as float, or
    None when no files are available.

    BUG FIXES vs original:
    - the file path hard-coded 'QQQ'/'qqq' and ignored the `index`
      argument; it is now built from `index` (identical path for the
      default index='QQQ').
    - the None check ran after calling .strftime() on the result, so it
      could never fire; the check now precedes the strftime call.
    """
    if date == 'latest':
        latest = get_latest_daily_date_constituents(index=index)
        if latest is None:
            print('no files to load!')
            return None
        file_date = latest.strftime('%Y-%m-%d')
    else:
        file_date = date
    filename = CONSTITUENT_FILEPATH + '{}/{}_constituents_{}.csv'.format(
        index, index.lower(), file_date)
    df = pd.read_csv(filename)
    # last column says 'downloaded from...'
    df.drop(df.index[-1], inplace=True)
    df.set_index('Symbol', inplace=True)
    df['% Holding'] = df['% Holding'].apply(lambda x: x.replace('%', ''))
    df['% Holding'] = df['% Holding'].astype('float')
    return df
# Script entry point: intentionally a no-op; this module is used as a library.
if __name__ == '__main__':
    pass
def qqq_analysis():
    """
    looking at the correlation in daily movements between the stocks and the qqq index

    NOTE: currently a stub — it only loads the constituents and performs
    no analysis yet.
    """
    df = load_barchart_constituents()
def check_wrds_differences():
    """
    checks which tickers are missing from various ETFs and the barchart sp600 list

    Exploratory/REPL-style function: compares the WRDS constituent snapshot
    against IJR/SLY/VIOO holdings and the barchart scrape, then prints the
    smallest-cap names from the latest barchart data.
    """
    constituent_companies, constituent_tickers, unique_dates = get_historical_constituents_wrds()
    # # get list of tickers from latest WRDS data
    # 6-26-2018 was last time it was downloaded
    wrds_tickers = constituent_tickers['2018-06-26']
    wrds_set = set(wrds_tickers)
    ijr = load_ijr_holdings()
    sly = load_sly_holdings()
    vioo = load_vioo_holdings()
    ijr_set = set(ijr.index)
    sly_set = set(sly.index)
    vioo_set = set(vioo.index)
    # all currently have at least 3 differences -- ijr seems to differ the most
    # NOTE(review): these difference() results are computed and discarded —
    # they only have effect when stepped through interactively.
    ijr_set.difference(wrds_set)
    sly_set.difference(wrds_set)
    vioo_set.difference(wrds_set)
    df = load_sp600_files()
    current_set = set(df.index)
    current_set.difference(wrds_set)
    wrds_set.difference(current_set)
    print('latest constituents:')
    print(get_current_smallest_mkt_cap(df))
    # TODO:
    # see how often the bottom 20 companies have changed
    # need to get historical market caps first
    # for d in unique_dates:
    #     get_current_smallest_mkt_cap(df)
    # VIOO seems to be slower to remove companies that are not in the index;
    # companies that are not in the current set from barchart.com are only in vioo
    # overall, barchart seems to be a pretty good source
    # sorted_df = df.sort_values(by='Market Cap')
    # smallest = get_current_smallest_mkt_cap(df)
    # print(smallest)
| 35.163507 | 140 | 0.617157 |
aa818db0e7e9f7b792ae5dfcd2b8fcd61ec69b7f | 2,628 | py | Python | src/utils.py | a-lucic/focus | 26fba5f60c9f71a6b1287a4a557cd8316ad12527 | [
"MIT"
] | 12 | 2020-12-26T11:48:34.000Z | 2022-02-27T07:28:10.000Z | src/utils.py | a-lucic/focus | 26fba5f60c9f71a6b1287a4a557cd8316ad12527 | [
"MIT"
] | 2 | 2020-11-02T05:29:10.000Z | 2021-01-04T21:10:07.000Z | src/utils.py | a-lucic/focus | 26fba5f60c9f71a6b1287a4a557cd8316ad12527 | [
"MIT"
] | 4 | 2020-12-22T15:48:21.000Z | 2021-06-28T08:50:42.000Z | import tensorflow as tf
import numpy as np
from tensorflow.losses import Reduction
import os
import errno
def filter_hinge_loss(n_class, mask_vector, feat_input,
                      sigma, temperature, model_fn):
    """Evaluate model_fn's per-class loss only for rows selected by
    mask_vector, scattering the results back into an (n_input, n_class)
    array with zeros for masked-out rows.

    sigma / temperature may be scalars (shared across rows) or per-row
    arrays; per-row values are masked along with the input.

    BUG FIX: the original guards were
    ``type(sigma) != float or type(sigma) != int`` — always True, since no
    value has both types — so scalar sigma/temperature were incorrectly
    passed to tf.boolean_mask. Replaced with isinstance checks.
    """
    n_input = feat_input.shape[0]
    # nothing selected: short-circuit with an all-zero loss
    if not np.any(mask_vector):
        return np.zeros((n_input, n_class))
    filtered_input = tf.boolean_mask(feat_input, mask_vector)
    if not isinstance(sigma, (float, int)):
        sigma = tf.boolean_mask(sigma, mask_vector)
    if not isinstance(temperature, (float, int)):
        temperature = tf.boolean_mask(temperature, mask_vector)
    filtered_loss = model_fn(filtered_input, sigma, temperature)
    # scatter the filtered losses back to their original row positions
    indices = np.where(mask_vector)[0]
    zero_loss = np.zeros((n_input, n_class))
    hinge_loss = tf.tensor_scatter_nd_add(
        zero_loss,
        indices[:, None],
        filtered_loss,
    )
    return hinge_loss
def safe_euclidean(x, epsilon=10. ** -10, axis=-1):
    """Euclidean norm along *axis* with an epsilon added under the root
    (keeps gradients finite at zero)."""
    squared_sum = tf.reduce_sum(x ** 2, axis=axis)
    return (squared_sum + epsilon) ** 0.5


def true_euclidean(x, axis=-1):
    """Exact Euclidean norm along *axis* (no epsilon offset)."""
    squared_sum = tf.reduce_sum(x ** 2, axis=axis)
    return squared_sum ** 0.5
def safe_cosine(x1, x2, epsilon=10. ** -10):
    """Per-row cosine distance between x1 and x2, offset by epsilon and
    cast to float64."""
    x1_unit = tf.nn.l2_normalize(x1, dim=1)
    x2_unit = tf.nn.l2_normalize(x2, dim=1)
    dist = tf.losses.cosine_distance(
        x1_unit, x2_unit, axis=-1, reduction=Reduction.NONE) + epsilon
    dist = tf.squeeze(dist)
    return tf.cast(dist, tf.float64)


def true_cosine(x1: object, x2: object, axis=-1) -> object:
    """Exact per-row cosine distance (no epsilon), cast to float64."""
    x1_unit = tf.nn.l2_normalize(x1, dim=1)
    x2_unit = tf.nn.l2_normalize(x2, dim=1)
    dist = tf.losses.cosine_distance(
        x1_unit, x2_unit, axis=axis, reduction=Reduction.NONE)
    dist = tf.squeeze(dist)
    return tf.cast(dist, tf.float64)
def safe_l1(x, epsilon=10. ** -10, axis=1):
    """L1 norm along *axis* with an epsilon offset for numerical safety."""
    abs_sum = tf.reduce_sum(tf.abs(x), axis=axis)
    return abs_sum + epsilon


def true_l1(x, axis=1):
    """Exact L1 norm along *axis* (no epsilon offset)."""
    abs_sum = tf.reduce_sum(tf.abs(x), axis=axis)
    return abs_sum
def tf_cov(x):
    """Covariance matrix of x, treating rows as observations and columns
    as variables (population covariance: divides by N)."""
    col_means = tf.reduce_mean(x, axis=0, keep_dims=True)
    mean_outer = tf.matmul(tf.transpose(col_means), col_means)
    second_moment = tf.matmul(tf.transpose(x), x) / tf.cast(tf.shape(x)[0], tf.float64)
    return second_moment - mean_outer
def safe_mahal(x, inv_covar, epsilon=10. ** -10):
    """Per-row squared Mahalanobis distance, with epsilon added to x for
    numerical safety."""
    shifted = x + epsilon
    projected = tf.matmul(shifted, inv_covar)
    return tf.reduce_sum(tf.multiply(projected, shifted), axis=1)


def true_mahal(x, inv_covar):
    """Exact per-row squared Mahalanobis distance of x."""
    projected = tf.matmul(x, inv_covar)
    return tf.reduce_sum(tf.multiply(projected, x), axis=1)
def mkdir_p(path):
    """Create *path* and any missing parents, like ``mkdir -p``.

    Silently succeeds when the directory already exists; raises OSError
    (FileExistsError) when the path exists but is not a directory — the
    same behavior as the original EEXIST/isdir check, without the race
    between the check and the raise.
    """
    os.makedirs(path, exist_ok=True)
def safe_open(path, w):
    ''' Open "path" for writing, creating any parent directories as needed.'''
    parent = os.path.dirname(path)
    mkdir_p(parent)
    return open(path, w)
| 27.375 | 106 | 0.714612 |
3b86c0864cb27f827ac86839e34da296d0342b92 | 58,706 | py | Python | nova/virt/driver.py | zaina/nova | 181358c172d606b23c9cc14b58d677d911013c02 | [
"Apache-2.0"
] | null | null | null | nova/virt/driver.py | zaina/nova | 181358c172d606b23c9cc14b58d677d911013c02 | [
"Apache-2.0"
] | null | null | null | nova/virt/driver.py | zaina/nova | 181358c172d606b23c9cc14b58d677d911013c02 | [
"Apache-2.0"
] | null | null | null | # Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Driver base-classes:
(Beginning of) the contract that compute drivers must follow, and shared
types that support that contract
"""
import sys
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import importutils
from nova.i18n import _, _LE, _LI
from nova import utils
from nova.virt import event as virtevent
# Configuration options shared by all virt drivers: which driver class to
# load, plus a few global image/VIF behaviours.
driver_opts = [
    cfg.StrOpt('compute_driver',
               help='Driver to use for controlling virtualization. Options '
                    'include: libvirt.LibvirtDriver, xenapi.XenAPIDriver, '
                    'fake.FakeDriver, ironic.IronicDriver, '
                    'vmwareapi.VMwareVCDriver, hyperv.HyperVDriver'),
    cfg.StrOpt('default_ephemeral_format',
               help='The default format an ephemeral_volume will be '
                    'formatted with on creation.'),
    cfg.StrOpt('preallocate_images',
               default='none',
               choices=('none', 'space'),
               help='VM image preallocation mode: '
                    '"none" => no storage provisioning is done up front, '
                    '"space" => storage is fully allocated at instance start'),
    cfg.BoolOpt('use_cow_images',
                default=True,
                help='Whether to use cow images'),
    cfg.BoolOpt('vif_plugging_is_fatal',
                default=True,
                help="Fail instance boot if vif plugging fails"),
    cfg.IntOpt('vif_plugging_timeout',
               default=300,
               help='Number of seconds to wait for neutron vif plugging '
                    'events to arrive before continuing or failing (see '
                    'vif_plugging_is_fatal). If this is set to zero and '
                    'vif_plugging_is_fatal is False, events should not '
                    'be expected to arrive at all.'),
]
CONF = cfg.CONF
# Register the options on the global config object at import time.
CONF.register_opts(driver_opts)
# Module-level logger, named after this module per logging convention.
LOG = logging.getLogger(__name__)
def driver_dict_from_config(named_driver_config, *args, **kwargs):
    """Build a registry of driver instances from "<type>=<class path>" strings.

    Each entry in *named_driver_config* is split on the first '='; the class
    path is imported and instantiated with *args/**kwargs.

    :returns: dict mapping driver-type name -> driver instance
    """
    registry = {}
    for entry in named_driver_config:
        kind, _sep, cls_path = entry.partition('=')
        registry[kind] = importutils.import_class(cls_path)(*args, **kwargs)
    return registry
def get_block_device_info(instance, block_device_mapping):
    """Converts block device mappings for an instance to driver format.

    Virt drivers expect block device mapping to be presented in the format
    of a dict containing the following keys:

    root_device_name: device name of the root disk
    ephemerals: a (potentially empty) list of DriverEphemeralBlockDevice
                instances
    swap: An instance of DriverSwapBlockDevice or None
    block_device_mapping: a (potentially empty) list of
                          DriverVolumeBlockDevice or any of it's more
                          specialized subclasses.

    :param instance: nova.objects.instance.Instance the mapping belongs to
    :param block_device_mapping: list of block device mapping objects from
        which ephemerals, swap and volumes are extracted
    :returns: dict in the format described above
    """
    # Imported here rather than at module level -- presumably to avoid a
    # circular import between nova.virt and nova.virt.block_device; confirm
    # before hoisting to the top of the file.
    from nova.virt import block_device as virt_block_device
    block_device_info = {
        'root_device_name': instance.root_device_name,
        'ephemerals': virt_block_device.convert_ephemerals(
            block_device_mapping),
        'block_device_mapping':
            virt_block_device.convert_all_volumes(*block_device_mapping)
    }
    # convert_swap may yield several candidates; get_swap reduces them to the
    # single entry drivers expect (or None).
    swap_list = virt_block_device.convert_swap(block_device_mapping)
    block_device_info['swap'] = virt_block_device.get_swap(swap_list)
    return block_device_info
def block_device_info_get_root(block_device_info):
    """Return the root device name from *block_device_info*, or None.

    Accepts None (or any falsy value) in place of a dict.
    """
    return (block_device_info or {}).get('root_device_name')
def block_device_info_get_swap(block_device_info):
    """Return the swap entry, or a no-swap placeholder.

    The placeholder ({'device_name': None, 'swap_size': 0}) is returned when
    *block_device_info* is falsy or carries no (truthy) 'swap' entry.
    """
    info = block_device_info or {}
    swap = info.get('swap')
    if swap:
        return swap
    return {'device_name': None, 'swap_size': 0}
def swap_is_usable(swap):
    """Return True if *swap* describes a real, non-empty swap device.

    A usable swap entry is a truthy mapping with a non-empty 'device_name'
    and a strictly positive 'swap_size'.

    The original returned whatever falsy operand short-circuited the ``and``
    chain (None, False or 0); normalizing with bool() keeps truthiness
    behaviour for all callers while making the return type predictable.
    """
    return bool(swap and swap['device_name'] and swap['swap_size'] > 0)
def block_device_info_get_ephemerals(block_device_info):
    """Return the list of ephemeral disk entries (possibly empty).

    Accepts None (or any falsy value) in place of a dict.
    """
    return (block_device_info or {}).get('ephemerals') or []
def block_device_info_get_mapping(block_device_info):
    """Return the volume block-device-mapping list (possibly empty).

    Accepts None (or any falsy value) in place of a dict.
    """
    return (block_device_info or {}).get('block_device_mapping') or []
class ComputeDriver(object):
"""Base class for compute drivers.
The interface to this class talks in terms of 'instances' (Amazon EC2 and
internal Nova terminology), by which we mean 'running virtual machine'
(XenAPI terminology) or domain (Xen or libvirt terminology).
An instance has an ID, which is the identifier chosen by Nova to represent
the instance further up the stack. This is unfortunately also called a
'name' elsewhere. As far as this layer is concerned, 'instance ID' and
'instance name' are synonyms.
Note that the instance ID or name is not human-readable or
customer-controlled -- it's an internal ID chosen by Nova. At the
nova.virt layer, instances do not have human-readable names at all -- such
things are only known higher up the stack.
Most virtualization platforms will also have their own identity schemes,
to uniquely identify a VM or domain. These IDs must stay internal to the
platform-specific layer, and never escape the connection interface. The
platform-specific layer is responsible for keeping track of which instance
ID maps to which platform-specific ID, and vice versa.
Some methods here take an instance of nova.compute.service.Instance. This
is the data structure used by nova.compute to store details regarding an
instance, and pass them into this layer. This layer is responsible for
translating that generic data structure into terms that are specific to the
virtualization platform.
"""
capabilities = {
"has_imagecache": False,
"supports_recreate": False,
"supports_migrate_to_same_host": False
}
def __init__(self, virtapi):
    """Initialize the driver.

    :param virtapi: handle back into the compute service (VirtAPI); stored
        for use by driver subclasses.
    """
    self.virtapi = virtapi
    # Event-callback slot; stays None until assigned elsewhere (the
    # registration hook is not visible in this chunk).
    self._compute_event_callback = None
def init_host(self, host):
"""Initialize anything that is necessary for the driver to function,
including catching up with currently running VM's on the given host.
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def cleanup_host(self, host):
"""Clean up anything that is necessary for the driver gracefully stop,
including ending remote sessions. This is optional.
"""
pass
def get_info(self, instance):
    """Get the current status of an instance.

    :param instance: nova.objects.instance.Instance object
    :returns: an InstanceInfo object describing the instance's current state
    """
    # TODO(Vek): Need to pass context in for access to auth_token
    raise NotImplementedError()
def get_num_instances(self):
"""Return the total number of virtual machines.
Return the number of virtual machines that the hypervisor knows
about.
.. note::
This implementation works for all drivers, but it is
not particularly efficient. Maintainers of the virt drivers are
encouraged to override this method with something more
efficient.
"""
return len(self.list_instances())
def instance_exists(self, instance):
"""Checks existence of an instance on the host.
:param instance: The instance to lookup
Returns True if an instance with the supplied ID exists on
the host, False otherwise.
.. note::
This implementation works for all drivers, but it is
not particularly efficient. Maintainers of the virt drivers are
encouraged to override this method with something more
efficient.
"""
try:
return instance.uuid in self.list_instance_uuids()
except NotImplementedError:
return instance.name in self.list_instances()
def estimate_instance_overhead(self, instance_info):
    """Estimate the virtualization overhead required to build an instance
    of the given flavor.

    The base implementation assumes no overhead; drivers override this
    when per-instance overhead calculations are desired.

    :param instance_info: Instance/flavor to calculate overhead for.
    :returns: Dict of estimated overhead values.
    """
    no_overhead = {'memory_mb': 0}
    return no_overhead
def list_instances(self):
"""Return the names of all the instances known to the virtualization
layer, as a list.
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def list_instance_uuids(self):
"""Return the UUIDS of all the instances known to the virtualization
layer, as a list.
"""
raise NotImplementedError()
def rebuild(self, context, instance, image_meta, injected_files,
admin_password, bdms, detach_block_devices,
attach_block_devices, network_info=None,
recreate=False, block_device_info=None,
preserve_ephemeral=False):
"""Destroy and re-make this instance.
A 'rebuild' effectively purges all existing data from the system and
remakes the VM with given 'metadata' and 'personalities'.
This base class method shuts down the VM, detaches all block devices,
then spins up the new VM afterwards. It may be overridden by
hypervisors that need to - e.g. for optimisations, or when the 'VM'
is actually proxied and needs to be held across the shutdown + spin
up steps.
:param context: security context
:param instance: nova.objects.instance.Instance
This function should use the data there to guide
the creation of the new instance.
:param image_meta: image object returned by nova.image.glance that
defines the image from which to boot this instance
:param injected_files: User files to inject into instance.
:param admin_password: Administrator password to set in instance.
:param bdms: block-device-mappings to use for rebuild
:param detach_block_devices: function to detach block devices. See
nova.compute.manager.ComputeManager:_rebuild_default_impl for
usage.
:param attach_block_devices: function to attach block devices. See
nova.compute.manager.ComputeManager:_rebuild_default_impl for
usage.
:param network_info:
:py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
:param recreate: True if the instance is being recreated on a new
hypervisor - all the cleanup of old state is skipped.
:param block_device_info: Information about block devices to be
attached to the instance.
:param preserve_ephemeral: True if the default ephemeral storage
partition must be preserved on rebuild
"""
raise NotImplementedError()
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
"""Create a new instance/VM/domain on the virtualization platform.
Once this successfully completes, the instance should be
running (power_state.RUNNING).
If this fails, any partial instance should be completely
cleaned up, and the virtualization platform should be in the state
that it was before this call began.
:param context: security context
:param instance: nova.objects.instance.Instance
This function should use the data there to guide
the creation of the new instance.
:param image_meta: image object returned by nova.image.glance that
defines the image from which to boot this instance
:param injected_files: User files to inject into instance.
:param admin_password: Administrator password to set in instance.
:param network_info:
:py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
:param block_device_info: Information about block devices to be
attached to the instance.
"""
raise NotImplementedError()
def destroy(self, context, instance, network_info, block_device_info=None,
destroy_disks=True, migrate_data=None):
"""Destroy the specified instance from the Hypervisor.
If the instance is not found (for example if networking failed), this
function should still succeed. It's probably a good idea to log a
warning in that case.
:param context: security context
:param instance: Instance object as returned by DB layer.
:param network_info:
:py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
:param block_device_info: Information about block devices that should
be detached from the instance.
:param destroy_disks: Indicates if disks should be destroyed
:param migrate_data: implementation specific params
"""
raise NotImplementedError()
def cleanup(self, context, instance, network_info, block_device_info=None,
destroy_disks=True, migrate_data=None, destroy_vifs=True):
"""Cleanup the instance resources .
Instance should have been destroyed from the Hypervisor before calling
this method.
:param context: security context
:param instance: Instance object as returned by DB layer.
:param network_info:
:py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
:param block_device_info: Information about block devices that should
be detached from the instance.
:param destroy_disks: Indicates if disks should be destroyed
:param migrate_data: implementation specific params
"""
raise NotImplementedError()
def reboot(self, context, instance, network_info, reboot_type,
block_device_info=None, bad_volumes_callback=None):
"""Reboot the specified instance.
After this is called successfully, the instance's state
goes back to power_state.RUNNING. The virtualization
platform should ensure that the reboot action has completed
successfully even in cases in which the underlying domain/vm
is paused or halted/stopped.
:param instance: nova.objects.instance.Instance
:param network_info:
:py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
:param reboot_type: Either a HARD or SOFT reboot
:param block_device_info: Info pertaining to attached volumes
:param bad_volumes_callback: Function to handle any bad volumes
encountered
"""
raise NotImplementedError()
def get_console_pool_info(self, console_type):
    """Return connection information for a console pool of the given type.

    :param console_type: type of console pool to look up
    """
    # TODO(Vek): Need to pass context in for access to auth_token
    raise NotImplementedError()
def get_console_output(self, context, instance):
"""Get console output for an instance
:param context: security context
:param instance: nova.objects.instance.Instance
"""
raise NotImplementedError()
def get_vnc_console(self, context, instance):
"""Get connection info for a vnc console.
:param context: security context
:param instance: nova.objects.instance.Instance
:returns an instance of console.type.ConsoleVNC
"""
raise NotImplementedError()
def get_spice_console(self, context, instance):
"""Get connection info for a spice console.
:param context: security context
:param instance: nova.objects.instance.Instance
:returns an instance of console.type.ConsoleSpice
"""
raise NotImplementedError()
def get_rdp_console(self, context, instance):
"""Get connection info for a rdp console.
:param context: security context
:param instance: nova.objects.instance.Instance
:returns an instance of console.type.ConsoleRDP
"""
raise NotImplementedError()
def get_serial_console(self, context, instance):
"""Get connection info for a serial console.
:param context: security context
:param instance: nova.objects.instance.Instance
:returns an instance of console.type.ConsoleSerial
"""
raise NotImplementedError()
def get_mks_console(self, context, instance):
"""Get connection info for a MKS console.
:param context: security context
:param instance: nova.objects.instance.Instance
:returns an instance of console.type.ConsoleMKS
"""
raise NotImplementedError()
def get_diagnostics(self, instance):
"""Return data about VM diagnostics.
:param instance: nova.objects.instance.Instance
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def get_instance_diagnostics(self, instance):
"""Return data about VM diagnostics.
:param instance: nova.objects.instance.Instance
"""
raise NotImplementedError()
def get_all_bw_counters(self, instances):
"""Return bandwidth usage counters for each interface on each
running VM.
:param instances: nova.objects.instance.InstanceList
"""
raise NotImplementedError()
def get_all_volume_usage(self, context, compute_host_bdms):
"""Return usage info for volumes attached to vms on
a given host.-
"""
raise NotImplementedError()
def get_host_ip_addr(self):
"""Retrieves the IP address of the dom0
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def attach_volume(self, context, connection_info, instance, mountpoint,
disk_bus=None, device_type=None, encryption=None):
"""Attach the disk to the instance at mountpoint using info."""
raise NotImplementedError()
def detach_volume(self, connection_info, instance, mountpoint,
encryption=None):
"""Detach the disk attached to the instance."""
raise NotImplementedError()
def swap_volume(self, old_connection_info, new_connection_info,
instance, mountpoint, resize_to):
"""Replace the disk attached to the instance.
:param instance: nova.objects.instance.Instance
:param resize_to: This parameter is used to indicate the new volume
size when the new volume lager than old volume.
And the units is Gigabyte.
"""
raise NotImplementedError()
def attach_interface(self, instance, image_meta, vif):
"""Attach an interface to the instance.
:param instance: nova.objects.instance.Instance
"""
raise NotImplementedError()
def detach_interface(self, instance, vif):
"""Detach an interface from the instance.
:param instance: nova.objects.instance.Instance
"""
raise NotImplementedError()
def migrate_disk_and_power_off(self, context, instance, dest,
flavor, network_info,
block_device_info=None,
timeout=0, retry_interval=0):
"""Transfers the disk of a running instance in multiple phases, turning
off the instance before the end.
:param instance: nova.objects.instance.Instance
:param timeout: time to wait for GuestOS to shutdown
:param retry_interval: How often to signal guest while
waiting for it to shutdown
"""
raise NotImplementedError()
def snapshot(self, context, instance, image_id, update_task_state):
"""Snapshots the specified instance.
:param context: security context
:param instance: nova.objects.instance.Instance
:param image_id: Reference to a pre-created image that will
hold the snapshot.
"""
raise NotImplementedError()
def post_interrupted_snapshot_cleanup(self, context, instance):
"""Cleans up any resources left after an interrupted snapshot.
:param context: security context
:param instance: nova.objects.instance.Instance
"""
pass
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance,
block_device_info=None, power_on=True):
"""Completes a resize.
:param context: the context for the migration/resize
:param migration: the migrate/resize information
:param instance: nova.objects.instance.Instance being migrated/resized
:param disk_info: the newly transferred disk information
:param network_info:
:py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
:param image_meta: image object returned by nova.image.glance that
defines the image from which this instance
was created
:param resize_instance: True if the instance is being resized,
False otherwise
:param block_device_info: instance volume block device info
:param power_on: True if the instance should be powered on, False
otherwise
"""
raise NotImplementedError()
def confirm_migration(self, migration, instance, network_info):
"""Confirms a resize, destroying the source VM.
:param instance: nova.objects.instance.Instance
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def finish_revert_migration(self, context, instance, network_info,
block_device_info=None, power_on=True):
"""Finish reverting a resize.
:param context: the context for the finish_revert_migration
:param instance: nova.objects.instance.Instance being migrated/resized
:param network_info:
:py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
:param block_device_info: instance volume block device info
:param power_on: True if the instance should be powered on, False
otherwise
"""
raise NotImplementedError()
def pause(self, instance):
"""Pause the specified instance.
:param instance: nova.objects.instance.Instance
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def unpause(self, instance):
"""Unpause paused VM instance.
:param instance: nova.objects.instance.Instance
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def suspend(self, context, instance):
"""suspend the specified instance.
:param context: the context for the suspend
:param instance: nova.objects.instance.Instance
"""
raise NotImplementedError()
def resume(self, context, instance, network_info, block_device_info=None):
"""resume the specified instance.
:param context: the context for the resume
:param instance: nova.objects.instance.Instance being resumed
:param network_info:
:py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
:param block_device_info: instance volume block device info
"""
raise NotImplementedError()
def resume_state_on_host_boot(self, context, instance, network_info,
block_device_info=None):
"""resume guest state when a host is booted.
:param instance: nova.objects.instance.Instance
"""
raise NotImplementedError()
def rescue(self, context, instance, network_info, image_meta,
rescue_password):
"""Rescue the specified instance.
:param instance: nova.objects.instance.Instance
"""
raise NotImplementedError()
def set_bootable(self, instance, is_bootable):
"""Set the ability to power on/off an instance.
:param instance: nova.objects.instance.Instance
"""
raise NotImplementedError()
def unrescue(self, instance, network_info):
"""Unrescue the specified instance.
:param instance: nova.objects.instance.Instance
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def power_off(self, instance, timeout=0, retry_interval=0):
"""Power off the specified instance.
:param instance: nova.objects.instance.Instance
:param timeout: time to wait for GuestOS to shutdown
:param retry_interval: How often to signal guest while
waiting for it to shutdown
"""
raise NotImplementedError()
def power_on(self, context, instance, network_info,
block_device_info=None):
"""Power on the specified instance.
:param instance: nova.objects.instance.Instance
"""
raise NotImplementedError()
def soft_delete(self, instance):
"""Soft delete the specified instance.
:param instance: nova.objects.instance.Instance
"""
raise NotImplementedError()
def restore(self, instance):
"""Restore the specified instance.
:param instance: nova.objects.instance.Instance
"""
raise NotImplementedError()
def get_available_resource(self, nodename):
"""Retrieve resource information.
This method is called when nova-compute launches, and
as part of a periodic task that records the results in the DB.
:param nodename:
node which the caller want to get resources from
a driver that manages only one node can safely ignore this
:returns: Dictionary describing resources
"""
raise NotImplementedError()
def pre_live_migration(self, context, instance, block_device_info,
network_info, disk_info, migrate_data=None):
"""Prepare an instance for live migration
:param context: security context
:param instance: nova.objects.instance.Instance object
:param block_device_info: instance block device information
:param network_info: instance network information
:param disk_info: instance disk information
:param migrate_data: implementation specific data dict.
"""
raise NotImplementedError()
def live_migration(self, context, instance, dest,
post_method, recover_method, block_migration=False,
migrate_data=None):
"""Live migration of an instance to another host.
:param context: security context
:param instance:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:param dest: destination host
:param post_method:
post operation method.
expected nova.compute.manager._post_live_migration.
:param recover_method:
recovery method when any exception occurs.
expected nova.compute.manager._rollback_live_migration.
:param block_migration: if true, migrate VM disk.
:param migrate_data: implementation specific params.
"""
raise NotImplementedError()
def rollback_live_migration_at_destination(self, context, instance,
network_info,
block_device_info,
destroy_disks=True,
migrate_data=None):
"""Clean up destination node after a failed live migration.
:param context: security context
:param instance: instance object that was being migrated
:param network_info: instance network information
:param block_device_info: instance block device information
:param destroy_disks:
if true, destroy disks at destination during cleanup
:param migrate_data: implementation specific params
"""
raise NotImplementedError()
def post_live_migration(self, context, instance, block_device_info,
migrate_data=None):
"""Post operation of live migration at source host.
:param context: security context
:instance: instance object that was migrated
:block_device_info: instance block device information
:param migrate_data: if not None, it is a dict which has data
"""
pass
def post_live_migration_at_source(self, context, instance, network_info):
"""Unplug VIFs from networks at source.
:param context: security context
:param instance: instance object reference
:param network_info: instance network information
"""
raise NotImplementedError(_("Hypervisor driver does not support "
"post_live_migration_at_source method"))
def post_live_migration_at_destination(self, context, instance,
network_info,
block_migration=False,
block_device_info=None):
"""Post operation of live migration at destination host.
:param context: security context
:param instance: instance object that is migrated
:param network_info: instance network information
:param block_migration: if true, post operation of block_migration.
"""
raise NotImplementedError()
def check_instance_shared_storage_local(self, context, instance):
"""Check if instance files located on shared storage.
This runs check on the destination host, and then calls
back to the source host to check the results.
:param context: security context
:param instance: nova.objects.instance.Instance object
"""
raise NotImplementedError()
def check_instance_shared_storage_remote(self, context, data):
"""Check if instance files located on shared storage.
:param context: security context
:param data: result of check_instance_shared_storage_local
"""
raise NotImplementedError()
def check_instance_shared_storage_cleanup(self, context, data):
"""Do cleanup on host after check_instance_shared_storage calls
:param context: security context
:param data: result of check_instance_shared_storage_local
"""
pass
def check_can_live_migrate_destination(self, context, instance,
src_compute_info, dst_compute_info,
block_migration=False,
disk_over_commit=False):
"""Check if it is possible to execute live migration.
This runs checks on the destination host, and then calls
back to the source host to check the results.
:param context: security context
:param instance: nova.db.sqlalchemy.models.Instance
:param src_compute_info: Info about the sending machine
:param dst_compute_info: Info about the receiving machine
:param block_migration: if true, prepare for block migration
:param disk_over_commit: if true, allow disk over commit
:returns: a dict containing migration info (hypervisor-dependent)
"""
raise NotImplementedError()
def check_can_live_migrate_destination_cleanup(self, context,
dest_check_data):
"""Do required cleanup on dest host after check_can_live_migrate calls
:param context: security context
:param dest_check_data: result of check_can_live_migrate_destination
"""
raise NotImplementedError()
def check_can_live_migrate_source(self, context, instance,
dest_check_data, block_device_info=None):
"""Check if it is possible to execute live migration.
This checks if the live migration can succeed, based on the
results from check_can_live_migrate_destination.
:param context: security context
:param instance: nova.db.sqlalchemy.models.Instance
:param dest_check_data: result of check_can_live_migrate_destination
:param block_device_info: result of _get_instance_block_device_info
:returns: a dict containing migration info (hypervisor-dependent)
"""
raise NotImplementedError()
def get_instance_disk_info(self, instance,
block_device_info=None):
"""Retrieve information about actual disk sizes of an instance.
:param instance: nova.objects.Instance
:param block_device_info:
Optional; Can be used to filter out devices which are
actually volumes.
:return:
json strings with below format::
"[{'path':'disk',
'type':'raw',
'virt_disk_size':'10737418240',
'backing_file':'backing_file',
'disk_size':'83886080'
'over_committed_disk_size':'10737418240'},
...]"
"""
raise NotImplementedError()
def refresh_security_group_rules(self, security_group_id):
"""This method is called after a change to security groups.
All security groups and their associated rules live in the datastore,
and calling this method should apply the updated rules to instances
running the specified security group.
An error should be raised if the operation cannot complete.
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def refresh_security_group_members(self, security_group_id):
"""This method is called when a security group is added to an instance.
This message is sent to the virtualization drivers on hosts that are
running an instance that belongs to a security group that has a rule
that references the security group identified by `security_group_id`.
It is the responsibility of this method to make sure any rules
that authorize traffic flow with members of the security group are
updated and any new members can communicate, and any removed members
cannot.
Scenario:
* we are running on host 'H0' and we have an instance 'i-0'.
* instance 'i-0' is a member of security group 'speaks-b'
* group 'speaks-b' has an ingress rule that authorizes group 'b'
* another host 'H1' runs an instance 'i-1'
* instance 'i-1' is a member of security group 'b'
When 'i-1' launches or terminates we will receive the message
to update members of group 'b', at which time we will make
any changes needed to the rules for instance 'i-0' to allow
or deny traffic coming from 'i-1', depending on if it is being
added or removed from the group.
In this scenario, 'i-1' could just as easily have been running on our
host 'H0' and this method would still have been called. The point was
that this method isn't called on the host where instances of that
group are running (as is the case with
:py:meth:`refresh_security_group_rules`) but is called where references
are made to authorizing those instances.
An error should be raised if the operation cannot complete.
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def refresh_provider_fw_rules(self):
"""This triggers a firewall update based on database changes.
When this is called, rules have either been added or removed from the
datastore. You can retrieve rules with
:py:meth:`nova.db.provider_fw_rule_get_all`.
Provider rules take precedence over security group rules. If an IP
would be allowed by a security group ingress rule, but blocked by
a provider rule, then packets from the IP are dropped. This includes
intra-project traffic in the case of the allow_project_net_traffic
flag for the libvirt-derived classes.
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def refresh_instance_security_rules(self, instance):
"""Refresh security group rules
Gets called when an instance gets added to or removed from
the security group the instance is a member of or if the
group gains or loses a rule.
"""
raise NotImplementedError()
def reset_network(self, instance):
"""reset networking for specified instance."""
# TODO(Vek): Need to pass context in for access to auth_token
pass
def ensure_filtering_rules_for_instance(self, instance, network_info):
"""Setting up filtering rules and waiting for its completion.
To migrate an instance, filtering rules to hypervisors
and firewalls are inevitable on destination host.
( Waiting only for filtering rules to hypervisor,
since filtering rules to firewall rules can be set faster).
Concretely, the below method must be called.
- setup_basic_filtering (for nova-basic, etc.)
- prepare_instance_filter(for nova-instance-instance-xxx, etc.)
to_xml may have to be called since it defines PROJNET, PROJMASK.
but libvirt migrates those value through migrateToURI(),
so , no need to be called.
Don't use thread for this method since migration should
not be started when setting-up filtering rules operations
are not completed.
:param instance: nova.objects.instance.Instance object
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def filter_defer_apply_on(self):
"""Defer application of IPTables rules."""
pass
def filter_defer_apply_off(self):
"""Turn off deferral of IPTables rules and apply the rules now."""
pass
def unfilter_instance(self, instance, network_info):
"""Stop filtering instance."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def set_admin_password(self, instance, new_pass):
"""Set the root password on the specified instance.
:param instance: nova.objects.instance.Instance
:param new_pass: the new password
"""
raise NotImplementedError()
def inject_file(self, instance, b64_path, b64_contents):
"""Writes a file on the specified instance.
The first parameter is an instance of nova.compute.service.Instance,
and so the instance is being specified as instance.name. The second
parameter is the base64-encoded path to which the file is to be
written on the instance; the third is the contents of the file, also
base64-encoded.
NOTE(russellb) This method is deprecated and will be removed once it
can be removed from nova.compute.manager.
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def change_instance_metadata(self, context, instance, diff):
"""Applies a diff to the instance metadata.
This is an optional driver method which is used to publish
changes to the instance's metadata to the hypervisor. If the
hypervisor has no means of publishing the instance metadata to
the instance, then this method should not be implemented.
:param context: security context
:param instance: nova.objects.instance.Instance
"""
pass
def inject_network_info(self, instance, nw_info):
"""inject network info for specified instance."""
# TODO(Vek): Need to pass context in for access to auth_token
pass
def poll_rebooting_instances(self, timeout, instances):
"""Poll for rebooting instances
:param timeout: the currently configured timeout for considering
rebooting instances to be stuck
:param instances: instances that have been in rebooting state
longer than the configured timeout
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def host_power_action(self, action):
"""Reboots, shuts down or powers up the host."""
raise NotImplementedError()
def host_maintenance_mode(self, host, mode):
"""Start/Stop host maintenance window. On start, it triggers
guest VMs evacuation.
"""
raise NotImplementedError()
def set_host_enabled(self, enabled):
"""Sets the specified host's ability to accept new instances."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def get_host_uptime(self):
"""Returns the result of calling "uptime" on the target host."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def plug_vifs(self, instance, network_info):
"""Plug VIFs into networks.
:param instance: nova.objects.instance.Instance
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def unplug_vifs(self, instance, network_info):
"""Unplug VIFs from networks.
:param instance: nova.objects.instance.Instance
"""
raise NotImplementedError()
def get_host_cpu_stats(self):
"""Get the currently known host CPU stats.
:returns: a dict containing the CPU stat info, eg:
| {'kernel': kern,
| 'idle': idle,
| 'user': user,
| 'iowait': wait,
| 'frequency': freq},
where kern and user indicate the cumulative CPU time
(nanoseconds) spent by kernel and user processes
respectively, idle indicates the cumulative idle CPU time
(nanoseconds), wait indicates the cumulative I/O wait CPU
time (nanoseconds), since the host is booting up; freq
indicates the current CPU frequency (MHz). All values are
long integers.
"""
raise NotImplementedError()
def block_stats(self, instance, disk_id):
"""Return performance counters associated with the given disk_id on the
given instance. These are returned as [rd_req, rd_bytes, wr_req,
wr_bytes, errs], where rd indicates read, wr indicates write, req is
the total number of I/O requests made, bytes is the total number of
bytes transferred, and errs is the number of requests held up due to a
full pipeline.
All counters are long integers.
This method is optional. On some platforms (e.g. XenAPI) performance
statistics can be retrieved directly in aggregate form, without Nova
having to do the aggregation. On those platforms, this method is
unused.
Note that this function takes an instance ID.
"""
raise NotImplementedError()
def deallocate_networks_on_reschedule(self, instance):
"""Does the driver want networks deallocated on reschedule?"""
return False
def macs_for_instance(self, instance):
"""What MAC addresses must this instance have?
Some hypervisors (such as bare metal) cannot do freeform virtualisation
of MAC addresses. This method allows drivers to return a set of MAC
addresses that the instance is to have. allocate_for_instance will take
this into consideration when provisioning networking for the instance.
Mapping of MAC addresses to actual networks (or permitting them to be
freeform) is up to the network implementation layer. For instance,
with openflow switches, fixed MAC addresses can still be virtualised
onto any L2 domain, with arbitrary VLANs etc, but regular switches
require pre-configured MAC->network mappings that will match the
actual configuration.
Most hypervisors can use the default implementation which returns None.
Hypervisors with MAC limits should return a set of MAC addresses, which
will be supplied to the allocate_for_instance call by the compute
manager, and it is up to that call to ensure that all assigned network
details are compatible with the set of MAC addresses.
This is called during spawn_instance by the compute manager.
:return: None, or a set of MAC ids (e.g. set(['12:34:56:78:90:ab'])).
None means 'no constraints', a set means 'these and only these
MAC addresses'.
"""
return None
def dhcp_options_for_instance(self, instance):
"""Get DHCP options for this instance.
Some hypervisors (such as bare metal) require that instances boot from
the network, and manage their own TFTP service. This requires passing
the appropriate options out to the DHCP service. Most hypervisors can
use the default implementation which returns None.
This is called during spawn_instance by the compute manager.
Note that the format of the return value is specific to Quantum
client API.
:return: None, or a set of DHCP options, eg:
| [{'opt_name': 'bootfile-name',
| 'opt_value': '/tftpboot/path/to/config'},
| {'opt_name': 'server-ip-address',
| 'opt_value': '1.2.3.4'},
| {'opt_name': 'tftp-server',
| 'opt_value': '1.2.3.4'}
| ]
"""
return None
def manage_image_cache(self, context, all_instances):
"""Manage the driver's local image cache.
Some drivers chose to cache images for instances on disk. This method
is an opportunity to do management of that cache which isn't directly
related to other calls into the driver. The prime example is to clean
the cache and remove images which are no longer of interest.
:param all_instances: nova.objects.instance.InstanceList
"""
pass
def add_to_aggregate(self, context, aggregate, host, **kwargs):
"""Add a compute host to an aggregate."""
# NOTE(jogo) Currently only used for XenAPI-Pool
raise NotImplementedError()
def remove_from_aggregate(self, context, aggregate, host, **kwargs):
"""Remove a compute host from an aggregate."""
raise NotImplementedError()
def undo_aggregate_operation(self, context, op, aggregate,
host, set_error=True):
"""Undo for Resource Pools."""
raise NotImplementedError()
def get_volume_connector(self, instance):
"""Get connector information for the instance for attaching to volumes.
Connector information is a dictionary representing the ip of the
machine that will be making the connection, the name of the iscsi
initiator and the hostname of the machine as follows::
{
'ip': ip,
'initiator': initiator,
'host': hostname
}
"""
raise NotImplementedError()
def get_available_nodes(self, refresh=False):
"""Returns nodenames of all nodes managed by the compute service.
This method is for multi compute-nodes support. If a driver supports
multi compute-nodes, this method returns a list of nodenames managed
by the service. Otherwise, this method should return
[hypervisor_hostname].
"""
raise NotImplementedError()
def node_is_available(self, nodename):
"""Return whether this compute service manages a particular node."""
if nodename in self.get_available_nodes():
return True
# Refresh and check again.
return nodename in self.get_available_nodes(refresh=True)
def get_per_instance_usage(self):
"""Get information about instance resource usage.
:returns: dict of nova uuid => dict of usage info
"""
return {}
def instance_on_disk(self, instance):
"""Checks access of instance files on the host.
:param instance: nova.objects.instance.Instance to lookup
Returns True if files of an instance with the supplied ID accessible on
the host, False otherwise.
.. note::
Used in rebuild for HA implementation and required for validation
of access to instance shared disk files
"""
return False
def register_event_listener(self, callback):
"""Register a callback to receive events.
Register a callback to receive asynchronous event
notifications from hypervisors. The callback will
be invoked with a single parameter, which will be
an instance of the nova.virt.event.Event class.
"""
self._compute_event_callback = callback
def emit_event(self, event):
"""Dispatches an event to the compute manager.
Invokes the event callback registered by the
compute manager to dispatch the event. This
must only be invoked from a green thread.
"""
if not self._compute_event_callback:
LOG.debug("Discarding event %s", str(event))
return
if not isinstance(event, virtevent.Event):
raise ValueError(
_("Event must be an instance of nova.virt.event.Event"))
try:
LOG.debug("Emitting event %s", str(event))
self._compute_event_callback(event)
except Exception as ex:
LOG.error(_LE("Exception dispatching event %(event)s: %(ex)s"),
{'event': event, 'ex': ex})
def delete_instance_files(self, instance):
"""Delete any lingering instance files for an instance.
:param instance: nova.objects.instance.Instance
:returns: True if the instance was deleted from disk, False otherwise.
"""
return True
@property
def need_legacy_block_device_info(self):
"""Tell the caller if the driver requires legacy block device info.
Tell the caller whether we expect the legacy format of block
device info to be passed in to methods that expect it.
"""
return True
def volume_snapshot_create(self, context, instance, volume_id,
create_info):
"""Snapshots volumes attached to a specified instance.
:param context: request context
:param instance: nova.objects.instance.Instance that has the volume
attached
:param volume_id: Volume to be snapshotted
:param create_info: The data needed for nova to be able to attach
to the volume. This is the same data format returned by
Cinder's initialize_connection() API call. In the case of
doing a snapshot, it is the image file Cinder expects to be
used as the active disk after the snapshot operation has
completed. There may be other data included as well that is
needed for creating the snapshot.
"""
raise NotImplementedError()
def volume_snapshot_delete(self, context, instance, volume_id,
snapshot_id, delete_info):
"""Snapshots volumes attached to a specified instance.
:param context: request context
:param instance: nova.objects.instance.Instance that has the volume
attached
:param volume_id: Attached volume associated with the snapshot
:param snapshot_id: The snapshot to delete.
:param delete_info: Volume backend technology specific data needed to
be able to complete the snapshot. For example, in the case of
qcow2 backed snapshots, this would include the file being
merged, and the file being merged into (if appropriate).
"""
raise NotImplementedError()
def default_root_device_name(self, instance, image_meta, root_bdm):
"""Provide a default root device name for the driver."""
raise NotImplementedError()
def default_device_names_for_instance(self, instance, root_device_name,
*block_device_lists):
"""Default the missing device names in the block device mapping."""
raise NotImplementedError()
def get_device_name_for_instance(self, instance,
bdms, block_device_obj):
"""Get the next device name based on the block device mapping.
:param instance: nova.objects.instance.Instance that volume is
requesting a device name
:param bdms: a nova.objects.BlockDeviceMappingList for the instance
:param block_device_obj: A nova.objects.BlockDeviceMapping instance
with all info about the requested block
device. device_name does not need to be set,
and should be decided by the driver
implementation if not set.
:returns: The chosen device name.
"""
raise NotImplementedError()
def is_supported_fs_format(self, fs_type):
"""Check whether the file format is supported by this driver
:param fs_type: the file system type to be checked,
the validate values are defined at disk API module.
"""
# NOTE(jichenjc): Return False here so that every hypervisor
# need to define their supported file system
# type and implement this function at their
# virt layer.
return False
def quiesce(self, context, instance, image_meta):
"""Quiesce the specified instance to prepare for snapshots.
If the specified instance doesn't support quiescing,
InstanceQuiesceNotSupported is raised. When it fails to quiesce by
other errors (e.g. agent timeout), NovaException is raised.
:param context: request context
:param instance: nova.objects.instance.Instance to be quiesced
:param image_meta: image object returned by nova.image.glance that
defines the image from which this instance
was created
"""
raise NotImplementedError()
def unquiesce(self, context, instance, image_meta):
"""Unquiesce the specified instance after snapshots.
If the specified instance doesn't support quiescing,
InstanceQuiesceNotSupported is raised. When it fails to quiesce by
other errors (e.g. agent timeout), NovaException is raised.
:param context: request context
:param instance: nova.objects.instance.Instance to be unquiesced
:param image_meta: image object returned by nova.image.glance that
defines the image from which this instance
was created
"""
raise NotImplementedError()
def load_compute_driver(virtapi, compute_driver=None):
    """Load a compute driver module.

    Loads the driver named by ``compute_driver`` or, when that is not
    supplied, by the compute_driver configuration option. Compute driver
    constructors take a VirtAPI object as their first argument, so one
    must be supplied.

    :param virtapi: a VirtAPI instance
    :param compute_driver: a compute driver name to override the config opt
    :returns: a ComputeDriver instance
    """
    driver_name = compute_driver or CONF.compute_driver
    if not driver_name:
        LOG.error(_LE("Compute driver option required, but not specified"))
        sys.exit(1)

    LOG.info(_LI("Loading compute driver '%s'"), driver_name)
    try:
        driver = importutils.import_object_ns('nova.virt', driver_name,
                                              virtapi)
        return utils.check_isinstance(driver, ComputeDriver)
    except ImportError:
        LOG.exception(_LE("Unable to load the virtualization driver"))
        sys.exit(1)
def compute_driver_matches(match):
    """Return a truthy value when the configured driver ends with *match*."""
    driver = CONF.compute_driver
    return driver and driver.endswith(match)
| 40.320055 | 79 | 0.650053 |
a44f16f7d0b3af87d8c60d3cec76e42d02884bab | 7,499 | py | Python | pymatgen/analysis/structure_prediction/dopant_predictor.py | hpatel1567/pymatgen | 8304b25464206c74305214e45935df90bab95500 | [
"MIT"
] | 2 | 2017-10-02T03:11:47.000Z | 2018-12-02T12:56:12.000Z | pymatgen/analysis/structure_prediction/dopant_predictor.py | darnoceloc/pymatgen | 5cc42912a12a265a603df7e34c856561f76edc1f | [
"MIT"
] | 3 | 2017-07-18T01:13:41.000Z | 2019-04-29T18:17:30.000Z | pymatgen/analysis/structure_prediction/dopant_predictor.py | darnoceloc/pymatgen | 5cc42912a12a265a603df7e34c856561f76edc1f | [
"MIT"
] | 2 | 2016-06-15T00:12:59.000Z | 2018-12-02T12:56:47.000Z | import warnings
import numpy as np
from pymatgen.analysis.structure_prediction.substitution_probability import \
SubstitutionPredictor
from pymatgen.core.periodic_table import Specie, Element
def get_dopants_from_substitution_probabilities(structure, num_dopants=5,
                                                threshold=0.001,
                                                match_oxi_sign=False):
    """
    Get dopant suggestions based on substitution probabilities.

    Args:
        structure (Structure): A pymatgen structure decorated with
            oxidation states.
        num_dopants (int): The number of suggestions to return for
            n- and p-type dopants.
        threshold (float): Probability threshold for substitutions.
        match_oxi_sign (bool): Whether to force the dopant and original
            species to have the same sign of oxidation state; e.g. if the
            original site is negatively charged, only negative dopants
            are returned.

    Returns:
        (dict): Dopant suggestions with keys "n_type" and "p_type", each
        a list of dicts with keys "probability", "dopant_species" and
        "original_species".
    """
    if not all(hasattr(s, "oxi_state") for s in structure.species):
        raise ValueError("All sites in structure must have oxidation states to "
                         "predict dopants.")

    predictor = SubstitutionPredictor(threshold=threshold)

    # Flatten the per-species predictions into one list of suggestion dicts.
    suggestions = []
    for species in set(structure.species):
        for pred in predictor.list_prediction([species]):
            dopant, original = next(iter(pred['substitutions'].items()))
            suggestions.append({'probability': pred['probability'],
                                'dopant_species': dopant,
                                'original_species': original})
    suggestions.sort(key=lambda s: s['probability'], reverse=True)

    return _get_dopants(suggestions, num_dopants, match_oxi_sign)
def get_dopants_from_shannon_radii(bonded_structure, num_dopants=5,
                                   match_oxi_sign=False):
    """
    Get dopant suggestions based on Shannon radii differences.
    Args:
        bonded_structure (StructureGraph): A pymatgen structure graph
            decorated with oxidation states. For example, generated using the
            CrystalNN.get_bonded_structure() method.
        num_dopants (int): The number of suggestions to return for
            n- and p-type dopants.
        match_oxi_sign (bool): Whether to force the dopant and original species
            to have the same sign of oxidation state. E.g. If the original site
            is in a negative charge state, then only negative dopants will be
            returned.
    Returns:
        (dict): Dopant suggestions, given as a dictionary with keys "n_type" and
        "p_type". The suggestions for each doping type are given as a list of
        dictionaries, each with the keys:
        - "radii_diff": The difference between the Shannon radii of the species.
        - "dopant_species": The dopant species.
        - "original_species": The substituted species.
    """
    # get a list of all Specie for all elements in all their common oxid states
    all_species = [Specie(el, oxi) for el in Element
                   for oxi in el.common_oxidation_states]
    # get a series of tuples with (coordination number, specie)
    cn_and_species = set((bonded_structure.get_coordination_of_site(i),
                          bonded_structure.structure[i].specie)
                         for i in range(bonded_structure.structure.num_sites))
    # Cache of coordination number -> candidate radii, so the full species
    # list is only scanned once per distinct coordination number.
    cn_to_radii_map = {}
    possible_dopants = []
    for cn, species in cn_and_species:
        cn_roman = _int_to_roman(cn)
        try:
            species_radius = species.get_shannon_radius(cn_roman)
        except KeyError:
            # No tabulated Shannon radius for this species/CN combination.
            warnings.warn("Shannon radius not found for {} with coordination "
                          "number {}.\nSkipping...".format(species, cn))
            continue
        # NOTE(review): the cached radii_diff values are computed relative to
        # the radius of the FIRST species seen for this coordination number;
        # if two different species share a CN, later ones reuse diffs taken
        # against the first species' radius — presumably unintended; verify.
        if cn not in cn_to_radii_map:
            cn_to_radii_map[cn] = _shannon_radii_from_cn(
                all_species, cn_roman, radius_to_compare=species_radius)
        shannon_radii = cn_to_radii_map[cn]
        possible_dopants += [{'radii_diff': p['radii_diff'],
                              'dopant_species': p['species'],
                              'original_species': species}
                             for p in shannon_radii]
    # Smallest absolute radius mismatch first.
    possible_dopants.sort(key=lambda x: abs(x['radii_diff']))
    return _get_dopants(possible_dopants, num_dopants, match_oxi_sign)
def _get_dopants(substitutions, num_dopants, match_oxi_sign):
"""
Utility method to get n- and p-type dopants from a list of substitutions.
"""
n_type = [pred for pred in substitutions
if pred['dopant_species'].oxi_state >
pred['original_species'].oxi_state
and (not match_oxi_sign or
np.sign(pred['dopant_species'].oxi_state) ==
np.sign(pred['original_species'].oxi_state))]
p_type = [pred for pred in substitutions
if pred['dopant_species'].oxi_state <
pred['original_species'].oxi_state
and (not match_oxi_sign or
np.sign(pred['dopant_species'].oxi_state) ==
np.sign(pred['original_species'].oxi_state))]
return {'n_type': n_type[:num_dopants], 'p_type': p_type[:num_dopants]}
def _shannon_radii_from_cn(species_list, cn_roman, radius_to_compare=0):
"""
Utility func to get Shannon radii for a particular coordination number.
As the Shannon radii depends on charge state and coordination number,
species without an entry for a particular coordination number will
be skipped.
Args:
species_list (list): A list of Species to get the Shannon radii for.
cn_roman (str): The coordination number as a roman numeral. See
Specie.get_shannon_radius for more details.
radius_to_compare (float, optional): If set, the data will be returned
with a "radii_diff" key, containing the difference between the
shannon radii and this radius.
Returns:
(list of dict): The Shannon radii for all Species in species. Formatted
as a list of dictionaries, with the keys:
- "species": The species with charge state.
- "radius": The Shannon radius for the species.
- "radius_diff": The difference between the Shannon radius and the
radius_to_compare optional argument.
"""
shannon_radii = []
for s in species_list:
try:
radius = s.get_shannon_radius(cn_roman)
shannon_radii.append({
'species': s, 'radius': radius,
'radii_diff': radius - radius_to_compare})
except KeyError:
pass
return shannon_radii
def _int_to_roman(number):
"""Utility method to convert an int (less than 20) to a roman numeral."""
roman_conv = [(10, "X"), (9, "IX"), (5, "V"), (4, "IV"), (1, "I")]
result = []
for (arabic, roman) in roman_conv:
(factor, number) = divmod(number, arabic)
result.append(roman * factor)
if number == 0:
break
return "".join(result)
| 40.101604 | 80 | 0.636352 |
10fe4b7ad0b3581fac8aa462c812dcaa2656f3df | 1,271 | py | Python | .history/Classiles/ice_cream_20210614181315.py | minefarmer/Coding101-OOP | d5655977559e3bd1acf6a4f185a6121cc3b05ce4 | [
"Unlicense"
] | null | null | null | .history/Classiles/ice_cream_20210614181315.py | minefarmer/Coding101-OOP | d5655977559e3bd1acf6a4f185a6121cc3b05ce4 | [
"Unlicense"
] | null | null | null | .history/Classiles/ice_cream_20210614181315.py | minefarmer/Coding101-OOP | d5655977559e3bd1acf6a4f185a6121cc3b05ce4 | [
"Unlicense"
] | null | null | null | """[Practice: Ice Cream]
Class
keyword class name
class Ice:
Instantiation
variable class
name name
("instance")
ice = Ice()
Method an action or behavior ==== to add a method, I simply define a function inside the class
| method name is (eat)
def eat(self):
print("hi") # this line is the method content
Dot Expression # To test the method
instance method name
IceCream . eat ()
Assigning an attribute is very similar to defining a variable
self attaches the attribute attribute value
to the current instance
self . cubes = 3
"""
class IceCream:
    """A simple ice cream that tracks how many scoops it holds."""

    def __init__(self, scoops=0):
        # Initialise the scoop count explicitly: the original never set
        # self.scoops, so the first call to eat()/add() raised
        # AttributeError. Defaulting to 0 keeps IceCream() working.
        self.scoops = scoops
        print("Created ice cream")

    def eat(self, scoops):
        """Remove `scoops` scoops; warn and do nothing if not enough left."""
        if self.scoops < scoops:
            print("Not enough bites left!")
            # Return early: the original fell through and subtracted
            # anyway, driving the count negative.
            return
        self.scoops -= scoops

    def add(self, scoops):
        """Add `scoops` scoops to the ice cream."""
        self.scoops += scoops
# Demonstration: calling eat() on the class itself (no instance) fails,
# because Python supplies no value for 'self' or 'scoops'. The traceback
# below shows the resulting error.
IceCream.eat() # Traceback (most recent call last):
  # File "/home/rich/Desktop/CarlsHub/Coding101-OOP/Classiles/ice_cream.py", line 37, in <module>
    # IceCream.eat()
# TypeError: eat() missing 2 required positional arguments: 'self' and 'scoops'
ef8287e4a2a88f22bcb6dbda7c5cd7f8f95e04fa | 1,673 | py | Python | cpo/commands/fyre/info/get_quick_burn_max_hours.py | IBM/cloud-pak-operations-cli | 45ddcefb4302801c9a833d1359ea4d740c384556 | [
"Apache-2.0"
] | 7 | 2021-12-07T09:16:24.000Z | 2022-03-08T12:38:54.000Z | cpo/commands/fyre/info/get_quick_burn_max_hours.py | IBM/cloud-pak-operations-cli | 45ddcefb4302801c9a833d1359ea4d740c384556 | [
"Apache-2.0"
] | 3 | 2021-11-26T09:43:03.000Z | 2021-12-14T08:04:53.000Z | cpo/commands/fyre/info/get_quick_burn_max_hours.py | IBM/cloud-pak-operations-cli | 45ddcefb4302801c9a833d1359ea4d740c384556 | [
"Apache-2.0"
] | 1 | 2022-03-10T07:14:49.000Z | 2022-03-10T07:14:49.000Z | # Copyright 2021 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
import click
import cpo.config
import cpo.lib.click.utils
import cpo.utils.network
from cpo.lib.fyre.ocp_plus_api_manager import OCPPlusAPIManager
from cpo.lib.fyre.utils.click import fyre_command_options
from cpo.utils.logging import loglevel_command
# Defaults for the credential options are pre-populated from the stored
# credentials file so the user does not have to pass them every time.
@loglevel_command(
    context_settings=cpo.lib.click.utils.create_default_map_from_json_file(
        cpo.config.configuration_manager.get_credentials_file_path()
    )
)
@fyre_command_options
@click.option("--json", help="Prints the command output in JSON format", is_flag=True)
@click.option("--site", help="OCP+ site", type=click.Choice(["rtp", "svl"]))
def get_quick_burn_max_hours(
    fyre_api_user_name: str,
    fyre_api_key: str,
    disable_strict_response_schema_check: bool,
    json: bool,
    site: Optional[str],
):
    """Get the maximum hours for a quick burn deployment"""
    # The FYRE API uses a self-signed certificate, so suppress the
    # insecure-request warning before making the call.
    cpo.utils.network.disable_insecure_request_warning()
    # Query the OCP+ API for the quick-burn hour limit and print the
    # result, as JSON when --json was given.
    OCPPlusAPIManager(fyre_api_user_name, fyre_api_key, disable_strict_response_schema_check).get_quickburn_max_hours(
        site
    ).format(json)
8c76c329e71fd547affa7ba27d5a273330991966 | 2,433 | py | Python | vol2/vol2-python-examples/examples/example_crossover.py | Sun-Joong/aifh | 1b6363d26f54b77348020ce88ced0670568ed736 | [
"Apache-2.0"
] | 777 | 2015-01-17T22:48:26.000Z | 2022-03-31T01:10:07.000Z | vol2/vol2-python-examples/examples/example_crossover.py | Sun-Joong/aifh | 1b6363d26f54b77348020ce88ced0670568ed736 | [
"Apache-2.0"
] | 17 | 2015-01-02T14:41:24.000Z | 2017-09-02T02:57:09.000Z | vol2/vol2-python-examples/examples/example_crossover.py | Sun-Joong/aifh | 1b6363d26f54b77348020ce88ced0670568ed736 | [
"Apache-2.0"
] | 445 | 2015-01-26T17:01:49.000Z | 2022-03-24T07:16:58.000Z | #!/usr/bin/env python
"""
Artificial Intelligence for Humans
Volume 2: Nature-Inspired Algorithms
Python Version
http://www.aifh.org
http://www.jeffheaton.com
Code repository:
https://github.com/jeffheaton/aifh
Copyright 2014 by Jeff Heaton
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
For more information on Heaton Research copyrights, licenses
and trademarks visit:
http://www.heatonresearch.com/copyright
============================================================================================================
This example uses crossover to combine two parent genomes to produce two children.
Both repeating and non-repeating splice are used.
Crossover Splice
Parent 1: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
Parent 2: [10, 9, 8, 7, 6, 5, 4, 3, 2, 1]
Offspring 1: [1, 2, 3, 4, 5, 6, 4, 3, 2, 1]
Offspring 2: [10, 9, 8, 7, 6, 5, 7, 8, 9, 10]
Crossover Splice No Repeat
Parent 1: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
Parent 2: [10, 9, 8, 7, 6, 5, 4, 3, 2, 1]
Offspring 1: [10, 3, 2, 4, 5, 6, 7, 8, 9, 1]
Offspring 2: [1, 8, 9, 7, 6, 5, 4, 3, 2, 10]
"""
import sys
import os
# Find the AIFH core files
aifh_dir = os.path.dirname(os.path.abspath(__file__))
aifh_dir = os.path.abspath(aifh_dir + os.sep + ".." + os.sep + "lib" + os.sep + "aifh")
sys.path.append(aifh_dir)
from genetic import *
from genetic import Genome
from genetic import Species
# Two parent genomes with the same genes in opposite order.
p1 = [ 1,2,3,4,5,6,7,8,9,10 ]
p2 = [ 10,9,8,7,6,5,4,3,2,1 ]
# Pre-allocated slots for the two offspring produced by each crossover.
off = [[],[]]
pop = Population()
# Demo 1: plain splice crossover (genes may repeat in the offspring).
print("Crossover Splice")
crossover_splice(pop, p1,p2,off)
print("Parent 1: " + str(p1))
print("Parent 2: " + str(p2))
print("Offspring 1: " + str(off[0]))
print("Offspring 2: " + str(off[1]))
print()
# Demo 2: splice crossover that keeps each offspring free of repeats.
print("Crossover Splice No Repeat")
crossover_splice_no_repeat(pop, p1,p2,off)
print("Parent 1: " + str(p1))
print("Parent 2: " + str(p2))
print("Offspring 1: " + str(off[0]))
print("Offspring 2: " + str(off[1]))
30401032ac9f894d9f8172bf845bf9bf99338c8c | 1,628 | py | Python | case/example/webdriver_savefile_test07.py | w718328952/webdriver_test | 66d0ad70899981a9968717525b14a7dc239ef963 | [
"Apache-2.0"
] | null | null | null | case/example/webdriver_savefile_test07.py | w718328952/webdriver_test | 66d0ad70899981a9968717525b14a7dc239ef963 | [
"Apache-2.0"
] | null | null | null | case/example/webdriver_savefile_test07.py | w718328952/webdriver_test | 66d0ad70899981a9968717525b14a7dc239ef963 | [
"Apache-2.0"
] | null | null | null | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from time import sleep # time。sleep()实现延时
# Firefox-specific profile configuration for automatic downloads.
profile = webdriver.FirefoxProfile()
# Set the download directory.
profile.set_preference('browser.download.dir', 'd:\\')
# 2 = use the custom download path; 0 = desktop; 1 = default path.
profile.set_preference('browser.download.folderList', 2)
# Whether to show the download manager when a download starts.
profile.set_preference('browser.download.manager.showWhenStarting', False)
# Do not prompt for the given MIME types; save them directly.
profile.set_preference('browser.helperApps.neverAsk.saveToDisk', 'application/zip')
browser = webdriver.Firefox(firefox_profile=profile) # the profile object must be passed when configuring the browser
browser.get('http://www.xxxxxxx.com') # query page URL; redirects to the login page when not logged in, then back after login
# NOTE(review): the next line rebinds 'username' to the WebElement, so
# send_keys(username) below sends the element object, not a credential
# string — presumably a shadowing bug; verify the intended value.
username = browser.find_element_by_name('uid') # locate the username input
username.clear() # clear the input first
username.send_keys(username) # type the username
password = browser.find_element_by_name('password') # locate the password input (same shadowing issue as above)
password.clear() # clear the input first
password.send_keys(password) # type the password
password.send_keys(Keys.RETURN) # press RETURN to log in instead of locating the login button
browser.implicitly_wait(5) # wait up to 5 seconds for the page redirect
browser.find_element_by_name('项目').send_keys(ID) # locate the project-ID input and type the ID (NOTE(review): 'ID' is undefined here — confirm where it comes from)
browser.find_element_by_id('search').click() # locate the search button and click it
browser.implicitly_wait(5) # wait for the search results
browser.find_element_by_xpath('\\').click() # locate the export button and click it
sleep(3) # wait for the export dialog telling the user to download from another page
browser.find_element_by_xpath('\\').click()
# Click OK on the dialog to hide it, so the next project's bug list can be exported.
browser.get('http://www.yyyyyy.com') # navigate to the download page
filelist = browser.find_elements_by_xpath('\\') # locate every file in the file list
for file in filelist:
    file.click() # click to save each file
3ca01dc134a15a595c20114e53b9194f50a12e6c | 1,478 | py | Python | articles/models.py | varkon/eparhiaodua | f2a2d6fd3c89748712e38f4035a720c6a7ad7fe3 | [
"Apache-2.0"
] | 2 | 2018-04-24T10:41:11.000Z | 2018-04-24T10:41:15.000Z | articles/models.py | varkon/eparhia | f2a2d6fd3c89748712e38f4035a720c6a7ad7fe3 | [
"Apache-2.0"
] | null | null | null | articles/models.py | varkon/eparhia | f2a2d6fd3c89748712e38f4035a720c6a7ad7fe3 | [
"Apache-2.0"
] | null | null | null | from django.db import models
from django.utils import timezone
from tinymce import models as tinymce_model
from filebrowser.fields import FileBrowseField
from eparhiapp.apps import transliterate
# Create your models here.
class Article(models.Model) :
title = models.CharField(max_length = 255, verbose_name='Заголовок')
annonce = tinymce_model.HTMLField(verbose_name='Анонс новини')
body = tinymce_model.HTMLField(verbose_name='Повний текст новини')
link = models.CharField(max_length = 255, verbose_name='Посилання', unique=True, blank=True)
author = models.ForeignKey('auth.User', on_delete=models.CASCADE)
created_date = models.DateTimeField(
default=timezone.now)
published_date = models.DateTimeField(
blank=True, null=True)
icon = FileBrowseField("Зображення", max_length=250, directory="uploads/", extensions=[".jpg","jpeg","png"], null=True)
class Meta:
verbose_name = 'Стаття'
verbose_name_plural = 'Статті'
# this is not needed for create link
def save(self, *args, **kwargs):
self.createlink()
super(Article, self).save(*args, **kwargs)
def publish(self):
self.published_date = timezone.now()
if (self.link == ""):
self.link = transliterate(self.title)
self.save()
def createlink(self):
if (self.link == ""):
self.link = transliterate(self.title)
def __str__(self):
return self.title
| 36.04878 | 123 | 0.679296 |
a9df85b1cc2c4548e7e4c02f5e7bd9d2737f3455 | 3,356 | py | Python | library/apt-upgrade-list.py | cmurphy/ardana-ansible | f3fb1573cfd6adff24c122c85ae877582a24ab55 | [
"Apache-2.0"
] | 4 | 2018-12-21T13:30:46.000Z | 2020-04-06T10:39:18.000Z | library/apt-upgrade-list.py | cmurphy/ardana-ansible | f3fb1573cfd6adff24c122c85ae877582a24ab55 | [
"Apache-2.0"
] | null | null | null | library/apt-upgrade-list.py | cmurphy/ardana-ansible | f3fb1573cfd6adff24c122c85ae877582a24ab55 | [
"Apache-2.0"
] | 8 | 2018-03-09T19:50:19.000Z | 2019-08-13T09:49:44.000Z | #!/usr/bin/env python
#
# An Ansible module to query apt for the list of packages
# available for update.
#
# (c) Copyright 2015 Hewlett Packard Enterprise Development LP
# (c) Copyright 2017 SUSE LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
DOCUMENTATION = '''
---
module: apt-upgrade-list
author: Tom Howley
short_description: Queries apt for list of packages available for upgrade.
description:
- Updates the local apt cache.
- Queries the apt cache to get the list of packages avalailable for upgrade.
That list of packages is written to the fact: list_pkg_upgrades
options:
timeout:
description:
- Timeout the module operation after specified number of seconds.
required: false
default: 30
'''
EXAMPLES = '''
- apt-upgrade-list:
timeout: 30
'''
import datetime
import json
import os
import signal
from threading import Timer
def kill_procgroup(proc):
os.killpg(os.getpgid(proc.pid), signal.SIGKILL)
def get_list_pkg_upgrades(module, timeout):
cmd = "sudo aptitude -s -y upgrade"
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True, preexec_fn=os.setsid)
t = Timer(timeout, kill_procgroup, [p])
t.start()
output, err = p.communicate()
if t.is_alive():
t.cancel()
else:
error_msg = "Timeout on cmd %s output %s" % (cmd, output)
module.fail_json(msg=error_msg)
if p.returncode != 0:
error_msg = "Failed to run %s" % (cmd)
module.fail_json(msg=error_msg)
output = output.splitlines()
list_pkg_upgrades = []
UPGRADE_STR="The following packages will be upgraded:"
RECOMMEND_STR="The following packages are RECOMMENDED but will NOT be installed:"
idx_start_match = next((i for i, v in enumerate(output) if v == UPGRADE_STR), -1)
if idx_start_match == -1:
return list_pkg_upgrades
idx_end_match = next((i for i, v in enumerate(output) if v == RECOMMEND_STR), -1)
if idx_end_match == -1:
idx_end_match = next((i for i, v in enumerate(output) if re.match('^\d*\s*packages upgraded.*not upgraded.$',v)), -1)
if idx_end_match == -1:
return list_pkg_upgrades
for line in output[idx_start_match+1:idx_end_match]:
list_pkg_upgrades.extend(line.split())
for pkg in list_pkg_upgrades:
print "Pkg: %s" % pkg
return list_pkg_upgrades
def main():
module = AnsibleModule(
argument_spec = dict(
timeout=dict(required=False, type='int', default=30)
))
timeout = module.params['timeout']
list_pkg_upgrades = get_list_pkg_upgrades(module, timeout)
changed = (len(list_pkg_upgrades) > 0)
ansible_facts_dict = dict(list_pkg_upgrades=list_pkg_upgrades)
result = dict(changed=changed, ansible_facts=ansible_facts_dict)
module.exit_json(**result)
from ansible.module_utils.basic import *
main()
| 31.074074 | 125 | 0.701132 |
2c77f791d430bf98e3c705ae313dc99d7d4c301e | 1,307 | py | Python | pytket_cirq/setup.py | Travis-S/pytket | ba1b63b5b22533d9366c431b91d69bf9cf77b0d7 | [
"Apache-2.0"
] | null | null | null | pytket_cirq/setup.py | Travis-S/pytket | ba1b63b5b22533d9366c431b91d69bf9cf77b0d7 | [
"Apache-2.0"
] | null | null | null | pytket_cirq/setup.py | Travis-S/pytket | ba1b63b5b22533d9366c431b91d69bf9cf77b0d7 | [
"Apache-2.0"
] | null | null | null | import setuptools
from setuptools import setup
def find_pytket_subpackages():
locations = [('pytket', 'pytket')]
pkg_list = []
for location, prefix in locations:
pkg_list += list(
map(lambda package_name: '{}.{}'.format(prefix, package_name),
setuptools.find_packages(where=location))
)
return pkg_list
setup(
name='pytket_cirq',
version='0.1',
author='Will Simmons',
author_email='will.simmons@cambridgequantum.com',
python_requires='>=3.6',
url='https://github.com/CQCL/pytket',
description='Extension for pytket, providing translation to and from the Cirq framework',
license='Apache 2.0',
packages = find_pytket_subpackages(),
install_requires = [
'pytket >=0.2.0',
'cirq ~=0.5.0',
'matplotlib ~=2.2'
],
classifiers=[
"Environment :: Console",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"License :: OSI Approved :: Apache Software License",
"Operating System :: MacOS :: MacOS X",
"Operating System :: POSIX :: Linux",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering"
],
zip_safe=False
) | 30.395349 | 93 | 0.609028 |
064e54a074da849bece5a11954121930000b807b | 1,344 | py | Python | Lista-05/exercicio-03.py | joaofelipecsantos/ser-347 | cfaebf7dcba8366b8e8b922c8c7f7e4d270892b8 | [
"MIT"
] | null | null | null | Lista-05/exercicio-03.py | joaofelipecsantos/ser-347 | cfaebf7dcba8366b8e8b922c8c7f7e4d270892b8 | [
"MIT"
] | null | null | null | Lista-05/exercicio-03.py | joaofelipecsantos/ser-347 | cfaebf7dcba8366b8e8b922c8c7f7e4d270892b8 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
SER-347 - Joao Felipe
Lista-05
Exercício 03. Tomando como base os operadores disponíveis em Python documentation
- String Methods, apresente as operações para converter os elementos da coluna
string de entrada nos resultados apresentados na coluna string de saída.
"""
# 1
entrada = 'Gilberto'
print(f'++{entrada}++') # saida = '++Gilberto++'
# 2
entrada = 'sensoriamento remoto'
print(entrada.capitalize()) # saida = 'Sensoriamento remoto'
# 3
entrada = 'sensoriamento remoto'
print(entrada.title()) # saida = 'Sensoriamento Remoto'
# 4
entrada = 'GilberTo'
print(entrada.lower()) # saida = 'gilberto'
# 5
entrada = 'Gilberto'
print(entrada + '**') # saida = 'Gilberto**'
# 6
entrada = 'Gilberto'
print('**' + entrada) # saida = '**Gilberto'
# 7
entrada = ' Gilberto'
print(entrada.strip()) # saida = 'Gilberto'
# 8
entrada = 'ser347@dpi.inpe.br'
print("('%s', '@', '%s')" %(entrada.split('@')[0], entrada.split('@')[1])) # saida = ('ser347', '@', 'dpi.inpe.br')
# 9
entrada = 'CBERS_4_PAN5M_20180308'
print(entrada.split('_')) # saida = ['CBERS', '4', 'PAN5M', '20180308']
# 10
entrada = 'Gilberto@@@'
print(entrada.replace('@', '')) # saida = 'Gilberto'
# 11
entrada = '@@Gilberto@@@'
print(entrada.replace('@', '')) # saida = 'Gilberto'
# END ----------------------- | 24.436364 | 116 | 0.633185 |
02482ffbc3c39476658b620e20dfe02e03f977ad | 4,170 | py | Python | setup.py | ziodave/opentapioca | e4d5d41c1fdb199a49745c3efc2a02c6d74be315 | [
"Apache-2.0"
] | 191 | 2019-04-12T11:28:02.000Z | 2022-03-13T16:05:31.000Z | setup.py | ziodave/opentapioca | e4d5d41c1fdb199a49745c3efc2a02c6d74be315 | [
"Apache-2.0"
] | 38 | 2019-04-23T16:47:50.000Z | 2022-03-22T09:29:21.000Z | setup.py | ziodave/opentapioca | e4d5d41c1fdb199a49745c3efc2a02c6d74be315 | [
"Apache-2.0"
] | 26 | 2019-04-23T13:49:05.000Z | 2022-02-10T07:43:15.000Z | """A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='opentapioca',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='0.1.1',
description='Simple entity linking service for Wikidata',
long_description=long_description,
# The project's main homepage.
url='https://github.com/wetneb/opentapioca',
# Author details
author='Antonin Delpeuch',
author_email='antonin@delpeuch.eu',
# Choose your license
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Information Analysis',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
# What does your project relate to?
keywords='NIF NLP Wikidata NERD',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['contrib', 'docs', 'tests']),
# Alternatively, if you want to distribute just a my_module.py, uncomment
# this:
# py_modules=["my_module"],
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=['numpy', 'scipy', 'scikit-learn', 'bottle', 'requests-cache', 'requests_mock', 'unidecode', 'pynif', 'requests', 'click'],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
extras_require={
'dev': ['check-manifest'],
'test': ['coverage', 'pytest'],
},
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
package_data={
},
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
data_files=[],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
'console_scripts': [
'tapioca = opentapioca.cli:cli',
],
},
)
| 36.26087 | 144 | 0.668345 |
2537bf7400f1054d01421fb8cd16307926b2c4df | 2,716 | py | Python | Birnn_Transformer/ncc/criterions/retrieval/triplet.py | code-backdoor/code-backdoor | 1eeb3d79aa8a54c8f08e8d0156b569de5edd974e | [
"MIT"
] | 71 | 2020-12-04T02:18:13.000Z | 2022-03-30T15:19:50.000Z | Birnn_Transformer/ncc/criterions/retrieval/triplet.py | code-backdoor/code-backdoor | 1eeb3d79aa8a54c8f08e8d0156b569de5edd974e | [
"MIT"
] | 4 | 2021-03-10T17:48:50.000Z | 2022-03-13T10:42:22.000Z | Birnn_Transformer/ncc/criterions/retrieval/triplet.py | code-backdoor/code-backdoor | 1eeb3d79aa8a54c8f08e8d0156b569de5edd974e | [
"MIT"
] | 11 | 2020-12-09T12:17:44.000Z | 2022-03-30T09:02:13.000Z | # -*- coding: utf-8 -*-
import math
import torch.nn.functional as F
from ncc.criterions import NccCriterion, register_criterion
from ncc.data.constants import EPS
from ncc.utils import utils
from ncc.utils.logging import metrics
@register_criterion('triplet')
class TripletCriterion(NccCriterion):
def __init__(self, task, sentence_avg):
super().__init__(task)
self.sentence_avg = sentence_avg
self.margin = self.task.args['optimization']['margin']
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
net_output = model(**sample['net_input'])
loss, _ = self.compute_loss(model, net_output, reduce=reduce)
sample_size = sample['nsentences']
logging_output = {
'loss': loss.data,
'nsentences': sample['nsentences'],
'sample_size': sample_size,
}
return loss, sample_size, logging_output
def cos_similarity(self, src_repr, tgt_repr):
return F.cosine_similarity(src_repr, tgt_repr)
def compute_loss(self, model, net_output, reduce=True):
src_repr, pos_repr, neg_repr = net_output
pos_dist = self.cos_similarity(src_repr, pos_repr) # B X 1
neg_dist = self.cos_similarity(src_repr, neg_repr) # B X 1
loss = (self.margin - pos_dist + neg_dist).clamp(EPS).sum()
return loss, loss
@staticmethod
def reduce_metrics(logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training."""
loss_sum = sum(log.get('loss', 0) for log in logging_outputs)
ntokens = sum(log.get('ntokens', 0) for log in logging_outputs)
sample_size = sum(log.get('sample_size', 0) for log in logging_outputs)
metrics.log_scalar('loss', loss_sum / sample_size / math.log(2), sample_size, round=6)
if sample_size != ntokens:
metrics.log_scalar('nll_loss', loss_sum / ntokens / math.log(2), ntokens, round=6)
metrics.log_derived('ppl', lambda meters: utils.get_perplexity(meters['nll_loss'].avg))
else:
metrics.log_derived('ppl', lambda meters: utils.get_perplexity(meters['loss'].avg))
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
to True will improves distributed training speed.
"""
return True
| 38.8 | 99 | 0.661635 |
e97d6ebeb53cc0b1023600963f736c5c2b22ed6f | 579 | py | Python | pacote-Download/pythontestes/desafio037.py | FernandoFalcone-dev/exercicios-Python | a867cf5cd08ae7325f2f7916b4116b5f31fe83ef | [
"MIT"
] | null | null | null | pacote-Download/pythontestes/desafio037.py | FernandoFalcone-dev/exercicios-Python | a867cf5cd08ae7325f2f7916b4116b5f31fe83ef | [
"MIT"
] | null | null | null | pacote-Download/pythontestes/desafio037.py | FernandoFalcone-dev/exercicios-Python | a867cf5cd08ae7325f2f7916b4116b5f31fe83ef | [
"MIT"
] | null | null | null | num = int(input('Digite um número: '))
print('''Escolha uma das bases para conversão:
[ 1 ] converter para \033[1;31mBINÁRIO\033[m
[ 2 ] converter para \033[1;32mOCTAL\033[m
[ 3 ] converter para \033[1;33mHEXADECIMAL\033[m''')
escolha = int(input('Sua opção é: '))
if escolha == 1:
print(f'{num} convertido para BINÁRIO é igual a {bin(num)[2:]}')
elif escolha == 2:
print(f'{num} convertido para OCTAL é igual a {oct(num)[2:]}')
elif escolha == 3:
print(f'{num} convertido para HEXADECIMAL é igual a {hex(num)[2:]}')
else:
print('Opção inválida. Tente de novo.')
| 38.6 | 72 | 0.663212 |
85f1ccf43946d110972b4e1943e2803363d749d5 | 3,670 | py | Python | python/GafferSceneUI/FilteredSceneProcessorUI.py | cwmartin/gaffer | 1f8a0f75522105c9d5efefac6d55cb61c1038909 | [
"BSD-3-Clause"
] | null | null | null | python/GafferSceneUI/FilteredSceneProcessorUI.py | cwmartin/gaffer | 1f8a0f75522105c9d5efefac6d55cb61c1038909 | [
"BSD-3-Clause"
] | null | null | null | python/GafferSceneUI/FilteredSceneProcessorUI.py | cwmartin/gaffer | 1f8a0f75522105c9d5efefac6d55cb61c1038909 | [
"BSD-3-Clause"
] | null | null | null | ##########################################################################
#
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import IECore
import Gaffer
import GafferUI
import GafferScene
import GafferSceneUI
##########################################################################
# Metadata
##########################################################################
Gaffer.Metadata.registerNodeDescription(
GafferScene.FilteredSceneProcessor,
"""The base type for scene processors which use a Filter node to control which part of the scene is affected.""",
"filter",
{
"description" : """The filter used to control which parts of the scene are processed. A Filter node should be connected here.""",
"nodeUI:section" : "Filter",
"nodeGadget:nodulePosition" : "right",
}
)
##########################################################################
# Widgets and Gadgets
##########################################################################
GafferUI.PlugValueWidget.registerCreator(
GafferScene.FilteredSceneProcessor,
"filter",
GafferSceneUI.FilterPlugValueWidget,
)
GafferUI.Nodule.registerNodule( GafferScene.FilteredSceneProcessor, "filter", GafferUI.StandardNodule )
##########################################################################
# NodeGraph context menu
##########################################################################
def __selectAffected( node, context ) :
with context :
pathMatcher = GafferScene.PathMatcher()
GafferScene.matchingPaths( node["filter"], node["in"], pathMatcher )
context["ui:scene:selectedPaths"] = IECore.StringVectorData( pathMatcher.paths() )
def appendNodeContextMenuDefinitions( nodeGraph, node, menuDefinition ) :
if not isinstance( node, GafferScene.FilteredSceneProcessor ) :
return
menuDefinition.append( "/FilteredSceneProcessorDivider", { "divider" : True } )
menuDefinition.append( "/Select Affected Objects", { "command" : IECore.curry( __selectAffected, node, nodeGraph.getContext() ) } )
| 39.042553 | 132 | 0.632698 |
1b2056d98311c91c13d5b48faed495f4a50c6d74 | 30,829 | py | Python | agents/GFootball_with_Memory_Patterns_v47.py | sazas/football | a762fdedf7367ef08cb3a4c6b44713b93aa69d37 | [
"Apache-2.0"
] | null | null | null | agents/GFootball_with_Memory_Patterns_v47.py | sazas/football | a762fdedf7367ef08cb3a4c6b44713b93aa69d37 | [
"Apache-2.0"
] | null | null | null | agents/GFootball_with_Memory_Patterns_v47.py | sazas/football | a762fdedf7367ef08cb3a4c6b44713b93aa69d37 | [
"Apache-2.0"
] | null | null | null | # start executing cells from here to rewrite submission.py
from kaggle_environments.envs.football.helpers import *
import math
import random
def find_patterns(obs, player_x, player_y):
    """Return the memory patterns of the first group that fits the environment.

    Walks the globally defined ``groups_of_memory_patterns`` in order and,
    for the first group whose ``environment_fits`` check passes, returns its
    list of memory patterns.  Implicitly returns ``None`` when no group fits.
    """
    for build_group in groups_of_memory_patterns:
        candidate = build_group(obs, player_x, player_y)
        if candidate["environment_fits"](obs, player_x, player_y):
            return candidate["get_memory_patterns"](obs, player_x, player_y)
def get_action_of_agent(obs, player_x, player_y):
    """Pick the action of the first fitting pattern in the agent's memory.

    Retrieves the pattern list for the current situation via
    ``find_patterns`` and returns the action of the first pattern whose
    ``environment_fits`` check passes.
    """
    candidates = find_patterns(obs, player_x, player_y)
    # scan the candidate patterns in priority order
    for build_pattern in candidates:
        pattern = build_pattern(obs, player_x, player_y)
        if pattern["environment_fits"](obs, player_x, player_y):
            return pattern["get_action"](obs, player_x, player_y)
def get_active_sticky_action(obs, exceptions):
    """Return the release action for the first active sticky action.

    Sticky actions whose names appear in *exceptions* are skipped.
    Returns ``None`` when no other sticky action is currently active.
    """
    for name, action in sticky_actions.items():
        if name in exceptions:
            continue
        if action not in obs["sticky_actions"]:
            continue
        # sprint and dribble have dedicated release actions; every other
        # sticky action is a movement direction
        if name == "sprint":
            return Action.ReleaseSprint
        if name == "dribble":
            return Action.ReleaseDribble
        return Action.ReleaseDirection
    return None
def get_average_distance_to_opponents(obs, player_x, player_y):
    """Average distance to close opponents ahead of the player.

    Only field opponents (goalkeeper at index 0 excluded) that are not
    behind the player (x > player_x - 0.03) and closer than 0.07 are
    counted.  Returns ``(average_distance, amount)``; when no opponent
    qualifies, the sentinel value 2 is returned as the average.
    """
    total_distance = 0
    counted = 0
    for opponent in obs["right_team"][1:]:
        # skip opponents already behind the player
        if opponent[0] <= (player_x - 0.03):
            continue
        dist = get_distance(player_x, player_y, opponent[0], opponent[1])
        if dist < 0.07:
            total_distance += dist
            counted += 1
    # no opponents close around: report the sentinel average of 2
    if counted == 0:
        return 2, counted
    return total_distance / counted, counted
def get_distance(x1, y1, x2, y2):
    """Two-dimensional Euclidean distance with the y axis stretched by 2.38.

    The y differences are scaled by 2.38 so that both axes are measured on
    a comparable scale before computing the Euclidean norm.
    """
    return math.hypot(x2 - x1, (y2 - y1) * 2.38)
# "%%writefile -a submission.py" will append the code below to submission.py,
# it WILL NOT rewrite submission.py
def bad_angle_short_pass(obs, player_x, player_y):
    """Short pass when the ball carrier is at a bad angle to the goal."""
    def environment_fits(obs, player_x, player_y):
        """True if the active player owns the ball at a bad shooting angle."""
        if obs["ball_owned_team"] != 0 or obs["ball_owned_player"] != obs["active"]:
            return False
        in_corner = abs(player_y) > 0.2 and player_x > 0.9
        drifting_down = (player_x > 0.7 and player_y > 0.07 and
                         obs["left_team_direction"][obs["active"]][1] > 0)
        drifting_up = (player_x > 0.7 and player_y < -0.07 and
                       obs["left_team_direction"][obs["active"]][1] < 0)
        return in_corner or drifting_down or drifting_up
    def get_action(obs, player_x, player_y):
        """Turn towards the field center, then release the short pass."""
        # drop any sticky action other than top/bottom before turning
        release = get_active_sticky_action(obs, ["top", "bottom"])
        if release is not None:
            return release
        if (Action.Top not in obs["sticky_actions"] and
                Action.Bottom not in obs["sticky_actions"]):
            return Action.Top if player_y > 0 else Action.Bottom
        return Action.ShortPass
    return {"environment_fits": environment_fits, "get_action": get_action}
def close_to_goalkeeper_shot(obs, player_x, player_y):
    """Shoot when the ball carrier is close to the opponent goalkeeper."""
    def environment_fits(obs, player_x, player_y):
        """True if the active player owns the ball near the projected keeper."""
        # project the goalkeeper 13 steps ahead along his current direction
        keeper = obs["right_team"][0]
        keeper_dir = obs["right_team_direction"][0]
        keeper_x = keeper[0] + keeper_dir[0] * 13
        keeper_y = keeper[1] + keeper_dir[1] * 13
        return (obs["ball_owned_team"] == 0 and
                obs["ball_owned_player"] == obs["active"] and
                get_distance(player_x, player_y, keeper_x, keeper_y) < 0.3)
    def get_action(obs, player_x, player_y):
        """Angle towards a corner of the goal, then shoot."""
        aim_bottom = player_y <= -0.03 or 0 < player_y < 0.03
        corner = Action.BottomRight if aim_bottom else Action.TopRight
        keep = ["bottom_right", "sprint"] if aim_bottom else ["top_right", "sprint"]
        # release any conflicting sticky action before turning
        release = get_active_sticky_action(obs, keep)
        if release is not None:
            return release
        if corner not in obs["sticky_actions"]:
            return corner
        return Action.Shot
    return {"environment_fits": environment_fits, "get_action": get_action}
def far_from_goal_shot(obs, player_x, player_y):
    """Clear the ball with a shot when deep in the own half."""
    def environment_fits(obs, player_x, player_y):
        """True if the active player (or the goalkeeper) owns the ball far back."""
        return (obs["ball_owned_team"] == 0 and
                obs["ball_owned_player"] == obs["active"] and
                (player_x < -0.6 or obs["ball_owned_player"] == 0))
    def get_action(obs, player_x, player_y):
        """Face right (towards the opponent goal), then shoot."""
        release = get_active_sticky_action(obs, ["right", "sprint"])
        if release is not None:
            return release
        if Action.Right not in obs["sticky_actions"]:
            return Action.Right
        return Action.Shot
    return {"environment_fits": environment_fits, "get_action": get_action}
def far_from_goal_high_pass(obs, player_x, player_y):
    """Send a high pass forward when far from the opponent's goal."""
    def environment_fits(obs, player_x, player_y):
        """True if the active player (or the goalkeeper) owns the ball far back."""
        return (obs["ball_owned_team"] == 0 and
                obs["ball_owned_player"] == obs["active"] and
                (player_x < -0.3 or obs["ball_owned_player"] == 0))
    def get_action(obs, player_x, player_y):
        """Face right (towards the opponent goal), then launch the high pass."""
        release = get_active_sticky_action(obs, ["right", "sprint"])
        if release is not None:
            return release
        if Action.Right not in obs["sticky_actions"]:
            return Action.Right
        return Action.HighPass
    return {"environment_fits": environment_fits, "get_action": get_action}
def go_through_opponents(obs, player_x, player_y):
    """ avoid closest opponents by going around them

    Probes three forward directions (right, top right, bottom right) one step
    ahead, stores the safest direction and a "surrounded" flag in
    obs["memory_patterns"], and always applies (environment_fits returns True).
    """
    def environment_fits(obs, player_x, player_y):
        """ environment fits constraints (always True; also computes state) """
        # right direction is safest (baseline: largest average distance to
        # nearby opponents, as returned by get_average_distance_to_opponents)
        biggest_distance, final_opponents_amount = get_average_distance_to_opponents(obs, player_x + 0.01, player_y)
        obs["memory_patterns"]["go_around_opponent"] = Action.Right
        # if top right direction is safest
        # (an average of 2 is the "no opponents around" sentinel; it is also
        # preferred when the player is low on the pitch, player_y > 0.07)
        top_right, opponents_amount = get_average_distance_to_opponents(obs, player_x + 0.01, player_y - 0.007)
        if (top_right > biggest_distance and player_y > -0.15) or (top_right == 2 and player_y > 0.07):
            biggest_distance = top_right
            final_opponents_amount = opponents_amount
            obs["memory_patterns"]["go_around_opponent"] = Action.TopRight
        # if bottom right direction is safest (mirror of the top right case)
        bottom_right, opponents_amount = get_average_distance_to_opponents(obs, player_x + 0.01, player_y + 0.007)
        if (bottom_right > biggest_distance and player_y < 0.15) or (bottom_right == 2 and player_y < -0.07):
            biggest_distance = bottom_right
            final_opponents_amount = opponents_amount
            obs["memory_patterns"]["go_around_opponent"] = Action.BottomRight
        # is player surrounded? (three or more opponents counted close by
        # in the chosen direction)
        if final_opponents_amount >= 3:
            obs["memory_patterns"]["go_around_opponent_surrounded"] = True
        else:
            obs["memory_patterns"]["go_around_opponent_surrounded"] = False
        return True
    def get_action(obs, player_x, player_y):
        """ get action of this memory pattern """
        # if player is surrounded, clear the ball with a high pass
        if obs["memory_patterns"]["go_around_opponent_surrounded"]:
            return Action.HighPass
        # not yet facing the chosen direction: release other sticky actions
        # (except sprint) first, then turn
        if obs["memory_patterns"]["go_around_opponent"] not in obs["sticky_actions"]:
            action_to_release = get_active_sticky_action(obs, ["sprint"])
            if action_to_release != None:
                return action_to_release
            return obs["memory_patterns"]["go_around_opponent"]
        # already facing the chosen direction: make sure sprint is on
        if Action.Sprint not in obs["sticky_actions"]:
            return Action.Sprint
        return obs["memory_patterns"]["go_around_opponent"]
    return {"environment_fits": environment_fits, "get_action": get_action}
# "%%writefile -a submission.py" will append the code below to submission.py,
# it WILL NOT rewrite submission.py
def khorne_slide(obs, berzerker_x, berzerker_y):
    """ BLOOD FOR THE BLOOD GOD!!! SKULLS FOR THE SKULL THRONE!!!

    Slide tackle the opponent ball carrier ("prey") when the berzerker is
    closing in on him and a failed tackle would not be too dangerous.
    """
    def environment_fits(obs, berzerker_x, berzerker_y):
        """ environment fits constraints """
        # if prey has the ball
        if obs["ball_owned_team"] == 1:
            prey_x = obs["right_team"][obs["ball_owned_player"]][0]
            prey_y = obs["right_team"][obs["ball_owned_player"]][1]
            # by x position, amount of berzerker's team players between prey and goal of berzerker's team
            players_amount = 0
            for i in range(1, len(obs["left_team"])):
                if obs["left_team"][i][0] < prey_x:
                    players_amount += 1
            # positions of prey and berzerker one step into the future
            future_prey_x = prey_x + obs["right_team_direction"][obs["ball_owned_player"]][0]
            future_prey_y = prey_y + obs["right_team_direction"][obs["ball_owned_player"]][1]
            future_berzerker_x = berzerker_x + obs["left_team_direction"][obs["active"]][0]
            future_berzerker_y = berzerker_y + obs["left_team_direction"][obs["active"]][1]
            distance_to_prey = get_distance(berzerker_x, berzerker_y, prey_x, prey_y)
            future_distance_to_prey = get_distance(future_berzerker_x, future_berzerker_y, future_prey_x, future_prey_y)
            # if berzerker is not close to his own penalty zone
            # and prey is beyond x position of the most of berzerker's team
            # and berzerker is close enough to prey
            # and berzerker is running in direction of prey
            if ((berzerker_x > -0.65 or abs(berzerker_y) > 0.3) and
                players_amount <= 5 and
                future_distance_to_prey < 0.015 and
                distance_to_prey > future_distance_to_prey):
                return True
        return False
    def get_action(obs, berzerker_x, berzerker_y):
        """ get action of this memory pattern """
        return Action.Slide
    return {"environment_fits": environment_fits, "get_action": get_action}
def run_to_ball_bottom(obs, player_x, player_y):
    """Sprint towards the ball when it is directly below the player."""
    def environment_fits(obs, player_x, player_y):
        """True if the ball is below and at (almost) the same x coordinate."""
        ball_x, ball_y = obs["ball"][0], obs["ball"][1]
        return ball_y > player_y and abs(ball_x - player_x) < 0.01
    def get_action(obs, player_x, player_y):
        """Enable sprint first, then run down."""
        if Action.Sprint in obs["sticky_actions"]:
            return Action.Bottom
        return Action.Sprint
    return {"environment_fits": environment_fits, "get_action": get_action}
def run_to_ball_bottom_left(obs, player_x, player_y):
    """Sprint towards the ball when it is below and to the left of the player."""
    def environment_fits(obs, player_x, player_y):
        """True if the ball is to the bottom left of the player's position."""
        ball_x, ball_y = obs["ball"][0], obs["ball"][1]
        return ball_x < player_x and ball_y > player_y
    def get_action(obs, player_x, player_y):
        """Enable sprint first, then run down-left."""
        if Action.Sprint in obs["sticky_actions"]:
            return Action.BottomLeft
        return Action.Sprint
    return {"environment_fits": environment_fits, "get_action": get_action}
def run_to_ball_bottom_right(obs, player_x, player_y):
    """Sprint towards the ball when it is below and to the right of the player."""
    def environment_fits(obs, player_x, player_y):
        """True if the ball is to the bottom right of the player's position."""
        ball_x, ball_y = obs["ball"][0], obs["ball"][1]
        return ball_x > player_x and ball_y > player_y
    def get_action(obs, player_x, player_y):
        """Enable sprint first, then run down-right."""
        if Action.Sprint in obs["sticky_actions"]:
            return Action.BottomRight
        return Action.Sprint
    return {"environment_fits": environment_fits, "get_action": get_action}
def run_to_ball_left(obs, player_x, player_y):
    """ run to the ball if it is to the left from player's position """
    def environment_fits(obs, player_x, player_y):
        """ environment fits constraints """
        # ball strictly to the left and vertically aligned (within tolerance)
        ball_x, ball_y = obs["ball"][0], obs["ball"][1]
        return ball_x < player_x and abs(ball_y - player_y) < 0.01
    def get_action(obs, player_x, player_y):
        """ get action of this memory pattern """
        # make sure sprint is active, then head straight left
        if Action.Sprint in obs["sticky_actions"]:
            return Action.Left
        return Action.Sprint
    return {"environment_fits": environment_fits, "get_action": get_action}
def run_to_ball_right(obs, player_x, player_y):
    """ run to the ball if it is to the right from player's position """
    def environment_fits(obs, player_x, player_y):
        """ environment fits constraints """
        # ball strictly to the right and vertically aligned (within tolerance)
        ball_x, ball_y = obs["ball"][0], obs["ball"][1]
        return ball_x > player_x and abs(ball_y - player_y) < 0.01
    def get_action(obs, player_x, player_y):
        """ get action of this memory pattern """
        # make sure sprint is active, then head straight right
        if Action.Sprint in obs["sticky_actions"]:
            return Action.Right
        return Action.Sprint
    return {"environment_fits": environment_fits, "get_action": get_action}
def run_to_ball_top(obs, player_x, player_y):
    """ run to the ball if it is to the top from player's position """
    def environment_fits(obs, player_x, player_y):
        """ environment fits constraints """
        # ball strictly above the player and horizontally aligned (within tolerance)
        ball_x, ball_y = obs["ball"][0], obs["ball"][1]
        return ball_y < player_y and abs(ball_x - player_x) < 0.01
    def get_action(obs, player_x, player_y):
        """ get action of this memory pattern """
        # make sure sprint is active, then head straight up
        if Action.Sprint in obs["sticky_actions"]:
            return Action.Top
        return Action.Sprint
    return {"environment_fits": environment_fits, "get_action": get_action}
def run_to_ball_top_left(obs, player_x, player_y):
    """ run to the ball if it is to the top left from player's position """
    def environment_fits(obs, player_x, player_y):
        """ environment fits constraints """
        # ball above and to the left of the player
        ball_x, ball_y = obs["ball"][0], obs["ball"][1]
        return ball_x < player_x and ball_y < player_y
    def get_action(obs, player_x, player_y):
        """ get action of this memory pattern """
        # make sure sprint is active, then move diagonally up-left
        if Action.Sprint in obs["sticky_actions"]:
            return Action.TopLeft
        return Action.Sprint
    return {"environment_fits": environment_fits, "get_action": get_action}
def run_to_ball_top_right(obs, player_x, player_y):
    """ run to the ball if it is to the top right from player's position """
    def environment_fits(obs, player_x, player_y):
        """ environment fits constraints """
        # ball above and to the right of the player
        ball_x, ball_y = obs["ball"][0], obs["ball"][1]
        return ball_x > player_x and ball_y < player_y
    def get_action(obs, player_x, player_y):
        """ get action of this memory pattern """
        # make sure sprint is active, then move diagonally up-right
        if Action.Sprint in obs["sticky_actions"]:
            return Action.TopRight
        return Action.Sprint
    return {"environment_fits": environment_fits, "get_action": get_action}
# "%%writefile -a submission.py" will append the code below to submission.py,
# it WILL NOT rewrite submission.py
def idle(obs, player_x, player_y):
    """ do nothing, release all sticky actions """
    def environment_fits(obs, player_x, player_y):
        """ environment fits constraints """
        # fallback pattern: always applicable
        return True
    def get_action(obs, player_x, player_y):
        """ get action of this memory pattern """
        # release any still-active sticky action, one per step
        to_release = get_active_sticky_action(obs, [])
        if to_release is None:
            return Action.Idle
        return to_release
    return {"environment_fits": environment_fits, "get_action": get_action}
# "%%writefile -a submission.py" will append the code below to submission.py,
# it WILL NOT rewrite submission.py
def corner(obs, player_x, player_y):
    """ perform a high pass in corner game mode """
    def environment_fits(obs, player_x, player_y):
        """ environment fits constraints """
        # applicable only while the game is in corner mode
        return obs['game_mode'] == GameMode.Corner
    def get_action(obs, player_x, player_y):
        """ get action of this memory pattern """
        # drop every sticky direction except top/bottom before kicking
        to_release = get_active_sticky_action(obs, ["top", "bottom"])
        if to_release is not None:
            return to_release
        sticky = obs["sticky_actions"]
        if Action.Top not in sticky and Action.Bottom not in sticky:
            # face toward the center of the pitch
            return Action.Top if player_y > 0 else Action.Bottom
        return Action.HighPass
    return {"environment_fits": environment_fits, "get_action": get_action}
def free_kick(obs, player_x, player_y):
    """ perform a high pass or a shot in free kick game mode """
    def environment_fits(obs, player_x, player_y):
        """ environment fits constraints """
        # applicable only while the game is in free kick mode
        return obs['game_mode'] == GameMode.FreeKick
    def get_action(obs, player_x, player_y):
        """ get action of this memory pattern """
        sticky = obs["sticky_actions"]
        if player_x > 0.5:
            # close to goal: line up diagonally toward it, then shoot
            to_release = get_active_sticky_action(obs, ["top_right", "bottom_right"])
            if to_release is not None:
                return to_release
            if Action.TopRight not in sticky and Action.BottomRight not in sticky:
                return Action.TopRight if player_y > 0 else Action.BottomRight
            return Action.Shot
        # far from goal: face right, then send a high pass forward
        to_release = get_active_sticky_action(obs, ["right"])
        if to_release is not None:
            return to_release
        if Action.Right not in sticky:
            return Action.Right
        return Action.HighPass
    return {"environment_fits": environment_fits, "get_action": get_action}
def goal_kick(obs, player_x, player_y):
    """ perform a short pass in goal kick game mode """
    def environment_fits(obs, player_x, player_y):
        """ environment fits constraints """
        # applicable only while the game is in goal kick mode
        return obs['game_mode'] == GameMode.GoalKick
    def get_action(obs, player_x, player_y):
        """ get action of this memory pattern """
        to_release = get_active_sticky_action(obs, ["top_right", "bottom_right"])
        if to_release is not None:
            return to_release
        sticky = obs["sticky_actions"]
        # randomly choose a forward diagonal to stay unpredictable
        if Action.TopRight not in sticky and Action.BottomRight not in sticky:
            return Action.TopRight if random.random() < 0.5 else Action.BottomRight
        return Action.ShortPass
    return {"environment_fits": environment_fits, "get_action": get_action}
def kick_off(obs, player_x, player_y):
    """ perform a short pass in kick off game mode """
    def environment_fits(obs, player_x, player_y):
        """ environment fits constraints """
        # applicable only while the game is in kick off mode
        return obs['game_mode'] == GameMode.KickOff
    def get_action(obs, player_x, player_y):
        """ get action of this memory pattern """
        # drop every sticky direction except top/bottom before passing
        to_release = get_active_sticky_action(obs, ["top", "bottom"])
        if to_release is not None:
            return to_release
        sticky = obs["sticky_actions"]
        if Action.Top not in sticky and Action.Bottom not in sticky:
            # face toward the center of the pitch
            return Action.Top if player_y > 0 else Action.Bottom
        return Action.ShortPass
    return {"environment_fits": environment_fits, "get_action": get_action}
def penalty(obs, player_x, player_y):
    """ perform a shot in penalty game mode """
    def environment_fits(obs, player_x, player_y):
        """ environment fits constraints """
        # applicable only while the game is in penalty mode
        return obs['game_mode'] == GameMode.Penalty
    def get_action(obs, player_x, player_y):
        """ get action of this memory pattern """
        to_release = get_active_sticky_action(obs, ["top_right", "bottom_right"])
        if to_release is not None:
            return to_release
        sticky = obs["sticky_actions"]
        # randomly choose a corner of the goal to aim at
        if Action.TopRight not in sticky and Action.BottomRight not in sticky:
            return Action.TopRight if random.random() < 0.5 else Action.BottomRight
        return Action.Shot
    return {"environment_fits": environment_fits, "get_action": get_action}
def throw_in(obs, player_x, player_y):
    """ perform a short pass in throw in game mode """
    def environment_fits(obs, player_x, player_y):
        """ environment fits constraints """
        # applicable only while the game is in throw in mode
        return obs['game_mode'] == GameMode.ThrowIn
    def get_action(obs, player_x, player_y):
        """ get action of this memory pattern """
        # keep only the rightward sticky direction, then throw forward
        to_release = get_active_sticky_action(obs, ["right"])
        if to_release is not None:
            return to_release
        if Action.Right not in obs["sticky_actions"]:
            return Action.Right
        return Action.ShortPass
    return {"environment_fits": environment_fits, "get_action": get_action}
# "%%writefile -a submission.py" will append the code below to submission.py,
# it WILL NOT rewrite submission.py
def defence_memory_patterns(obs, player_x, player_y):
    """ group of memory patterns for environments in which opponent's team has the ball """
    def environment_fits(obs, player_x, player_y):
        """ environment fits constraints """
        # applies whenever our team (team 0) doesn't own the ball
        return obs["ball_owned_team"] != 0
    def get_memory_patterns(obs, player_x, player_y):
        """ get list of memory patterns """
        # anticipate where the ball is heading by shifting its x position
        obs["ball"][0] += obs["ball_direction"][0] * 5
        # if the opponent has the ball far from the y axis center,
        # bias the intercept point slightly back toward the center
        if abs(obs["ball"][1]) > 0.07 and obs["ball_owned_team"] == 1:
            obs["ball"][1] += -0.01 if obs["ball"][1] > 0 else 0.01
        return [
            khorne_slide,
            run_to_ball_right,
            run_to_ball_left,
            run_to_ball_bottom,
            run_to_ball_top,
            run_to_ball_top_right,
            run_to_ball_top_left,
            run_to_ball_bottom_right,
            run_to_ball_bottom_left,
            idle,
        ]
    return {"environment_fits": environment_fits, "get_memory_patterns": get_memory_patterns}
def goalkeeper_memory_patterns(obs, player_x, player_y):
    """ group of memory patterns for goalkeeper """
    def environment_fits(obs, player_x, player_y):
        """ environment fits constraints """
        # active player is player 0 of our team (the goalkeeper) and owns the ball
        return (obs["ball_owned_team"] == 0 and
                obs["ball_owned_player"] == 0 and
                obs["ball_owned_player"] == obs["active"])
    def get_memory_patterns(obs, player_x, player_y):
        """ get list of memory patterns """
        return [
            far_from_goal_shot,
            idle,
        ]
    return {"environment_fits": environment_fits, "get_memory_patterns": get_memory_patterns}
def offence_memory_patterns(obs, player_x, player_y):
    """ group of memory patterns for environments in which player's team has the ball """
    def environment_fits(obs, player_x, player_y):
        """ environment fits constraints """
        # the controlled player of our team (team 0) owns the ball
        return obs["ball_owned_team"] == 0 and obs["ball_owned_player"] == obs["active"]
    def get_memory_patterns(obs, player_x, player_y):
        """ get list of memory patterns """
        return [
            far_from_goal_shot,
            far_from_goal_high_pass,
            bad_angle_short_pass,
            close_to_goalkeeper_shot,
            go_through_opponents,
            idle,
        ]
    return {"environment_fits": environment_fits, "get_memory_patterns": get_memory_patterns}
def other_memory_patterns(obs, player_x, player_y):
    """ group of memory patterns for all other environments """
    def environment_fits(obs, player_x, player_y):
        """ environment fits constraints """
        # catch-all group: always applicable
        return True
    def get_memory_patterns(obs, player_x, player_y):
        """ get list of memory patterns """
        return [idle]
    return {"environment_fits": environment_fits, "get_memory_patterns": get_memory_patterns}
def special_game_modes_memory_patterns(obs, player_x, player_y):
    """ group of memory patterns for special game mode environments """
    def environment_fits(obs, player_x, player_y):
        """ environment fits constraints """
        # applicable whenever the game is not in normal play
        return obs['game_mode'] != GameMode.Normal
    def get_memory_patterns(obs, player_x, player_y):
        """ get list of memory patterns """
        return [
            corner,
            free_kick,
            goal_kick,
            kick_off,
            penalty,
            throw_in,
            idle,
        ]
    return {"environment_fits": environment_fits, "get_memory_patterns": get_memory_patterns}
# "%%writefile -a submission.py" will append the code below to submission.py,
# it WILL NOT rewrite submission.py
# list of groups of memory patterns
# NOTE(review): list order presumably encodes priority -- the first group whose
# environment_fits() accepts the situation appears to win; confirm in get_action_of_agent.
groups_of_memory_patterns = [
    special_game_modes_memory_patterns,
    goalkeeper_memory_patterns,
    offence_memory_patterns,
    defence_memory_patterns,
    other_memory_patterns
]
# dictionary of sticky actions
# maps the raw sticky-action names (strings used by get_active_sticky_action)
# to their corresponding Action enum members
sticky_actions = {
    "left": Action.Left,
    "top_left": Action.TopLeft,
    "top": Action.Top,
    "top_right": Action.TopRight,
    "right": Action.Right,
    "bottom_right": Action.BottomRight,
    "bottom": Action.Bottom,
    "bottom_left": Action.BottomLeft,
    "sprint": Action.Sprint,
    "dribble": Action.Dribble
}
# "%%writefile -a submission.py" will append the code below to submission.py,
# it WILL NOT rewrite submission.py
# @human_readable_agent wrapper modifies raw observations
# provided by the environment:
# https://github.com/google-research/football/blob/master/gfootball/doc/observation.md#raw-observations
# into a form easier to work with by humans.
# Following modifications are applied:
# - Action, PlayerRole and GameMode enums are introduced.
# - 'sticky_actions' are turned into a set of active actions (Action enum)
# see usage example below.
# - 'game_mode' is turned into GameMode enum.
# - 'designated' field is removed, as it always equals to 'active'
# when a single player is controlled on the team.
# - 'left_team_roles'/'right_team_roles' are turned into PlayerRole enums.
# - Action enum is to be returned by the agent function.
@human_readable_agent
def agent(obs):
    """ Ole ole ole ole """
    # container for data shared between memory patterns during this step
    obs["memory_patterns"] = {}
    # We always control left team (observations and actions
    # are mirrored appropriately by the environment).
    player_pos = obs["left_team"][obs["active"]]
    # delegate the decision to the agent's memory patterns
    return get_action_of_agent(obs, player_pos[0], player_pos[1])
| 42.347527 | 120 | 0.63171 |
4481dc9ab16eaa9a759504f2f52023fe96dcab30 | 126 | py | Python | python/code/docstrint.py | fishhello/learn | b87eaa9908e895a917c05c883a744533b38984cc | [
"MIT"
] | null | null | null | python/code/docstrint.py | fishhello/learn | b87eaa9908e895a917c05c883a744533b38984cc | [
"MIT"
] | null | null | null | python/code/docstrint.py | fishhello/learn | b87eaa9908e895a917c05c883a744533b38984cc | [
"MIT"
] | null | null | null | def add(x, y):
"""
计算两个输入参数之和
:param x: int.
:param y: int.
:return: int,x 和 y之和
"""
return x + y
| 14 | 24 | 0.460317 |
0865f6b32bd688b89e6c832866587822081fca82 | 1,665 | py | Python | apps/accounts/admin.py | switzersc-usds/bluebutton-web-server | 176b118fb3467fe6dc424f40bc117d814714d429 | [
"Apache-2.0"
] | null | null | null | apps/accounts/admin.py | switzersc-usds/bluebutton-web-server | 176b118fb3467fe6dc424f40bc117d814714d429 | [
"Apache-2.0"
] | null | null | null | apps/accounts/admin.py | switzersc-usds/bluebutton-web-server | 176b118fb3467fe6dc424f40bc117d814714d429 | [
"Apache-2.0"
] | null | null | null | from django.contrib import admin
from django.contrib.auth.models import User
from django.contrib.auth.admin import UserAdmin
from .models import (
ValidPasswordResetKey,
UserProfile,
ActivationKey,
UserIdentificationLabel)
# Register simple models with the default admin.
admin.site.register(ActivationKey)
admin.site.register(ValidPasswordResetKey)
# NOTE(review): this mutates the shared django.contrib.auth UserAdmin class in
# place (any other app importing UserAdmin sees the modified list_display);
# consider subclassing instead -- confirm nothing relies on the default columns.
ua = UserAdmin
ua.list_display = ('username', 'email', 'first_name',
                   'last_name', 'is_staff', 'is_active', 'date_joined')
# Re-register the built-in User model with the customized admin.
admin.site.unregister(User)
admin.site.register(User, ua)
class UserProfileAdmin(admin.ModelAdmin):
    # Admin for UserProfile; exposes fields of the related User (email, join
    # date) as sortable list columns via the accessor methods below.

    def get_user_email(self, obj):
        # column accessor for the related user's email address
        return obj.user.email
    # make the computed column sortable and give it a header label
    get_user_email.admin_order_field = "user__email"
    get_user_email.short_description = "Email Address"
    def get_user_joined(self, obj):
        # column accessor for the related user's registration date
        return obj.user.date_joined
    get_user_joined.admin_order_field = "user__date_joined"
    get_user_joined.short_description = "Date Joined"
    list_display = ('user', 'name', 'user_type',
                    'organization_name', 'get_user_email',
                    'get_user_joined')
    search_fields = ('user__username', 'user__email', 'user__first_name',
                     'user__last_name', 'user_type', 'organization_name',
                     'user__date_joined')
    # raw id widget avoids loading every user into a select box
    raw_id_fields = ("user", )
admin.site.register(UserProfile, UserProfileAdmin)
class UserIdentificationLabelAdmin(admin.ModelAdmin):
    # Admin for UserIdentificationLabel with a horizontal multi-select widget
    # for the many-to-many 'users' relation.
    model = UserIdentificationLabel
    filter_horizontal = ('users',)
    list_display = ("name", "slug", "weight")
    list_filter = ("name", "slug")
    # lowest weight first in the change list
    ordering = ("weight", )
admin.site.register(UserIdentificationLabel, UserIdentificationLabelAdmin)
| 27.75 | 74 | 0.6997 |
74bd83349961eb43ea7fcac9d1f84aa392773d13 | 231,686 | py | Python | h2o-py/tests/pyunit_utils/utilsPY.py | JannisBush/h2o-3 | 30aa2a86e6bfa1febb5f95f3cb43811337895f7f | [
"Apache-2.0"
] | null | null | null | h2o-py/tests/pyunit_utils/utilsPY.py | JannisBush/h2o-3 | 30aa2a86e6bfa1febb5f95f3cb43811337895f7f | [
"Apache-2.0"
] | null | null | null | h2o-py/tests/pyunit_utils/utilsPY.py | JannisBush/h2o-3 | 30aa2a86e6bfa1febb5f95f3cb43811337895f7f | [
"Apache-2.0"
] | 1 | 2021-09-09T03:47:11.000Z | 2021-09-09T03:47:11.000Z | # Py2 compat
from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
from past.builtins import basestring
# standard lib
import copy
import datetime
from decimal import *
from functools import reduce
import imp
import json
import math
import os
import random
import re
import shutil
import string
import subprocess
from subprocess import STDOUT,PIPE
import sys
import time # needed to randomly generate time
import threading
import urllib.request, urllib.error, urllib.parse
import uuid # call uuid.uuid4() to generate unique uuid numbers
try:
from StringIO import StringIO # py2 (first as py2 also has io.StringIO, but without string support, only unicode)
except:
from io import StringIO # py3
try:
from tempfile import TemporaryDirectory
except ImportError:
import tempfile
    class TemporaryDirectory:
        # Minimal backport of tempfile.TemporaryDirectory for interpreters
        # that lack it (Python 2 compat): creates a temp dir on __enter__
        # and removes it recursively on __exit__.
        def __init__(self):
            self.tmp_dir = None
        def __enter__(self):
            # mkdtemp creates a uniquely-named directory and returns its path
            self.tmp_dir = tempfile.mkdtemp()
            return self.tmp_dir
        def __exit__(self, *args):
            shutil.rmtree(self.tmp_dir)
# 3rd parties
import numpy as np
import pandas as pd
from scipy.sparse import csr_matrix
import scipy.special
# h2o
sys.path.insert(1, "../../")
import h2o
from h2o.model.binomial import H2OBinomialModel
from h2o.model.clustering import H2OClusteringModel
from h2o.model.multinomial import H2OMultinomialModel
from h2o.model.ordinal import H2OOrdinalModel
from h2o.model.regression import H2ORegressionModel
from h2o.estimators import H2OGradientBoostingEstimator, H2ODeepLearningEstimator, H2OGeneralizedLinearEstimator, \
H2OGeneralizedAdditiveEstimator, H2OKMeansEstimator, H2ONaiveBayesEstimator, H2ORandomForestEstimator, \
H2OPrincipalComponentAnalysisEstimator
from h2o.utils.typechecks import is_type
from h2o.utils.shared_utils import temp_ctr # unused in this file but exposed here for symmetry with rest_ctr
class Timeout:
    """Context manager arming a one-shot timer that fires ``on_timeout`` after
    ``timeout_secs`` unless the managed block finishes first.

    A ``None`` or negative ``timeout_secs`` disables the timer entirely.
    """

    def __init__(self, timeout_secs, on_timeout=None):
        if timeout_secs is not None and timeout_secs >= 0:
            self.timer = threading.Timer(timeout_secs, on_timeout)
        else:
            self.timer = None

    def __enter__(self):
        if self.timer is not None:
            self.timer.start()
        return self

    def __exit__(self, *args):
        # cancel is a no-op if the timer already fired
        if self.timer is not None:
            self.timer.cancel()
class Namespace:
    """
    simplistic namespace class allowing to create bag/namespace objects that are easily extendable in a functional way
    """

    @staticmethod
    def add(namespace, **kwargs):
        # mutate the given namespace in place; return it to allow chaining
        vars(namespace).update(kwargs)
        return namespace

    def __init__(self, **kwargs):
        vars(self).update(kwargs)

    def __str__(self):
        return str(vars(self))

    def __repr__(self):
        return repr(vars(self))

    def extend(self, **kwargs):
        """
        :param kwargs: attributes extending the current namespace
        :return: a new namespace containing same attributes as the original + the extended ones
        """
        merged = Namespace(**vars(self))
        vars(merged).update(kwargs)
        return merged
def ns(**kwargs):
    """Shorthand factory for a Namespace built from keyword arguments."""
    return Namespace(**kwargs)
def gen_random_uuid(numberUUID):
    """Generate a list of ``numberUUID`` fresh random (version 4) UUIDs."""
    return [uuid.uuid4() for _ in range(numberUUID)]
def gen_random_time(numberTimes, maxtime= datetime.datetime(2080, 8,6,8,14,59), mintime=datetime.datetime(1980, 8,6,6,14,59)):
    '''
    Generate a list of random datetimes uniformly sampled between mintime and maxtime.

    :param numberTimes: number of random datetimes to generate
    :param maxtime: upper bound of the sampled range (inclusive)
    :param mintime: lower bound of the sampled range (inclusive)
    :return: list of datetime.datetime objects of length numberTimes
    '''
    mintime_ts = int(time.mktime(mintime.timetuple()))
    maxtime_ts = int(time.mktime(maxtime.timetuple()))
    randomTimes = numberTimes*[None]
    for tindex in range(numberTimes):
        temptime = random.randint(mintime_ts, maxtime_ts)
        # bug fix: previously referenced the undefined name 'temptimes',
        # which raised NameError on every call
        randomTimes[tindex] = datetime.datetime.fromtimestamp(temptime)
    return randomTimes
def check_models(model1, model2, use_cross_validation=False, op='e'):
    """
    Check that the given models are equivalent.

    :param model1: first model to compare
    :param model2: second model to compare
    :param use_cross_validation: boolean. if True, use validation metrics to determine model equality. Otherwise, use
    training metrics.
    :param op: comparison operator to use. 'e':==, 'g':>, 'ge':>=
    :return: None. Throw meaningful error messages if the check fails
    """
    # 1. Check model types
    model1_type = model1.__class__.__name__
    # bug fix: this previously read model1's class name a second time, so the
    # type check below could never fail
    model2_type = model2.__class__.__name__
    # compare with == (string equality), not 'is' (identity is not guaranteed
    # for equal strings)
    assert model1_type == model2_type, "The model types differ. The first model is of type {0} and the second " \
                                       "model is of type {1}.".format(model1_type, model2_type)

    # 2. Check model metrics
    if isinstance(model1,H2OBinomialModel): # 2a. Binomial
        # F1
        f1_1 = model1.F1(xval=use_cross_validation)
        f1_2 = model2.F1(xval=use_cross_validation)
        if op == 'e': assert f1_1[0][1] == f1_2[0][1], "The first model has an F1 of {0} and the second model has an F1 of " \
                                                       "{1}. Expected the first to be == to the second.".format(f1_1[0][1], f1_2[0][1])
        elif op == 'g': assert f1_1[0][1] > f1_2[0][1], "The first model has an F1 of {0} and the second model has an F1 of " \
                                                        "{1}. Expected the first to be > than the second.".format(f1_1[0][1], f1_2[0][1])
        elif op == 'ge': assert f1_1[0][1] >= f1_2[0][1], "The first model has an F1 of {0} and the second model has an F1 of " \
                                                          "{1}. Expected the first to be >= than the second.".format(f1_1[0][1], f1_2[0][1])
    elif isinstance(model1,H2ORegressionModel): # 2b. Regression
        # MSE
        mse1 = model1.mse(xval=use_cross_validation)
        mse2 = model2.mse(xval=use_cross_validation)
        if op == 'e': assert mse1 == mse2, "The first model has an MSE of {0} and the second model has an MSE of " \
                                           "{1}. Expected the first to be == to the second.".format(mse1, mse2)
        elif op == 'g': assert mse1 > mse2, "The first model has an MSE of {0} and the second model has an MSE of " \
                                            "{1}. Expected the first to be > than the second.".format(mse1, mse2)
        elif op == 'ge': assert mse1 >= mse2, "The first model has an MSE of {0} and the second model has an MSE of " \
                                              "{1}. Expected the first to be >= than the second.".format(mse1, mse2)
    elif isinstance(model1,H2OMultinomialModel) or isinstance(model1,H2OOrdinalModel): # 2c. Multinomial
        # hit-ratio
        pass
    elif isinstance(model1,H2OClusteringModel): # 2d. Clustering
        # totss
        totss1 = model1.totss(xval=use_cross_validation)
        totss2 = model2.totss(xval=use_cross_validation)
        if op == 'e': assert totss1 == totss2, "The first model has an TOTSS of {0} and the second model has an " \
                                               "TOTSS of {1}. Expected the first to be == to the second.".format(totss1,
                                                                                                                 totss2)
        elif op == 'g': assert totss1 > totss2, "The first model has an TOTSS of {0} and the second model has an " \
                                                "TOTSS of {1}. Expected the first to be > than the second.".format(totss1,
                                                                                                                   totss2)
        elif op == 'ge': assert totss1 >= totss2, "The first model has an TOTSS of {0} and the second model has an " \
                                                  "TOTSS of {1}. Expected the first to be >= than the second." \
                                                  "".format(totss1, totss2)
def check_dims_values(python_obj, h2o_frame, rows, cols, dim_only=False):
    """
    Check that the dimensions and values of the python object and H2OFrame are equivalent. Assumes that the python
    object conforms to the rules specified in the h2o frame documentation.

    :param python_obj: a (nested) list, tuple, dictionary, numpy.ndarray, ,or pandas.DataFrame
    :param h2o_frame: an H2OFrame
    :param rows: number of rows
    :param cols: number of columns
    :param dim_only: check the dimensions only
    :return: None
    """
    h2o_rows, h2o_cols = h2o_frame.dim
    assert h2o_rows == rows and h2o_cols == cols, "failed dim check! h2o_rows:{0} rows:{1} h2o_cols:{2} cols:{3}" \
                                                  "".format(h2o_rows, rows, h2o_cols, cols)
    if not dim_only:
        if isinstance(python_obj, (list, tuple)):
            for c in range(cols):
                for r in range(rows):
                    pval = python_obj[r]
                    # a flat list/tuple is a single column; a nested row is indexed by column
                    if isinstance(pval, (list, tuple)): pval = pval[c]
                    hval = h2o_frame[r, c]
                    # tolerance comparison guards against floating point round-off
                    assert pval == hval or abs(pval - hval) < 1e-10, \
                        "expected H2OFrame to have the same values as the python object for row {0} " \
                        "and column {1}, but h2o got {2} and python got {3}.".format(r, c, hval, pval)
        elif isinstance(python_obj, dict):
            for r in range(rows):
                for k in list(python_obj.keys()):
                    # non-iterable dict values are scalars broadcast to every row
                    pval = python_obj[k][r] if hasattr(python_obj[k],'__iter__') else python_obj[k]
                    hval = h2o_frame[r,k]
                    assert pval == hval, "expected H2OFrame to have the same values as the python object for row {0} " \
                                         "and column {1}, but h2o got {2} and python got {3}.".format(r, k, hval, pval)
def np_comparison_check(h2o_data, np_data, num_elements):
    """
    Check values achieved by h2o against values achieved by numpy by sampling
    num_elements random cells and comparing them within a 1e-5 tolerance.

    :param h2o_data: an H2OFrame or H2OVec
    :param np_data: a numpy array
    :param num_elements: number of elements to compare
    :return: None
    """
    # note: the old imp.find_module('numpy') probe and local re-import were
    # removed -- numpy is already imported module-wide as np, and 'imp' is
    # deprecated (removed in Python 3.12)
    rows, cols = h2o_data.dim
    for i in range(num_elements):
        r = random.randint(0, rows - 1)
        c = random.randint(0, cols - 1)
        h2o_val = h2o_data[r, c]
        # a 1-D numpy array represents a single column
        np_val = np_data[r, c] if len(np_data.shape) > 1 else np_data[r]
        if isinstance(np_val, np.bool_): np_val = bool(np_val)  # numpy haz special bool type :(
        assert np.absolute(h2o_val - np_val) < 1e-5, \
            "failed comparison check! h2o computed {0} and numpy computed {1}".format(h2o_val, np_val)
# perform h2o predict and mojo predict. Frames containing h2o prediction is returned and mojo predict are
# returned.
def mojo_predict(model, tmpdir, mojoname, glrmReconstruct=False, get_leaf_node_assignment=False, glrmIterNumber=-1, zipFilePath=None):
    """
    perform h2o predict and mojo predict.  Frames containing h2o prediction is returned and mojo predict are returned.
    It is assumed that the input data set is saved as in.csv in tmpdir directory.

    :param model: h2o model where you want to use to perform prediction
    :param tmpdir: directory where your mojo zip files are stired
    :param mojoname: name of your mojo zip file.
    :param glrmReconstruct: True to return reconstructed dataset, else return the x factor.
    :param get_leaf_node_assignment: True to score leaf node assignments instead of plain predictions
    :param glrmIterNumber: if > 0, passed to the mojo scorer as --glrmIterNumber
    :param zipFilePath: optional explicit path to the mojo zip; overrides tmpdir/mojoname
    :return: the h2o prediction frame and the mojo prediction frame
    """
    newTest = h2o.import_file(os.path.join(tmpdir, 'in.csv'), header=1)  # Make sure h2o and mojo use same in.csv
    predict_h2o = model.predict(newTest)

    # load mojo and have it do predict
    outFileName = os.path.join(tmpdir, 'out_mojo.csv')
    mojoZip = os.path.join(tmpdir, mojoname) + ".zip"
    if not(zipFilePath==None):
        mojoZip = zipFilePath

    # walk up from this file's real path to the h2o-py checkout root
    genJarDir = str.split(os.path.realpath("__file__"),'/')
    genJarDir = '/'.join(genJarDir[0:genJarDir.index('h2o-py')])  # locate directory of genmodel.jar

    java_cmd = ["java", "-ea", "-cp", os.path.join(genJarDir, "h2o-assemblies/genmodel/build/libs/genmodel.jar"),
                "-Xmx12g", "-XX:MaxPermSize=2g", "-XX:ReservedCodeCacheSize=256m", "hex.genmodel.tools.PredictCsv",
                "--input", os.path.join(tmpdir, 'in.csv'), "--output",
                outFileName, "--mojo", mojoZip, "--decimal"]

    if get_leaf_node_assignment:
        java_cmd.append("--leafNodeAssignment")
        # keep the h2o-side prediction consistent with the mojo scoring mode
        predict_h2o = model.predict_leaf_node_assignment(newTest)

    if glrmReconstruct:  # used for GLRM to grab the x coefficients (factors) instead of the predicted values
        java_cmd.append("--glrmReconstruct")

    if glrmIterNumber > 0:
        java_cmd.append("--glrmIterNumber")
        java_cmd.append(str(glrmIterNumber))

    p = subprocess.Popen(java_cmd, stdout=PIPE, stderr=STDOUT)
    o, e = p.communicate()
    files = os.listdir(tmpdir)
    print("listing files {1} in directory {0}".format(tmpdir, files))
    outfile = os.path.join(tmpdir, 'out_mojo.csv')
    if not os.path.exists(outfile) or os.stat(outfile).st_size == 0:
        # scoring produced nothing -- surface the java scorer's combined stdout/stderr
        print("MOJO SCORING FAILED:")
        print("--------------------")
        print(o.decode("utf-8"))
    print("***** importing file {0}".format(outfile))
    pred_mojo = h2o.import_file(outfile, header=1)  # load mojo prediction in
    # to a frame and compare
    if glrmReconstruct or ('glrm' not in model.algo):
        return predict_h2o, pred_mojo
    else:
        # GLRM x-factor scoring: return the input frame id instead of h2o predictions
        return newTest.frame_id, pred_mojo
# perform pojo predict. Frame containing pojo predict is returned.
def pojo_predict(model, tmpdir, pojoname):
    """
    Score with a POJO: download the model's POJO, compile it, and run
    hex.genmodel.tools.PredictCsv on the data set saved as in.csv in tmpdir.

    :param model: h2o model to export as a POJO
    :param tmpdir: directory containing in.csv; also receives the generated artifacts
    :param pojoname: class name of the generated POJO
    :return: H2OFrame holding the POJO predictions (imported from out_pojo.csv)
    """
    h2o.download_pojo(model, path=tmpdir)
    h2o_genmodel_jar = os.path.join(tmpdir, "h2o-genmodel.jar")
    java_file = os.path.join(tmpdir, pojoname + ".java")

    in_csv = (os.path.join(tmpdir, 'in.csv'))  # import the test dataset
    print("Compiling Java Pojo")
    javac_cmd = ["javac", "-cp", h2o_genmodel_jar, "-J-Xmx12g", java_file]
    subprocess.check_call(javac_cmd)

    out_pojo_csv = os.path.join(tmpdir, "out_pojo.csv")
    # classpath separator differs between Windows and POSIX
    cp_sep = ";" if sys.platform == "win32" else ":"
    java_cmd = ["java", "-ea", "-cp", h2o_genmodel_jar + cp_sep + tmpdir, "-Xmx12g",
                "-XX:ReservedCodeCacheSize=256m", "hex.genmodel.tools.PredictCsv",
                "--pojo", pojoname, "--input", in_csv, "--output", out_pojo_csv, "--decimal"]
    p = subprocess.Popen(java_cmd, stdout=PIPE, stderr=STDOUT)
    o, e = p.communicate()
    print("Java output: {0}".format(o))
    assert os.path.exists(out_pojo_csv), "Expected file {0} to exist, but it does not.".format(out_pojo_csv)
    predict_pojo = h2o.import_file(out_pojo_csv, header=1)
    return predict_pojo
def javapredict(algo, equality, train, test, x, y, compile_only=False, separator=",", setInvNumNA=False,**kwargs):
    """
    Train an H2O model, download its POJO, compile it, score the test frame with
    both H2O and the POJO (hex.genmodel.tools.PredictCsv), and compare the two
    sets of predictions.

    :param algo: one of "gbm", "random_forest", "deeplearning", "glm", "gam",
        "naive_bayes", "kmeans", "pca"
    :param equality: "class" or "numeric"; validated only -- frames are compared
        with compare_frames_local either way
    :param train: training H2OFrame
    :param test: test H2OFrame scored by both H2O and the POJO
    :param x: predictor columns
    :param y: response column (ignored for kmeans/pca)
    :param compile_only: if True, stop after compiling the POJO (no scoring)
    :param separator: field separator substituted into the POJO input csv
    :param setInvNumNA: if True, pass --setConvertInvalidNum to PredictCsv
    :param kwargs: forwarded to the estimator constructor
    :return: None; raises on any prediction mismatch
    """
    print("Creating model in H2O")
    if algo == "gbm": model = H2OGradientBoostingEstimator(**kwargs)
    elif algo == "random_forest": model = H2ORandomForestEstimator(**kwargs)
    elif algo == "deeplearning": model = H2ODeepLearningEstimator(**kwargs)
    elif algo == "glm": model = H2OGeneralizedLinearEstimator(**kwargs)
    elif algo == "gam": model = H2OGeneralizedAdditiveEstimator(**kwargs)
    elif algo == "naive_bayes": model = H2ONaiveBayesEstimator(**kwargs)
    elif algo == "kmeans": model = H2OKMeansEstimator(**kwargs)
    elif algo == "pca": model = H2OPrincipalComponentAnalysisEstimator(**kwargs)
    else: raise ValueError
    # unsupervised algos take no response column
    if algo == "kmeans" or algo == "pca": model.train(x=x, training_frame=train)
    else: model.train(x=x, y=y, training_frame=train)
    print(model)

    # HACK: munge model._id so that it conforms to Java class name. For example, change K-means to K_means.
    # TODO: clients should extract Java class name from header.
    regex = re.compile("[+\\-* !@#$%^&()={}\\[\\]|;:'\"<>,.?/]")
    pojoname = regex.sub("_", model._id)

    print("Downloading Java prediction model code from H2O")
    tmpdir = os.path.normpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "results", pojoname))
    os.makedirs(tmpdir)
    h2o.download_pojo(model, path=tmpdir)
    h2o_genmodel_jar = os.path.join(tmpdir, "h2o-genmodel.jar")
    assert os.path.exists(h2o_genmodel_jar), "Expected file {0} to exist, but it does not.".format(h2o_genmodel_jar)
    print("h2o-genmodel.jar saved in {0}".format(h2o_genmodel_jar))
    java_file = os.path.join(tmpdir, pojoname + ".java")
    assert os.path.exists(java_file), "Expected file {0} to exist, but it does not.".format(java_file)
    print("java code saved in {0}".format(java_file))

    print("Compiling Java Pojo")
    javac_cmd = ["javac", "-cp", h2o_genmodel_jar, "-J-Xmx12g", "-J-XX:MaxPermSize=256m", java_file]
    subprocess.check_call(javac_cmd)

    if not compile_only:
        print("Predicting in H2O")
        predictions = model.predict(test)
        predictions.summary()
        predictions.head()
        out_h2o_csv = os.path.join(tmpdir, "out_h2o.csv")
        h2o.download_csv(predictions, out_h2o_csv)
        assert os.path.exists(out_h2o_csv), "Expected file {0} to exist, but it does not.".format(out_h2o_csv)
        print("H2O Predictions saved in {0}".format(out_h2o_csv))

        print("Setting up for Java POJO")
        in_csv = os.path.join(tmpdir, "in.csv")
        h2o.download_csv(test[x], in_csv)

        # hack: the PredictCsv driver can't handle quoted strings, so remove them
        f = open(in_csv, "r+")
        csv = f.read()
        csv = re.sub('\"', "", csv)
        csv = re.sub(",", separator, csv)  # replace with arbitrary separator for input dataset
        f.seek(0)
        f.write(csv)
        f.truncate()
        f.close()
        assert os.path.exists(in_csv), "Expected file {0} to exist, but it does not.".format(in_csv)
        print("Input CSV to PredictCsv saved in {0}".format(in_csv))

        print("Running PredictCsv Java Program")
        out_pojo_csv = os.path.join(tmpdir, "out_pojo.csv")
        # classpath separator differs between Windows and POSIX
        cp_sep = ";" if sys.platform == "win32" else ":"
        java_cmd = ["java", "-ea", "-cp", h2o_genmodel_jar + cp_sep + tmpdir, "-Xmx12g", "-XX:MaxPermSize=2g",
                    "-XX:ReservedCodeCacheSize=256m", "hex.genmodel.tools.PredictCsv", "--decimal",
                    "--pojo", pojoname, "--input", in_csv, "--output", out_pojo_csv, "--separator", separator]
        if setInvNumNA:
            java_cmd.append("--setConvertInvalidNum")
        p = subprocess.Popen(java_cmd, stdout=PIPE, stderr=STDOUT)
        o, e = p.communicate()
        print("Java output: {0}".format(o))
        assert os.path.exists(out_pojo_csv), "Expected file {0} to exist, but it does not.".format(out_pojo_csv)
        predictions2 = h2o.upload_file(path=out_pojo_csv)
        print("Pojo predictions saved in {0}".format(out_pojo_csv))

        print("Comparing predictions between H2O and Java POJO")
        # Dimensions
        hr, hc = predictions.dim
        pr, pc = predictions2.dim
        assert hr == pr, "Expected the same number of rows, but got {0} and {1}".format(hr, pr)
        assert hc == pc, "Expected the same number of cols, but got {0} and {1}".format(hc, pc)

        # Value
        if not(equality == "class"or equality == "numeric"):
            raise ValueError
        compare_frames_local(predictions, predictions2, prob=1, tol=1e-4)  # faster frame compare
def javamunge(assembly, pojoname, test, compile_only=False):
    """
    Verify that an H2OAssembly's munging POJO reproduces H2O's own munging.

    Downloads and compiles the munging POJO generated from *assembly*; unless
    compile_only is True, it then munges *test* both in H2O and through the Java
    MungeCsv driver and asserts the two outputs agree cell by cell.

    :param assembly: an already-fit H2OAssembly whose POJO is to be verified.
    :param pojoname: name for the generated POJO class (also names the results subdir).
    :param test: H2OFrame used to compare the H2O output against the POJO output.
    :param compile_only: if True, stop after compiling the generated POJO.
    """
    print("Downloading munging POJO code from H2O")
    # artifacts are collected under <module dir>/../results/<pojoname>
    tmpdir = os.path.normpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "results", pojoname))
    os.makedirs(tmpdir)
    assembly.to_pojo(pojoname, path=tmpdir, get_jar=True)
    h2o_genmodel_jar = os.path.join(tmpdir, "h2o-genmodel.jar")
    assert os.path.exists(h2o_genmodel_jar), "Expected file {0} to exist, but it does not.".format(h2o_genmodel_jar)
    print("h2o-genmodel.jar saved in {0}".format(h2o_genmodel_jar))
    java_file = os.path.join(tmpdir, pojoname + ".java")
    assert os.path.exists(java_file), "Expected file {0} to exist, but it does not.".format(java_file)
    print("java code saved in {0}".format(java_file))
    print("Compiling Java Pojo")
    javac_cmd = ["javac", "-cp", h2o_genmodel_jar, "-J-Xmx12g", "-J-XX:MaxPermSize=256m", java_file]
    subprocess.check_call(javac_cmd)
    if not compile_only:
        print("Setting up for Java POJO")
        in_csv = os.path.join(tmpdir, "in.csv")
        h2o.download_csv(test, in_csv)
        assert os.path.exists(in_csv), "Expected file {0} to exist, but it does not.".format(in_csv)
        print("Input CSV to mungedCSV saved in {0}".format(in_csv))
        print("Predicting in H2O")
        munged = assembly.fit(test)
        munged.head()
        out_h2o_csv = os.path.join(tmpdir, "out_h2o.csv")
        h2o.download_csv(munged, out_h2o_csv)
        assert os.path.exists(out_h2o_csv), "Expected file {0} to exist, but it does not.".format(out_h2o_csv)
        print("Munged frame saved in {0}".format(out_h2o_csv))
        print("Running PredictCsv Java Program")
        out_pojo_csv = os.path.join(tmpdir, "out_pojo.csv")
        # classpath separator differs between Windows and POSIX platforms
        cp_sep = ";" if sys.platform == "win32" else ":"
        java_cmd = ["java", "-ea", "-cp", h2o_genmodel_jar + cp_sep + tmpdir, "-Xmx12g", "-XX:MaxPermSize=2g",
                    "-XX:ReservedCodeCacheSize=256m", "hex.genmodel.tools.MungeCsv", "--header", "--munger", pojoname,
                    "--input", in_csv, "--output", out_pojo_csv]
        print("JAVA COMMAND: " + " ".join(java_cmd))
        p = subprocess.Popen(java_cmd, stdout=PIPE, stderr=STDOUT)
        o, e = p.communicate()
        print("Java output: {0}".format(o))
        assert os.path.exists(out_pojo_csv), "Expected file {0} to exist, but it does not.".format(out_pojo_csv)
        # re-import with the original column types so the comparison is apples-to-apples
        munged2 = h2o.upload_file(path=out_pojo_csv, col_types=test.types)
        print("Pojo predictions saved in {0}".format(out_pojo_csv))
        print("Comparing predictions between H2O and Java POJO")
        # Dimensions
        hr, hc = predictions_dim = munged.dim
        pr, pc = munged2.dim
        assert hr == pr, "Expected the same number of rows, but got {0} and {1}".format(hr, pr)
        assert hc == pc, "Expected the same number of cols, but got {0} and {1}".format(hc, pc)
        # Value: numerics must agree within 1e-8 (NaN matches NaN); everything else exactly
        import math
        import numbers
        munged.show()
        munged2.show()
        for r in range(hr):
            for c in range(hc):
                hp = munged[r,c]
                pp = munged2[r,c]
                if isinstance(hp, numbers.Number):
                    assert isinstance(pp, numbers.Number)
                    assert (math.fabs(hp-pp) < 1e-8) or (math.isnan(hp) and math.isnan(pp)), "Expected munged rows to be the same for row {0}, but got {1}, and {2}".format(r, hp, pp)
                else:
                    assert hp==pp, "Expected munged rows to be the same for row {0}, but got {1}, and {2}".format(r, hp, pp)
def locate(path):
    """
    Search for a relative path and turn it into an absolute path.
    This is handy when hunting for data files to be passed into h2o and used by import file.
    Note: This function is for unit testing purposes only.

    :param path: relative path to search for
    :return: absolute path if found, or an s3 fallback URL; raises ValueError otherwise.
    """
    if test_is_on_hadoop():
        # Jenkins jobs create symbolic links to smalldata and bigdata on the machine that starts the test.
        # In an h2o multinode hadoop cluster the clustered machines don't know about that symlink, so we
        # must hand back the real path under /home/0xdiag/ (where ALL jenkins machines keep the data).
        # The path arg MUST therefore be an immediate subdirectory of /home/0xdiag/ (smalldata or bigdata).
        resolved = os.path.realpath(os.path.join("/home/0xdiag/", path))
        if not os.path.exists(resolved):
            raise ValueError("File not found: " + path)
        return resolved
    # walk upward from the current working directory until the relative path resolves
    search_dir = os.path.realpath(os.getcwd())
    try:
        while True:
            candidate = os.path.join(search_dir, path)
            if os.path.exists(candidate):
                return candidate
            parent = os.path.dirname(search_dir)
            if parent == search_dir:          # reached the filesystem root
                raise ValueError("File not found: " + path)
            search_dir = parent
    except ValueError:
        # last resort: the public S3 bucket mirroring the test data
        url = "https://h2o-public-test-data.s3.amazonaws.com/{}".format(path)
        if url_exists(url):
            return url
        raise
def url_exists(url):
    """Return True iff a HEAD request to *url* is answered with HTTP 200."""
    probe = urllib.request.Request(url, method='HEAD')
    try:
        with urllib.request.urlopen(probe) as response:
            return response.status == 200
    except urllib.error.URLError:
        return False
def hadoop_namenode_is_accessible():
    """
    Return True iff the Hadoop namenode web UI (port 50070) answers an HTTP request.

    BUG FIX: the original called ``urllib.urlopen``, which does not exist in
    Python 3 (this module uses ``urllib.request`` elsewhere), so the bare
    ``except`` swallowed the AttributeError and the check always returned False.
    The bare ``except:`` is also narrowed so that e.g. KeyboardInterrupt is not
    silently absorbed.
    """
    url = "http://{0}:50070".format(hadoop_namenode())
    try:
        urllib.request.urlopen(url)
        return True
    except Exception:
        # any connection/HTTP failure means the namenode is not reachable
        return False
def test_is_on_hadoop():
    """
    Return the ``__on_hadoop__`` flag that run.py injects into the
    ``tests.pyunit_utils`` module, or False when it was never set.

    NOTE(review): this assumes ``tests.pyunit_utils`` is present in
    ``sys.modules``; if it is not, the lookup raises KeyError -- confirm
    callers only use this inside the test harness.
    """
    if hasattr(sys.modules["tests.pyunit_utils"], '__on_hadoop__'):
        return sys.modules["tests.pyunit_utils"].__on_hadoop__
    return False
def hadoop_namenode():
    """
    Return the short Hadoop namenode host name, or None when not configured.

    Resolution order: the NAME_NODE environment variable (first DNS label only),
    then the ``__hadoop_namenode__`` attribute injected into tests.pyunit_utils
    by run.py.
    """
    env_value = os.getenv("NAME_NODE")
    if env_value:
        return env_value.split(".")[0]
    harness_module = sys.modules["tests.pyunit_utils"]
    if hasattr(harness_module, '__hadoop_namenode__'):
        return harness_module.__hadoop_namenode__
    return None
def pyunit_exec(test_name):
    """
    Load the pyunit script at *test_name* and run it as if invoked via
    ``python my_test.py``: __name__ is forced to '__main__' and __file__ to the
    script's absolute path so the test behaves identically to direct execution.

    :param test_name: path of the python test script to execute.
    """
    with open(test_name, "r") as script_file:
        source = script_file.read()
    absolute_path = os.path.abspath(test_name)
    compiled = compile(source, absolute_path, 'exec')
    exec(compiled, dict(__name__='__main__', __file__=absolute_path))
def standalone_test(test):
    """
    Execute a single zero-argument test callable against an H2O cluster.

    Connects via h2o.init() when no live connection exists, clears all cluster
    keys, writes a banner for the test into the H2O logs, then invokes *test*.
    """
    conn = h2o.connection()
    if not conn or not conn.connected:
        print("Creating connection for test %s" % test.__name__)
        h2o.init(strict_version_check=False)
        print("New session: %s" % h2o.connection().session_id)
    h2o.remove_all()
    divider = "------------------------------------------------------------"
    h2o.log_and_echo(divider)
    h2o.log_and_echo("")
    h2o.log_and_echo("STARTING TEST "+test.__name__)
    h2o.log_and_echo("")
    h2o.log_and_echo(divider)
    test()
def run_tests(tests, run_in_isolation=True):
    """
    Run every test in *tests*, flattening one level of nested lists/tuples
    (test suites) first.

    :param tests: iterable of test callables, possibly containing lists/tuples of them.
    :param run_in_isolation: when True each test goes through standalone_test
        (fresh H2O session bookkeeping); otherwise it is called directly.
    """
    # flatten one level of nesting so suites can be mixed with single tests
    all_tests = []
    for entry in tests:
        if isinstance(entry, (list, tuple)):
            all_tests.extend(entry)
        else:
            all_tests.append(entry)
    for test in all_tests:
        suffix = "" if not hasattr(test, 'tag') else " [{}]".format(test.tag)
        header = "Running {}{}".format(test.__name__, suffix)
        print("\n" + ('=' * len(header)) + "\n" + header)
        if run_in_isolation:
            standalone_test(test)
        else:
            test()
def tag_test(test, tag):
    """Attach *tag* to *test* (displayed by run_tests) unless it is None; return *test*."""
    if tag is None:
        return test
    test.tag = tag
    return test
def assert_warn(predicate, message):
    """Check *predicate* like ``assert``, but demote a failure to a printed WARN line."""
    try:
        assert predicate, message
    except AssertionError as failure:
        print("WARN: {}".format(str(failure)))
def make_random_grid_space(algo, ncols=None, nrows=None):
    """
    Construct a dictionary of the form {parameter:list_of_values, ...}, which will eventually be passed to
    H2OGridSearch to build a grid object. The parameters, and their associated values, are randomly selected.

    :param algo: a string in {"gbm", "rf", "dl", "km", "glm", "naiveBayes", "pca"} representing the algo
        dimension of the grid space
    :param ncols: number of predictor columns; used for mtries selection (rf) and k (pca)
    :param nrows: number of rows; used for k (pca)
    :return: a dictionary of parameter_name:list_of_values
    :raises ValueError: if algo is not one of the supported strings
    """
    grid_space = {}
    if algo in ["gbm", "rf"]:
        if random.randint(0,1): grid_space['ntrees'] = random.sample(list(range(1,6)),random.randint(2,3))
        if random.randint(0,1): grid_space['max_depth'] = random.sample(list(range(1,6)),random.randint(2,3))
        if random.randint(0,1): grid_space['min_rows'] = random.sample(list(range(1,11)),random.randint(2,3))
        if random.randint(0,1): grid_space['nbins'] = random.sample(list(range(2,21)),random.randint(2,3))
        if random.randint(0,1): grid_space['nbins_cats'] = random.sample(list(range(2,1025)),random.randint(2,3))
        if algo == "gbm":
            if random.randint(0,1): grid_space['learn_rate'] = [random.random() for _ in range(random.randint(2,3))]
            grid_space['distribution'] = random.sample(['bernoulli', 'multinomial', 'gaussian', 'poisson', 'tweedie', 'gamma'], 1)
        if algo == "rf":
            if random.randint(0,1): grid_space['mtries'] = random.sample(list(range(1,ncols+1)),random.randint(2,3))
            if random.randint(0,1): grid_space['sample_rate'] = [random.random() for r in range(random.randint(2,3))]
    elif algo == "km":
        grid_space['k'] = random.sample(list(range(1,10)),random.randint(2,3))
        if random.randint(0,1): grid_space['max_iterations'] = random.sample(list(range(1,1000)),random.randint(2,3))
        if random.randint(0,1): grid_space['standardize'] = [True, False]
        if random.randint(0,1): grid_space['seed'] = random.sample(list(range(1,1000)),random.randint(2,3))
        if random.randint(0,1): grid_space['init'] = random.sample(['Random','PlusPlus','Furthest'],random.randint(2,3))
    elif algo == "glm":
        if random.randint(0,1): grid_space['alpha'] = [random.random() for r in range(random.randint(2,3))]
        grid_space['family'] = random.sample(['binomial','gaussian','poisson','tweedie','gamma'], 1)
        # BUG FIX: random.sample returns a list, so the original comparison
        # `grid_space['family'] == "tweedie"` was always False and the tweedie
        # branch was dead code; compare the single sampled element instead.
        if grid_space['family'][0] == "tweedie":
            if random.randint(0,1):
                grid_space['tweedie_variance_power'] = [round(random.random()+1,6) for r in range(random.randint(2,3))]
                # BUG FIX: the original computed `1 - <list>`, which would raise a
                # TypeError; derive the link power element-wise instead.
                grid_space['tweedie_link_power'] = [1 - p for p in grid_space['tweedie_variance_power']]
    elif algo == "dl":
        if random.randint(0,1): grid_space['activation'] = \
            random.sample(["Rectifier", "Tanh", "TanhWithDropout", "RectifierWithDropout", "MaxoutWithDropout"],
                          random.randint(2,3))
        if random.randint(0,1): grid_space['l2'] = [0.001*random.random() for r in range(random.randint(2,3))]
        grid_space['distribution'] = random.sample(['bernoulli','multinomial','gaussian','poisson','tweedie','gamma'],1)
        # a redundant early `return grid_space` was removed here; the final return covers this branch
    elif algo == "naiveBayes":
        grid_space['laplace'] = 0
        if random.randint(0,1): grid_space['laplace'] = [round(random.random() + r, 6) for r in random.sample(list(range(0,11)), random.randint(2,3))]
        if random.randint(0,1): grid_space['min_sdev'] = [round(random.random(),6) for r in range(random.randint(2,3))]
        if random.randint(0,1): grid_space['eps_sdev'] = [round(random.random(),6) for r in range(random.randint(2,3))]
    elif algo == "pca":
        if random.randint(0,1): grid_space['max_iterations'] = random.sample(list(range(1,1000)),random.randint(2,3))
        if random.randint(0,1): grid_space['transform'] = random.sample(["NONE","STANDARDIZE","NORMALIZE","DEMEAN","DESCALE"], random.randint(2,3))
        grid_space['k'] = random.sample(list(range(1,min(ncols,nrows))),random.randint(2,3))
    else:
        raise ValueError
    return grid_space
# Validate given models' parameters against expected values
def expect_model_param(models, attribute_name, expected_values):
    """
    Assert that the models of a grid search collectively used exactly the expected
    value(s) for one parameter.

    :param models: a grid-search result whose ``.models`` are inspected.
    :param attribute_name: name of the model parameter to check.
    :param expected_values: a single value or a list of values the grid is expected
        to have used.
    :raises AssertionError: if the set of distinct actual values differs from
        expected_values (floats compared with tolerance 1.1e-5).
    """
    print("param: {0}".format(attribute_name))
    # collect the distinct 'actual' values across all models; when 'actual' is a
    # list (as for GLM) only its first element is considered
    actual_values = list(set([m.params[attribute_name]['actual'] \
        if type(m.params[attribute_name]['actual']) != list
        else m.params[attribute_name]['actual'][0] for m in models.models]))
    # possible for actual to be a list (GLM)
    if type(expected_values) != list:
        expected_values = [expected_values]
    # limit precision. Rounding happens in some models like RF
    # NOTE(review): `basestring` is presumably supplied by h2o's py2/py3
    # compatibility layer imported at module level -- confirm before assuming py3-only.
    actual_values = [x if isinstance(x,basestring) else round(float(x),5) for x in actual_values]
    expected_values = [x if isinstance(x,basestring) else round(float(x),5) for x in expected_values]
    print("actual values: {0}".format(actual_values))
    print("expected values: {0}".format(expected_values))
    actual_values_len = len(actual_values)
    expected_values_len = len(expected_values)
    assert actual_values_len == expected_values_len, "Expected values len: {0}. Actual values len: " \
                                                     "{1}".format(expected_values_len, actual_values_len)
    # compare element-wise after sorting both sides
    actual_values = sorted(actual_values)
    expected_values = sorted(expected_values)
    for i in range(len(actual_values)):
        if isinstance(actual_values[i], float):
            assert abs(actual_values[i]-expected_values[i]) < 1.1e-5, "Too large of a difference betewen actual and " \
                                                                     "expected value. Actual value: {}. Expected value: {}"\
                .format(actual_values[i], expected_values[i])
        else:
            assert actual_values[i] == expected_values[i], "Expected: {}. Actual: {}"\
                .format(expected_values[i], actual_values[i])
def rest_ctr():
    """Return the number of REST API requests issued on the current H2O connection."""
    connection = h2o.connection()
    return connection.requests_count
def write_syn_floating_point_dataset_glm(csv_training_data_filename, csv_validation_data_filename,
                                         csv_test_data_filename, csv_weight_name, row_count, col_count, data_type,
                                         max_p_value, min_p_value, max_w_value, min_w_value, noise_std, family_type,
                                         valid_row_count, test_row_count, class_number=2,
                                         class_method=('probability', 'probability', 'probability'),
                                         class_margin=[0.0, 0.0, 0.0]):
    """
    Generate random synthetic data sets for testing the GLM algo.

    A random intercept/weight vector is drawn first (saved to csv_weight_name); each
    requested data set is then built by sampling predictors X and deriving the response
    via y = w^T x + b + e with Gaussian noise e.  For the Binomial family
    Prob(Y = 1|X) = exp(W^T*X+e)/(1+exp(W^T*X+e)); for the Multinomial family (K classes)
    Prob(Y = c|X) = exp(Wc^T*X+e)/(sum k=0 to K-1 exp(Wk^T*X+e)).
    A data set is only generated when its filename argument is a non-empty string.

    :param csv_training_data_filename: full path to store the training set ("" to skip)
    :param csv_validation_data_filename: full path to store the validation set ("" to skip)
    :param csv_test_data_filename: full path to store the test set ("" to skip)
    :param csv_weight_name: full path to store the generated intercept and weights
    :param row_count: number of training samples
    :param col_count: number of predictors
    :param data_type: 1 for integer predictors/weights, 2 for real
    :param max_p_value: maximum predictor value
    :param min_p_value: minimum predictor value
    :param max_w_value: maximum intercept/weight value
    :param min_w_value: minimum intercept/weight value
    :param noise_std: standard deviation of the Gaussian noise e
    :param family_type: GLM family string (gaussian, multinomial, binomial)
    :param valid_row_count: number of validation samples
    :param test_row_count: number of test samples
    :param class_number: optional number of classes for binomial/multinomial
    :param class_method: optional per-data-set tuple ('probability' or 'threshold')
        controlling how the response is derived from class probabilities; with
        'threshold', samples whose top class probability does not beat the runner-up
        by the margin are discarded
    :param class_margin: optional per-data-set tuple of margins for 'threshold'
    :return: None
    """
    # generate bias b and weight as a column vector, shared by all three data sets
    weights = generate_weights_glm(csv_weight_name, col_count, data_type, min_w_value, max_w_value,
                                   family_type=family_type, class_number=class_number)
    # (filename, sample count, whether weights may be perturbed) per data set
    data_set_specs = ((csv_training_data_filename, row_count, True),
                      (csv_validation_data_filename, valid_row_count, False),
                      (csv_test_data_filename, test_row_count, False))
    for set_index, (filename, sample_count, change_weights) in enumerate(data_set_specs):
        if len(filename) > 0:
            generate_training_set_glm(filename, sample_count, col_count, min_p_value, max_p_value, data_type,
                                      family_type, noise_std, weights,
                                      class_method=class_method[set_index],
                                      class_margin=class_margin[set_index], weightChange=change_weights)
def write_syn_mixed_dataset_glm(csv_training_data_filename, csv_training_data_filename_true_one_hot,
                                csv_validation_data_filename, csv_validation_filename_true_one_hot,
                                csv_test_data_filename, csv_test_filename_true_one_hot, csv_weight_filename, row_count,
                                col_count, max_p_value, min_p_value, max_w_value, min_w_value, noise_std, family_type,
                                valid_row_count, test_row_count, enum_col, enum_level_vec, class_number=2,
                                class_method=['probability', 'probability', 'probability'],
                                class_margin=[0.0, 0.0, 0.0]):
    """
    Like write_syn_floating_point_dataset_glm, but the predictors mix categorical and
    real columns; each data set is additionally written with true one-hot encoding.

    The response is derived as y = w^T x + b + e (Gaussian noise e); for Binomial
    Prob(Y = 1|X) = exp(W^T*X+e)/(1+exp(W^T*X+e)) and for Multinomial (K classes)
    Prob(Y = c|X) = exp(Wc^T*X+e)/(sum k=0 to K-1 exp(Wk^T*X+e)).
    A data set is only generated when its filename argument is a non-empty string.

    :param csv_training_data_filename: full path to store the training set ("" to skip)
    :param csv_training_data_filename_true_one_hot: full path for the training set with true one-hot encoding
    :param csv_validation_data_filename: full path to store the validation set ("" to skip)
    :param csv_validation_filename_true_one_hot: full path for the validation set with true one-hot encoding
    :param csv_test_data_filename: full path to store the test set ("" to skip)
    :param csv_test_filename_true_one_hot: full path for the test set with true one-hot encoding
    :param csv_weight_filename: full path to store the generated intercept and weights
    :param row_count: number of training samples
    :param col_count: number of predictors
    :param max_p_value: maximum predictor value
    :param min_p_value: minimum predictor value
    :param max_w_value: maximum intercept/weight value
    :param min_w_value: minimum intercept/weight value
    :param noise_std: standard deviation of the Gaussian noise e
    :param family_type: GLM family string (gaussian, multinomial, binomial)
    :param valid_row_count: number of validation samples
    :param test_row_count: number of test samples
    :param enum_col: actual number of categorical columns in the data set
    :param enum_level_vec: vector of the maximum integer value for each categorical column
    :param class_number: optional number of classes for binomial/multinomial
    :param class_method: optional per-data-set list ('probability' or 'threshold')
        controlling how the response is derived from class probabilities
    :param class_margin: optional per-data-set list of margins for 'threshold'
    :return: None
    """
    # Each categorical column with maximum value v has v+1 levels and therefore
    # v+1 bits under true one-hot encoding, hence the +len(enum_level_vec) term.
    new_col_count = col_count - enum_col + sum(enum_level_vec)+len(enum_level_vec)
    # weights sized for true one-hot encoding; reference+one-hot encoding skips a few
    weights = generate_weights_glm(csv_weight_filename, new_col_count, 2, min_w_value, max_w_value,
                                   family_type=family_type, class_number=class_number)
    # (plain filename, true-one-hot filename, sample count, perturb weights?) per data set
    data_set_specs = ((csv_training_data_filename, csv_training_data_filename_true_one_hot, row_count, True),
                      (csv_validation_data_filename, csv_validation_filename_true_one_hot, valid_row_count, False),
                      (csv_test_data_filename, csv_test_filename_true_one_hot, test_row_count, False))
    for set_index, (filename, one_hot_filename, sample_count, change_weights) in enumerate(data_set_specs):
        if len(filename) > 0:
            generate_training_set_mixed_glm(filename, one_hot_filename, sample_count, col_count,
                                            min_p_value, max_p_value, family_type, noise_std, weights,
                                            enum_col, enum_level_vec, class_number=class_number,
                                            class_method=class_method[set_index],
                                            class_margin=class_margin[set_index], weightChange=change_weights)
def generate_weights_glm(csv_weight_filename, col_count, data_type, min_w_value, max_w_value, family_type='gaussian',
                         class_number=2):
    """
    Generate random intercept and weight vectors (integer or real) for the GLM algo and save
    the values in a file specified by csv_weight_filename.

    :param csv_weight_filename: full path of the file in which to store the intercept and weights
    :param col_count: number of predictors in the data set
    :param data_type: 1 for integer weights, 2 for real weights
    :param min_w_value: minimum intercept/weight value
    :param max_w_value: maximum intercept/weight value
    :param family_type: optional GLM family (gaussian, multinomial, binomial, ordinal)
    :param class_number: optional number of classes for binomial/multinomial/ordinal
    :return: column vector of size (col_count+1, 1) with intercept and weights, or a
        matrix of size (col_count+1, class_number) for the multi-class families
    """
    # BUG FIX: np.random.random_integers was deprecated long ago and has been removed
    # from NumPy; np.random.randint is the documented replacement. Its upper bound is
    # exclusive, hence the +1 to preserve the original inclusive range.
    if 'gaussian' in family_type.lower():
        if data_type == 1:      # random integer intercept/weight
            weight = np.random.randint(min_w_value, max_w_value+1, [col_count+1, 1])
        elif data_type == 2:    # random real intercept/weight
            weight = np.random.uniform(min_w_value, max_w_value, [col_count+1, 1])
        else:
            assert False, "dataType must be 1 or 2 for now."
    elif ('binomial' in family_type.lower()) or ('multinomial' in family_type.lower()) \
            or ('ordinal' in family_type.lower()):
        if 'binomial' in family_type.lower():   # for binomial, only need 1 set of weight
            class_number -= 1
        if class_number <= 0:
            assert False, "class_number must be >= 2!"
        temp_col_count = col_count[0] if isinstance(col_count, np.ndarray) else col_count
        if data_type == 1:      # random integer intercept/weight
            weight = np.random.randint(min_w_value, max_w_value+1, [temp_col_count+1, class_number])
        elif data_type == 2:    # random real intercept/weight
            weight = np.random.uniform(min_w_value, max_w_value, [temp_col_count+1, class_number])
        else:
            assert False, "dataType must be 1 or 2 for now."
        # special treatment for ordinal weights: zero intercepts and identical
        # per-predictor betas across all classes
        if 'ordinal' in family_type.lower():
            num_pred = len(weight)
            for index in range(class_number):
                weight[0, index] = 0
                for ind_p in range(1, num_pred):
                    weight[ind_p, index] = weight[ind_p, 0]
    else:
        # original fell through to a NameError here; fail with a clear message instead
        assert False, "family_type must contain gaussian, binomial, multinomial or ordinal."
    np.savetxt(csv_weight_filename, weight.transpose(), delimiter=",")
    return weight
def generate_training_set_glm(csv_filename, row_count, col_count, min_p_value, max_p_value, data_type, family_type,
                              noise_std, weight, class_method='probability', class_margin=0.0, weightChange=False):
    """
    Generate a supervised data set for the GLM algo given the weight vector.

    Predictors are sampled uniformly in [min_p_value, max_p_value] (integers when
    data_type == 1, reals when data_type == 2); the response is produced by
    generate_response_glm via y = w^T x + b + e where e is Gaussian noise.  For the
    Binomial family Prob(Y = 1|X) = exp(W^T*X+e)/(1+exp(W^T*X+e)); for the Multinomial
    family (K classes) Prob(Y = c|X) = exp(Wc^T*X+e)/(sum k=0 to K-1 exp(Wk^T*X+e)).
    Predictors and responses are written as CSV to csv_filename.

    :param csv_filename: full path of the CSV file to write
    :param row_count: number of samples to generate
    :param col_count: number of predictors in the data set
    :param min_p_value: minimum predictor value
    :param max_p_value: maximum predictor value
    :param data_type: 1 for integer predictors, 2 for real
    :param family_type: GLM family string (gaussian, multinomial, binomial)
    :param noise_std: standard deviation of the Gaussian noise e
    :param weight: vector/matrix w used to generate the response
    :param class_method: optional; 'probability' draws the label from the class
        probabilities, 'threshold' keeps the argmax class only when it beats the
        runner-up probability by class_margin (otherwise the sample is flagged)
    :param class_margin: optional margin used when class_method == 'threshold'
    :param weightChange: passed through to generate_response_glm
    :return: None
    """
    # BUG FIX: np.random.random_integers was deprecated and later removed from NumPy;
    # np.random.randint (exclusive upper bound, hence +1) is the replacement.
    if data_type == 1:      # generate random integer predictors
        x_mat = np.random.randint(min_p_value, max_p_value+1, [row_count, col_count])
    elif data_type == 2:    # generate random real-valued predictors
        x_mat = np.random.uniform(min_p_value, max_p_value, [row_count, col_count])
    else:
        assert False, "dataType must be 1 or 2 for now. "
    # generate the response vector for the input predictors
    response_y = generate_response_glm(weight, x_mat, noise_std, family_type,
                                       class_method=class_method, class_margin=class_margin,
                                       weightChange=weightChange)
    # write predictors and response to file in csv format; for the 'threshold'
    # class_method, flagged samples (response < 0) are kept here and can be removed
    # later via remove_negative_response
    np.savetxt(csv_filename, np.concatenate((x_mat, response_y), axis=1), delimiter=",")
def generate_clusters(cluster_center_list, cluster_pt_number_list, cluster_radius_list):
    """
    Generate clusters of points around the centers listed in cluster_center_list.
    The number of points per cluster is given by cluster_pt_number_list and each
    cluster's maximum radius by cluster_radius_list.

    :param cluster_center_list: list of coordinates of cluster centers
    :param cluster_pt_number_list: number of points to generate for each cluster center
    :param cluster_radius_list: list of the radius of each cluster
    :return: matrix of sample points (rows) drawn from the various clusters, row-shuffled
    """
    k = len(cluster_pt_number_list)  # number of clusters to generate
    if (k != len(cluster_center_list)) or (k != len(cluster_radius_list)):
        assert False, "Length of list cluster_center_list, cluster_pt_number_list, cluster_radius_list must be the same!"
    training_sets = []
    for k_ind in range(k):
        new_cluster_data = generate_one_cluster(cluster_center_list[k_ind], cluster_pt_number_list[k_ind],
                                                cluster_radius_list[k_ind])
        if k_ind > 0:
            training_sets = np.concatenate((training_sets, new_cluster_data), axis=0)
        else:
            training_sets = new_cluster_data
    # BUG FIX: the original used map(np.random.shuffle, training_sets), which in
    # Python 3 builds a lazy iterator that is never consumed, so nothing was
    # shuffled (and consuming it would have scrambled coordinates within each row).
    # Shuffle the rows in place so samples from different clusters are mixed up.
    np.random.shuffle(training_sets)
    return training_sets
def generate_one_cluster(cluster_center, cluster_number, cluster_size):
    """
    Generate one full cluster of cluster_number points centered on cluster_center
    with maximum radius cluster_size.

    :param cluster_center: python list denoting coordinates of the cluster center
    :param cluster_number: number of points to generate for this cluster
    :param cluster_size: float denoting the radius of the cluster
    :return: np matrix (cluster_number x dimension) denoting the cluster points
    """
    pt_dists = np.random.uniform(0, cluster_size, [cluster_number, 1])
    coord_pts = len(cluster_center)  # dimension of each cluster point
    # BUG FIX: np.float was removed from NumPy (1.24+); the builtin float is the
    # documented replacement.
    one_cluster_data = np.zeros((cluster_number, coord_pts), dtype=float)
    for p_ind in range(cluster_number):
        coord_indices = list(range(coord_pts))
        random.shuffle(coord_indices)  # randomly determine the order in which coordinates are generated
        left_radius = float(pt_dists[p_ind, 0])  # budget of distance left to spend on coordinates
        for c_ind in range(coord_pts):
            coord_index = coord_indices[c_ind]
            one_cluster_data[p_ind, coord_index] = random.uniform(-1*left_radius+cluster_center[coord_index],
                                                                  left_radius+cluster_center[coord_index])
            # shrink the remaining radius; clamp at 0 to guard against a tiny negative
            # argument to sqrt caused by floating-point round-off
            left_radius = math.sqrt(max(0.0, pow(left_radius, 2)-pow((one_cluster_data[p_ind, coord_index]-
                                                                      cluster_center[coord_index]), 2)))
    return one_cluster_data
def remove_negative_response(x_mat, response_y):
    """
    Drop every data sample whose response is negative.

    When a binomial/multinomial data set is generated with the 'threshold' class method, samples
    whose maximum class probability does not beat the runner-up by the requested margin have their
    response marked as -1 instead of being discarded on the spot.  This helper performs the actual
    removal: it deletes every predictor row (and the matching response entry) whose response is < 0.

    :param x_mat: predictor matrix containing all predictor values
    :param response_y: response vector; negative entries flag samples to be removed
    :return: tuple (x_mat, response_y) with all negative-response samples removed
    """
    flagged_rows = np.where(response_y < 0)  # indices of samples flagged for removal
    x_mat = np.delete(x_mat, flagged_rows[0].transpose(), axis=0)
    surviving_y = response_y[response_y >= 0]  # keep only the non-negative responses
    return x_mat, surviving_y.transpose()
def generate_training_set_mixed_glm(csv_filename, csv_filename_true_one_hot, row_count, col_count, min_p_value,
                                    max_p_value, family_type, noise_std, weight, enum_col, enum_level_vec,
                                    class_number=2, class_method='probability', class_margin=0.0, weightChange=False):
    """
    Generate supervised data set given weights for the GLM algo with mixed categorical and real value
    predictors. First randomly generate the predictors, then call function generate_response_glm to generate the
    corresponding response y using the formula: y = w^T x+b+e where T is transpose, e is a random Gaussian noise
    added. For the Binomial family, the relationship between the response Y and predictor vector X is assumed to
    be Prob(Y = 1|X) = exp(W^T * X + e)/(1+exp(W^T * X + e)). For the Multinomial family, the relationship between
    the response Y (K possible classes) and predictor vector X is assumed to be
    Prob(Y = c|X) = exp(Wc^T * X + e)/(sum k=0 to K-1 (ep(Wk^T *X+e)) e is the random Gaussian noise added to the
    response. The predictors and responses are saved in a file specified by csv_filename.
    :param csv_filename: string representing full path filename to store supervised data set
    :param csv_filename_true_one_hot: string representing full path filename to store data set with true one-hot
        encoding.
    :param row_count: integer representing the number of training samples in the data set
    :param col_count: integer representing the number of predictors in the data set
    :param max_p_value: integer representing maximum predictor values
    :param min_p_value: integer representing minimum predictor values
    :param family_type: string represents the various distribution families (gaussian, multinomial, binomial)
        supported by our GLM algo
    :param noise_std: Gaussian noise standard deviation used to generate noise e to add to response
    :param weight: vector representing w in our formula to generate the response.
    :param enum_col: integer representing actual number of categorical columns in data set
    :param enum_level_vec: vector containing maximum integer value for each categorical column
    :param class_number: integer, optional, representing number classes for binomial and multinomial
    :param class_method: string, optional, describing how we derive the final response from the class probabilities
        generated for binomial and multinomial family_type. If set to 'probability', response y is generated randomly
        according to the class probabilities calculated. If set to 'threshold', response y is set to the class with
        the maximum class probability if the maximum class probability exceeds the second highest class probability by
        the value set in margin. If the maximum class probability fails to be greater by margin than the second highest
        class probability, the data sample is discarded.
    :param class_margin: float, optional, denotes the threshold by how much the maximum class probability has to
        exceed the second highest class probability by in order for us to keep the data set sample. This field is only
        meaningful if class_method is set to 'threshold'
    :param weightChange: bool, optional, forwarded to generate_and_save_mixed_glm for the true-one-hot file only;
        presumably adjusts ordinal thresholds in generate_response_glm -- see that function.
    :return: None
    """
    # generate the random training data sets
    # np.int was removed from NumPy >= 1.24; the builtin int is the documented replacement
    enum_dataset = np.zeros((row_count, enum_col), dtype=int)  # generate the categorical predictors
    # generate categorical data columns.  np.random.random_integers was removed from NumPy;
    # randint with an exclusive upper bound (+1) keeps the same inclusive range [0, enum_level_vec[indc]]
    for indc in range(enum_col):
        enum_dataset[:, indc] = np.random.randint(0, enum_level_vec[indc]+1, row_count)
    # generate real data columns
    x_mat = np.random.uniform(min_p_value, max_p_value, [row_count, col_count-enum_col])
    x_mat = np.concatenate((enum_dataset, x_mat), axis=1)  # concatenate categorical and real predictor columns
    if len(csv_filename_true_one_hot) > 0:
        generate_and_save_mixed_glm(csv_filename_true_one_hot, x_mat, enum_level_vec, enum_col, True, weight, noise_std,
                                    family_type, class_method=class_method, class_margin=class_margin, weightChange=weightChange)
    if len(csv_filename) > 0:
        generate_and_save_mixed_glm(csv_filename, x_mat, enum_level_vec, enum_col, False, weight, noise_std,
                                    family_type, class_method=class_method, class_margin=class_margin, weightChange=False)
def generate_and_save_mixed_glm(csv_filename, x_mat, enum_level_vec, enum_col, true_one_hot, weight, noise_std,
                                family_type, class_method='probability', class_margin=0.0, weightChange=False):
    """
    Given the weights and input data matrix with mixed categorical and real value predictors, this function will
    generate a supervised data set and save the input data and response in a csv format file specified by
    csv_filename. It will first encode the enums without using one hot encoding with or without a reference
    level first before generating a response Y.
    :param csv_filename: string representing full path filename to store supervised data set with reference level
        plus true one-hot encoding.
    :param x_mat: predictor matrix with mixed columns (categorical/real values)
    :param enum_level_vec: vector containing maximum integer value for each categorical column
    :param enum_col: integer representing actual number of categorical columns in data set
    :param true_one_hot: bool indicating whether we are using true one hot encoding or reference level plus
        one hot encoding
    :param weight: vector representing w in our formula to generate the response
    :param noise_std: Gaussian noise standard deviation used to generate noise e to add to response
    :param family_type: string represents the various distribution families (gaussian, multinomial, binomial) supported
        by our GLM algo
    :param class_method: string, optional, describing how we derive the final response from the class probabilities
        generated for binomial and multinomial family_type. If set to 'probability', response y is generated randomly
        according to the class probabilities calculated. If set to 'threshold', response y is set to the class with the
        maximum class probability if the maximum class probability exceeds the second highest class probability by the
        value set in the margin. If the maximum class probability fails to be greater by margin than the second highest
        class probability, the data sample is discarded.
    :param class_margin: float, optional, denotes the threshold by how much the maximum class probability has to exceed
        the second highest class probability in order for us to keep the data sample. This field is only meaningful if
        class_method is set to 'threshold'
    :param weightChange: bool, optional, forwarded to generate_response_glm; presumably triggers ordinal threshold
        re-generation there -- TODO(review): confirm intended use outside the ordinal family.
    :return: None
    """
    # encode the enums
    x_mat_encoded = encode_enum_dataset(x_mat, enum_level_vec, enum_col, true_one_hot, False)
    # extract the correct weight dimension for the data set
    # (reference-level encoding produces fewer columns than true one-hot, so trim the weight vector)
    if not true_one_hot:
        (num_row, num_col) = x_mat_encoded.shape
        weight = weight[0:num_col+1]  # +1 to take care of the intercept term
    # generate the corresponding response vector given the weight and encoded input predictors
    response_y = generate_response_glm(weight, x_mat_encoded, noise_std, family_type,
                                       class_method=class_method, class_margin=class_margin, weightChange=weightChange)
    # for familyType = 'multinomial' or 'binomial', response_y can be -ve to indicate bad sample data.
    # need to delete this before proceeding
    if ('multinomial' in family_type.lower()) or ('binomial' in family_type.lower()):
        if 'threshold' in class_method.lower():
            (x_mat,response_y) = remove_negative_response(x_mat, response_y)
    # write generated data set to file in csv format
    # NOTE(review): the predictors written out are the raw (un-encoded) x_mat while response_y was computed
    # from the encoded copy -- presumably intentional since H2O encodes enums itself; confirm with callers.
    np.savetxt(csv_filename, np.concatenate((x_mat, response_y), axis=1), delimiter=",")
def encode_enum_dataset(dataset, enum_level_vec, enum_col, true_one_hot, include_nans):
    """
    Given 2-d numpy array of predictors with categorical and real columns, this function will
    encode the enum columns with 1-hot encoding or with reference plus one hot encoding.

    :param dataset: 2-d numpy array of predictors with both categorical and real columns
    :param enum_level_vec: vector containing maximum level for each categorical column
    :param enum_col: number of categorical columns in the data set
    :param true_one_hot: bool indicating if we are using true one hot encoding or with one reference
        level + one hot encoding
    :param include_nans: bool indicating if we have nans in categorical columns
    :return: data set with categorical columns encoded with 1-hot encoding or 1-hot encoding plus reference
    """
    (num_row, num_col) = dataset.shape
    # split the data set into categorical and real parts
    enum_arrays = dataset[:, 0:enum_col]
    new_enum_arrays = []
    # perform the encoding for each element of categorical part
    for indc in range(enum_col):
        enum_col_num = enum_level_vec[indc]+1
        if not true_one_hot:
            enum_col_num -= 1  # reference-level encoding drops one column
        # bug fix: np.any(col) tests for non-zero values, not for missing values; use np.isnan
        # so the encoding is only widened by one column when the column actually contains NaNs
        if include_nans and np.any(np.isnan(enum_arrays[:, indc])):
            enum_col_num += 1
        new_temp_enum = np.zeros((num_row, enum_col_num))
        one_hot_matrix = one_hot_encoding(enum_col_num)
        last_col_index = enum_col_num-1  # NaNs map to the last encoding row
        # encode each enum using 1-hot encoding or plus reference value
        for indr in range(num_row):
            enum_val = enum_arrays[indr, indc]
            if true_one_hot:  # true one-hot: every level gets its own column
                new_temp_enum[indr, :] = replace_with_encoded_bits(one_hot_matrix, enum_val, 0, last_col_index)
            else:  # reference encoding: level 0 is the all-zero reference row
                if enum_val:
                    new_temp_enum[indr, :] = replace_with_encoded_bits(one_hot_matrix, enum_val, 1, last_col_index)
        if indc == 0:
            new_enum_arrays = new_temp_enum
        else:
            new_enum_arrays = np.concatenate((new_enum_arrays, new_temp_enum), axis=1)
    return np.concatenate((new_enum_arrays, dataset[:, enum_col:num_col]), axis=1)
def replace_with_encoded_bits(one_hot_matrix, enum_val, add_value, last_col_index):
    """
    Look up the encoded bit pattern for one categorical value.

    :param one_hot_matrix: matrix mapping each categorical level to its 1-hot encoding row
    :param enum_val: categorical data value, could be np.nan
    :param add_value: set to 1 when a reference level is used, so level indices shift down by one
    :param last_col_index: row index reserved for np.nan when missing values are encoded
    :return: vector with the encoded bits for enum_val
    """
    # missing values get the dedicated last encoding row
    if np.isnan(enum_val):
        return one_hot_matrix[last_col_index]
    # otherwise shift by the reference offset and index into the encoding table
    return one_hot_matrix[int(enum_val-add_value)]
def one_hot_encoding(enum_level):
    """
    Build the one-hot encoding lookup matrix for enum_level categorical levels.

    Row k of the returned matrix is the bit pattern used for level k; the ones sit on the
    anti-diagonal (level 0 maps to the last column).

    :param enum_level: number of levels to encode; must be >= 2
    :return: numpy array (enum_level == 2) or numpy matrix (enum_level > 2) of encoding rows
    """
    if enum_level < 2:
        assert False, "enum_level must be >= 2."
    # seed with the 2-level encoding, then grow one row and one column per extra level
    encoding = np.array([[0, 1], [1, 0]])
    for _ in range(3, enum_level+1):
        (rows, _cols) = encoding.shape
        zero_col = np.asmatrix(np.zeros(rows)).transpose()
        encoding = np.concatenate((zero_col, encoding), axis=1)  # prepend a column of zeros
        fresh_row = np.asmatrix(np.zeros(rows+1))
        fresh_row[0, 0] = 1  # the new level's bit goes in the fresh first column
        encoding = np.concatenate((encoding, fresh_row), axis=0)  # append row for the new level
    return encoding
def generate_response_glm(weight, x_mat, noise_std, family_type, class_method='probability',
                          class_margin=0.0, weightChange=False, even_distribution=True):
    """
    Generate response vector given weight matrix, predictors matrix for the GLM algo.
    :param weight: vector representing w in our formula to generate the response
    :param x_mat: random numpy matrix (2-D ndarray) containing the predictors
    :param noise_std: Gaussian noise standard deviation used to generate noise e to add to response
    :param family_type: string represents the various distribution families (Gaussian, multinomial, binomial)
        supported by our GLM algo
    :param class_method: string, optional, describing how we derive the final response from the class probabilities
        generated for binomial and multinomial familyType. If set to 'probability', response y is generated randomly
        according to the class probabilities calculated. If set to 'threshold', response y is set to the class with the
        maximum class probability if the maximum class probability exceeds the second highest class probability by the
        value set in the margin. If the maximum class probability fails to be greater by margin than the second highest
        class probability, the data sample is discarded.
    :param class_margin: float, optional, denotes the threshold by how much the maximum class probability has to exceed
        the second highest class probability in order for us to keep the data set sample. This field is only meaningful
        if class_method is set to 'threshold'
    :param weightChange: bool, optional; ordinal family only. When True the intercept thresholds stored in
        weight[0, :] are overwritten IN PLACE so that the generated classes follow the requested
        distribution -- NOTE(review): callers should expect weight to be mutated.
    :param even_distribution: bool, optional; ordinal family with weightChange only. When True the thresholds
        are chosen so the classes come out evenly sized, otherwise randomly sized.
    :return: vector representing the response
    """
    (num_row, num_col) = x_mat.shape
    # prepend a column of ones so weight's first row acts as the intercept
    temp_ones_col = np.asmatrix(np.ones(num_row)).transpose()
    x_mat = np.concatenate((temp_ones_col, x_mat), axis=1)
    response_y = x_mat * weight + noise_std * np.random.standard_normal([num_row, 1])
    if 'ordinal' in family_type.lower():
        (num_sample, num_class) = response_y.shape
        lastClass = num_class - 1
        if weightChange:
            tresp = []
            # generate the new y threshold
            for indP in range(num_sample):
                tresp.append(-response_y[indP,0])
            tresp.sort()
            num_per_class = int(len(tresp)/num_class)
            if (even_distribution):
                for indC in range(lastClass):
                    weight[0,indC] = tresp[(indC+1)*num_per_class]
            else:  # do not generate evenly distributed class, generate randomly distributed classes
                splitInd = []
                lowV = 0.1
                highV = 1
                v1 = 0
                acc = 0
                for indC in range(lastClass):
                    tempf = random.uniform(lowV, highV)
                    splitInd.append(v1+int(tempf*num_per_class))
                    v1 = splitInd[indC]  # from last class
                    acc += 1-tempf
                    highV = 1+acc
                for indC in range(lastClass):  # put in threshold
                    weight[0,indC] = tresp[splitInd[indC]]
            # regenerate the responses with the adjusted thresholds
            response_y = x_mat * weight + noise_std * np.random.standard_normal([num_row, 1])
        # np.int was removed from NumPy >= 1.24; the builtin int is the documented replacement
        discrete_y = np.zeros((num_sample, 1), dtype=int)
        # first column whose latent response is >= 0 determines the class; default is the last class
        for indR in range(num_sample):
            discrete_y[indR, 0] = lastClass
            for indC in range(lastClass):
                if (response_y[indR, indC] >= 0):
                    discrete_y[indR, 0] = indC
                    break
        return discrete_y
    # added more to form Multinomial response
    if ('multinomial' in family_type.lower()) or ('binomial' in family_type.lower()):
        temp_mat = np.exp(response_y)   # matrix of n by K where K = 1 for binomials
        if 'binomial' in family_type.lower():
            ntemp_mat = temp_mat + 1
            btemp_mat = temp_mat / ntemp_mat
            temp_mat = np.concatenate((1-btemp_mat, btemp_mat), axis=1)    # inflate temp_mat to 2 classes
        response_y = derive_discrete_response(temp_mat, class_method, class_margin, family_type)
    return response_y
def derive_discrete_response(prob_mat, class_method, class_margin, family_type='binomial'):
    """
    Generate the final class response given the class probabilities (Prob(y=k)).

    Two modes, selected by class_method:
    - 'probability' (or anything else): response y is the class with the maximum probability.
    - 'threshold': response y is the class with the maximum probability only if that probability
      exceeds the second highest class probability by at least class_margin; otherwise the sample
      is marked with response -1 so it can be discarded later (see remove_negative_response).

    :param prob_mat: probability matrix specifying the probability that y=k where k is a class
    :param class_method: string set to 'probability' or 'threshold'
    :param class_margin: if class_method='threshold', margin by which the top class probability must
        beat the runner-up for the sample to be kept
    :param family_type: unused here; kept for interface compatibility with existing callers
    :return: response vector holding the class of y, or -1 for samples to be discarded
    """
    (num_sample, num_class) = prob_mat.shape
    discrete_y = np.argmax(prob_mat, axis=1)
    # bug fix: class_method/class_margin used to be ignored entirely, so 'threshold' samples were
    # never marked with -1 and downstream remove_negative_response had nothing to remove
    if ('threshold' in class_method.lower()) and (num_class >= 2):
        sorted_prob = np.sort(prob_mat, axis=1)  # ascending: last column holds the max probability
        for ind in range(num_sample):
            top_margin = sorted_prob[ind, num_class-1] - sorted_prob[ind, num_class-2]
            if top_margin < class_margin:
                discrete_y[ind] = -1  # marker: discard this sample later
    return discrete_y
def normalize_matrix(mat):
    """
    Normalize a matrix across each row so that every row sums to 1.

    :param mat: matrix containing prob(y=k)
    :return: row-normalized matrix containing prob(y=k)
    """
    (num_row, num_class) = mat.shape
    # replicate each row's total across num_class columns via a Kronecker product,
    # then divide element-wise
    ones_row = np.ones((1, num_class), dtype=float)
    per_row_total = np.sum(mat, axis=1)
    return mat / np.kron(per_row_total, ones_row)
def move_files(dir_path, old_name, new_file, action='move'):
    """
    Move or copy a data set (old_name) to a special directory (dir_path) with a new name (new_file)
    so that we can re-run the tests if something is wrong with the algorithm under test with that
    data set.  This is done to avoid losing the data set.

    :param dir_path: string representing full directory path where a file is to be moved to
    :param old_name: string representing file (filename with full directory path) to be moved to new directory.
    :param new_file: string representing the file name of the moved file in the new directory
    :param action: string, optional, represents the action 'move' or 'copy' file
    :return: None
    """
    new_name = os.path.join(dir_path, new_file)  # generate new filename including directory path
    if os.path.isfile(old_name):    # only move/copy file old_name if it actually exists
        # use shutil instead of shelling out 'mv '/'cp ' strings: portable (works on Windows)
        # and immune to whitespace/shell-metacharacters in the file paths
        import shutil
        if 'move' in action:
            shutil.move(old_name, new_name)
        elif 'copy' in action:
            shutil.copy(old_name, new_name)
        else:
            assert False, "Illegal action setting. It can only be 'move' or 'copy'!"
def remove_files(filename):
    """
    Remove the data set saved in filename once a dynamic test completes with no error.
    Some data sets we use can be rather big; this is performed to save space.

    The removal is best effort: a missing file is silently ignored, matching the old
    behaviour of shelling out to 'rm'.

    :param filename: string representing the file to be removed. Full path is included.
    :return: None
    """
    # use os.remove instead of spawning 'rm' through a shell: portable and no shell-injection
    # risk from special characters in filename
    try:
        os.remove(filename)
    except OSError:
        pass
def random_col_duplication(num_cols, duplication_threshold, max_number, to_scale, max_scale_factor):
    """
    Randomly decide, for each predictor column, whether it should be duplicated and how many times;
    optionally attach a random scaling factor to each duplicated copy.

    :param num_cols: integer representing number of predictors used
    :param duplication_threshold: threshold to determine if a column is to be duplicated. Set
        this number low to encourage column duplication and vice versa
    :param max_number: maximum number of times a column is to be duplicated
    :param to_scale: bool indicating if a duplicated column is to be scaled
    :param max_scale_factor: real representing maximum scale value for repeated columns
    :return: tuple (col_return, col_scale_return):
        col_return: for each column of the new data matrix, the index of the original column it copies
        col_scale_return: for each column of the new data matrix, the scale to apply to that column
    """
    chosen_cols = list(range(num_cols))     # every original column is always kept once...
    chosen_scales = [1]*num_cols            # ...and kept unscaled
    for col in range(num_cols):
        # draw in [0, 1); duplicate this column only when the draw exceeds the threshold
        if random.uniform(0, 1) > duplication_threshold:
            copies = random.randint(1, max_number)  # how many extra copies of this column
            chosen_cols.extend([col]*copies)
            extra_scales = []
            for _ in range(copies):
                # scaled duplicates get a random factor, otherwise they are copied verbatim
                extra_scales.append(random.uniform(0, max_scale_factor) if to_scale else 1)
            chosen_scales.extend(extra_scales)
    # shuffle the new column order together with the matching scale factors
    shuffled_order = list(range(len(chosen_cols)))
    random.shuffle(shuffled_order)
    col_return = [chosen_cols[i] for i in shuffled_order]
    col_scale_return = [chosen_scales[i] for i in shuffled_order]
    return col_return, col_scale_return
def duplicate_scale_cols(col_indices, col_scale, old_filename, new_filename):
    """
    Perform the column duplication with scaling given the column indices and scaling factors for
    each column. The original data set is loaded from old_filename; after duplication and scaling,
    the new data set is written to new_filename in csv format.

    :param col_indices: for each column of the new data matrix, the index of the original column it copies
    :param col_scale: for each column of the new data matrix, the scale to apply to that column
    :param old_filename: string representing full directory path and filename where data set is stored
    :param new_filename: string representing full directory path and filename where new data set is to be stored
    :return: None
    """
    np_frame = np.asmatrix(np.genfromtxt(old_filename, delimiter=',', dtype=None))
    (num_row, num_col) = np_frame.shape
    # np.float was removed from NumPy >= 1.24; the builtin float is the documented replacement
    np_frame_new = np.asmatrix(np.zeros((num_row, len(col_indices)), dtype=float))
    # copy each requested source column into the new frame, applying its scale factor
    for ind in range(len(col_indices)):
        np_frame_new[:, ind] = np_frame[:, col_indices[ind]]*col_scale[ind]
    # done changing the data frame. Save it in a new file
    np.savetxt(new_filename, np_frame_new, delimiter=",")
def insert_nan_in_data(old_filename, new_filename, missing_fraction):
    """
    Given the filename of a data set stored in old_filename, randomly replace each predictor value
    with nan with probability missing_fraction. The response (last) column is never touched.
    The new data set is stored in new_filename.

    :param old_filename: string representing full directory path and filename where data set is stored
    :param new_filename: string representing full directory path and filename where new data set with missing
        values is to be stored
    :param missing_fraction: real value representing the probability of replacing a predictor with nan.
    :return: None
    """
    np_frame = np.asmatrix(np.genfromtxt(old_filename, delimiter=',', dtype=None))
    (row_count, col_count) = np_frame.shape
    # one uniform draw per predictor cell; col_count-1 skips the last column, which holds
    # the response and must stay intact
    random_matrix = np.random.uniform(0, 1, [row_count, col_count-1])
    for indr in range(row_count):   # for each predictor value, determine if to replace value with nan
        for indc in range(col_count-1):
            if random_matrix[indr, indc] < missing_fraction:
                np_frame[indr, indc] = np.nan
    # save new data set with missing values to new file
    np.savetxt(new_filename, np_frame, delimiter=",")
def print_message_values(start_string, nump_array):
    """
    Print a message string followed by the contents of a numpy array, each on its own line.

    :param start_string: string representing message to be printed
    :param nump_array: array storing something
    :return: None
    """
    for piece in (start_string, nump_array):
        print(piece)
def show_test_results(test_name, curr_test_val, new_test_val):
    """
    Print whether the test named test_name passed or failed, based on whether the failure
    counter grew while it ran, and report the outcome to the caller.

    :param test_name: string representing test name
    :param curr_test_val: number of tests failed so far before the test specified in test_name ran
    :param new_test_val: number of tests failed after the test specified in test_name ran
    :return: integer: 0 if test passed and 1 if test failed.
    """
    if new_test_val > curr_test_val:  # failure counter grew, so this test failed
        print("Ooops, " + test_name + " failed. I am sorry...")
        return 1
    print("Yeah, " + test_name + " passed!")
    return 0
def assert_H2OTwoDimTable_equal_upto(table1, table2, col_header_list, tolerance=1e-6):
    '''
    NOTE(review): this definition is shadowed by a later re-definition of the same name further
    down in this module (which additionally treats a NaN/NaN pair as equal); this earlier copy is
    therefore dead code at import time and a candidate for removal.

    This method will compare two H2OTwoDimTables that are almost of the same size. table1 can be shorter
    than table2. However, for whatever part of table2 table1 has, they must be the same.

    :param table1: H2OTwoDimTable providing the rows to check
    :param table2: H2OTwoDimTable that must agree with table1 on those rows
    :param col_header_list: list of column names (looked up in table1.col_header) to compare
    :param tolerance: maximum allowed absolute difference for float entries
    :return: None; raises AssertionError on the first mismatch
    '''
    size1 = len(table1.cell_values)
    for cname in col_header_list:
        colindex = table1.col_header.index(cname)
        for cellind in range(size1):
            val1 = table1.cell_values[cellind][colindex]
            val2 = table2.cell_values[cellind][colindex]
            # floats are compared with tolerance; anything else must match exactly
            if isinstance(val1, float) and isinstance(val2, float):
                assert abs(val1-val2) < tolerance, \
                    "table 1 value {0} and table 2 value {1} in {2} differ more than tolerance of " \
                    "{3}".format(val1, val2, cname, tolerance)
            else:
                assert val1==val2, "table 1 value {0} and table 2 value {1} in {2} differ more than tolerance of " \
                                   "{3}".format(val1, val2, cname, tolerance)
    print("******* Congrats!  Test passed. ")
def extract_col_value_H2OTwoDimTable(table, col_name):
    '''
    Extract the values stored under column col_name of an H2OTwoDimTable as a python list.

    :param table: H2OTwoDimTable to read from
    :param col_name: name of the column to extract
    :return: list of the values in that column, in row order
    '''
    col_position = table.col_header.index(col_name)
    return [row[col_position] for row in table.cell_values]
def assert_H2OTwoDimTable_equal_upto(table1, table2, col_header_list, tolerance=1e-6):
    '''
    Compare two H2OTwoDimTables that are almost of the same size. table1 can be shorter than
    table2; for whatever part of table2 table1 has, the entries must agree. A pair of NaN floats
    counts as equal.

    :param table1: H2OTwoDimTable providing the rows to check
    :param table2: H2OTwoDimTable that must agree with table1 on those rows
    :param col_header_list: list of column names (looked up in table1.col_header) to compare
    :param tolerance: maximum allowed absolute difference for float entries
    :return: None; raises AssertionError on the first mismatch
    '''
    for cname in col_header_list:
        cidx = table1.col_header.index(cname)
        for rind in range(len(table1.cell_values)):
            first = table1.cell_values[rind][cidx]
            second = table2.cell_values[rind][cidx]
            if isinstance(first, float) and isinstance(second, float):
                if math.isnan(first) and math.isnan(second):
                    continue  # NaN on both sides counts as a match
                assert abs(first-second) < tolerance, \
                    "table 1 value {0} and table 2 value {1} in {2} differ more than tolerance of " \
                    "{3}".format(first, second, cname, tolerance)
            else:
                # mixed-type or non-float pairs must match exactly
                assert first==second, "table 1 value {0} and table 2 value {1} in {2} differ more than tolerance of " \
                                      "{3}".format(first, second, cname, tolerance)
    print("******* Congrats!  Test passed. ")
def assert_equal_scoring_history(model1, model2, col_compare_list, tolerance=1e-6):
    """
    Assert that two H2O models have numerically equal scoring histories for the given columns.

    :param model1: first H2O model; its scoring_history table is read from _model_json["output"]
    :param model2: second H2O model to compare against
    :param col_compare_list: list of scoring-history column names to compare
    :param tolerance: maximum allowed absolute difference for float entries
    :return: None; raises AssertionError via assert_H2OTwoDimTable_equal_upto on mismatch
    """
    scoring_hist1 = model1._model_json["output"]["scoring_history"]
    scoring_hist2 = model2._model_json["output"]["scoring_history"]
    assert_H2OTwoDimTable_equal_upto(scoring_hist1, scoring_hist2, col_compare_list, tolerance=tolerance)
def assert_H2OTwoDimTable_equal(table1, table2, col_header_list, tolerance=1e-6, check_sign=False, check_all=True,
                                num_per_dim=10):
    """
    This method compares two H2OTwoDimTables and verify that their difference is less than value set in tolerance. It
    is probably an overkill for I have assumed that the order of col_header_list may not be in the same order as
    the values in the table.cell_values[ind][0]. In addition, I do not assume an order for the names in the
    table.cell_values[ind][0] either for there is no reason for an order to exist.
    To limit the test run time, we can test a randomly sampled of points instead of all points
    :param table1: H2OTwoDimTable to be compared
    :param table2: the other H2OTwoDimTable to be compared
    :param col_header_list: list of strings denote names that we want the comparison to be performed
    :param tolerance: default to 1e-6; entries are compared as a relative ratio |v1-v2|/max(1, |v1|, |v2|)
    :param check_sign: bool, determine if the sign of values are important or not. For eigenvectors, they are not.
    :param check_all: bool, determine if we need to compare every single element
    :param num_per_dim: integer, number of elements to sample per dimension. We have 3 here.
    :return: None if comparison succeed and raise an error if comparison failed for whatever reason
    """
    num_comparison = len(set(col_header_list))
    size1 = len(table1.cell_values)
    size2 = len(table2.cell_values)
    worst_error = 0
    assert size1==size2, "The two H2OTwoDimTables are of different size!"
    assert num_comparison<=size1, "H2OTwoDimTable do not have all the attributes specified in col_header_list."
    flip_sign_vec = generate_sign_vec(table1, table2) if check_sign else [1]*len(table1.cell_values[0]) # correct for sign change for eigenvector comparisons
    # randomly (or exhaustively, when check_all) choose which rows of each table to inspect
    randRange1 = generate_for_indices(len(table1.cell_values), check_all, num_per_dim, 0)
    randRange2 = generate_for_indices(len(table2.cell_values), check_all, num_per_dim, 0)
    for ind in range(num_comparison):
        col_name = col_header_list[ind]
        next_name=False
        # scan both tables for the row whose first cell carries this metric/column name
        for name_ind1 in randRange1:
            if col_name!=str(table1.cell_values[name_ind1][0]):
                continue
            for name_ind2 in randRange2:
                if not(col_name==str(table2.cell_values[name_ind2][0])):
                    continue
                # now we have the col header names, do the actual comparison
                if str(table1.cell_values[name_ind1][0])==str(table2.cell_values[name_ind2][0]):
                    # start at 1: index 0 of each row holds the name, the data starts at index 1
                    randRange3 = generate_for_indices(min(len(table2.cell_values[name_ind2]), len(table1.cell_values[name_ind1])), check_all, num_per_dim,1)
                    for indC in randRange3:
                        val1 = table1.cell_values[name_ind1][indC]
                        val2 = table2.cell_values[name_ind2][indC]*flip_sign_vec[indC]
                        if isinstance(val1, float) and isinstance(val2, float):
                            # relative difference, guarded by max(1, ...) so tiny values do not blow it up
                            compare_val_ratio = abs(val1-val2)/max(1, abs(val1), abs(val2))
                            if compare_val_ratio > tolerance:
                                print("Table entry difference is {0} at dimension {1} and eigenvector number "
                                      "{2}".format(compare_val_ratio, name_ind1, indC))
                                print("The first vector is {0} and the second vector is {1}".format(table1.cell_values[name_ind1], table2.cell_values[name_ind2]))
                                assert False, "Table entries are not equal within tolerance."
                            worst_error = max(worst_error, compare_val_ratio)
                        else:
                            assert False, "Tables contains non-numerical values.  Comparison is for numericals only!"
                    next_name=True
                    break
                else:
                    assert False, "Unknown metric names found in col_header_list."
            if next_name:   # ready to go to the next name in col_header_list
                break
    print("******* Congrats!  Test passed.  Maximum difference of your comparison is {0}".format(worst_error))
def generate_for_indices(list_size, check_all, num_per_dim, start_val):
    """
    Build the list of indices in [start_val, list_size) to examine: either all of them in order,
    or a random sample of at most num_per_dim of them.

    :param list_size: exclusive upper bound of the index range
    :param check_all: when True, return every index in order; otherwise a random subset
    :param num_per_dim: maximum number of indices to return when sampling
    :param start_val: inclusive lower bound of the index range
    :return: list of indices to check
    """
    candidates = list(range(start_val, list_size))
    if not check_all:
        random.shuffle(candidates)  # sample without replacement by shuffling then slicing
        candidates = candidates[0:min(list_size, num_per_dim)]
    return candidates
def generate_sign_vec(table1, table2):
    """
    Work out, for each column of two H2OTwoDimTables, the relative sign between the two tables'
    entries.  Eigenvectors are only defined up to sign, so comparisons multiply table2's entries
    by this vector first.

    :param table1: first H2OTwoDimTable
    :param table2: second H2OTwoDimTable
    :return: list with one entry (+1 or -1) per column of table1's rows
    """
    sign_flips = [1]*len(table1.cell_values[0])
    for col in range(1, len(table2.cell_values[0])):
        # scan down the column for the first row where both entries are non-zero,
        # since a zero entry carries no sign information
        for row in range(0, len(table2.cell_values)):
            first = table1.cell_values[row][col]
            second = table2.cell_values[row][col]
            if (abs(first) > 0) and (abs(second) > 0):
                sign_flips[col] = int(np.sign(first) * np.sign(second))
                break   # found what we need. Goto next column
    return sign_flips
def equal_two_dicts(dict1, dict2, tolerance=1e-6, throwError=True):
    """
    Compare the values stored under the same keys of two dicts.

    Fixes over the previous version: a size mismatch used to fall through silently
    (returning None, i.e. a silent pass), and a successful comparison never returned
    True.  Both paths now report explicitly.

    :param dict1: dict of numerical values
    :param dict2: dict of numerical values to compare with dict1
    :param tolerance: maximum absolute difference allowed between two values
    :param throwError: bool, True raises an AssertionError on mismatch, False returns False
    :return: True if both dicts are the same size and all values agree within tolerance,
        False otherwise (when throwError is False)
    """
    if len(dict1) != len(dict2):  # only proceed if lengths are the same
        if throwError:
            assert False, "Dict 1 and Dict 2 are of different sizes."
        return False
    for key1 in dict1.keys():
        diff = abs(dict1[key1]-dict2[key1])
        if diff > tolerance:
            if throwError:
                assert False, "Dict 1 value {0} and Dict 2 value {1} do not agree.".format(dict1[key1], dict2[key1])
            else:
                return False
    return True
def equal_two_arrays(array1, array2, eps=1e-6, tolerance=1e-6, throw_error=True):
    """
    Element-wise comparison of two sequences of numbers.

    A pair of elements is skipped when both values are below eps (the significance
    level we care about).  Otherwise the pair must not differ by more than tolerance.

    :param array1: sequence containing values of interest
    :param array2: sequence containing values to compare with array1
    :param eps: significance level below which no comparison is performed
    :param tolerance: maximum difference allowed between two compared elements
    :param throw_error: bool, raise an AssertionError instead of returning False
    :return: True when all compared element pairs are close enough, False otherwise
    """
    if len(array1) != len(array2):  # sequences must be the same length
        if throw_error:
            assert False, "The two arrays are of different size!"
        return False
    for v1, v2 in zip(array1, array2):
        if (v1 < eps) and (v2 < eps):
            continue  # both values too small to matter, skip this pair
        if abs(v1 - v2) > tolerance:  # difference is too high
            if throw_error:
                assert False, "Array 1 value {0} and array 2 value {1} do not agree.".format(v1, v2)
            return False
    return True  # all element pairs are close enough
def equal_2d_tables(table1, table2, tolerance=1e-6):
    """
    Compare two 2-D tables (lists of rows) element by element.

    Only float entries take part in the comparison; an AssertionError is raised
    when the outer lengths or any pair of row lengths differ.

    :param table1: 2-D structure containing some values of interest
    :param table2: 2-D structure whose values we would like to compare with table1
    :param tolerance: maximum difference allowed between two float entries
    :return: True when all float entries agree within tolerance, False otherwise
    """
    assert len(table1) == len(table2), "The two arrays are of different size!"
    for row1, row2 in zip(table1, table2):
        assert len(row1) == len(row2), "The two arrays are of different size!"
        for v1, v2 in zip(row1, row2):
            # non-float cells (e.g. row labels) are ignored
            if type(v1) == float and abs(v1 - v2) > tolerance:
                return False
    return True
def compare_two_arrays(array1, array2, eps, tolerance, comparison_string, array1_string, array2_string, error_string,
                       success_string, template_is_better, just_print=False):
    """
    This function is written to print out the performance comparison results for various values that
    we care about. It will return 1 if the values of the two arrays exceed threshold specified in tolerance.
    The actual comparison is performed by calling function equal_two_arrays.

    Fix: the previous bare ``except:`` caught every exception (including
    KeyboardInterrupt/SystemExit); only comparison failures (AssertionError) should
    count as a failed test.

    :param array1: numpy array containing some values of interest
    :param array2: numpy array containing some values of interest that we would like to compare it with array1
    :param eps: significance level that we care about in order to perform the comparison
    :param tolerance: threshold for which we allow the two array elements to be different by
    :param comparison_string: string stating what the comparison is about, e.g. "Comparing p-values ...."
    :param array1_string: string stating what is the array1 attribute of interest, e.g. "H2O p-values: "
    :param array2_string: string stating what is the array2 attribute of interest, e.g. "Theoretical p-values: "
    :param error_string: string stating what you want to say if the difference between array1 and array2
        exceeds tolerance, e.g "P-values are not equal!"
    :param success_string: string stating what you want to say if the difference between array1 and array2 does not
        exceed tolerance "P-values are close enough!"
    :param template_is_better: bool, True, will return 1 if difference among elements of array1 and array2 exceeds
        tolerance. False, will always return 0 even if difference among elements of array1 and array2 exceeds tolerance.
        In this case, the system under test actually performs better than the template.
    :param just_print: bool if True will print attribute values without doing comparison. False will print
        attribute values and perform comparison
    :return: if template_is_better = True, return 0 if elements in array1 and array2 are close and 1 otherwise;
        if template_is_better = False, will always return 0 since system under tests performs better than
        template system.
    """
    # display array1, array2 with proper description
    print(comparison_string)
    print(array1_string, array1)
    print(array2_string, array2)
    if just_print:  # just print the two values and do no comparison
        return 0
    else:  # may need to actually perform comparison
        if template_is_better:
            try:
                assert equal_two_arrays(array1, array2, eps, tolerance), error_string
                print(success_string)
                sys.stdout.flush()
                return 0
            except AssertionError:  # only a failed comparison counts as a failed test
                sys.stdout.flush()
                return 1
        else:
            print("Test result is actually better than comparison template!")
            return 0
def make_Rsandbox_dir(base_dir, test_name, make_dir):
    """
    Remove any existing "Rsandbox_<test_name>" directory under base_dir (with all of
    its contents) and, when requested, recreate it empty.

    :param base_dir: string, directory path we want to build our Rsandbox_<test_name> off from
    :param test_name: string, unit test name that the Rsandbox is created for
    :param make_dir: bool, True will create directory base_dir/Rsandbox_<test_name>,
        False will not create the directory
    :return: string, full path of the sandbox directory
    """
    sandbox_path = os.path.join(base_dir, "Rsandbox_" + test_name)
    # wipe whatever a previous run may have left behind
    if os.path.exists(sandbox_path):
        shutil.rmtree(sandbox_path)
    if make_dir:
        os.makedirs(sandbox_path)
    return sandbox_path
def get_train_glm_params(model, what_param, family_type='gaussian'):
    """
    This function will grab the various attributes (like coefficients, p-values, and others) off a GLM
    model that has been built.

    Fixes: ``np.float`` was removed in NumPy 1.24 (use builtin ``float``), and
    ``(len(...)-1)/2`` yields a float in Python 3 which is not a valid ``np.zeros``
    shape (use integer division ``//``).

    :param model: GLM model that we want to extract information from
    :param what_param: string indicating the model attribute of interest like 'p-value','weights',...
    :param family_type: string, optional, represents the various distribution families (gaussian, multinomial, binomial)
        supported by our GLM algo
    :return: attribute value of interest
    """
    coeff_pvalues = model._model_json["output"]["coefficients_table"].cell_values
    if what_param == 'p-values':
        if 'gaussian' in family_type.lower():
            # p-value is stored in the last column of each coefficient row
            p_value_h2o = []
            for ind in range(len(coeff_pvalues)):
                p_value_h2o.append(coeff_pvalues[ind][-1])
            return p_value_h2o
        else:
            assert False, "P-values are only available to Gaussian family."
    elif what_param == 'weights':
        if 'gaussian' in family_type.lower():
            weights = []
            for ind in range(len(coeff_pvalues)):
                weights.append(coeff_pvalues[ind][1])
            return weights
        elif ('multinomial' in family_type.lower()) or ('binomial' in family_type.lower()):
            # for multinomial, the coefficients are organized as features by number of classes for
            # nonstandardized and then standardized weights. Need to grab the correct matrix as
            # number of classes by n_features matrix
            num_feature = len(coeff_pvalues)
            num_class = (len(coeff_pvalues[0])-1)//2  # integer division: shape must be an int
            coeffs = np.zeros((num_class, num_feature), dtype=float)
            end_index = int(num_class+1)
            for col_index in range(len(coeff_pvalues)):
                coeffs[:, col_index] = coeff_pvalues[col_index][1:end_index]
            return coeffs
    elif what_param == 'best_lambda':
        # parse the lambda value out of the model summary string
        lambda_str = model._model_json["output"]["model_summary"].cell_values[0][4].split('=')
        return float(str(lambda_str[-2]).split(',')[0])
    elif what_param == 'confusion_matrix':
        if 'multinomial' in family_type.lower():
            return model._model_json["output"]["training_metrics"]._metric_json["cm"]["table"]
        elif 'binomial' in family_type.lower():
            return model.confusion_matrix().table
    else:
        assert False, "parameter value not found in GLM model"
def less_than(val1, val2):
    """
    Check whether val1 <= val2 after rounding both values to three decimal places.

    :param val1: first value of interest
    :param val2: second value of interest
    :return: bool, True if round(val1, 3) <= round(val2, 3) and False otherwise
    """
    # only differences beyond the third decimal place are considered significant
    return round(val1, 3) <= round(val2, 3)
def replace_nan_with_mean(data_with_nans, nans_row_col_indices, col_means):
    """
    Replace every nan position of a data set with the mean of its column.

    :param data_with_nans: 2-D array containing nans
    :param nans_row_col_indices: pair (rows, cols) of index sequences locating the nans
        (e.g. the output of np.where on the nan mask)
    :param col_means: sequence holding the column means of the data set
    :return: the data set with the nans replaced by the corresponding column means
    """
    for ind in range(len(nans_row_col_indices[0])):
        row_ind = nans_row_col_indices[0][ind]
        col_ind = nans_row_col_indices[1][ind]
        # each nan takes the mean of the column it sits in
        data_with_nans[row_ind, col_ind] = col_means[col_ind]
    return data_with_nans
def remove_csv_files(dir_path, suffix=".csv", action='remove', new_dir_path=""):
    """
    Find every file in dir_path whose name ends with suffix, then either delete each
    file or relocate it to another directory, depending on action.

    :param dir_path: string representing full path to directory of interest
    :param suffix: string representing suffix of filename that are to be found
    :param action: string, optional, denote the action to perform on files, 'remove' or 'copy'
    :param new_dir_path: string, optional, representing full path to the destination directory
    :return: None
    """
    # collect only the filenames ending with suffix
    matching = [name for name in os.listdir(dir_path) if name.endswith(suffix)]
    for name in matching:
        full_path = os.path.join(dir_path, name)
        if not os.path.isfile(full_path):  # act only on files that actually exist
            continue
        if 'remove' in action:
            remove_files(full_path)
        elif 'copy' in action:
            move_files(new_dir_path, full_path, name, action=action)
        else:
            assert False, "action string can only be 'remove' or 'copy."
def extract_comparison_attributes_and_print(model_h2o, h2o_model_test_metrics, end_test_str, want_p_values,
                                            attr1_bool, attr2_bool, att1_template, att2_template, att3_template,
                                            att4_template, compare_att1_str, h2o_att1_str, template_att1_str,
                                            att1_str_fail, att1_str_success, compare_att2_str, h2o_att2_str,
                                            template_att2_str, att2_str_fail, att2_str_success, compare_att3_str,
                                            h2o_att3_str, template_att3_str, att3_str_fail, att3_str_success,
                                            compare_att4_str, h2o_att4_str, template_att4_str, att4_str_fail,
                                            att4_str_success, failed_test_number, ignored_eps, allowed_diff,
                                            noise_var, template_must_be_better, attr3_bool=True, attr4_bool=True):
    """
    This function basically will compare four attributes (weight, p-values, training data MSE, test data MSE) of a test
    with a template model. If the difference of comparison exceeds a certain threshold, the test will be determined as
    failed and vice versa. There are times when we do not care about p-values and/or weight comparisons but mainly
    concerned with MSEs. We can set the input parameters to indicate if this is the case.

    :param model_h2o: H2O model that we want to evaluate
    :param h2o_model_test_metrics: test performance of H2O model under evaluation
    :param end_test_str: string representing end test banner to be printed
    :param want_p_values: bool True if we want to care about p-values and False if we don't
    :param attr1_bool: bool True if we want to compare weight difference between H2O model and template model
        and False otherwise.
    :param attr2_bool: bool True if we want to compare p-value difference between H2O model and template model
        and False otherwise.
    :param att1_template: value of first template attribute, the weight vector
    :param att2_template: value of second template attribute, the p-value vector
    :param att3_template: value of third template attribute, the training data set MSE
    :param att4_template: value of fourth template attribute, the test data set MSE
    :param compare_att1_str: string describing the comparison of first attribute, e.g. "Comparing intercept and
        weights ...."
    :param h2o_att1_str: string describing H2O model first attribute values, e.g. "H2O intercept and weights: "
    :param template_att1_str: string describing template first attribute values, e.g. "Theoretical intercept and
        weights: "
    :param att1_str_fail: string describing message to print out if difference exceeds threshold, e.g.
        "Intercept and weights are not equal!"
    :param att1_str_success: string describing message to print out if difference < threshold, e.g.
        "Intercept and weights are close enough!"
    :param compare_att2_str: string describing the comparison of first attribute, e.g. "Comparing p-values ...."
    :param h2o_att2_str: string describing H2O model first attribute values, e.g. "H2O p-values: "
    :param template_att2_str: string describing template first attribute values, e.g. "Theoretical p-values: "
    :param att2_str_fail: string describing message to print out if difference exceeds threshold, e.g.
        "P-values are not equal!"
    :param att2_str_success: string describing message to print out if difference < threshold, e.g.
        "P-values are close enough!"
    :param compare_att3_str: string describing the comparison of first attribute, e.g. "Comparing training MSEs ...."
    :param h2o_att3_str: string describing H2O model first attribute values, e.g. "H2O training MSE: "
    :param template_att3_str: string describing template first attribute values, e.g. "Theoretical train MSE: "
    :param att3_str_fail: string describing message to print out if difference exceeds threshold, e.g.
        "Training MSEs are not equal!"
    :param att3_str_success: string describing message to print out if difference < threshold, e.g.
        "Training MSEs are close enough!"
    :param compare_att4_str: string describing the comparison of first attribute, e.g. "Comparing test MSEs ...."
    :param h2o_att4_str: string describing H2O model first attribute values, e.g. "H2O test MSE: "
    :param template_att4_str: string describing template first attribute values, e.g. "Theoretical test MSE: "
    :param att4_str_fail: string describing message to print out if difference exceeds threshold, e.g.
        "Test MSEs are not equal!"
    :param att4_str_success: string describing message to print out if difference < threshold, e.g.
        "Test MSEs are close enough!"
    :param failed_test_number: integer denote the number of tests failed
    :param ignored_eps: if value < than this value, no comparison is performed
    :param allowed_diff: threshold if exceeded will fail a test
    :param noise_var: Gaussian noise variance used to generate data set
    :param template_must_be_better: bool: True: template value must be lower, False: don't care
    :param attr3_bool: bool denoting if we should compare attribute 3 values
    :param attr4_bool: bool denoting if we should compare attribute 4 values
    :return: a tuple containing test h2o model training and test performance metrics that include: weight, pValues,
        mse_train, r2_train, mse_test, r2_test
    """
    # grab weight from h2o model
    test1_weight = get_train_glm_params(model_h2o, 'weights')
    # grab p-values from h2o model
    test1_p_values = []
    if want_p_values:
        test1_p_values = get_train_glm_params(model_h2o, 'p-values')
    # grab other performance metrics
    test1_mse_train = model_h2o.mse()
    test1_r2_train = model_h2o.r2()
    test1_mse_test = h2o_model_test_metrics.mse()
    test1_r2_test = h2o_model_test_metrics.r2()
    # compare performances of template and h2o model weights
    # NOTE: the weight comparison deliberately uses a looser threshold (allowed_diff*100)
    # than the other attribute comparisons
    failed_test_number += compare_two_arrays(test1_weight, att1_template, ignored_eps, allowed_diff*100, compare_att1_str,
                                             h2o_att1_str, template_att1_str, att1_str_fail, att1_str_success,
                                             attr1_bool)
    # p-values
    if want_p_values:
        if np.isnan(np.asarray(test1_p_values)).any():  # p-values contain nan
            # any NaN p-value immediately counts as one failed test
            failed_test_number += 1
        failed_test_number += compare_two_arrays(test1_p_values, att2_template, ignored_eps, allowed_diff,
                                                 compare_att2_str, h2o_att2_str, template_att2_str, att2_str_fail,
                                                 att2_str_success, attr2_bool)
    # Training MSE
    need_to_compare = less_than(att3_template, test1_mse_train)
    # in some cases, template value should always be better. Training data MSE should always
    # be better without regularization than with regularization
    if (not need_to_compare) and template_must_be_better:
        failed_test_number += 1
    # MSE comparisons use the data-generating noise variance (noise_var) as tolerance
    failed_test_number += compare_two_arrays([test1_mse_train], [att3_template], ignored_eps, noise_var,
                                             compare_att3_str, h2o_att3_str,
                                             template_att3_str, att3_str_fail, att3_str_success, attr3_bool)
    # Test MSE
    need_to_compare = less_than(att4_template, test1_mse_test)
    failed_test_number += compare_two_arrays([test1_mse_test], [att4_template], ignored_eps, noise_var,
                                             compare_att4_str, h2o_att4_str, template_att4_str, att4_str_fail,
                                             att4_str_success, need_to_compare, attr4_bool)
    # print end test banner
    print(end_test_str)
    print("*******************************************************************************************")
    sys.stdout.flush()
    return test1_weight, test1_p_values, test1_mse_train, test1_r2_train, test1_mse_test,\
        test1_r2_test, failed_test_number
def extract_comparison_attributes_and_print_multinomial(model_h2o, h2o_model_test_metrics, family_type, end_test_str,
                                                        compare_att_str=("", "", "", "", "", "", ""),
                                                        h2o_att_str=("", "", "", "", "", "", ""),
                                                        template_att_str=("", "", "", "", "", "", ""),
                                                        att_str_fail=("", "", "", "", "", "", ""),
                                                        att_str_success=("", "", "", "", "", "", ""),
                                                        test_model=None, test_model_metric=None, template_params=None,
                                                        can_be_better_than_template=(False, False, False, False,
                                                                                     False, False, False),
                                                        just_print=(True, True, True, True, True, True, True),
                                                        ignored_eps=1e-15, allowed_diff=1e-5, failed_test_number=0):
    """
    This function basically will compare and print out seven performance metrics of a test with a
    template model. If the difference of comparison exceeds a certain threshold, the test will be determined as
    failed and vice versa. There are times when we do not care about comparisons but mainly concerned with
    logloss/prediction accuracy in determining if a test shall fail. We can set the input parameters to indicate
    if this is the case.

    Fixes: seven attributes are compared (indices 0-6: weights, train/test logloss,
    train/test confusion matrix, train/test accuracy), so every default sequence must
    hold seven entries — the previous six-element defaults for
    can_be_better_than_template/just_print raised IndexError at the test-accuracy
    comparison.  The defaults are now immutable tuples, avoiding the shared
    mutable-default pitfall as well.

    :param model_h2o: H2O model that we want to evaluate
    :param h2o_model_test_metrics: test performance of H2O model under evaluation
    :param family_type: string represents the various distribution families (gaussian, multinomial, binomial)
        supported by our GLM algo
    :param end_test_str: string to be printed at the end of a test
    :param compare_att_str: sequence of strings describing what we are trying to compare
    :param h2o_att_str: sequence of strings describing each H2O attribute of interest
    :param template_att_str: sequence of strings describing template attribute of interest
    :param att_str_fail: sequence of strings to be printed if the comparison failed
    :param att_str_success: sequence of strings to be printed if comparison succeeded
    :param test_model: template model whose attributes we want to compare our H2O model with
    :param test_model_metric: performance on test data set of template model
    :param template_params: array containing template attribute values that we want to compare our H2O model with
    :param can_be_better_than_template: sequence of bool: True: template value must be lower, False: don't care
    :param just_print: sequence of bool for each attribute if True, no comparison is performed, just print the
        attributes and if False, will compare the attributes and print the attributes as well
    :param ignored_eps: if value < than this value, no comparison is performed
    :param allowed_diff: threshold if exceeded will fail a test
    :param failed_test_number: integer denote the number of tests failed so far
    :return: accumulated number of tests that have failed so far
    """
    # grab performance metrics from h2o model
    (h2o_weight, h2o_logloss_train, h2o_confusion_matrix_train, h2o_accuracy_train, h2o_logloss_test,
     h2o_confusion_matrix_test, h2o_accuracy_test) = grab_model_params_metrics(model_h2o, h2o_model_test_metrics,
                                                                               family_type)
    # grab performance metrics from template model
    if test_model and test_model_metric:
        (template_weight, template_logloss_train, template_confusion_matrix_train, template_accuracy_train,
         template_logloss_test, template_confusion_matrix_test, template_accuracy_test) = \
            grab_model_params_metrics(test_model, test_model_metric, family_type)
    elif template_params:
        # grab template comparison values from somewhere else
        (template_weight, template_logloss_train, template_confusion_matrix_train, template_accuracy_train,
         template_logloss_test, template_confusion_matrix_test, template_accuracy_test) = template_params
    else:
        assert False, "No valid template parameters are given for comparison."
    # print and/or compare the weights between template and H2O
    compare_index = 0
    failed_test_number += compare_two_arrays(h2o_weight, template_weight, ignored_eps, allowed_diff,
                                             compare_att_str[compare_index], h2o_att_str[compare_index],
                                             template_att_str[compare_index], att_str_fail[compare_index],
                                             att_str_success[compare_index], True, just_print[compare_index])
    compare_index += 1
    # this is logloss from training data set,
    if not(just_print[compare_index]) and not(can_be_better_than_template[compare_index]):
        if (h2o_logloss_train < template_logloss_train) and \
                (abs(h2o_logloss_train-template_logloss_train) > 1e-5):
            # H2O performed better than template which is not allowed
            failed_test_number += 1  # increment failed_test_number and just print the results
            compare_two_arrays([h2o_logloss_train], [template_logloss_train], ignored_eps, allowed_diff,
                               compare_att_str[compare_index], h2o_att_str[compare_index],
                               template_att_str[compare_index], att_str_fail[compare_index],
                               att_str_success[compare_index], True, True)
        else:
            failed_test_number += compare_two_arrays([h2o_logloss_train], [template_logloss_train], ignored_eps,
                                                     allowed_diff, compare_att_str[compare_index],
                                                     h2o_att_str[compare_index], template_att_str[compare_index],
                                                     att_str_fail[compare_index], att_str_success[compare_index], True,
                                                     False)
    else:
        template_better = is_template_better(just_print[compare_index], can_be_better_than_template[compare_index],
                                             h2o_logloss_train, template_logloss_train, False)
        # print and compare the logloss between template and H2O for training data
        failed_test_number += compare_two_arrays([h2o_logloss_train], [template_logloss_train], ignored_eps,
                                                 allowed_diff, compare_att_str[compare_index],
                                                 h2o_att_str[compare_index], template_att_str[compare_index],
                                                 att_str_fail[compare_index], att_str_success[compare_index],
                                                 template_better, just_print[compare_index])
    compare_index += 1
    template_better = is_template_better(just_print[compare_index], can_be_better_than_template[compare_index],
                                         h2o_logloss_test, template_logloss_test, False)
    # print and compare the logloss between template and H2O for test data
    failed_test_number += compare_two_arrays([h2o_logloss_test], [template_logloss_test], ignored_eps, allowed_diff,
                                             compare_att_str[compare_index], h2o_att_str[compare_index],
                                             template_att_str[compare_index], att_str_fail[compare_index],
                                             att_str_success[compare_index], template_better, just_print[compare_index])
    compare_index += 1
    # print the confusion matrix from training data
    failed_test_number += compare_two_arrays(h2o_confusion_matrix_train, template_confusion_matrix_train, ignored_eps,
                                             allowed_diff, compare_att_str[compare_index], h2o_att_str[compare_index],
                                             template_att_str[compare_index], att_str_fail[compare_index],
                                             att_str_success[compare_index], True, just_print[compare_index])
    compare_index += 1
    # print the confusion matrix from test data
    failed_test_number += compare_two_arrays(h2o_confusion_matrix_test, template_confusion_matrix_test, ignored_eps,
                                             allowed_diff, compare_att_str[compare_index], h2o_att_str[compare_index],
                                             template_att_str[compare_index], att_str_fail[compare_index],
                                             att_str_success[compare_index], True, just_print[compare_index])
    compare_index += 1
    template_better = is_template_better(just_print[compare_index], can_be_better_than_template[compare_index],
                                         h2o_accuracy_train, template_accuracy_train, True)
    # print accuracy from training dataset
    failed_test_number += compare_two_arrays([h2o_accuracy_train], [template_accuracy_train], ignored_eps, allowed_diff,
                                             compare_att_str[compare_index], h2o_att_str[compare_index],
                                             template_att_str[compare_index], att_str_fail[compare_index],
                                             att_str_success[compare_index], template_better, just_print[compare_index])
    compare_index += 1
    # print accuracy from test dataset
    template_better = is_template_better(just_print[compare_index], can_be_better_than_template[compare_index],
                                         h2o_accuracy_test, template_accuracy_test, True)
    failed_test_number += compare_two_arrays([h2o_accuracy_test], [template_accuracy_test], ignored_eps, allowed_diff,
                                             compare_att_str[compare_index], h2o_att_str[compare_index],
                                             template_att_str[compare_index], att_str_fail[compare_index],
                                             att_str_success[compare_index], template_better, just_print[compare_index])
    # print end test banner
    print(end_test_str)
    print("*******************************************************************************************")
    sys.stdout.flush()
    return failed_test_number
def is_template_better(just_print, can_be_better_than_template, h2o_att, template_att, bigger_is_better):
    """
    Decide whether the template model's metric is at least as good as the H2O one.

    :param just_print: bool, True when we only want to print the attribute values;
        the return value is then irrelevant to the caller
    :param can_be_better_than_template: bool stating that it is okay for the system
        under test to beat the template (currently unused by this function)
    :param h2o_att: number representing the h2o attribute under test
    :param template_att: number representing the template attribute
    :param bigger_is_better: bool, True when a larger metric value means better performance
    :return: bool indicating if the template attribute is better (always True when just_print is set)
    """
    if just_print:
        # not interested in comparison; any return value works for the caller
        return True
    if bigger_is_better:  # metric is better if it is greater
        return not(h2o_att > template_att)
    # metric is better if it is less
    return not(h2o_att < template_att)
def grab_model_params_metrics(model_h2o, h2o_model_test_metrics, family_type):
    """
    Extract the weights, logloss, confusion matrices and prediction accuracies from a
    H2O GLM model and the corresponding H2O model test metrics.

    :param model_h2o: GLM H2O model
    :param h2o_model_test_metrics: performance on test data set from H2O GLM model
    :param family_type: string representing 'gaussian', 'binomial' or 'multinomial'
    :return: tuple containing weight, logloss/confusion matrix/prediction accuracy calculated
        from the training data set and the test data set respectively
    """
    family = family_type.lower()
    # weights and training-set metrics come straight off the model
    h2o_weight = get_train_glm_params(model_h2o, 'weights', family_type=family_type)
    h2o_logloss_train = model_h2o.logloss()
    h2o_confusion_matrix_train = get_train_glm_params(model_h2o, 'confusion_matrix', family_type=family_type)
    h2o_logloss_test = h2o_model_test_metrics.logloss()
    last_index = len(h2o_confusion_matrix_train.cell_values) - 1
    if 'multinomial' in family:
        h2o_confusion_matrix_test = h2o_model_test_metrics.confusion_matrix()
        # accuracy is one minus the value in the bottom-right cell of the confusion matrix
        h2o_accuracy_train = 1 - h2o_confusion_matrix_train.cell_values[last_index][last_index]
        h2o_accuracy_test = 1 - h2o_confusion_matrix_test.cell_values[last_index][last_index]
    elif 'binomial' in family:
        h2o_confusion_matrix_test = h2o_model_test_metrics.confusion_matrix().table
        # the binomial confusion-matrix table has one extra leading column
        real_last_index = last_index + 1
        h2o_accuracy_train = 1 - float(h2o_confusion_matrix_train.cell_values[last_index][real_last_index])
        h2o_accuracy_test = 1 - float(h2o_confusion_matrix_test.cell_values[last_index][real_last_index])
    else:
        assert False, "Only 'multinomial' and 'binomial' distribution families are supported for " \
                      "grab_model_params_metrics function!"
    return (h2o_weight, h2o_logloss_train, h2o_confusion_matrix_train, h2o_accuracy_train,
            h2o_logloss_test, h2o_confusion_matrix_test, h2o_accuracy_test)
def prepare_data_sklearn_multinomial(training_data_xy):
    """
    Prepare a data matrix for sklearn logistic regression.

    Sklearn model requires that the input matrix should contain a column of ones in order for
    it to generate the intercept term. In addition, it wants the response vector to be in a
    certain format as well.

    Fix: the response lives in the LAST COLUMN; the previous code truncated row
    ``y_ind`` (``training_data_xy[y_ind]``) instead of the response column.

    :param training_data_xy: 2-D numpy array containing both the predictors and, in its
        last column, the response
    :return: tuple containing the predictor columns with a column of ones as the first column and
        the response vector in the format that Sklearn wants.
    """
    (num_row, num_col) = training_data_xy.shape
    y_ind = num_col-1
    # change response to be enum (integer class labels) and not real
    training_data_xy[:, y_ind] = training_data_xy[:, y_ind].astype(int)
    # prepare response column for sklearn logistic regression
    response_y = training_data_xy[:, y_ind]
    response_y = np.ravel(response_y)
    training_data = training_data_xy[:, range(0, y_ind)]
    # added column of ones into data matrix X_MAT for the intercept term
    temp_ones = np.asmatrix(np.ones(num_row)).transpose()
    x_mat = np.concatenate((temp_ones, training_data), axis=1)
    return response_y, x_mat
def get_gridables(params_in_json):
    """
    This function is written to walk through all parameters of a model and grab the parameters, its type and
    its default values as three lists of all the gridable parameters.

    Fix: the previous text-default check ``type(x) == 'unicode'`` compared a type
    object against a string and was therefore always False; it now uses isinstance.

    :param params_in_json: a list of parameters associated with a H2O model. Each list is a dict containing fields
        of interest like name, type, gridable, default values, ....
    :return: three lists: gridable_params, gridable_types and gridable_defaults containing the names of the parameter,
        its associated type like int, float, unicode, bool and default parameter values
    """
    # grab all gridable parameters and its type
    gridable_parameters = []
    gridable_types = []
    gridable_defaults = []
    for each_param in params_in_json:
        if each_param['gridable']:
            gridable_parameters.append(str(each_param["name"]))
            gridable_types.append(each_param["type"])
            # hyper-parameters cannot be unicode; normalize text defaults to plain str
            if isinstance(each_param["default_value"], str):
                gridable_defaults.append(str(each_param["default_value"]))
            else:
                gridable_defaults.append(each_param["default_value"])
    return gridable_parameters, gridable_types, gridable_defaults
def add_fold_weights_offset_columns(h2o_frame, nfold_max_weight_offset, column_names, column_type='fold_assignment'):
    """
    Add fold_columns to H2O training frame specified in h2o_frame according to nfold. The new added
    columns should use the names in column_names. Returns a h2o_frame with newly added fold_columns.
    Copied from Eric's code.

    Fixes: ``np.random.random_integers`` has been deprecated/removed from NumPy —
    replaced by the equivalent ``np.random.randint`` (exclusive upper bound, so the
    distribution over [0, nfold-1] is unchanged); the error message now also lists
    the valid 'offset_column' option.

    :param h2o_frame: H2O frame containing training data
    :param nfold_max_weight_offset: integer, number of fold in the cross-validation or maximum weight scale or offset
    :param column_names: list of strings denoting the column names for the new fold columns
    :param column_type: optional string denoting whether we are trying to generate fold_assignment or
        weights_column or offset_column
    :return: H2O frame with added fold column assignments
    """
    number_row = h2o_frame.nrow
    # copied this part from Eric's code
    for index in range(len(column_names)):
        if 'fold_assignment' in column_type:
            # randint's upper bound is exclusive: values land in [0, nfold-1] inclusive
            temp_a = np.random.randint(0, nfold_max_weight_offset, [number_row, 1])
        elif 'weights_column' in column_type:
            temp_a = np.random.uniform(0, nfold_max_weight_offset, [number_row, 1])
        elif 'offset_column' in column_type:
            # one random offset value replicated across all rows
            temp_a = random.uniform(0, nfold_max_weight_offset)*np.asmatrix(np.ones(number_row)).transpose()
        else:
            assert False, "column_type must be either 'fold_assignment', 'weights_column' or 'offset_column'!"
        fold_assignments = h2o.H2OFrame(temp_a)
        fold_assignments.set_names([column_names[index]])
        h2o_frame = h2o_frame.cbind(fold_assignments)
    return h2o_frame
def gen_grid_search(model_params, hyper_params, exclude_parameters, gridable_parameters, gridable_types,
                    gridable_defaults, max_int_number, max_int_val, min_int_val, max_real_number, max_real_val,
                    min_real_val, quantize_level='1.00000000'):
    """
    This function is written to randomly generate griddable parameters for a gridsearch. For parameters already
    found in hyper_params, no random list will be generated. In addition, we will check to make sure that the
    griddable parameters are actually used by the model before adding them to the hyper_params dict.

    NOTE: hyper_params is mutated in place (randomly generated value lists are added to it)
    and it is also returned.

    :param model_params: list of string containing names of argument to the model
    :param hyper_params: dict structure containing a list of gridable parameters names with their list
    :param exclude_parameters: list containing parameter names not to be added to hyper_params
    :param gridable_parameters: list of gridable parameter names
    :param gridable_types: list of gridable parameter types
    :param gridable_defaults: list of gridable parameter default values
    :param max_int_number: integer, size of integer gridable parameter list
    :param max_int_val: integer, maximum integer value for integer gridable parameter
    :param min_int_val: integer, minimum integer value for integer gridable parameter
    :param max_real_number: integer, size of real gridable parameter list
    :param max_real_val: float, maximum real value for real gridable parameter
    :param min_real_val: float, minimum real value for real gridable parameter
    :param quantize_level: string representing the quantization level of floating point values generated randomly.

    :return: a tuple of hyper_params: dict of hyper parameters for gridsearch, true_gridable_parameters:
    a list of string containing names of truely gridable parameters, true_gridable_types: a list of string
    denoting parameter types and true_gridable_defaults: default values of those truly gridable parameters
    """
    count_index = 0
    true_gridable_parameters = []
    true_gridable_types = []
    true_gridable_defaults = []

    for para_name in gridable_parameters:
        # a parameter is truly gridable only if the model accepts it and it is not excluded
        if (para_name in model_params) and (para_name not in exclude_parameters):
            true_gridable_parameters.append(para_name)
            true_gridable_types.append(gridable_types[count_index])
            true_gridable_defaults.append(gridable_defaults[count_index])

            if para_name not in hyper_params.keys():  # user did not supply values; generate them
                # gridable parameter not seen before. Randomly generate values for it
                if ('int' in gridable_types[count_index]) or ('long' in gridable_types[count_index]):
                    # make sure integer values are not duplicated, using set action to remove duplicates
                    hyper_params[para_name] = list(set([random.randint(min_int_val, max_int_val) for p in
                                                        range(0, max_int_number)]))
                elif ('double' in gridable_types[count_index]) or ('float' in gridable_types[count_index]):
                    # quantize the random floats so grid values are reproducible to a fixed precision
                    hyper_params[para_name] = fix_float_precision(list(np.random.uniform(min_real_val, max_real_val,
                                                                max_real_number)), quantize_level=quantize_level)

        count_index += 1

    return hyper_params, true_gridable_parameters, true_gridable_types, true_gridable_defaults
def fix_float_precision(float_list, quantize_level='1.00000000'):
    """
    Quantize every float in float_list to the fixed precision described by
    quantize_level and drop any duplicates produced by the rounding.

    :param float_list: tuple/list of floating point numbers
    :param quantize_level: string, optional, Decimal pattern giving the number of fixed decimal places
    :return: list of unique quantized floats (element order is not guaranteed)
    """
    step = Decimal(quantize_level)
    return list({float(Decimal(value).quantize(step)) for value in float_list})
def extract_used_params_xval(a_grid_model, model_param_names, params_dict, algo="GBM"):
    """
    Companion to extract_used_params for cross-validated grid models.  When the model is
    cross-validated, the parameters are pulled from one of its fold models instead of the
    base model.  Algo-specific name/value pairs from params_dict are merged in as well.

    :param a_grid_model: model generated by gridsearch
    :param model_param_names: hyper-parameter names that are specified for the gridsearch.
    :param params_dict: dict containing name/value pairs specified to an algo.
    :param algo: string, optional, denoting the algo we are looking at.

    :return: params_used: a dict structure containing parameters that take on values as name/value pairs which
    will be used to build a model by hand using the same parameter setting as the model built by gridsearch.
    """
    if not a_grid_model._is_xvalidated:
        return extract_used_params(model_param_names, a_grid_model.params, params_dict, algo)

    # cross-validated: one fold model carries the information we need
    first_fold_id = a_grid_model._xval_keys[0]
    fold_model = h2o.get_model(first_fold_id)
    return extract_used_params(model_param_names, fold_model.params, params_dict, algo)
def extract_used_params(model_param_names, grid_model_params, params_dict, algo="GLM"):
    """
    Build a dict of the parameter name/value pairs a gridsearch actually used to build a
    model, so the same model can be rebuilt by hand with an identical parameter setting.

    :param model_param_names: list contains parameter names that we are interested in extracting
    :param grid_model_params: dict contains key as names of parameter and values as list of two values: default and
    actual.
    :param params_dict: dict containing extra parameters to add to params_used like family, e.g. 'gaussian',
    'binomial', ...
    :param algo: string, optional; for GLM the 'lambda' key is renamed to 'Lambda'.

    :return: params_used: dict of name/value pairs taken from each parameter's 'actual' entry.
    """
    params_used = {}
    for parameter in model_param_names:
        key = str(parameter)
        if key in grid_model_params:
            params_used[key] = grid_model_params[parameter]['actual']

    if params_dict:
        # merge in extra settings such as the distribution family
        params_used.update(params_dict)

    # the GLM python API spells the regularization parameter 'Lambda'
    if algo == "GLM" and 'lambda' in params_used:
        params_used['Lambda'] = params_used.pop('lambda')

    return params_used
def insert_error_grid_search(hyper_params, gridable_parameters, gridable_types, error_number):
    """
    Return a copy of hyper_params with one error deliberately introduced, chosen by
    error_number:
      error_number = 0: duplicate a randomly chosen hyper-parameter name (name doubled);
      error_number = 1: empty the value list of a randomly chosen hyper-parameter;
      error_number = 2: add a randomly generated bogus hyper-parameter name;
      anything else:    append an illegal-typed value to a randomly chosen hyper-parameter.

    :param hyper_params: dict containing all legal hyper-parameters for our grid search
    :param gridable_parameters: name of griddable parameters (some may not be griddable)
    :param gridable_types: type of griddable parameters
    :param error_number: integer representing which errors to introduce into the gridsearch hyper-parameters

    :return: new dict with errors in either parameter names or parameter values
    """
    broken_params = copy.deepcopy(hyper_params)
    target = list(hyper_params)[random.randint(0, len(hyper_params) - 1)]
    target_type = gridable_types[gridable_parameters.index(target)]

    if error_number == 0:
        # replace the chosen name with the name repeated twice
        broken_params[target + target] = broken_params.pop(target)
    elif error_number == 1:
        broken_params[target] = []
    elif error_number == 2:
        bogus_name = generate_random_words(random.randint(20, 100))
        broken_params[bogus_name] = broken_params[target]
    else:
        broken_params = insert_bad_value(broken_params, target, target_type)

    return broken_params
def insert_bad_value(error_hyper_params, param_name, param_type):
    """
    Append a value of the wrong type to the value list of hyper-parameter param_name.

    :param error_hyper_params: dict containing all hyper-parameters for a grid search
    :param param_name: string denoting the hyper-parameter we want to insert bad element to
    :param param_type: string denoting hyper-parameter type

    :return: the same dict with the bad value appended
    """
    if 'int' in param_type:
        bad_value = random.uniform(-10, 10)              # real number into an integer list
    elif 'enum' in param_type:
        bad_value = random.uniform(-10, 10)              # float into an enum list
    elif 'double' in param_type:
        bad_value = random.uniform(0, 1) > 0.5           # boolean into a float list
    else:
        # any other type gets a random string
        bad_value = generate_random_words(random.randint(20, 100))
    error_hyper_params[param_name].append(bad_value)
    return error_hyper_params
def generate_random_words(word_length):
    """
    Generate a random word of the given length drawn from letters, digits and punctuation.

    :param word_length: integer denoting length of the word
    :return: string representing the random word
    """
    assert word_length > 0, "word_length must be an integer greater than 0."
    alphabet = string.ascii_letters + string.digits + string.punctuation
    return ''.join(random.choice(alphabet) for _ in range(int(word_length)))
def generate_redundant_parameters(hyper_params, gridable_parameters, gridable_defaults, error_number):
    """
    Randomly choose a subset of hyper_params and build a model-parameter dict out of it,
    so the same parameters are specified both on the model and in the grid search.

    :param hyper_params: dict containing all griddable parameters as hyper_param to grid search
    :param gridable_parameters: list of gridable parameters (not truly)
    :param gridable_defaults: list of default values for gridable parameters
    :param error_number: int, indicate ways to change the model parameter and the hyper-parameter

    Actions performed on the model parameter and hyper-parameters:
    error_number = 0: model parameter takes a value from the hyper-parameter value list, should not
    generate error;
    error_number = 1: model parameter is set to its default value, should not generate error;
    error_number other: model parameter takes a value NOT in the hyper-parameter value list
    (defaults were removed from the lists first), creating a conflict.

    :return: 2 dicts containing duplicated parameters with specification, new hyperparameter specification
    """
    pruned_hyper_params = copy.deepcopy(hyper_params)
    params_dict = {}
    chosen_count = random.randint(1, len(pruned_hyper_params))
    param_names = list(pruned_hyper_params)

    # strip each parameter's default value from its hyper value list
    for name in param_names:
        default_value = gridable_defaults[gridable_parameters.index(name)]
        if default_value in pruned_hyper_params[name]:
            pruned_hyper_params[name].remove(default_value)

    for name in param_names[:chosen_count]:
        value_count = len(pruned_hyper_params[name])
        if error_number == 0:
            # model parameter randomly takes one value from the hyper list
            params_dict[name] = pruned_hyper_params[name][random.randint(0, value_count - 1)]
        elif error_number == 1:
            # model parameter is set to its default value
            params_dict[name] = gridable_defaults[gridable_parameters.index(name)]
        else:
            # deliberately conflicting: value comes from the pruned (non-default) list
            params_dict[name] = pruned_hyper_params[name][random.randint(0, value_count - 1)]

    # final check to make sure lambda is Lambda
    if 'lambda' in params_dict:
        params_dict["Lambda"] = params_dict.pop('lambda')

    return params_dict, pruned_hyper_params
def count_models(hyper_params):
    """
    Return the maximum number of models a full cartesian grid over hyper_params can build
    (the product of the lengths of all value lists).

    :param hyper_params: dict containing parameter name and a list of values to iterate over
    :return: max_model_number: int representing maximum number of models built
    """
    total = 1
    for value_list in hyper_params.values():
        total *= len(value_list)
    return total
def error_diff_2_models(grid_table1, grid_table2, metric_name):
    """
    Compute the mean absolute difference between the last-column metric values of two
    gridsearch tables, row by row.

    :param grid_table1: first H2OTwoDimTable generated by gridsearch
    :param grid_table2: second H2OTwoDimTable generated by gridsearch
    :param metric_name: string, name of the metric of interest (kept for interface
        compatibility; the metric is read from the last cell of each row)
    :return: real number which is the mean absolute metric difference between the two models
    """
    model_count = len(grid_table1.cell_values)
    assert model_count > 0, "error_diff_2_models: your table contains zero models."
    total_diff = 0
    for row_ind in range(model_count):
        total_diff += abs(grid_table1.cell_values[row_ind][-1] - grid_table2.cell_values[row_ind][-1])
    return total_diff / model_count
def find_grid_runtime(model_list):
    """
    Sum the run time of every model in model_list (including, for cross-validated models,
    the run time of each fold model) and return the total in seconds.

    :param model_list: list of model built by gridsearch, cartesian or randomized with cross-validation
    enabled.
    :return: total_time_sec: total number of time in seconds in building all the models
    """
    total_ms = 0
    for model in model_list:
        total_ms += model._model_json["output"]["run_time"]  # run_time is in milliseconds
        if model._is_xvalidated:
            # include the run time of every cross-validation fold model as well
            for xv_id in model._xval_keys:
                total_ms += h2o.get_model(xv_id)._model_json["output"]["run_time"]
    return total_ms / 1000.0  # convert milliseconds to seconds
def evaluate_metrics_stopping(model_list, metric_name, bigger_is_better, search_criteria, possible_model_number):
    """
    This function given a list of dict that contains the value of metric_name will manually go through the
    early stopping condition and see if the randomized grid search will give us the correct number of models
    generated. Note that you cannot assume the model_list is in the order of when a model is built. It actually
    already come sorted which we do not want....

    :param model_list: list of models built sequentially that contains metric of interest among other fields
    :param metric_name: string representing name of metric that we want to based our stopping condition on
    :param bigger_is_better: bool indicating if the metric is optimized by getting bigger if True and vice versa
    :param search_criteria: dict structure containing the search criteria for randomized gridsearch
    :param possible_model_number: integer, represent the absolute possible number of models built based on the
    hyper-parameter size

    :return: bool indicating if the early topping condition is justified
    """
    tolerance = search_criteria["stopping_tolerance"]
    stop_round = search_criteria["stopping_rounds"]

    min_list_len = 2*stop_round     # minimum length of metrics needed before we start early stopping evaluation

    metric_list = []    # store metric of optimization
    stop_now = False

    # provide metric list sorted by time.  Oldest model appear first.
    metric_list_time_ordered = sort_model_by_time(model_list, metric_name)

    # replay the metrics in build order, checking the stopping condition after each one
    # NOTE: evaluate_early_stopping reverses metric_list in place when
    # bigger_is_better is True (see its implementation below)
    for metric_value in metric_list_time_ordered:
        metric_list.append(metric_value)

        if len(metric_list) > min_list_len:     # start early stopping evaluation now
            stop_now = evaluate_early_stopping(metric_list, stop_round, tolerance, bigger_is_better)

        if stop_now:
            if len(metric_list) < len(model_list):  # could have stopped early in randomized gridsearch
                return False
            else:       # randomized gridsearch stopped at the correct condition
                return True

    if len(metric_list) == possible_model_number:   # never meet early stopping condition at end of random gridsearch
        return True     # if max number of model built, still ok
    else:
        return False    # early stopping condition never met but random gridsearch did not build all models, bad!
def sort_model_by_time(model_list, metric_name):
    """
    Return the cross-validation metric named metric_name for every model, ordered by when
    the model was built (oldest first).  The build order is recovered from the numeric
    suffix of each model id.

    :param model_list: list of models built sequentially that contains metric of interest among other fields
    :param metric_name: string representing name of metric that we want to based our stopping condition on
    :return: model_metric_list sorted by time
    """
    ordered_metrics = [None] * len(model_list)
    for model in model_list:
        # model ids end in the build sequence number, e.g. "..._3"
        build_order = int(model._id.split('_')[-1])
        ordered_metrics[build_order] = \
            model._model_json["output"]["cross_validation_metrics"]._metric_json[metric_name]
    return ordered_metrics
def evaluate_early_stopping(metric_list, stop_round, tolerance, bigger_is_better):
    """
    Mimic the early stopping logic implemented in ScoreKeeper.java: compare the average of
    the most recent stop_round metrics against the average of the stop_round metrics before
    them and decide whether improvement is below tolerance.

    NOTE: when bigger_is_better is True, metric_list is reversed IN PLACE, matching the
    original implementation.

    :param metric_list: list containing the optimization metric under consideration for gridsearch model
    :param stop_round: integer, determine averaging length
    :param tolerance: real, tolerance to see if the grid search model has improved enough to keep going
    :param bigger_is_better: bool: True if metric is optimized as it gets bigger and vice versa
    :return: bool indicating if we should stop early
    """
    if bigger_is_better:
        metric_list.reverse()

    window = 2 * stop_round
    # skip a leading non-float entry if present
    start = 0 if isinstance(metric_list[0], float) else 1

    recent_avg = 1.0 * sum(metric_list[start:stop_round]) / stop_round
    earlier_avg = 1.0 * sum(metric_list[stop_round:window]) / stop_round

    # averages of opposite sign make the ratio meaningless
    if np.sign(recent_avg) != np.sign(earlier_avg):
        return False

    ratio = recent_avg / earlier_avg
    if math.isnan(ratio):
        return False

    if bigger_is_better:
        return not ratio > 1 + tolerance
    return not ratio < 1 - tolerance
def check_and_count_models(hyper_params, params_zero_one, params_more_than_zero, params_more_than_one,
                           params_zero_positive, max_grid_model):
    """
    Derive a smaller hyper-parameter space from hyper_params such that the number of grid
    models it generates stays within max_grid_model, and count how many models that space
    will build.  Parameter settings that are illegal for their declared range are
    discounted from the count.

    :param hyper_params: dict containing model parameter names and list of values to set it to
    :param params_zero_one: list containing model parameter names whose values must be between 0 and 1
    :param params_more_than_zero: list containing model parameter names whose values must exceed zero
    :param params_more_than_one: list containing model parameter names whose values must exceed one
    :param params_zero_positive: list containing model parameter names whose values must equal to or exceed zero
    :param max_grid_model: maximum number of grid_model that can be generated from the new hyper_params space

    :return: total model: integer denoting number of grid models that can be built from all legal parameter settings
    in new hyper_parameter space
    final_hyper_params: dict of new hyper parameter space derived from the original hyper_params
    """
    total_model = 1
    final_hyper_params = dict()
    shuffled_names = list(hyper_params)
    random.shuffle(shuffled_names)  # visit the hyper-parameters in random order

    for param in shuffled_names:
        values = hyper_params[param]
        # count only the values that are legal for this parameter's range
        if param == "col_sample_rate_change_per_level":
            # this param should be > 0 and <= 2
            legal_count = sum(1 for v in values if 0 < v <= 2)
        elif param in params_zero_one:
            legal_count = sum(1 for v in values if 0 <= v <= 1)
        elif param in params_more_than_zero:
            legal_count = sum(1 for v in values if v > 0)
        elif param in params_more_than_one:
            legal_count = sum(1 for v in values if v > 1)
        elif param in params_zero_positive:
            legal_count = sum(1 for v in values if v >= 0)
        else:
            legal_count = len(values)

        # stop adding parameters once the model count would exceed the budget
        if total_model * legal_count > max_grid_model:
            break
        total_model *= legal_count
        final_hyper_params[param] = values

    return total_model, final_hyper_params
def write_hyper_parameters_json(dir1, dir2, json_filename, hyper_parameters):
    """
    Dump the hyper_parameters dict as JSON into both dir1 (test directory) and dir2
    (sandbox) for debugging purposes.

    :param dir1: String containing first directory where you want to write the json file to
    :param dir2: String containing second directory where you want to write the json file to
    :param json_filename: String containing json file name
    :param hyper_parameters: dict containing hyper-parameters used
    """
    for target_dir in (dir1, dir2):
        with open(os.path.join(target_dir, json_filename), 'w') as json_file:
            json.dump(hyper_parameters, json_file)
def compare_frames(frame1, frame2, numElements, tol_time=0, tol_numeric=0, strict=False, compare_NA=True,
                   custom_comparators=None):
    """
    This function will compare two H2O frames to make sure their dimension, and values in all cells are the same.
    It will not compare the column names though.

    :param frame1: H2O frame to be compared
    :param frame2: H2O frame to be compared
    :param numElements: integer to denote number of rows to compare.  Done to reduce compare time.
    Set to 0 or negative number if you want to compare all elements.
    :param tol_time: optional parameter to limit time value difference.
    :param tol_numeric: optional parameter to limit numeric value difference.
    :param strict: optional parameter to enforce strict comparison or not.  If True, column type must
    match in order to pass the test.
    :param compare_NA: optional parameter to compare NA or not.  For csv file generated from orc file, the
    NAs are represented as some other symbol but our CSV will not be able to parse it correctly as NA.
    In this case, do not compare the number of NAs.
    :param custom_comparators: dictionary specifying custom comparators for some columns.

    :return: boolean: True, the two frames are equal and False otherwise.
    """
    # check frame dimensions
    rows1, cols1 = frame1.dim
    rows2, cols2 = frame2.dim

    assert rows1 == rows2 and cols1 == cols2, "failed dim check! frame 1 rows:{0} frame 2 rows:{1} frame 1 cols:{2} " \
                                              "frame2 cols:{3}".format(rows1, rows2, cols1, cols2)

    # per-column NA counts for both frames
    na_frame1 = frame1.isna().sum().sum(axis=1)[:,0]
    na_frame2 = frame2.isna().sum().sum(axis=1)[:,0]

    # fraction of rows to sample when only numElements rows are compared
    probVal = numElements/rows1 if numElements > 0 else 1

    if compare_NA:      # check number of missing values
        assert na_frame1.flatten() == na_frame2.flatten(), "failed numbers of NA check! Frame 1 NA number: {0}, frame 2 " \
                                        "NA number: {1}".format(na_frame1.flatten(), na_frame2.flatten())

    # check column types are the same before proceeding to check each row content.
    for col_ind in range(cols1):

        c1_key = frame1.columns[col_ind]
        c2_key = frame2.columns[col_ind]
        c2_type = frame2.types[c2_key]
        c1_type = frame1.types[c1_key]
        print("###### Comparing column: {0} and column type is {1}.".format(col_ind, c1_type))

        if strict:  # every column type must match
            assert c1_type == c2_type, "failed column type check! frame1 col type: {0}, frame2 col type: " \
                                       "{1}".format(c1_type, c2_type)
        else:
            if str(c2_type) == 'enum':  # orc files do not have enum column type.  We convert it here
                # NOTE(review): asfactor() returns a new frame; this call discards the
                # result and does not change frame1 in place -- confirm intended
                frame1[col_ind].asfactor()

        if custom_comparators and c1_key in custom_comparators:
            # caller-supplied comparison for this column
            custom_comparators[c1_key](frame1, frame2, col_ind, rows1, numElements)
        elif (str(c1_type) == 'string') or (str(c1_type) == 'enum'):
            # compare string
            compare_frames_local_onecolumn_NA_string(frame1[col_ind], frame2[col_ind], prob=probVal)
        else:
            if str(c2_type) == 'time':  # compare time columns
                compare_frames_local_onecolumn_NA(frame1[col_ind], frame2[col_ind], prob=probVal, tol=tol_time)
            else:
                compare_frames_local_onecolumn_NA(frame1[col_ind], frame2[col_ind], prob=probVal, tol=tol_numeric)

    return True
def catch_warnings():
    """
    Return a recording warnings context manager with RuntimeWarnings always shown.

    Clears every loaded module's __warningregistry__ first, so warnings that were
    already raised once will be reported again inside the context.
    """
    import warnings
    warnings.simplefilter("always", RuntimeWarning)
    for module in sys.modules.values():
        if getattr(module, '__warningregistry__', None):
            module.__warningregistry__ = {}
    return warnings.catch_warnings(record=True)
def contains_warning(ws, message):
    """Return True if any recorded RuntimeWarning in ws mentions message."""
    for warning_record in ws:
        if issubclass(warning_record.category, RuntimeWarning) and message in str(warning_record.message):
            return True
    return False
def no_warnings(ws):
    """Return True when the recorded warning list ws is empty."""
    return not ws
def expect_warnings(filewithpath, warn_phrase="warn", warn_string_of_interest="warn", number_of_times=1, in_hdfs=False):
    """
    This function will execute a command to run and analyze the print outs of
    running the command.  The goal here is to capture any warnings that we may expect
    out of running those commands.

    :param filewithpath: name of file to be parsed with path
    :param warn_phrase: capture the warning header, sometimes it is warn or userwarn.
    :param warn_string_of_interest: specific warning message string
    :param number_of_times: number of warning lines we are expecting.
    :param in_hdfs: when True, filewithpath is an HDFS url imported directly;
        otherwise it is resolved with locate() first.

    :return: True if warning was found at least number_of_times and False otherwise
    """
    number_warnings = 0

    buffer = StringIO()     # redirect warning messages to string buffer for later analysis
    sys.stderr = buffer
    try:
        if in_hdfs:
            h2o.import_file(filewithpath)
        else:
            h2o.import_file(path=locate(filewithpath))
    finally:
        # bug fix: restore stderr even when the import raises, otherwise all later
        # stderr output would silently disappear into the dead buffer
        sys.stderr = sys.__stderr__

    try:    # for python 2.7: StringIO exposes the individual writes in buflist
        if len(buffer.buflist) > 0:
            for index in range(len(buffer.buflist)):
                print("*** captured warning message: {0}".format(buffer.buflist[index]))
                if (warn_phrase in buffer.buflist[index]) and (warn_string_of_interest in buffer.buflist[index]):
                    number_warnings = number_warnings + 1
    except AttributeError:  # for python 3: io.StringIO has no buflist
        warns = buffer.getvalue()
        print("*** captured warning message: {0}".format(warns))
        if (warn_phrase in warns) and (warn_string_of_interest in warns):
            number_warnings = number_warnings + 1

    print("Number of warnings found: {0} and number of times that warnings should appear {1}.".format(number_warnings,
                                                                                                      number_of_times))
    return number_warnings >= number_of_times
def compare_frame_summary(frame1_summary, frame2_summary, compareNames=False, compareTypes=False):
    """
    Compare the per-column summaries of two frames and assert that they agree.

    Column labels and types are compared only when compareNames/compareTypes is True; the
    'precision' entry is always skipped.  Float values (scalar or inside a list) must agree
    within 1e-5; every other value must compare equal.

    :param frame1_summary: list of per-column summary dicts of the first frame
    :param frame2_summary: list of per-column summary dicts of the second frame
    :param compareNames: when True, also compare the column labels
    :param compareTypes: when True, also compare the column types
    :return: None; raises AssertionError on any mismatch
    """
    frame1_column_number = len(frame1_summary)
    frame2_column_number = len(frame2_summary)

    assert frame1_column_number == frame2_column_number, "failed column number check! Frame 1 column number: {0}," \
                                                         "frame 2 column number: {1}".format(frame1_column_number,
                                                                                             frame2_column_number)
    for col_index in range(frame1_column_number):   # check summary for each column
        for key_val in list(frame1_summary[col_index]):

            if not(compareNames) and (str(key_val) == 'label'):
                continue

            if not(compareTypes) and (str(key_val) == 'type'):
                continue

            if str(key_val) == 'precision':     # skip comparing precision
                continue

            val1 = frame1_summary[col_index][key_val]
            val2 = frame2_summary[col_index][key_val]

            if isinstance(val1, list) or isinstance(val1, dict):
                if isinstance(val1, dict):
                    assert val1 == val2, "failed column summary comparison for column {0} and summary " \
                                         "type {1}, frame 1 value is {2}, frame 2 value is " \
                                         "{3}".format(col_index, str(key_val), val1, val2)
                else:
                    if len(val1) > 0:
                        # find if elements are float
                        float_found = False

                        for ind in range(len(val1)):
                            if isinstance(val1[ind], float):
                                float_found = True
                                break

                        if float_found:     # every float must agree within tolerance
                            for ind in range(len(val1)):
                                # bug fix: the original test was str(val1[ind] == 'NaN'),
                                # which yields "False"/"True" (always truthy), so this
                                # assert never executed; skip only genuine NaN entries
                                if str(val1[ind]) != 'NaN':
                                    assert abs(val1[ind]-val2[ind]) < 1e-5, "failed column summary comparison for " \
                                                                            "column {0} and summary type {1}, frame 1" \
                                                                            " value is {2}, frame 2 value is " \
                                                                            "{3}".format(col_index, str(key_val),
                                                                                         val1[ind], val2[ind])
                        else:
                            assert val1 == val2, "failed column summary comparison for column {0} and summary" \
                                                 " type {1}, frame 1 value is {2}, frame 2 value is " \
                                                 "{3}".format(col_index, str(key_val), val1, val2)
            else:
                if isinstance(val1, float):
                    assert abs(val1-val2) < 1e-5, "failed column summary comparison for column {0} and summary type " \
                                                  "{1}, frame 1 value is {2}, frame 2 value is " \
                                                  "{3}".format(col_index, str(key_val), val1, val2)
                else:
                    assert val1 == val2, "failed column summary comparison for column {0} and summary type " \
                                         "{1}, frame 1 value is {2}, frame 2 value is " \
                                         "{3}".format(col_index, str(key_val), val1, val2)
def cannaryHDFSTest(hdfs_name_node, file_name):
    """
    Detect whether the cluster's hive-exec version is too old to parse the given orc file.
    Returns True when parsing fails with the known "NoSuchFieldError: vector" signature
    (hive-exec too old) and False otherwise.

    :param hdfs_name_node: HDFS name node address
    :param file_name: path of the file on HDFS
    :return: bool
    """
    url_orc = "hdfs://{0}{1}".format(hdfs_name_node, file_name)
    try:
        temp_frame = h2o.import_file(url_orc)
        h2o.remove(temp_frame)
        print("Your hive-exec version is good.  Parsing success for {0}.".format(url_orc))
        return False
    except Exception as e:
        print("Error exception is {0}".format(str(e)))
        # only this particular failure indicates an outdated hive-exec
        return "NoSuchFieldError: vector" in str(e)
def extract_scoring_history_field(aModel, fieldOfInterest, takeFirst=False):
    """
    Extract the values of fieldOfInterest from aModel's scoring-history table.

    :param aModel: H2O model whose scoring history is examined
    :param fieldOfInterest: string name of the scoring-history column to extract
    :param takeFirst: when True, keep only the first iteration's value
    :return: list of field values or None if the field cannot be found
    """
    scoring_history = aModel._model_json["output"]["scoring_history"]
    return extract_from_twoDimTable(scoring_history, fieldOfInterest, takeFirst)
def extract_from_twoDimTable(metricOfInterest, fieldOfInterest, takeFirst=False):
    """
    Extract the list of values stored under fieldOfInterest from the H2OTwoDimTable
    metricOfInterest (e.g. a model's scoring history).

    :param metricOfInterest: H2OTwoDimTable to extract from
    :param fieldOfInterest: string representing a field of interest.
    :param takeFirst: when True, only the first row's value is returned
    :return: List of field values or None if it cannot be found
    """
    allFields = metricOfInterest._col_header
    # bug fix: takeFirst used to be hard-coded to False here, silently ignoring
    # the caller's argument
    return extract_field_from_twoDimTable(allFields, metricOfInterest.cell_values, fieldOfInterest,
                                          takeFirst=takeFirst)
def extract_field_from_twoDimTable(allFields, cell_values, fieldOfInterest, takeFirst=False):
    """
    Pull the column named fieldOfInterest out of a twoDimTable's cell values.

    :param allFields: list of column header names
    :param cell_values: list of table rows
    :param fieldOfInterest: string name of the column to extract
    :param takeFirst: when True, only the first row's value is kept
    :return: list of field values, or None when the field is absent
    """
    if fieldOfInterest not in allFields:
        return None
    col = allFields.index(fieldOfInterest)
    if takeFirst:
        # only grab the result from the first iteration
        return [cell_values[0][col]] if cell_values else []
    return [row[col] for row in cell_values]
def model_run_time_sorted_by_time(model_list):
    """
    Return each model's run time in seconds, ordered by when the model was built
    (oldest first).  Build order is recovered from the numeric suffix of the model id.

    :param model_list: list of models built sequentially that contains metric of interest among other fields
    :return: model run time in secs sorted by order of building
    """
    run_times = [None] * len(model_list)
    for model in model_list:
        # model names are numbered starting at 1, hence the -1
        build_index = int(model._id.split('_')[-1]) - 1
        run_times[build_index] = model._model_json["output"]["run_time"] / 1000.0
    return run_times
def model_seed_sorted(model_list):
    """
    Collect the seed used by each model in a model collection, then return the seeds in
    ascending order.

    :param model_list: collection supporting len() with a .models attribute holding models
        built sequentially
    :return: list of seeds, sorted ascending
    """
    num_models = len(model_list)
    seeds = [None] * num_models
    for m_ind in range(num_models):
        params = model_list.models[m_ind]._model_json["parameters"]
        # scan the parameter list for the "seed" entry (bounded by model 0's parameter
        # count, as in the original -- parameter lists are assumed parallel across models)
        for p_ind in range(len(model_list.models[0]._model_json["parameters"])):
            if params[p_ind]["name"] == "seed":
                seeds[m_ind] = params[p_ind]["actual_value"]
                break
    seeds.sort()
    return seeds
def check_ignore_cols_automl(models,names,x,y):
    """
    Verify that every non-ensemble model produced by an AutoML run honored the requested
    predictor set: each model's actual ignored_columns must equal
    (all frame columns) - response - predictors.

    :param models: single-column H2OFrame whose values are model ids (e.g. a leaderboard column)
    :param names: all column names of the training frame
    :param x: predictor column names used in the AutoML run
    :param y: response column name
    """
    models = sum(models.as_data_frame().values.tolist(),[])  # flatten the one-column frame into a flat list of model ids
    for model in models:
        if "StackedEnsemble" in model:
            continue  # ensembles have no ignored_columns of their own
        else:
            assert set(h2o.get_model(model).params["ignored_columns"]["actual"]) == set(names) - {y} - set(x), \
                "ignored columns are not honored for model " + model
# This method is not changed to local method using as_data_frame because the frame size is too big.
def check_sorted_2_columns(frame1, sorted_column_indices, prob=0.5, ascending=[True, True]):
    """
    Spot-check that frame1 is sorted on two columns: the first index in sorted_column_indices
    is the primary sort key and the second is the tie-breaker, compared only when adjacent
    rows have equal primary-key values.  Each adjacent row pair is checked with probability
    prob, and pairs containing NaN are skipped.  Raises an assertion error on any violation.

    :param frame1: H2OFrame to check cell by cell (too large for as_data_frame)
    :param sorted_column_indices: [primary column index, secondary column index]
    :param prob: probability of checking any given adjacent row pair
    :param ascending: expected sort direction per column.  NOTE(review): indexed by the raw
        column index (ascending[colInd]), which assumes the sorted columns are 0 and 1 -- confirm
    """
    for colInd in sorted_column_indices:
        for rowInd in range(0, frame1.nrow-1):
            if (random.uniform(0.0,1.0) < prob):
                if colInd == sorted_column_indices[0]:
                    # primary key: adjacent rows must simply be ordered
                    if not(math.isnan(frame1[rowInd, colInd])) and not(math.isnan(frame1[rowInd+1,colInd])):
                        if ascending[colInd]:
                            assert frame1[rowInd,colInd] <= frame1[rowInd+1,colInd], "Wrong sort order: value at row {0}: {1}, value at " \
                                                               "row {2}: {3}".format(rowInd, frame1[rowInd,colInd],
                                                                                     rowInd+1, frame1[rowInd+1,colInd])
                        else:
                            assert frame1[rowInd,colInd] >= frame1[rowInd+1,colInd], "Wrong sort order: value at row {0}: {1}, value at " \
                                                                                     "row {2}: {3}".format(rowInd, frame1[rowInd,colInd],
                                                                                                           rowInd+1, frame1[rowInd+1,colInd])
                else:   # for second column
                    # secondary key: only meaningful when the primary keys of the two rows tie
                    if not(math.isnan(frame1[rowInd, sorted_column_indices[0]])) and not(math.isnan(frame1[rowInd+1,sorted_column_indices[0]])):
                        if (frame1[rowInd,sorted_column_indices[0]]==frame1[rowInd+1, sorted_column_indices[0]]):  # meaningful to compare row entries then
                            if not(math.isnan(frame1[rowInd, colInd])) and not(math.isnan(frame1[rowInd+1,colInd])):
                                if ascending[colInd]:
                                    assert frame1[rowInd,colInd] <= frame1[rowInd+1,colInd], "Wrong sort order: value at row {0}: {1}, value at " \
                                                                                             "row {2}: {3}".format(rowInd, frame1[rowInd,colInd],
                                                                                                                   rowInd+1, frame1[rowInd+1,colInd])
                                else:
                                    assert frame1[rowInd,colInd] >= frame1[rowInd+1,colInd], "Wrong sort order: value at row {0}: {1}, value at " \
                                                                                             "row {2}: {3}".format(rowInd, frame1[rowInd,colInd],
                                                                                                                   rowInd+1, frame1[rowInd+1,colInd])
# This method is not changed to local method using as_data_frame because the frame size is too big.
def check_sorted_1_column(frame1, sorted_column_index, prob=0.5, ascending=True):
    """
    Spot-check that frame1 is sorted on one column by visiting adjacent row pairs with a
    fixed stride of int(1/prob).  Pairs containing NaN are skipped.  Raises an assertion
    error on any violation.

    :param frame1: H2OFrame to check cell by cell (too large for as_data_frame)
    :param sorted_column_index: index of the column expected to be sorted
    :param prob: fraction of rows to check; determines the stride between checked rows
    :param ascending: True for ascending order, False for descending
    """
    totRow = frame1.nrow * prob
    skipRow = int(frame1.nrow/totRow)  # stride == int(1/prob)
    for rowInd in range(0, frame1.nrow-1, skipRow):
        if not (math.isnan(frame1[rowInd, sorted_column_index])) and not (
        math.isnan(frame1[rowInd + 1, sorted_column_index])):
            if ascending:
                assert frame1[rowInd, sorted_column_index] <= frame1[
                    rowInd + 1, sorted_column_index], "Wrong sort order: value at row {0}: {1}, value at " \
                                                      "row {2}: {3}".format(rowInd,
                                                                            frame1[rowInd, sorted_column_index],
                                                                            rowInd + 1,
                                                                            frame1[rowInd + 1, sorted_column_index])
            else:
                assert frame1[rowInd, sorted_column_index] >= frame1[
                    rowInd + 1, sorted_column_index], "Wrong sort order: value at row {0}: {1}, value at " \
                                                      "row {2}: {3}".format(rowInd,
                                                                            frame1[rowInd, sorted_column_index],
                                                                            rowInd + 1,
                                                                            frame1[rowInd + 1, sorted_column_index])
def assert_correct_frame_operation(sourceFrame, h2oResultFrame, operString):
    """
    Check each element of a numeric H2OFrame against the same math operation carried out
    by Python and raise an assertion error if any pair differs by more than 1e-6.

    :param sourceFrame: original H2OFrame.
    :param h2oResultFrame: H2OFrame after operation on original H2OFrame is carried out.
    :param operString: str representing one of 'abs', 'acos', 'acosh', 'asin', 'asinh', 'atan', 'atanh',
    'ceil', 'cos', 'cosh', 'cospi', 'cumprod', 'cumsum', 'digamma', 'exp', 'expm1', 'floor', 'round',
    'sin', 'sign', 'round', 'sinh', 'tan', 'tanh'
    :return: None.
    """
    import builtins  # local import: only this function needs it

    validStrings = ['acos', 'acosh', 'asin', 'asinh', 'atan', 'atanh', 'ceil', 'cos', 'cosh',
                    'exp', 'floor', 'gamma', 'lgamma', 'log', 'log10', 'sin', 'sinh',
                    'sqrt', 'tan', 'tanh', 'trigamma', 'expm1']
    npValidStrings = ['log2', 'sign']
    nativeStrings = ['round', 'abs', 'cumsum']
    multpi = ['cospi', 'sinpi', 'tanpi']
    others = ['log1p', 'signif', 'trigamma', 'digamma', 'cumprod']
    # check for valid operString
    assert operString in validStrings+npValidStrings+nativeStrings+multpi+others, "Illegal operator " \
                                                                                  "{0} specified.".format(operString)
    result_comp = lambda x: x  # default method
    if operString == "log1p":
        result_comp = lambda x: math.log(x+1)
    elif operString == 'signif':
        result_comp = lambda x: round(x, 7)
    elif operString == 'trigamma':
        result_comp = lambda x: scipy.special.polygamma(1, x)
    elif operString == 'digamma':
        result_comp = lambda x: scipy.special.polygamma(0, x)
    elif operString == 'cumprod':
        result_comp = lambda x: factorial(x)
    elif operString in validStrings:
        result_comp = lambda x: getattr(math, operString)(x)
    elif operString in nativeStrings:
        # Bug fix: __builtins__ is the builtins module in __main__ but a plain dict in
        # imported modules, so __builtins__.get(...) is fragile.  Resolve the builtin
        # lazily through the builtins module instead (same crash point as before for
        # names like 'cumsum' that are not actually builtins).
        result_comp = lambda x: getattr(builtins, operString)(x)
    elif operString in npValidStrings:
        result_comp = lambda x: getattr(np, operString)(x)
    elif operString in multpi:
        # 'cospi'/'sinpi'/'tanpi' mean f(x * pi) using the base math function
        result_comp = lambda x: getattr(math, operString.split('p')[0])(x*math.pi)
    for col_ind in range(sourceFrame.ncols):
        for row_ind in range(sourceFrame.nrows):
            result_val = result_comp(sourceFrame[row_ind, col_ind])
            assert abs(h2oResultFrame[row_ind, col_ind]-result_val) <= 1e-6, \
                " command {0}({3}) is not working. Expected: {1}. Received: {2}".format(operString, result_val,
                                                                                        h2oResultFrame[row_ind, col_ind], sourceFrame[row_ind, col_ind])
def factorial(n):
    """
    Recursive factorial, defined locally in case math.factorial is unavailable
    (python 2.5 or less).

    :param n: non-negative number
    :return: n! (1 when 0 <= n < 2)
    """
    # Bug fix: the original condition (n > 0 and n < 2) excluded 0, so factorial(0)
    # silently returned None instead of 1.
    if 0 <= n < 2:
        return 1
    if n >= 2:
        return n*factorial(n-1)
def cumop(items, op, colInd=0):  # take in one column only
    """
    Apply a cumulative binary operation down one column of a 2-D indexable structure
    (e.g. an H2OFrame or numpy array).

    :param items: table supporting items[row, col] indexing and len()
    :param op: binary function combining the running value with the next element
    :param colInd: column to scan (default 0)
    :return: list of cumulative results, same length as items
    """
    out = []
    for ind in range(len(items)):
        current = items[ind, colInd]
        out.append(current if ind == 0 else op(out[ind-1], current))
    return out
def compare_string_frames_local(f1, f2, prob=0.5):
    """
    Compare two string H2OFrames locally, matching f2's columns to f1's by column name;
    each data row is compared with probability prob.  Raises an assertion error on the
    first mismatch.

    :param f1: first H2OFrame
    :param f2: second H2OFrame with the same column names (possibly in a different order)
    :param prob: probability of comparing any given row
    """
    temp1 = f1.as_data_frame(use_pandas=False)  # row 0 holds the column headers
    temp2 = f2.as_data_frame(use_pandas=False)
    cname1 = temp1[0]
    cname2 = temp2[0]
    assert (f1.nrow==f2.nrow) and (f1.ncol==f2.ncol), "The two frames are of different sizes."
    for colInd in range(f1.ncol):
        name1 = cname1[colInd]
        for rowInd in range(1, f2.nrow):  # start at 1 to skip the header row
            if random.uniform(0,1) < prob:
                # NOTE(review): the failure message prints temp2[rowInd][colInd], not the
                # name-matched temp2 cell actually compared -- can mislead on reordered frames
                assert temp1[rowInd][colInd]==temp2[rowInd][cname2.index(name1)], "Failed frame values check at row {2} and column {3}! " \
                                                                             "frame1 value: {0}, frame2 value: " \
                                                                             "{1}".format(temp1[rowInd][colInd], temp2[rowInd][colInd], rowInd, colInd)
def check_data_rows(f1, f2, index_list=[], num_rows=10):
    '''
    This method will compare the relationships of the data rows within each frames. In particular, we are
    interested in the relative direction of each row vectors and the relative distances. No assertions will
    be thrown; the maximum differences found are only printed.

    :param f1: first H2OFrame
    :param f2: second H2OFrame with the same number of rows as f1
    :param index_list: optional row indices to compare; when empty, num_rows random rows are sampled
    :param num_rows: number of random rows to sample when index_list is empty
    :return: None (prints the maximum inner-product and distance differences)
    '''
    # NOTE(review): DataFrame.as_matrix was removed in pandas 1.0; .to_numpy() is the
    # modern equivalent -- confirm the pinned pandas version before upgrading.
    temp1 = f1.as_data_frame(use_pandas=True).as_matrix()
    temp2 = f2.as_data_frame(use_pandas=True).as_matrix()
    if len(index_list)==0:
        index_list = random.sample(range(f1.nrow), num_rows)
    maxInnerProduct = 0
    maxDistance = 0
    for row_index in range(1, len(index_list)):
        # inner products and distances between consecutive sampled rows in each frame
        r1 = np.inner(temp1[index_list[row_index-1]], temp1[index_list[row_index]])
        r2 = np.inner(temp2[index_list[row_index-1]], temp2[index_list[row_index]])
        d1 = np.linalg.norm(temp1[index_list[row_index-1]]-temp1[index_list[row_index]])
        d2 = np.linalg.norm(temp2[index_list[row_index-1]]-temp2[index_list[row_index]])
        # compare on whichever scale (absolute or relative) is smaller, to be lenient on large values
        diff1 = min(abs(r1-r2), abs(r1-r2)/max(abs(r1), abs(r2)))
        maxInnerProduct = max(maxInnerProduct, diff1)
        diff2 = min(abs(d1-d2), abs(d1-d2)/max(abs(d1), abs(d2)))
        maxDistance = max(maxDistance, diff2)
    print("Maximum inner product different is {0}. Maximum distance difference is "
          "{1}".format(maxInnerProduct, maxDistance))
def compare_data_rows(f1, f2, index_list=[], num_rows=10, tol=1e-3):
    '''
    This method will compare the relationships of the data rows within each frames. In particular, we are
    interested in the relative direction of each row vectors and the relative distances. An assertion will be
    thrown if they are different beyond a tolerance.

    :param f1: first H2OFrame
    :param f2: second H2OFrame with the same number of rows as f1
    :param index_list: optional row indices to compare; when empty, num_rows random rows are sampled
    :param num_rows: number of random rows to sample when index_list is empty
    :param tol: maximum allowed difference (min of absolute/relative) per row pair
    :return: None (prints the maximum inner-product and distance differences)
    '''
    # NOTE(review): DataFrame.as_matrix was removed in pandas 1.0; .to_numpy() is the
    # modern equivalent -- confirm the pinned pandas version before upgrading.
    temp1 = f1.as_data_frame(use_pandas=True).as_matrix()
    temp2 = f2.as_data_frame(use_pandas=True).as_matrix()
    if len(index_list)==0:
        index_list = random.sample(range(f1.nrow), num_rows)
    maxInnerProduct = 0
    maxDistance = 0
    for row_index in range(1, len(index_list)):
        # inner products and distances between consecutive sampled rows in each frame
        r1 = np.inner(temp1[index_list[row_index-1]], temp1[index_list[row_index]])
        r2 = np.inner(temp2[index_list[row_index-1]], temp2[index_list[row_index]])
        d1 = np.linalg.norm(temp1[index_list[row_index-1]]-temp1[index_list[row_index]])
        d2 = np.linalg.norm(temp2[index_list[row_index-1]]-temp2[index_list[row_index]])
        # compare on whichever scale (absolute or relative) is smaller, to be lenient on large values
        diff1 = min(abs(r1-r2), abs(r1-r2)/max(abs(r1), abs(r2)))
        maxInnerProduct = max(maxInnerProduct, diff1)
        diff2 = min(abs(d1-d2), abs(d1-d2)/max(abs(d1), abs(d2)))
        maxDistance = max(maxDistance, diff2)
        assert diff1 < tol, \
            "relationship between data row {0} and data row {1} are different among the two dataframes. Inner " \
            "product from frame 1 is {2}. Inner product from frame 2 is {3}. The difference between the two is" \
            " {4}".format(index_list[row_index-1], index_list[row_index], r1, r2, diff1)
        assert diff2 < tol, \
            "distance betwee data row {0} and data row {1} are different among the two dataframes. Distance " \
            "between 2 rows from frame 1 is {2}. Distance between 2 rows from frame 2 is {3}. The difference" \
            " between the two is {4}".format(index_list[row_index-1], index_list[row_index], d1, d2, diff2)
    print("Maximum inner product different is {0}. Maximum distance difference is "
          "{1}".format(maxInnerProduct, maxDistance))
def compute_frame_diff(f1, f2):
    '''
    Sum the elementwise absolute differences of two H2OFrames into a single scalar.

    :param f1: first H2OFrame
    :param f2: second H2OFrame, same shape as f1
    :return: total absolute difference over all elements
    '''
    absDiff = h2o.H2OFrame.abs(f1 - f2)
    # sum down the columns, then across, and pull out the resulting scalar
    return h2o.H2OFrame.sum(h2o.H2OFrame.sum(absDiff), axis=1)[0, 0]
def compare_frames_local(f1, f2, prob=0.5, tol=1e-6, returnResult=False):
    '''
    Compare two h2o frames column by column and make sure they agree.  uuid columns are
    skipped; enum, string and numeric columns are delegated to the matching per-column
    comparator.

    :param f1: first H2OFrame
    :param f2: second H2OFrame, same shape as f1
    :param prob: fraction of rows to sample for comparison
    :param tol: numeric tolerance for non-categorical columns
    :param returnResult: when True, return True/False instead of relying on assertions only
    :return: True/False when returnResult is True, otherwise None
    '''
    assert (f1.nrow==f2.nrow) and (f1.ncol==f2.ncol), "Frame 1 row {0}, col {1}. Frame 2 row {2}, col {3}. They are " \
                                                      "different.".format(f1.nrow, f1.ncol, f2.nrow, f2.ncol)
    typeDict = f1.types
    frameNames = f1.names
    for colInd in range(f1.ncol):
        colType = typeDict[frameNames[colInd]]
        if colType == u'uuid':
            continue  # uuid columns are never compared
        if colType == u'enum':
            result = compare_frames_local_onecolumn_NA_enum(f1[colInd], f2[colInd], prob=prob, tol=tol, returnResult=returnResult)
        elif colType == u'string':
            result = compare_frames_local_onecolumn_NA_string(f1[colInd], f2[colInd], prob=prob, returnResult=returnResult)
        else:
            result = compare_frames_local_onecolumn_NA(f1[colInd], f2[colInd], prob=prob, tol=tol, returnResult=returnResult)
        if returnResult and not result:
            return False
    if returnResult:
        return True
def compare_frames_local_svm(f1, f2, prob=0.5, tol=1e-6, returnResult=False):
    '''
    compare f1 and f2 but with f2 parsed from svmlight parser. Here, the na's should be replaced with 0.0

    :param f1: normal h2oFrame
    :param f2: h2oFrame parsed from a svmlight parser.
    :param prob: unused here; kept for signature consistency with the other comparators
    :param tol: numeric tolerance for comparing cell values
    :param returnResult: when True return False on first mismatch instead of only asserting
    :return: True/False when returnResult is True, otherwise None
    '''
    assert (f1.nrow==f2.nrow) and (f1.ncol==f2.ncol), "The two frames are of different sizes."
    temp1 = f1.as_data_frame(use_pandas=False)  # row 0 holds the column headers
    temp2 = f2.as_data_frame(use_pandas=False)
    for rowInd in range(1, f1.nrow):  # start at 1 to skip the header row
        for colInd in range(f1.ncol):
            if (len(temp1[rowInd][colInd]))==0: # encounter NAs
                # an NA in f1 must correspond to 0.0 in the svmlight-parsed frame
                if returnResult:
                    if (abs(float(temp2[rowInd][colInd]))) > tol:
                        return False
                assert (abs(float(temp2[rowInd][colInd]))) <= tol, \
                    "Expected: 0.0 but received: {0} for row: {1}, col: " \
                    "{2}".format(temp2[rowInd][colInd], rowInd, colInd)
            else:
                if returnResult:
                    if abs(float(temp1[rowInd][colInd])-float(temp2[rowInd][colInd]))>tol:
                        return False
                assert abs(float(temp1[rowInd][colInd])-float(temp2[rowInd][colInd]))<=tol, \
                    "Expected: {1} but received: {0} for row: {2}, col: " \
                    "{3}".format(temp2[rowInd][colInd], temp1[rowInd][colInd], rowInd, colInd)
    if returnResult:
        return True
# frame compare with NAs in column
def compare_frames_local_onecolumn_NA(f1, f2, prob=0.5, tol=1e-6, returnResult=False, oneLessRow=False):
    """
    Compare one numeric (or time) column from two frames, tolerating NAs: sampled rows must
    either both be NA or agree within a relative tolerance.

    :param f1: single-column H2OFrame
    :param f2: single-column H2OFrame to compare against f1
    :param prob: probability of checking any given row
    :param tol: relative tolerance for the numeric comparison
    :param returnResult: when True return False on first mismatch instead of only asserting
    :param oneLessRow: when True, skip f2's last row (compare one fewer row)
    :return: True/False when returnResult is True, otherwise None
    """
    if (f1.types[f1.names[0]] == u'time'): # we have to divide by 1000 before converting back and forth between ms and time format
        tol = 10  # ms <-> time round-trips lose precision, so the tolerance is loosened
    temp1 = f1.as_data_frame(use_pandas=False)  # row 0 holds the column headers
    temp2 = f2.as_data_frame(use_pandas=False)
    assert (f1.nrow==f2.nrow) and (f1.ncol==f2.ncol), "The two frames are of different sizes."
    if oneLessRow:
        lastF2Row = f2.nrow
    else:
        lastF2Row = f2.nrow+1  # +1 because row 0 is the header
    for colInd in range(f1.ncol):
        for rowInd in range(1,lastF2Row):
            if (random.uniform(0,1) < prob):
                if len(temp1[rowInd]) == 0 or len(temp2[rowInd]) == 0:
                    # empty row means NA; both frames must agree on NA-ness
                    if returnResult:
                        if not(len(temp1[rowInd]) == len(temp2[rowInd])):
                            return False
                    else:
                        assert len(temp1[rowInd]) == len(temp2[rowInd]), "Failed frame values check at row {2} ! " \
                                                                     "frame1 value: {0}, frame2 value: " \
                                                                     "{1}".format(temp1[rowInd], temp2[rowInd], rowInd)
                else:
                    v1 = float(temp1[rowInd][colInd])
                    v2 = float(temp2[rowInd][colInd])
                    # relative difference, with a floor of 1.0 on the denominator
                    diff = abs(v1-v2)/max(1.0, abs(v1), abs(v2))
                    if returnResult:
                        if (diff > tol):
                            return False
                    else:
                        assert diff<=tol, "Failed frame values check at row {2} and column {3}! frame1 value: {0}, column name: {4}. frame2 value: " \
                                      "{1}, column name:{5}".format(temp1[rowInd][colInd], temp2[rowInd][colInd], rowInd, colInd, f1.names[0], f2.names[0])
    if returnResult:
        return True
# frame compare with NAs in column
def compare_frames_local_onecolumn_NA_enum(f1, f2, prob=0.5, tol=1e-6, returnResult=False):
    """
    Compare one enum (categorical) column from two frames, tolerating NAs: sampled rows
    must either both be NA or hold the same level string.

    :param f1: single-column H2OFrame
    :param f2: single-column H2OFrame to compare against f1
    :param prob: probability of checking any given row
    :param tol: unused for enum values; kept for signature consistency
    :param returnResult: when True return False on first mismatch instead of only asserting
    :return: True/False when returnResult is True, otherwise None
    """
    temp1 = f1.as_data_frame(use_pandas=False)  # row 0 holds the column headers
    temp2 = f2.as_data_frame(use_pandas=False)
    assert (f1.nrow==f2.nrow) and (f1.ncol==f2.ncol), "The two frames are of different sizes."
    for colInd in range(f1.ncol):
        for rowInd in range(1,f2.nrow+1):  # +1 because row 0 is the header
            if (random.uniform(0,1) < prob):
                if len(temp1[rowInd]) == 0 or len(temp2[rowInd]) == 0:
                    # empty row means NA; both frames must agree on NA-ness
                    if returnResult:
                        if not(len(temp1[rowInd]) == len(temp2[rowInd])):
                            return False
                    else:
                        assert len(temp1[rowInd]) == len(temp2[rowInd]), "Failed frame values check at row {2} ! " \
                                                                         "frame1 value: {0}, frame2 value: " \
                                                                         "{1}".format(temp1[rowInd], temp2[rowInd], rowInd)
                else:
                    if returnResult:
                        if not(temp1[rowInd][colInd]==temp2[rowInd][colInd]):
                            return False
                    else:
                        assert temp1[rowInd][colInd]==temp2[rowInd][colInd], "Failed frame values check at row {2} and column {3}! frame1 value: {0}, column name: {4}. frame2 value: " \
                                          "{1}, column name:{5}".format(temp1[rowInd][colInd], temp2[rowInd][colInd], rowInd, colInd, f1.names[0], f2.names[0])
    if returnResult:
        return True
# frame compare with NAs in column
def compare_frames_local_onecolumn_NA_string(f1, f2, prob=0.5, returnResult=False):
    """
    Compare one string column from two frames, tolerating NAs: sampled rows must either
    both be NA or hold the same string.

    :param f1: single-column H2OFrame
    :param f2: single-column H2OFrame to compare against f1
    :param prob: probability of checking any given row
    :param returnResult: when True return False on first mismatch instead of only asserting
    :return: True/False when returnResult is True, otherwise None
    """
    temp1 = f1.as_data_frame(use_pandas=False)  # row 0 holds the column headers
    temp2 = f2.as_data_frame(use_pandas=False)
    assert (f1.nrow==f2.nrow) and (f1.ncol==f2.ncol), "The two frames are of different sizes."
    for colInd in range(f1.ncol):
        for rowInd in range(1,f2.nrow+1):  # +1 because row 0 is the header
            if (random.uniform(0,1) < prob):
                if len(temp1[rowInd]) == 0 or len(temp2[rowInd]) == 0:
                    # empty row means NA; both frames must agree on NA-ness
                    if returnResult:
                        if not(len(temp1[rowInd]) == len(temp2[rowInd])):
                            return False
                    else:
                        assert len(temp1[rowInd]) == len(temp2[rowInd]), "Failed frame values check at row {2} ! " \
                                                                         "frame1 value: {0}, frame2 value: " \
                                                                         "{1}".format(temp1[rowInd], temp2[rowInd], rowInd)
                else:
                    if returnResult:
                        if not(temp1[rowInd][colInd]==temp2[rowInd][colInd]):
                            return False
                    else:
                        assert temp1[rowInd][colInd]==temp2[rowInd][colInd], "Failed frame values check at row {2} and column {3}! frame1 value: {0}, column name: {4}. frame2 value: " \
                                                                             "{1}, column name:{5}".format(temp1[rowInd][colInd], temp2[rowInd][colInd], rowInd, colInd, f1.names[0], f2.names[0])
    if returnResult:
        return True
def build_save_model_generic(params, x, train, respName, algoName, tmpdir):
    """
    Build a model of the requested algorithm, train it, and download its MOJO to tmpdir.

    :param params: dict of hyper-parameters for the estimator constructor
    :param x: predictor column names
    :param train: training H2OFrame
    :param respName: response column name
    :param algoName: one of "gam", "glm", "gbm", "drf" (case-insensitive)
    :param tmpdir: directory the MOJO is downloaded into
    :return: the trained model
    """
    # dispatch table: algo name -> estimator class
    estimators = {
        "gam": H2OGeneralizedAdditiveEstimator,
        "glm": H2OGeneralizedLinearEstimator,
        "gbm": H2OGradientBoostingEstimator,
        "drf": H2ORandomForestEstimator,
    }
    constructor = estimators.get(algoName.lower())
    if constructor is None:
        raise Exception("build_save_model does not support algo "+algoName+". Please add this to build_save_model.")
    model = constructor(**params)
    model.train(x=x, y=respName, training_frame=train)
    model.download_mojo(path=tmpdir)
    return model
# generate random dataset, copied from Pasha
def random_dataset(response_type, verbose=True, ncol_upper=25000, ncol_lower=15000, NTESTROWS=200, missing_fraction=0.0, seed=None):
    """Create and return a random dataset with a response column.

    NOTE(review): despite their names, ncol_upper/ncol_lower bound the random ROW count
    below; the column count is always drawn from randint(3, 20).
    """
    if verbose:
        print("\nCreating a dataset for a %s problem:" % response_type)
    random.seed(seed)  # seed the global RNG so the fraction draws below are reproducible
    fractions = {k + "_fraction": random.random() for k in "real categorical integer time string binary".split()}
    fractions["string_fraction"] = 0  # Right now we are dropping string columns, so no point in having them.
    fractions["binary_fraction"] /= 3
    fractions["time_fraction"] /= 2
    sum_fractions = sum(fractions.values())
    for k in fractions:
        fractions[k] /= sum_fractions  # normalize so the fractions sum to 1
    if response_type == 'binomial':
        response_factors = 2
    elif response_type == 'gaussian':
        response_factors = 1  # numeric (non-factor) response
    else:
        response_factors = random.randint(3, 10)  # multinomial with a random class count
    df = h2o.create_frame(rows=random.randint(ncol_lower, ncol_upper) + NTESTROWS, cols=random.randint(3, 20),
                          missing_fraction=missing_fraction,
                          has_response=True, response_factors=response_factors, positive_response=True, factors=10,
                          seed=seed, **fractions)
    if verbose:
        print()
        df.show()
    return df
# generate random dataset of ncolumns of Strings, copied from Pasha
def random_dataset_strings_only(nrow, ncol, seed=None):
    """Create and return a random dataset made entirely of string columns."""
    # every non-string fraction is zero, so all generated columns are strings
    fractions = {
        "real_fraction": 0,
        "categorical_fraction": 0,
        "integer_fraction": 0,
        "time_fraction": 0,
        "string_fraction": 1,
        "binary_fraction": 0,
    }
    return h2o.create_frame(rows=nrow, cols=ncol, missing_fraction=0, has_response=False, seed=seed, **fractions)
def random_dataset_all_types(nrow, ncol, seed=None):
    """
    Create and return a random dataset containing a mix of all column types.

    :param nrow: number of rows
    :param ncol: number of columns
    :param seed: random seed for reproducibility
    :return: H2OFrame with ~10% missing values and real/categorical/integer/binary/time/string columns
    """
    # Bug fix: the original used trailing commas, turning each fraction into a 1-tuple,
    # and never passed the fractions dict to h2o.create_frame at all.
    fractions = dict()
    fractions['real_fraction'] = 0.16
    fractions['categorical_fraction'] = 0.16
    fractions['integer_fraction'] = 0.16
    fractions['binary_fraction'] = 0.16
    fractions['time_fraction'] = 0.16
    fractions['string_fraction'] = 0.2
    return h2o.create_frame(rows=nrow, cols=ncol, missing_fraction=0.1, has_response=False, seed=seed, **fractions)
# generate random dataset of ncolumns of enums only, copied from Pasha
def random_dataset_enums_only(nrow, ncol, factorL=10, misFrac=0.01, randSeed=None):
    """Create and return a random dataset made entirely of categorical (enum) columns."""
    # every non-categorical fraction is zero, so all generated columns are enums
    fractions = {
        "real_fraction": 0,
        "categorical_fraction": 1,
        "integer_fraction": 0,
        "time_fraction": 0,
        "string_fraction": 0,
        "binary_fraction": 0,
    }
    return h2o.create_frame(rows=nrow, cols=ncol, missing_fraction=misFrac, has_response=False, factors=factorL,
                            seed=randSeed, **fractions)
# generate random dataset of ncolumns of integers only, copied from Pasha
def random_dataset_int_only(nrow, ncol, rangeR=10, misFrac=0.01, randSeed=None):
    """Create and return a random dataset made entirely of integer columns."""
    # every non-integer fraction is zero, so all generated columns are integers
    fractions = {
        "real_fraction": 0,
        "categorical_fraction": 0,
        "integer_fraction": 1,
        "time_fraction": 0,
        "string_fraction": 0,
        "binary_fraction": 0,
    }
    return h2o.create_frame(rows=nrow, cols=ncol, missing_fraction=misFrac, has_response=False, integer_range=rangeR,
                            seed=randSeed, **fractions)
# generate random dataset of ncolumns of integer and reals, copied from Pasha
def random_dataset_numeric_only(nrow, ncol, integerR=100, misFrac=0.01, randSeed=None):
    """Create and return a random dataset mixing integer (75%) and real (25%) columns."""
    # only the real and integer fractions are non-zero
    fractions = {
        "real_fraction": 0.25,
        "categorical_fraction": 0,
        "integer_fraction": 0.75,
        "time_fraction": 0,
        "string_fraction": 0,
        "binary_fraction": 0,
    }
    return h2o.create_frame(rows=nrow, cols=ncol, missing_fraction=misFrac, has_response=False, integer_range=integerR,
                            seed=randSeed, **fractions)
# generate random dataset of ncolumns of real columns only, copied from Pasha
def random_dataset_real_only(nrow, ncol, realR=100, misFrac=0.01, randSeed=None):
    """
    Create and return a random dataset made entirely of real-valued columns.

    :param nrow: number of rows
    :param ncol: number of columns
    :param realR: range bound for the generated real values
    :param misFrac: missing fraction
    :param randSeed: random seed
    :return: H2OFrame of real columns only
    """
    fractions = dict()
    fractions["real_fraction"] = 1  # only real columns are generated
    fractions["categorical_fraction"] = 0
    fractions["integer_fraction"] = 0
    fractions["time_fraction"] = 0
    fractions["string_fraction"] = 0  # Right now we are dropping string columns, so no point in having them.
    fractions["binary_fraction"] = 0
    # Bug fix: realR was previously passed as integer_range, which has no effect when
    # integer_fraction is 0; real_range is the h2o.create_frame parameter bounding reals.
    df = h2o.create_frame(rows=nrow, cols=ncol, missing_fraction=misFrac, has_response=False, real_range=realR,
                          seed=randSeed, **fractions)
    return df
def getMojoName(modelID):
    """Sanitize a model id for use as a MOJO file name by replacing special characters with '_'."""
    return re.sub("[+\\-* !@#$%^&()={}\\[\\]|;:'\"<>,.?/]", "_", modelID)
def convertH2OFrameToDMatrix(h2oFrame, yresp, enumCols=[]):
    """
    Convert an H2OFrame to a (dense) DMatrix usable by native XGBoost.  The H2OFrame can
    contain numerical and enum columns. Note that H2O one-hot-encoding introduces a
    missing(NA) column. There can be NAs in any columns.

    :param h2oFrame: H2OFrame to be converted to DMatrix
    :param yresp: string denoting the response column name
    :param enumCols: list of enum column names in the H2OFrame
    :return: DMatrix
    """
    import xgboost as xgb
    # pandas[0] is the predictor matrix, pandas[1] is the label column
    pandas = __convertH2OFrameToPandas__(h2oFrame, yresp, enumCols);
    return xgb.DMatrix(data=pandas[0], label=pandas[1])
def convertH2OFrameToDMatrixSparse(h2oFrame, yresp, enumCols=[]):
    """
    Convert an H2OFrame to a sparse (CSR-backed) DMatrix usable by native XGBoost.  The
    H2OFrame can contain numerical and enum columns. Note that H2O one-hot-encoding
    introduces a missing(NA) column. There can be NAs in any columns.

    :param h2oFrame: H2OFrame to be converted to DMatrix
    :param yresp: string denoting the response column name
    :param enumCols: list of enum column names in the H2OFrame
    :return: DMatrix
    """
    import xgboost as xgb
    # pandas[0] is the predictor matrix, pandas[1] is the label column
    pandas = __convertH2OFrameToPandas__(h2oFrame, yresp, enumCols);
    return xgb.DMatrix(data=csr_matrix(pandas[0]), label=pandas[1])
def __convertH2OFrameToPandas__(h2oFrame, yresp, enumCols=[]):
    """
    Convert an H2OFrame into (data, label) numpy matrices suitable for building a DMatrix.
    Enum columns are one-hot encoded (with an extra missing(NA) column each, matching
    H2O's encoding) and moved to the front; the response column is made numeric.

    :param h2oFrame: H2OFrame to be converted
    :param yresp: string denoting the response column name
    :param enumCols: list of enum column names in the H2OFrame
    :return: tuple (predictor matrix, label matrix)
    """
    import xgboost as xgb
    pandaFtrain = h2oFrame.as_data_frame(use_pandas=True, header=True)
    nrows = h2oFrame.nrow
    if len(enumCols) > 0:  # start with first enum column
        # one-hot encode each enum column and drop the original; encoded columns accumulate
        # in pandaTrainPart, which is finally prepended to the remaining numeric columns
        pandaTrainPart = generatePandaEnumCols(pandaFtrain, enumCols[0], nrows)
        pandaFtrain.drop([enumCols[0]], axis=1, inplace=True)
        for colInd in range(1, len(enumCols)):
            cname=enumCols[colInd]
            ctemp = generatePandaEnumCols(pandaFtrain, cname, nrows)
            pandaTrainPart=pd.concat([pandaTrainPart, ctemp], axis=1)
            pandaFtrain.drop([cname], axis=1, inplace=True)
        pandaFtrain = pd.concat([pandaTrainPart, pandaFtrain], axis=1)
    # pull out the response as a numeric column and re-attach it at the front under its
    # original name
    c0= h2oFrame[yresp].asnumeric().as_data_frame(use_pandas=True, header=True)
    pandaFtrain.drop([yresp], axis=1, inplace=True)
    pandaF = pd.concat([c0, pandaFtrain], axis=1)
    pandaF.rename(columns={c0.columns[0]:yresp}, inplace=True)
    newX = list(pandaFtrain.columns.values)
    # NOTE(review): DataFrame.as_matrix was removed in pandas 1.0; .to_numpy() is the
    # modern equivalent -- confirm the pinned pandas version before upgrading.
    data = pandaF.as_matrix(newX)
    label = pandaF.as_matrix([yresp])
    return (data,label)
def generatePandaEnumCols(pandaFtrain, cname, nrows):
    """
    For a H2O Enum column, perform one-hot-encoding and add one more column
    "<cname>.missing(NA)" flagging rows where the value is NaN.

    :param pandaFtrain: pandas DataFrame containing column cname
    :param cname: name of the enum column to encode
    :param nrows: number of rows in pandaFtrain
    :return: pandas DataFrame of the dummy columns followed by the missing(NA) column
    """
    cmissingNames=[cname+".missing(NA)"]
    # Bug fix: np.int was deprecated in NumPy 1.20 and removed in 1.24; plain int is the
    # documented replacement.
    tempnp = np.zeros((nrows,1), dtype=int)
    # check for nan and assign it correct value
    colVals = pandaFtrain[cname]
    for ind in range(nrows):
        try:
            float(colVals[ind])
            if math.isnan(colVals[ind]):
                tempnp[ind]=1
        except ValueError:
            pass  # non-numeric level strings cannot be NaN
    zeroFrame = pd.DataFrame(tempnp)
    zeroFrame.columns=cmissingNames
    temp = pd.get_dummies(pandaFtrain[cname], prefix=cname, drop_first=False)
    tempNames = list(temp)  # get column names
    colLength = len(tempNames)
    newNames = ['a']*colLength
    newIndics = [0]*colLength
    if "." in tempNames[0]:
        # H2O enum levels look like "<cname>.l<k>"; reorder the dummy columns by the
        # numeric level index k so the encoding matches H2O's column order
        header = tempNames[0].split('.')[0]
        for ind in range(colLength):
            newIndics[ind] = int(tempNames[ind].split('.')[1][1:])
        newIndics.sort()
        for ind in range(colLength):
            newNames[ind] = header+'.l'+str(newIndics[ind])  # generate correct order of names
        ftemp = temp[newNames]
    else:
        ftemp = temp
    ctemp = pd.concat([ftemp, zeroFrame], axis=1)
    return ctemp
def summarizeResult_binomial(h2oPredictD, nativePred, h2oTrainTimeD, nativeTrainTime, h2oPredictTimeD,
                             nativeScoreTime, tolerance=1e-6):
    '''
    Summarize and compare H2OXGBoost and native XGBoost results for binomial classifiers:
    print both systems' train/score times and assert the per-row prediction probabilities
    (third H2O prediction column vs. the native probability) agree within tolerance.

    :param h2oPredictD: H2O prediction frame
    :param nativePred: native XGBoost predicted probabilities, one per row
    :param h2oTrainTimeD: H2O train time in milliseconds (divided by 1000 when printed)
    :param nativeTrainTime: native XGBoost train time (printed as seconds)
    :param h2oPredictTimeD: H2O scoring time (printed as seconds)
    :param nativeScoreTime: native XGBoost scoring time (printed as seconds)
    :param tolerance: maximum allowed absolute probability difference
    :return: None
    '''
    # Result comparison in terms of time
    print("H2OXGBoost train time is {0}s. Native XGBoost train time is {1}s.\n H2OXGBoost scoring time is {2}s."
          " Native XGBoost scoring time is {3}s.".format(h2oTrainTimeD/1000.0, nativeTrainTime,
                                                         h2oPredictTimeD, nativeScoreTime))
    # Result comparison in terms of actual prediction value between the two
    colnames = h2oPredictD.names
    h2oPredictD['predict'] = h2oPredictD['predict'].asnumeric()
    h2oPredictLocalD = h2oPredictD.as_data_frame(use_pandas=True, header=True)
    # compare prediction probability and they should agree if they use the same seed
    for ind in range(h2oPredictD.nrow):
        assert abs(h2oPredictLocalD[colnames[2]][ind]-nativePred[ind])<tolerance, "H2O prediction prob: {0} and native " \
                                                                        "XGBoost prediction prob: {1}.  They are " \
                                                                        "very different.".format(h2oPredictLocalD[colnames[2]][ind], nativePred[ind])
def summarize_metrics_binomial(h2o_metrics, xgboost_metrics, names, tolerance=1e-4):
    """
    Print each pair of H2O / native-XGBoost binomial metrics and assert that every pair
    agrees within tolerance.

    :param h2o_metrics: list of metric values from H2O
    :param xgboost_metrics: list of metric values from native XGBoost, parallel to h2o_metrics
    :param names: metric names, parallel to the metric lists
    :param tolerance: maximum allowed absolute difference per metric
    """
    for ind in range(len(h2o_metrics)):
        h2o_val = h2o_metrics[ind]
        xgb_val = xgboost_metrics[ind]
        delta = abs(h2o_val - xgb_val)
        print("H2O {0} metric: {1} and native " \
              "XGBoost {0} metric: {2}. " \
              "Difference is {3}".format(names[ind], h2o_val, xgb_val, delta))
        assert delta < tolerance, "H2O {0} metric: {1} and native " \
                                  "XGBoost {0} metric: {2}. They are " \
                                  "very different.".format(names[ind], h2o_val, xgb_val)
def summarizeResult_multinomial(h2oPredictD, nativePred, h2oTrainTimeD, nativeTrainTime, h2oPredictTimeD,
                                nativeScoreTime, tolerance=1e-6):
    '''
    Summarize and compare H2OXGBoost and native XGBoost results for multinomial
    classifiers: print both systems' train/score times and assert all per-class
    probabilities agree per row within tolerance.

    :param h2oPredictD: H2O prediction frame (class-probability columns start at index 1)
    :param nativePred: native XGBoost per-row lists of class probabilities
    :param h2oTrainTimeD: H2O train time in milliseconds (divided by 1000 when printed)
    :param nativeTrainTime: native XGBoost train time (printed as seconds)
    :param h2oPredictTimeD: H2O scoring time (printed as seconds)
    :param nativeScoreTime: native XGBoost scoring time (printed as seconds)
    :param tolerance: maximum allowed absolute probability difference
    :return: None
    '''
    # Result comparison in terms of time
    print("H2OXGBoost train time is {0}s. Native XGBoost train time is {1}s.\n H2OGBoost scoring time is {2}s."
          " Native XGBoost scoring time is {3}s.".format(h2oTrainTimeD/1000.0, nativeTrainTime,
                                                         h2oPredictTimeD, nativeScoreTime))
    # Result comparison in terms of actual prediction value between the two
    h2oPredictD['predict'] = h2oPredictD['predict'].asnumeric()
    h2oPredictLocalD = h2oPredictD.as_data_frame(use_pandas=True, header=True)
    nclass = len(nativePred[0])  # number of classes, from the native prediction width
    colnames = h2oPredictD.names
    # compare prediction probability and they should agree if they use the same seed
    for ind in range(h2oPredictD.nrow):
        for col in range(nclass):
            assert abs(h2oPredictLocalD[colnames[col+1]][ind]-nativePred[ind][col])<tolerance, \
                "H2O prediction prob: {0} and native XGBoost prediction prob: {1}.  They are very " \
                "different.".format(h2oPredictLocalD[colnames[col+1]][ind], nativePred[ind][col])
def genTrainFrame(nrow, ncol, enumCols=0, enumFactors=2, responseLevel=2, miscfrac=0, randseed=None):
    """
    Generate a random training frame with an enum response column named 'response',
    combining optional numeric columns (ncol) and enum columns (enumCols).

    :param nrow: number of rows
    :param ncol: number of numeric predictor columns (0 for enum-only frames)
    :param enumCols: number of enum predictor columns (0 for numeric-only frames)
    :param enumFactors: number of factor levels in each enum predictor column
    :param responseLevel: number of factor levels in the response column
    :param miscfrac: missing fraction for the predictor columns
    :param randseed: random seed
    :return: H2OFrame of predictors cbound with the 'response' column
    NOTE(review): at least one of ncol/enumCols must be > 0, otherwise trainFrameNumerics
    is referenced before assignment.
    """
    if ncol>0:
        trainFrameNumerics = random_dataset_numeric_only(nrow, ncol, integerR = 1000000, misFrac=miscfrac, randSeed=randseed)
    if enumCols > 0:
        trainFrameEnums = random_dataset_enums_only(nrow, enumCols, factorL=enumFactors, misFrac=miscfrac, randSeed=randseed)
    # response column is generated without missing values
    yresponse = random_dataset_enums_only(nrow, 1, factorL=responseLevel, misFrac=0, randSeed=randseed)
    yresponse.set_name(0,'response')
    if enumCols > 0:
        if ncol > 0:    # mixed datasets
            trainFrame = trainFrameEnums.cbind(trainFrameNumerics.cbind(yresponse))
        else:   # contains enum datasets
            trainFrame = trainFrameEnums.cbind(yresponse)
    else:   # contains numerical datasets
        trainFrame = trainFrameNumerics.cbind(yresponse)
    return trainFrame
def check_xgb_var_imp(h2o_train, h2o_model, xgb_train, xgb_model, tolerance=1e-6):
    """
    Compare H2O XGBoost variable importances against native XGBoost gain importances.
    The H2O importance is divided by the feature's split frequency to obtain average
    gain per split, matching native XGBoost's importance_type="gain".

    :param h2o_train: H2O training frame (used to map H2O names to native feature names)
    :param h2o_model: trained H2O XGBoost model
    :param xgb_train: native DMatrix (supplies feature_names in the same column order)
    :param xgb_model: trained native XGBoost booster
    :param tolerance: maximum allowed relative difference per feature
    """
    column_map = dict(zip(h2o_train.names, xgb_train.feature_names))
    h2o_var_imps = h2o_model.varimp()
    h2o_var_frequencies = h2o_model._model_json["output"]["variable_importances_frequency"].cell_values
    freq_map = dict(map(lambda t: (t[0], t[1]), h2o_var_frequencies))  # feature -> split frequency
    # XGBoost reports average gain of a split
    xgb_var_imps = xgb_model.get_score(importance_type="gain")
    for h2o_var_imp in h2o_var_imps:
        frequency = freq_map[h2o_var_imp[0]]
        xgb_var_imp = xgb_var_imps[column_map[h2o_var_imp[0]]]
        abs_diff = abs(h2o_var_imp[1]/frequency - xgb_var_imp)
        # relative comparison with a denominator floor of 1
        norm = max(1, abs(h2o_var_imp[1]/frequency), abs(xgb_var_imp))
        assert abs_diff/norm < tolerance, "Variable importance of feature {0} is different. H2O: {1}, XGB {2}"\
            .format(h2o_var_imp[0], h2o_var_imp[1], xgb_var_imp)
def summarizeResult_regression(h2oPredictD, nativePred, h2oTrainTimeD, nativeTrainTime, h2oPredictTimeD, nativeScoreTime, tolerance=1e-6):
    '''
    Summarize and compare H2OXGBoost and native XGBoost results for regression models:
    print both systems' train/score times and assert the per-row predictions agree
    within a relative tolerance.

    :param h2oPredictD: H2O prediction frame (with a 'predict' column)
    :param nativePred: native XGBoost predictions, one per row
    :param h2oTrainTimeD: H2O train time (printed as milliseconds)
    :param nativeTrainTime: native XGBoost train time (printed as seconds)
    :param h2oPredictTimeD: H2O scoring time (printed as seconds)
    :param nativeScoreTime: native XGBoost scoring time (printed as seconds)
    :param tolerance: maximum allowed relative prediction difference
    :return: None
    '''
    # Result comparison in terms of time
    print("H2OXGBoost train time is {0}ms. Native XGBoost train time is {1}s.\n H2OGBoost scoring time is {2}s."
          " Native XGBoost scoring time is {3}s.".format(h2oTrainTimeD, nativeTrainTime,
                                                         h2oPredictTimeD, nativeScoreTime))
    # Result comparison in terms of actual prediction value between the two
    h2oPredictD['predict'] = h2oPredictD['predict'].asnumeric()
    h2oPredictLocalD = h2oPredictD.as_data_frame(use_pandas=True, header=True)
    # compare prediction probability and they should agree if they use the same seed
    for ind in range(h2oPredictD.nrow):
        # relative difference, with a floor of 1 on the denominator
        assert abs((h2oPredictLocalD['predict'][ind]-nativePred[ind])/max(1, abs(h2oPredictLocalD['predict'][ind]), abs(nativePred[ind])))<tolerance, \
            "H2O prediction: {0} and native XGBoost prediction: {1}.  They are very " \
            "different.".format(h2oPredictLocalD['predict'][ind], nativePred[ind])
def summarizeResult_binomial_DS(h2oPredictD, nativePred, h2oTrainTimeD, nativeTrainTime, h2oPredictTimeD,
                                nativeScoreTime, h2oPredictS, tolerance=1e-6):
    """
    Print timings and assert that for each row the native XGBoost class-1 probability matches
    either the dense-trained (h2oPredictD) or the sparse-trained (h2oPredictS) H2O prediction.

    :param h2oPredictD: H2OFrame of predictions from the dense H2O model ('c0.l1' = P(class 1))
    :param nativePred: native XGBoost probabilities, same row order
    :param h2oTrainTimeD: H2O training time in ms (printed as seconds)
    :param nativeTrainTime: native training time (s)
    :param h2oPredictTimeD: H2O scoring time
    :param nativeScoreTime: native scoring time
    :param h2oPredictS: H2OFrame of predictions from the sparse H2O model
    :param tolerance: absolute tolerance per row
    """
    # Result comparison in terms of time
    print("H2OXGBoost train time with sparse DMatrix is {0}s. Native XGBoost train time with dense DMtraix is {1}s.\n H2OGBoost scoring time is {2}s."
          " Native XGBoost scoring time with dense DMatrix is {3}s.".format(h2oTrainTimeD/1000.0, nativeTrainTime,
                                                                            h2oPredictTimeD, nativeScoreTime))
    # Result comparison in terms of actual prediction value between the two
    h2oPredictD['predict'] = h2oPredictD['predict'].asnumeric()
    h2oPredictLocalD = h2oPredictD.as_data_frame(use_pandas=True, header=True)
    h2oPredictS['predict'] = h2oPredictS['predict'].asnumeric()
    h2oPredictLocalS = h2oPredictS.as_data_frame(use_pandas=True, header=True)
    # compare prediction probability and they should agree if they use the same seed
    for ind in range(h2oPredictD.nrow):
        # either the dense or the sparse H2O prediction must match the native one
        assert abs(h2oPredictLocalD['c0.l1'][ind]-nativePred[ind])<tolerance or \
               abs(h2oPredictLocalS['c0.l1'][ind]-nativePred[ind])<tolerance, \
            "H2O prediction prob: {0} and native XGBoost prediction prob: {1}. They are very " \
            "different.".format(h2oPredictLocalD['c0.l1'][ind], nativePred[ind])
def compare_weightedStats(model, dataframe, xlist, xname, weightV, pdpTDTable, tol=1e-6):
    '''
    This method is used to test the partial dependency plots and is not meant for any other functions.

    It recomputes the weighted mean/std/std-error statistics manually (manual_partial_dependence)
    and asserts they match the corresponding columns of the PDP 2D table produced by H2O.

    :param model: trained H2O model the PDP was built from
    :param dataframe: H2OFrame the PDP was computed on
    :param xlist: predictor values swept for the PDP
    :param xname: name of the predictor column
    :param weightV: per-row weights, as rows of single-element lists
    :param pdpTDTable: H2OTwoDimTable holding the PDP results
    :param tol: tolerance forwarded to equal_two_arrays
    :return:
    '''
    weightStat = manual_partial_dependence(model, dataframe, xlist, xname, weightV) # calculate theoretical weighted sts
    wMean = extract_col_value_H2OTwoDimTable(pdpTDTable, "mean_response") # stats for age predictor
    wStd = extract_col_value_H2OTwoDimTable(pdpTDTable, "stddev_response")
    wStdErr = extract_col_value_H2OTwoDimTable(pdpTDTable, "std_error_mean_response")
    equal_two_arrays(weightStat[0], wMean, tol, tol, throw_error=True)
    equal_two_arrays(weightStat[1], wStd, tol, tol, throw_error=True)
    equal_two_arrays(weightStat[2], wStdErr, tol, tol, throw_error=True)
def manual_partial_dependence(model, dataframe, xlist, xname, weightV):
    """
    Manually compute weighted partial-dependence statistics of *model* for predictor *xname*:
    for each value in *xlist* the column is replaced with that constant, predictions are made,
    and the weighted mean / std / std-error over all rows are accumulated.

    :param model: trained H2O model
    :param dataframe: H2OFrame of predictors; the xname column is replaced per sweep value
    :param xlist: values to sweep xname over (may include 'NA' or float nan)
    :param xname: predictor column name
    :param weightV: per-row weights, as rows of single-element lists
    :return: tuple (means, stds, std_errors), one entry per value in xlist
    """
    meanV = []
    stdV = []
    stderrV = []
    nRows = dataframe.nrow
    nCols = dataframe.ncol-1
    for xval in xlist:
        cons = [xval]*nRows
        if xname in dataframe.names:
            dataframe=dataframe.drop(xname)
        # for an NA sweep value the column stays dropped so H2O treats it as missing
        if not((is_type(xval, str) and xval=='NA') or (isinstance(xval, float) and math.isnan(xval))):
            dataframe = dataframe.cbind(h2o.H2OFrame(cons))
            dataframe.set_name(nCols, xname)
        pred = model.predict(dataframe).as_data_frame(use_pandas=False, header=False)
        pIndex = len(pred[0])-1  # last prediction column (class-1 probability for classifiers)
        sumEle = 0.0
        sumEleSq = 0.0
        sumWeight = 0.0
        numNonZeroWeightCount = 0.0
        m = 1.0/math.sqrt(dataframe.nrow*1.0)  # std-error factor 1/sqrt(n)
        for rindex in range(len(pred)):
            val = float(pred[rindex][pIndex]);
            weight = float(weightV[rindex][0])
            # only rows with a non-zero weight and a valid prediction contribute
            if (abs(weight) > 0) and isinstance(val, float) and not(math.isnan(val)):
                temp = val*weight
                sumEle = sumEle+temp
                sumEleSq = sumEleSq+temp*val
                sumWeight = sumWeight+weight
                numNonZeroWeightCount = numNonZeroWeightCount+1
        wMean = sumEle/sumWeight
        # Bessel-style correction based on the number of contributing rows
        scale = numNonZeroWeightCount*1.0/(numNonZeroWeightCount-1)
        wSTD = math.sqrt((sumEleSq/sumWeight-wMean*wMean)*scale)
        meanV.append(wMean)
        stdV.append(wSTD)
        stderrV.append(wSTD*m)
    return meanV, stdV, stderrV
def compare_frames_equal_names(frame1, frame2):
    '''
    This method will compare two frames with same column names and column types. The current accepted column
    types are enum, int and string.

    :param frame1: reference H2O frame (its names/types drive the comparison)
    :param frame2: H2O frame expected to match frame1 column by column
    :return: None; the per-column helpers raise on mismatch
    '''
    cnames = frame1.names
    ctypes = frame1.types
    for cind in range(0, frame1.ncol):
        name1 = cnames[cind]
        # renamed from `type` to avoid shadowing the builtin
        col_type = str(ctypes[name1])
        if col_type == "enum":
            compare_frames_local_onecolumn_NA_enum(frame1[name1], frame2[name1], prob=1, tol=0)
        elif col_type == 'string':
            compare_frames_local_onecolumn_NA_string(frame1[name1], frame2[name1], prob=1)
        else:
            # numeric columns compared with a tiny absolute tolerance
            compare_frames_local_onecolumn_NA(frame1[name1], frame2[name1], prob=1, tol=1e-10)
def write_H2OFrame_2_SVMLight(filename, h2oFrame):
    '''
    The function will write a h2oFrame into svmlight format and save it to a file. However, it only supports
    column types of real/integer and nothing else.  Column 0 is treated as the response (an NA response is
    written as an empty string); feature indices in the output are the 1-based column positions, and NA
    features are skipped entirely (sparse format).

    :param filename: path of the output file
    :param h2oFrame: the H2OFrame to export
    :return: None
    '''
    ncol = h2oFrame.ncol
    nrow = h2oFrame.nrow
    # as_data_frame(use_pandas=False) yields a list of string rows with the header at index 0
    fdataframe = h2oFrame.as_data_frame(use_pandas=False)
    # `with` guarantees the handle is closed even if a row fails to serialize
    with open(filename, 'w') as fwriteFile:
        for rowindex in range(1, nrow+1):
            if len(fdataframe[rowindex][0])==0: # special treatment for response column
                writeWords = "" # convert na response to 0.0
            else:
                writeWords = fdataframe[rowindex][0]
            for colindex in range(1, ncol):
                if not(len(fdataframe[rowindex][colindex])==0):
                    writeWords = writeWords + " "+str(colindex) + ":"+fdataframe[rowindex][colindex]
            fwriteFile.write(writeWords)
            fwriteFile.write('\n')
def write_H2OFrame_2_ARFF(filenameWithPath, filename, h2oFrame, uuidVecs, uuidNames):
'''
This function will write a H2OFrame into arff format and save it to a text file in ARFF format.
:param filename:
:param h2oFrame:
:return:
'''
fwriteFile = open(filenameWithPath, 'w')
nrow = h2oFrame.nrow
# write the arff headers here
writeWords = "@RELATION "+filename+'\n\n'
fwriteFile.write(writeWords)
typesDict = h2oFrame.types
colnames = h2oFrame.names
uuidtypes = len(uuidNames)*["UUID"]
for cname in colnames:
writeWords = "@ATTRIBUTE "+cname
if typesDict[cname]==u'int':
writeWords = writeWords + " integer"
elif typesDict[cname]==u'time':
writeWords = writeWords + " date"
else:
writeWords = writeWords + " "+typesDict[cname]
fwriteFile.write(writeWords)
fwriteFile.write('\n')
for cindex in range(len(uuidNames)):
writeWords = "@ATTRIBUTE " +uuidNames[cindex]+" uuid"
fwriteFile.write(writeWords)
fwriteFile.write('\n')
fwriteFile.write("\n@DATA\n")
# write the arff body as csv
fdataframe = h2oFrame.as_data_frame(use_pandas=False)
for rowindex in range(1,nrow+1):
writeWords = ""
for cindex in range(h2oFrame.ncol):
if len(fdataframe[rowindex][cindex])>0:
if typesDict[colnames[cindex]]==u'time':
writeWords = writeWords+\
str(datetime.datetime.fromtimestamp(float(fdataframe[rowindex][cindex])/1000.0))+","
elif typesDict[colnames[cindex]] in [u'enum', u'string']:
writeWords=writeWords+fdataframe[rowindex][cindex]+","
else:
writeWords=writeWords+fdataframe[rowindex][cindex]+","
else:
writeWords = writeWords + ","
# process the uuid ones
for cindex in range(len(uuidVecs)-1):
writeWords=writeWords+str(uuidVecs[cindex][rowindex-1])+","
writeWords=writeWords+str(uuidVecs[-1][rowindex-1])+'\n'
fwriteFile.write(writeWords)
fwriteFile.close()
def checkCorrectSkips(originalFullFrame, csvfile, skipped_columns):
    """
    Verify that uploading and importing *csvfile* with *skipped_columns* both produce the same
    frame, and that that frame equals exactly the non-skipped columns of *originalFullFrame*
    (typed, column-by-column comparison).

    :param originalFullFrame: H2OFrame parsed without any skipped columns
    :param csvfile: path of the csv file to re-parse
    :param skipped_columns: list of 0-based column indices to skip
    """
    skippedFrameUF = h2o.upload_file(csvfile, skipped_columns=skipped_columns)
    skippedFrameIF = h2o.import_file(csvfile, skipped_columns=skipped_columns)  # this two frames should be the same
    compare_frames_local(skippedFrameUF, skippedFrameIF, prob=0.5)
    skipCounter = 0
    typeDict = originalFullFrame.types
    frameNames = originalFullFrame.names
    for cindex in range(len(frameNames)):
        if cindex not in skipped_columns:
            print("Checking column {0}...".format(cindex))
            # dispatch on the original column type; skipCounter tracks the shifted position
            # of the column in the skipped frame
            if typeDict[frameNames[cindex]] == u'enum':
                compare_frames_local_onecolumn_NA_enum(originalFullFrame[cindex],
                                                       skippedFrameIF[skipCounter], prob=1, tol=1e-10,
                                                       returnResult=False)
            elif typeDict[frameNames[cindex]] == u'string':
                compare_frames_local_onecolumn_NA_string(originalFullFrame[cindex],
                                                         skippedFrameIF[skipCounter], prob=1,
                                                         returnResult=False)
            else:
                compare_frames_local_onecolumn_NA(originalFullFrame[cindex], skippedFrameIF[skipCounter],
                                                  prob=1, tol=1e-10, returnResult=False)
            skipCounter = skipCounter + 1
def checkCorrectSkipsFolder(originalFullFrame, csvfile, skipped_columns):
    """
    Folder variant of checkCorrectSkips: import *csvfile* (a file or folder) with
    *skipped_columns* and verify the result equals the non-skipped columns of
    *originalFullFrame* (typed, column-by-column comparison).

    :param originalFullFrame: H2OFrame parsed without any skipped columns
    :param csvfile: path of the csv file/folder to re-parse
    :param skipped_columns: list of 0-based column indices to skip
    """
    skippedFrameIF = h2o.import_file(csvfile, skipped_columns=skipped_columns)  # this two frames should be the same
    skipCounter = 0
    typeDict = originalFullFrame.types
    frameNames = originalFullFrame.names
    for cindex in range(len(frameNames)):
        if cindex not in skipped_columns:
            print("Checking column {0}...".format(cindex))
            # dispatch on the original column type; skipCounter tracks the shifted position
            # of the column in the skipped frame
            if typeDict[frameNames[cindex]] == u'enum':
                compare_frames_local_onecolumn_NA_enum(originalFullFrame[cindex],
                                                       skippedFrameIF[skipCounter], prob=1, tol=1e-10,
                                                       returnResult=False)
            elif typeDict[frameNames[cindex]] == u'string':
                compare_frames_local_onecolumn_NA_string(originalFullFrame[cindex],
                                                         skippedFrameIF[skipCounter], prob=1,
                                                         returnResult=False)
            else:
                compare_frames_local_onecolumn_NA(originalFullFrame[cindex], skippedFrameIF[skipCounter],
                                                  prob=1, tol=1e-10, returnResult=False)
            skipCounter = skipCounter + 1
def assertModelColNamesTypesCorrect(modelNames, modelTypes, frameNames, frameTypesDict):
    """
    Assert that a model reports the same column names and compatible column types as its
    training frame.

    :param modelNames: column names from the model output
    :param modelTypes: column types from the model output, parallel to modelNames
    :param frameNames: column names of the training frame
    :param frameTypesDict: dict mapping frame column name -> frame column type
    :return: None; raises AssertionError on any mismatch
    """
    # list.sort() returns None, so the original `fName.sort() == mName.sort()` always compared
    # None == None and the name check never fired; compare sorted copies instead.
    assert sorted(frameNames) == sorted(modelNames), "Expected column names {0}, actual column names {1} and they" \
                                                     " are different".format(frameNames, modelNames)
    for ind in range(len(frameNames)):
        # "numeric" on the model side covers both 'real' and 'int' frame types
        if modelTypes[modelNames.index(frameNames[ind])].lower()=="numeric":
            assert (frameTypesDict[frameNames[ind]].lower()=='real') or \
                   (frameTypesDict[frameNames[ind]].lower()=='int'), \
                "Expected training data types for column {0} is {1}. Actual training data types for column {2} from " \
                "model output is {3}".format(frameNames[ind], frameTypesDict[frameNames[ind]],
                                             frameNames[ind], modelTypes[modelNames.index(frameNames[ind])])
        else:
            assert modelTypes[modelNames.index(frameNames[ind])].lower()==frameTypesDict[frameNames[ind]].lower(), \
                "Expected training data types for column {0} is {1}. Actual training data types for column {2} from " \
                "model output is {3}".format(frameNames[ind], frameTypesDict[frameNames[ind]],
                                             frameNames[ind], modelTypes[modelNames.index(frameNames[ind])])
def saveModelMojo(model):
    '''
    Given a H2O model, this function will save it in a directory off the results directory. In addition, it will
    return the absolute path of where the mojo file is.

    :param model: trained H2O model exposing _id and download_mojo()
    :return: path of the directory the mojo was written into
    '''
    # save model: sanitize the model id so it is safe as a directory name
    regex = re.compile("[+\\-* !@#$%^&()={}\\[\\]|;:'\"<>,.?/]")
    MOJONAME = regex.sub("_", model._id)
    print("Downloading Java prediction model code from H2O")
    # NOTE(review): realpath('__file__') resolves the *literal string* '__file__' against the
    # current working directory (it is not the module's __file__ variable) -- kept as-is.
    tmpdir = os.path.normpath(os.path.join(os.path.dirname(os.path.realpath('__file__')), "..", "results", MOJONAME))
    if not os.path.exists(tmpdir):  # don't crash when the directory survives from a previous run
        os.makedirs(tmpdir)
    model.download_mojo(path=tmpdir)  # save mojo
    return tmpdir
# This file will contain functions used by GLM test only.
def assertEqualRegPaths(keys, pathList, index, onePath, tol=1e-6):
    """
    Assert that entry *index* of each regularization-path array in *pathList* matches the
    single-element path *onePath*, key by key.

    :param keys: iterable of keys to compare (e.g. "alphas", "lambdas", ...)
    :param pathList: dict of full regularization-path arrays (entries may be None)
    :param index: submodel index to compare within pathList's arrays
    :param onePath: dict of single-element arrays from an individually trained model
    :param tol: absolute tolerance
    :return: None; raises AssertionError on mismatch
    """
    for oneKey in keys:
        # `is not None` instead of `!= None`: identity test, independent of __eq__ overloads
        if pathList[oneKey] is not None:
            assert abs(pathList[oneKey][index]-onePath[oneKey][0]) < tol, \
                "Expected value: {0}, Actual: {1}".format(pathList[oneKey][index], onePath[oneKey][0])
def assertEqualCoeffDicts(coef1Dict, coef2Dict, tol = 1e-6):
    """
    Assert two coefficient dictionaries agree key by key within *tol*; NaN and infinite
    values only need to match in kind.

    :param coef1Dict: first {coefficient name: value} dict (drives the iteration)
    :param coef2Dict: second {coefficient name: value} dict
    :param tol: absolute tolerance for finite values
    :return: None; raises AssertionError on mismatch
    """
    # original called len(coef1Dict, len(coef2Dict)) inside format(), which raised a TypeError
    # instead of the intended AssertionError message when the lengths differed
    assert len(coef1Dict) == len(coef2Dict), "Length of first coefficient dict: {0}, length of second coefficient " \
                                             "dict: {1} and they are different.".format(len(coef1Dict), len(coef2Dict))
    for key in coef1Dict:
        val1 = coef1Dict[key]
        val2 = coef2Dict[key]
        if (math.isnan(val1)):
            assert math.isnan(val2), "Coefficient for {0} from first dict: {1}, from second dict: {2} are different." \
                                     "".format(key, coef1Dict[key], coef2Dict[key])
        elif (math.isinf(val1)):
            assert math.isinf(val2), "Coefficient for {0} from first dict: {1}, from second dict: {2} are different." \
                                     "".format(key, coef1Dict[key], coef2Dict[key])
        else:
            assert abs(coef1Dict[key] - coef2Dict[key]) < tol, "Coefficient for {0} from first dict: {1}, from second" \
                                                               " dict: {2} and they are different.".format(key,
                                                                                                           coef1Dict[
                                                                                                               key],
                                                                                                           coef2Dict[
                                                                                                               key])
def assertEqualModelMetrics(metrics1, metrics2, tol = 1e-6,
                            keySet=["MSE", "AUC", "Gini", "null_deviance", "logloss", "RMSE",
                                    "pr_auc", "r2"]):
    """
    Assert two model-metric objects have the same type name and that every floating-point
    metric in *keySet* agrees up to a relative tolerance.

    NOTE(review): the shared mutable default keySet is kept for interface compatibility;
    it is only read, never mutated, so it is safe here.

    :param metrics1: first H2O metrics object (exposes _metric_json)
    :param metrics2: second H2O metrics object
    :param tol: relative tolerance
    :param keySet: metric names to compare
    :return: None; raises AssertionError on mismatch
    """
    # 1. Check model types -- compare with == (not `is`): equal strings need not be the
    # same object, so identity comparison could spuriously fail
    model1_type = metrics1.__class__.__name__
    model2_type = metrics2.__class__.__name__
    assert model1_type == model2_type, "The model types differ. The first model metric is of type {0} and the second " \
                                       "model metric is of type {1}.".format(model1_type, model2_type)
    metricDict1 = metrics1._metric_json
    metricDict2 = metrics2._metric_json
    for key in keySet:
        if key in metricDict1.keys() and (isinstance(metricDict1[key], float)): # only compare floating point metrics
            assert abs(metricDict1[key]-metricDict2[key])/max(1,max(metricDict1[key],metricDict2[key])) < tol, \
                "ModelMetric {0} from model 1, {1} from model 2 are different.".format(metricDict1[key],metricDict2[key])
# When an array of alpha and/or lambdas are given, a list of submodels are also built. For each submodel built, only
# the coefficients, lambda/alpha/deviance values are returned. The model metrics is calculated from the submodel
# with the best deviance.
#
# In this test, in addition, we build separate models using just one lambda and one alpha values as when building one
# submodel. In theory, the coefficients obtained from the separate models should equal to the submodels. We check
# and compare the followings:
# 1. coefficients from submodels and individual model should match when they are using the same alpha/lambda value;
# 2. training metrics from alpha array should equal to the individual model matching the alpha/lambda value;
def compareSubmodelsNindividualModels(modelWithArray, trainingData, xarray, yindex):
    """
    For every submodel on the regularization path of *modelWithArray*, train a standalone
    binomial GLM with the same alpha/lambda and assert that its coefficients and path entries
    match; the best submodel must additionally match the full model's coefficients and training
    metrics, and no individual model may beat the best submodel's residual deviance.

    :param modelWithArray: GLM trained with arrays of alpha and/or lambda values
    :param trainingData: H2OFrame used for training
    :param xarray: predictor columns
    :param yindex: response column
    """
    best_submodel_index = modelWithArray._model_json["output"]["best_submodel_index"]
    r = H2OGeneralizedLinearEstimator.getGLMRegularizationPath(modelWithArray)  # contains all lambda/alpha values of submodels trained.
    submodel_num = len(r["lambdas"])
    regKeys = ["alphas", "lambdas", "explained_deviance_valid", "explained_deviance_train"]
    for submodIndx in range(submodel_num):  # manually build glm model and compare to those built before
        modelGLM = H2OGeneralizedLinearEstimator(family='binomial', alpha=[r["alphas"][submodIndx]], Lambda=[r["lambdas"][submodIndx]])
        modelGLM.train(training_frame=trainingData, x=xarray, y=yindex)
        # check coefficients between submodels and model trained with same parameters
        assertEqualCoeffDicts(r["coefficients"][submodIndx], modelGLM.coef())
        modelGLMr = H2OGeneralizedLinearEstimator.getGLMRegularizationPath(modelGLM)  # contains one item only
        assertEqualRegPaths(regKeys, r, submodIndx, modelGLMr)
        if (best_submodel_index == submodIndx):  # check training metrics of modelGLM should equal that of m since it is the best subModel
            assertEqualModelMetrics(modelWithArray._model_json["output"]["training_metrics"],
                                    modelGLM._model_json["output"]["training_metrics"])
            assertEqualCoeffDicts(modelWithArray.coef(), modelGLM.coef())  # model coefficient should come from best submodel
        else:  # check and make sure best_submodel_index has lowest deviance
            assert modelGLM.residual_deviance() - modelWithArray.residual_deviance() >= 0, \
                "Individual model has better residual_deviance than best submodel!"
def extractNextCoeff(cs_norm, orderedCoeffNames, startVal):
    """
    Overwrite each slot of *startVal* in place with the coefficient looked up by the
    corresponding name in *orderedCoeffNames*, and return the mutated list.

    :param cs_norm: dict of coefficient name -> value
    :param orderedCoeffNames: coefficient names, positionally aligned with startVal
    :param startVal: list to fill; mutated in place
    :return: startVal (same list object)
    """
    slot = 0
    while slot < len(startVal):
        startVal[slot] = cs_norm[orderedCoeffNames[slot]]
        slot += 1
    return startVal
def assertEqualScoringHistoryIteration(model_long, model_short, col_list_compare, tolerance=1e-6):
    """
    Align two scoring histories on their 'iterations' column and assert that every column in
    *col_list_compare* agrees (within tolerance) for iterations present in both histories.
    The last row of the short history is skipped because scoring is performed at different spots.

    :param model_long: model with the longer scoring history
    :param model_short: model with the shorter scoring history
    :param col_list_compare: column headers to compare at matching iterations
    :param tolerance: absolute tolerance per compared cell
    """
    scoring_history_long = model_long._model_json["output"]["scoring_history"]
    scoring_history_short = model_short._model_json["output"]["scoring_history"]
    cv_4th_len = len(scoring_history_short.cell_values) - 1  # ignore last iteration, scoring is performed at different spots
    cv_len = len(scoring_history_long.cell_values)
    col_2D = scoring_history_short.col_header
    iterInd = col_2D.index('iterations')
    count = 0
    for index in range(cv_4th_len):
        iterInd4th = scoring_history_short.cell_values[index][iterInd]
        iterIndlong = scoring_history_long.cell_values[count][iterInd]
        # advance the long history until its iteration catches up with the short one
        while not(iterInd4th == None) and (iterInd4th > iterIndlong):
            count = count+1
            if count >= cv_len:
                break
            iterIndlong = scoring_history_long.cell_values[count][iterInd]
        # only compare rows whose iteration numbers line up exactly
        if not(iterInd4th == None) and not(iterInd4th == '') and (iterInd4th == iterIndlong):
            for col_header in col_list_compare:
                ind = col_2D.index(col_header)
                val_short = scoring_history_short.cell_values[index][ind]
                val_long = scoring_history_long.cell_values[count][ind]
                # skip cells that are blank or NaN on either side
                if not(val_short == '' or math.isnan(val_short) or val_long == '' or math.isnan(val_long)):
                    assert abs(scoring_history_short.cell_values[index][ind]-
                               scoring_history_long.cell_values[count][ind]) < tolerance, \
                        "{0} expected: {1}, actual: {2}".format(col_header, scoring_history_short.cell_values[index][ind],
                                                                scoring_history_long.cell_values[count][ind])
            count = count+1
def assertCoefEqual(regCoeff, coeff, coeffClassSet, tol=1e-6):
    """
    Compare flat multinomial coefficients ("name_classIndex" keys) against the nested
    per-class coefficient dict and assert each pair matches in type and value.

    :param regCoeff: flat dict mapping "<coef>_<classIndex>" -> value
    :param coeff: nested dict mapping class name -> {coef name: value}
    :param coeffClassSet: list translating a class index to its class name
    :param tol: absolute tolerance
    :return: None; raises AssertionError on mismatch
    """
    for flat_name, flat_val in regCoeff.items():
        parts = flat_name.split('_')
        # parts[0] is the coefficient name, parts[1] the class index
        nested_val = coeff[coeffClassSet[int(parts[1])]][parts[0]]
        assert type(flat_val)==type(nested_val), "type of coeff1: {0}, type of coeff2: {1}".format(type(flat_val), type(nested_val))
        gap = abs(flat_val-nested_val)
        print("val1: {0}, val2: {1}, tol: {2}".format(flat_val, nested_val, tol))
        assert gap < tol, "diff {0} exceeds tolerance {1}.".format(gap, tol)
def assertCoefDictEqual(regCoeff, coeff, tol=1e-6):
    """
    Assert two coefficient dicts agree for every key of *regCoeff*, both in value type and
    in value (within tol).

    :param regCoeff: reference {coefficient name: value} dict (drives the iteration)
    :param coeff: dict expected to hold matching values for the same keys
    :param tol: absolute tolerance
    :return: None; raises AssertionError on mismatch
    """
    for name in regCoeff:
        lhs, rhs = regCoeff[name], coeff[name]
        assert type(lhs)==type(rhs), "type of coeff1: {0}, type of coeff2: {1}".format(type(lhs), type(rhs))
        gap = abs(lhs-rhs)
        assert gap < tol, "diff {0} exceeds tolerance {1}.".format(gap, tol)
def assert_equals(expected, actual, message=""):
    """
    Raise AssertionError (with an optional leading message) unless *expected* == *actual*.

    :param expected: expected value
    :param actual: observed value
    :param message: optional context prepended to the failure message
    """
    if expected != actual:
        raise AssertionError("{0}\nexpected:{1}\nactual\t:{2}".format(message, expected, actual))
| 51.900986 | 194 | 0.659487 |
fd2736302d493bfef3216488385e0129dc44b446 | 3,669 | py | Python | kuwala/pipelines/google-poi/src/pipeline/search_string_generator.py | arifluthfi16/kuwala | 12fda181195acd06d369e05a976e7c9b917af0fd | [
"Apache-2.0"
] | null | null | null | kuwala/pipelines/google-poi/src/pipeline/search_string_generator.py | arifluthfi16/kuwala | 12fda181195acd06d369e05a976e7c9b917af0fd | [
"Apache-2.0"
] | 4 | 2021-11-30T15:44:58.000Z | 2021-12-20T10:40:46.000Z | kuwala/pipelines/google-poi/src/pipeline/search_string_generator.py | arifluthfi16/kuwala | 12fda181195acd06d369e05a976e7c9b917af0fd | [
"Apache-2.0"
] | 1 | 2022-02-09T09:18:45.000Z | 2022-02-09T09:18:45.000Z | import json
import moment
import os
from geojson import Polygon
from python_utils.src.h3_utils import polyfill_polygon
from pyspark.sql import SparkSession
from pyspark.sql.functions import array, array_contains, col, concat_ws, lit, udf
from pyspark.sql.types import StringType
from python_utils.src.spark_udfs import h3_to_parent
def generate_search_strings(continent, country, country_region, polygon_coords=None, polygon_res=None,
                            limit=None):
    """
    Build Google search strings (POI name plus address parts) for OSM POIs of a region and
    write them to a timestamped Parquet file under tmp/kuwala/google_files.

    :param continent: continent directory of the preprocessed OSM parquet files
    :param country: country directory
    :param country_region: optional region sub-directory (may be None)
    :param polygon_coords: optional GeoJSON polygon coordinates as a JSON string to restrict POIs
    :param polygon_res: optional H3 resolution used to polyfill the polygon (defaults to 9)
    :param limit: optional maximum number of search strings to keep
    """
    # driver memory is configurable via SPARK_MEMORY, defaulting to 16g
    memory = os.getenv('SPARK_MEMORY') or '16g'
    spark = SparkSession.builder.appName('google-poi').config('spark.driver.memory', memory).getOrCreate()
    script_dir = os.path.dirname(__file__)
    file_path = os.path.join(script_dir, f'../../../../tmp/kuwala/osm_files/{continent}/{country}'
                                         f'{f"/{country_region}" if country_region else ""}/parquet/kuwala.parquet')
    df = spark.read.parquet(file_path)
    if polygon_coords:
        # restrict POIs to the H3 cells covered by the provided polygon
        polygon_coords = json.loads(polygon_coords)
        polygon = Polygon(polygon_coords)
        polygon_resolution = 9
        if polygon_res:
            polygon_resolution = int(polygon_res)
        polygon_cells = polyfill_polygon(polygon, resolution=polygon_resolution)
        df = df.withColumn('h3_polygon', h3_to_parent(col('h3_index'), lit(polygon_resolution)))
        df = df.filter(df.h3_polygon.isin(polygon_cells))
    # keep only POIs that carry at least one address component
    df = df \
        .filter(
            df.address_street.isNotNull() |
            df.address_house_nr.isNotNull() |
            df.address_zip_code.isNotNull() |
            df.address_city.isNotNull() |
            df.address_full.isNotNull()
        ) \
        .select('osm_id', 'osm_type', 'name', 'h3_index', 'address_street', 'address_house_nr', 'address_zip_code',
                'address_city', 'address_full', 'categories')

    @udf(returnType=StringType())
    def concat_search_strings(strings):
        # drop empty/null parts, then join the remainder into one comma-separated query
        strings = list(filter(lambda s: s, strings))
        return ', '.join(strings)

    # public transport POIs get "station" appended to the name for better matches
    with_public_transport = df \
        .filter(array_contains('categories', 'public_transportation')) \
        .withColumn('station', concat_ws(' ', col('name'), lit('station'))) \
        .withColumn(
            'query',
            concat_search_strings(array(col('station'), concat_ws(' ', col('address_street'), col('address_house_nr')),
                                        col('address_zip_code'), col('address_city')))
        ) \
        .select('osm_id', 'osm_type', 'h3_index', 'name', 'query')
    # POIs with structured address fields
    with_address = df \
        .filter(~array_contains('categories', 'public_transportation') & col('address_full').isNull()) \
        .withColumn(
            'query',
            concat_search_strings(array(col('name'), concat_ws(' ', col('address_street'), col('address_house_nr')),
                                        col('address_zip_code'), col('address_city')))
        ) \
        .select('osm_id', 'osm_type', 'h3_index', 'name', 'query')
    # POIs with a single free-form address string
    with_address_full = df \
        .filter(~array_contains('categories', 'public_transportation') & col('address_full').isNotNull()) \
        .withColumn('query', concat_search_strings(array(col('name'), col('address_full')))) \
        .select('osm_id', 'osm_type', 'h3_index', 'name', 'query')
    union = with_public_transport.union(with_address).union(with_address_full)
    if limit is not None:
        union = union.limit(limit)
    union.write.parquet(f'../../../../tmp/kuwala/google_files/{continent}/{country}'
                        f'{f"/{country_region}" if country_region else ""}/search_strings/osm_search_strings_'
                        f'{moment.now().format("YYYY-MM-DDTHH-mm-ss")}.parquet')
fe4ae19dea405007a8472683506ce9e7c15c791a | 5,764 | py | Python | plugins/teedoc-plugin-jupyter-notebook-parser/teedoc_plugin_jupyter_notebook_parser/__init__.py | tyhdefu/teedoc | 4a4b4f29e4db73a1d03365ce68b7594917f13fc6 | [
"MIT"
] | null | null | null | plugins/teedoc-plugin-jupyter-notebook-parser/teedoc_plugin_jupyter_notebook_parser/__init__.py | tyhdefu/teedoc | 4a4b4f29e4db73a1d03365ce68b7594917f13fc6 | [
"MIT"
] | null | null | null | plugins/teedoc-plugin-jupyter-notebook-parser/teedoc_plugin_jupyter_notebook_parser/__init__.py | tyhdefu/teedoc | 4a4b4f29e4db73a1d03365ce68b7594917f13fc6 | [
"MIT"
] | null | null | null | import os, sys
import re
from collections import OrderedDict
from datetime import datetime
try:
curr_path = os.path.dirname(os.path.abspath(__file__))
teedoc_project_path = os.path.abspath(os.path.join(curr_path, "..", "..", ".."))
if os.path.basename(teedoc_project_path) == "teedoc":
sys.path.insert(0, teedoc_project_path)
except Exception:
pass
from teedoc import Plugin_Base
from teedoc import Fake_Logger
try:
from .jupyter_convert import convert_ipynb_to_html
except Exception:
from jupyter_convert import convert_ipynb_to_html
__version__ = "1.2.1"
class Plugin(Plugin_Base):
    """Teedoc plugin that converts Jupyter notebooks (*.ipynb) into site pages."""

    name = "teedoc-plugin-jupyter-notebook-parser"
    desc = "jupyter notebook parser plugin for teedoc"
    # NOTE(review): "defautl_config" is a long-standing typo for "default_config";
    # the attribute name is kept because it is part of the class' public surface.
    defautl_config = {
        "parse_files": ["ipynb"]
    }

    def on_init(self, config, doc_src_path, site_config, logger = None, multiprocess = True, **kw_args):
        '''
        @config a dict object
        @logger teedoc.logger.Logger object
        '''
        self.logger = Fake_Logger() if not logger else logger
        self.doc_src_path = doc_src_path
        self.site_config = site_config
        # user config overrides the plugin defaults
        self.config = Plugin.defautl_config
        self.config.update(config)
        self.logger.i("-- plugin <{}> init".format(self.name))
        self.logger.i("-- plugin <{}> config: {}".format(self.name, self.config))

    def on_parse_files(self, files):
        '''
        Convert every supported notebook in *files* to an HTML page dict.

        :param files: iterable of file paths handed over by teedoc
        :return: dict with keys ok (bool), msg (str) and htmls (OrderedDict mapping
                 file path -> page dict, or None for skipped/unsupported files)
        '''
        # result, format must be this
        result = {
            "ok": False,
            "msg": "",
            "htmls": OrderedDict()
        }
        # function parse md file is disabled
        if not "ipynb" in self.config["parse_files"]:
            result["msg"] = "disabled notebook parse, but only support notebook"
            return result
        self.logger.d("-- plugin <{}> parse {} files".format(self.name, len(files)))
        # self.logger.d("files: {}".format(files))
        for file in files:
            name = os.path.basename(file)
            # ignore temp file
            if name.startswith(".~"):
                result["htmls"][file] = None
                continue
            ext = os.path.splitext(file)[1].lower()
            if ext.endswith("ipynb"):
                html = convert_ipynb_to_html(file)
                html.body = self._update_link_html(html.body)
                metadata = html.metadata
                date = None
                ts = int(os.stat(file).st_mtime)  # default to the file's mtime
                if "date" in metadata:
                    date = metadata["date"].strip().lower()
                # set date to false to disable date display
                if date and (date == "false" or date == "none"):
                    date = ""
                else:
                    GMT_FORMAT = '%Y-%m-%d'
                    try:
                        date_obj = datetime.strptime(date, GMT_FORMAT)
                        ts = int(date_obj.timestamp())
                    except Exception as e:
                        # unparsable/missing date: keep the mtime timestamp
                        pass
                if "author" in metadata:
                    author = metadata["author"]
                else:
                    author = ""
                result["htmls"][file] = {
                    "title": html.title,
                    "desc": html.desc,
                    "keywords": html.keywords,
                    "tags": html.tags,
                    "body": html.body,
                    "author": author,
                    "date": date,
                    "ts": ts,
                    "toc": html.toc,
                    "metadata": metadata,
                    "raw": html.raw
                }
            else:
                result["htmls"][file] = None
        result['ok'] = True
        return result

    def on_parse_pages(self, files):
        """Pages are parsed exactly like docs."""
        result = self.on_parse_files(files)
        return result

    def on_add_html_header_items(self, type_name):
        """Return extra <head> items injected into every generated page."""
        items = []
        items.append('<meta name="html-generator" content="teedoc-plugin-jupyter-notebook-parser">')
        return items

    def _update_link_html(self, content):
        """Rewrite relative .md/.ipynb hrefs in rendered anchors to their .html targets."""
        def re_del(c):
            ret = c[0]
            links = re.findall('href="(.*?)"', c[0])
            if len(links) > 0:
                for link in links:
                    if link.startswith(".") or os.path.isabs(link):
                        # escaped dots so only real ".md" suffixes match.
                        # BUGFIX: the second re.sub previously passed re.I positionally,
                        # which is the `count` argument, not `flags`.
                        ret = re.sub(r"README\.md", "index.html", c[0], flags=re.I)
                        ret = re.sub(r"\.md", ".html", ret, flags=re.I)
                        return ret
            return ret

        def re_del_ipynb(c):
            ret = c[0]
            links = re.findall('href="(.*?)"', c[0])
            if len(links) > 0:
                for link in links:
                    if link.startswith(".") or os.path.isabs(link):
                        # same flags/escaping fix as re_del, for notebook links
                        ret = re.sub(r"README\.ipynb", "index.html", c[0], flags=re.I)
                        ret = re.sub(r"\.ipynb", ".html", ret, flags=re.I)
                        return ret
            return ret

        # <a class="anchor-link" href="#链接"> </a></h2><p><a href="./syntax_markdown.md">markdown 语法</a>
        content = re.sub(r'\<a.*?href=.*?\.md.*?\</a\>', re_del, content, flags=re.I)
        content = re.sub(r'\<a.*?href=.*?\.ipynb.*?\</a\>', re_del_ipynb, content, flags=re.I)
        return content
if __name__ == "__main__":
    # Manual smoke test: parse tests/test.ipynb with default config and dump the
    # rendered HTML bodies into ./out, one .html file per notebook.
    config = {
    }
    plug = Plugin(config, "./", {})
    res = plug.on_parse_files(["tests/test.ipynb"])
    if not os.path.exists("out"):
        os.makedirs("out")
    for file, html in res["htmls"].items():
        if html:
            # name the output after the notebook, e.g. test.ipynb -> test.html
            file = "{}.html".format(os.path.splitext(os.path.basename(file))[0])
            with open(os.path.join("out", file), "w") as f:
                f.write(html['body'])
50d1a83ac87a93488d61e1129daca6a4c2455731 | 115 | py | Python | mmdet/version.py | nguyenquangduc2000/SKU110K-DenseDet | ba2027cb654d77867a65204a3ca83acefc7615ee | [
"Apache-2.0"
] | 78 | 2020-07-17T09:56:12.000Z | 2022-03-09T06:22:50.000Z | mmdet/version.py | nguyenquangduc2000/SKU110K-DenseDet | ba2027cb654d77867a65204a3ca83acefc7615ee | [
"Apache-2.0"
] | 12 | 2020-10-21T13:05:46.000Z | 2022-03-24T09:33:52.000Z | mmdet/version.py | nguyenquangduc2000/SKU110K-DenseDet | ba2027cb654d77867a65204a3ca83acefc7615ee | [
"Apache-2.0"
] | 21 | 2020-08-16T13:21:42.000Z | 2022-03-22T12:16:41.000Z | # GENERATED VERSION FILE
# TIME: Fri Jul 3 10:58:01 2020
__version__ = '1.0rc1+fb983fe'
short_version = '1.0rc1'
| 19.166667 | 32 | 0.713043 |
4e45ccf3ca2fdfaaa47b528d37bf71a67225c7ce | 11,317 | py | Python | task/grovertrainer.py | adamoyoung/grover | 393afbd69482ed1fca3056fa0274626fab3ae789 | [
"MIT"
] | 155 | 2021-01-18T09:29:07.000Z | 2022-03-30T20:50:15.000Z | task/grovertrainer.py | adamoyoung/grover | 393afbd69482ed1fca3056fa0274626fab3ae789 | [
"MIT"
] | 13 | 2021-01-22T09:49:49.000Z | 2022-03-07T12:25:18.000Z | task/grovertrainer.py | adamoyoung/grover | 393afbd69482ed1fca3056fa0274626fab3ae789 | [
"MIT"
] | 38 | 2021-01-22T03:25:15.000Z | 2022-03-09T02:45:24.000Z | """
The GROVER trainer.
"""
import os
import time
from logging import Logger
from typing import List, Tuple
from collections.abc import Callable
import torch
from torch.nn import Module
from torch.utils.data import DataLoader
from grover.model.models import GroverTask
from grover.util.multi_gpu_wrapper import MultiGpuWrapper as mgw
class GROVERTrainer:
    def __init__(self,
                 args,
                 embedding_model: Module,
                 atom_vocab_size: int,  # atom vocab size
                 bond_vocab_size: int,
                 fg_szie: int,
                 train_dataloader: DataLoader,
                 test_dataloader: DataLoader,
                 optimizer_builder: Callable,
                 scheduler_builder: Callable,
                 logger: Logger = None,
                 with_cuda: bool = False,
                 enable_multi_gpu: bool = False):
        """
        The init function of GROVERTrainer
        :param args: the input arguments.
        :param embedding_model: the model to generate atom/bond embeddings.
        :param atom_vocab_size: the vocabulary size of atoms.
        :param bond_vocab_size: the vocabulary size of bonds.
        :param fg_szie: the size of semantic motifs (functional groups); NOTE(review): the
                        parameter name is a typo for "fg_size", kept for interface stability.
        :param train_dataloader: the data loader of train data.
        :param test_dataloader: the data loader of validation data.
        :param optimizer_builder: the function of building the optimizer.
        :param scheduler_builder: the function of building the scheduler.
        :param logger: the logger
        :param with_cuda: enable gpu training.
        :param enable_multi_gpu: enable multi_gpu traning.
        """
        self.args = args
        self.with_cuda = with_cuda
        self.grover = embedding_model
        # wrap the embedding model with the multi-task pretraining heads
        self.model = GroverTask(args, embedding_model, atom_vocab_size, bond_vocab_size, fg_szie)
        self.loss_func = self.model.get_loss_func(args)
        self.enable_multi_gpu = enable_multi_gpu
        self.atom_vocab_size = atom_vocab_size
        self.bond_vocab_size = bond_vocab_size
        self.debug = logger.debug if logger is not None else print
        if self.with_cuda:
            # print("Using %d GPUs for training." % (torch.cuda.device_count()))
            self.model = self.model.cuda()
        self.train_data = train_dataloader
        self.test_data = test_dataloader
        self.optimizer = optimizer_builder(self.model, self.args)
        self.scheduler = scheduler_builder(self.optimizer, self.args)
        if self.enable_multi_gpu:
            # wrap the optimizer so gradients are averaged across workers (horovod-style)
            self.optimizer = mgw.DistributedOptimizer(self.optimizer,
                                                      named_parameters=self.model.named_parameters())
        self.args = args
        # n_iter counts the number of training samples consumed so far
        self.n_iter = 0
def broadcast_parameters(self) -> None:
"""
Broadcast parameters before training.
:return: no return.
"""
if self.enable_multi_gpu:
# broadcast parameters & optimizer state.
mgw.broadcast_parameters(self.model.state_dict(), root_rank=0)
mgw.broadcast_optimizer_state(self.optimizer, root_rank=0)
def train(self, epoch: int) -> List:
"""
The training iteration
:param epoch: the current epoch number.
:return: the loss terms of current epoch.
"""
# return self.mock_iter(epoch, self.train_data, train=True)
return self.iter(epoch, self.train_data, train=True)
def test(self, epoch: int) -> List:
"""
The test/validaiion iteration
:param epoch: the current epoch number.
:return: the loss terms as a list
"""
# return self.mock_iter(epoch, self.test_data, train=False)
return self.iter(epoch, self.test_data, train=False)
    def mock_iter(self, epoch: int, data_loader: DataLoader, train: bool = True) -> List:
        """
        Perform a mock iteration. For test only.

        Steps the LR scheduler once per batch without touching the model, advances the
        global sample counter by one batch, and returns zeroed loss terms shaped like iter().
        epoch/train are accepted only for signature parity with iter().
        :param epoch: the current epoch number.
        :param data_loader: the data loader.
        :param train: True: train model, False: validation model.
        :return: the loss terms as a list
        """
        for _, _ in enumerate(data_loader):
            self.scheduler.step()
        cum_loss_sum = 0.0
        self.n_iter += self.args.batch_size
        return self.n_iter, cum_loss_sum, (0, 0, 0, 0, 0, 0)
    def iter(self, epoch, data_loader, train=True) -> List:
        """
        Perform a training / validation iteration.

        In training mode the summed multi-task loss is back-propagated per batch; in eval
        mode only the three task losses (atom vocab, bond vocab, functional groups) are
        accumulated. All returned loss terms are averaged over the number of batches.
        Note: `epoch` is currently unused inside the loop.
        :param epoch: the current epoch number.
        :param data_loader: the data loader.
        :param train: True: train model, False: validation model.
        :return: the loss terms as a list
        """
        if train:
            self.model.train()
        else:
            self.model.eval()
        loss_sum, iter_count = 0, 0
        cum_loss_sum, cum_iter_count = 0, 0
        av_loss_sum, bv_loss_sum, fg_loss_sum, av_dist_loss_sum, bv_dist_loss_sum, fg_dist_loss_sum = 0, 0, 0, 0, 0, 0
        # loss_func = self.model.get_loss_func(self.args)
        for _, item in enumerate(data_loader):
            batch_graph = item["graph_input"]
            targets = item["targets"]
            # move targets to GPU when the model lives there
            if next(self.model.parameters()).is_cuda:
                targets["av_task"] = targets["av_task"].cuda()
                targets["bv_task"] = targets["bv_task"].cuda()
                targets["fg_task"] = targets["fg_task"].cuda()
            preds = self.model(batch_graph)
            # # ad-hoc code, for visualizing a model, comment this block when it is not needed
            # import dglt.contrib.grover.vis_model as vis_model
            # for task in ['av_task', 'bv_task', 'fg_task']:
            #     vis_graph = vis_model.make_dot(self.model(batch_graph)[task],
            #                                    params=dict(self.model.named_parameters()))
            #     # vis_graph.view()
            #     vis_graph.render(f"{self.args.backbone}_model_{task}_vis.png", format="png")
            # exit()
            loss, av_loss, bv_loss, fg_loss, av_dist_loss, bv_dist_loss, fg_dist_loss = self.loss_func(preds, targets)
            loss_sum += loss.item()
            iter_count += self.args.batch_size
            if train:
                cum_loss_sum += loss.item()
                # Run model
                self.model.zero_grad()
                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()
                self.scheduler.step()
            else:
                # For eval model, only consider the loss of three task.
                cum_loss_sum += av_loss.item()
                cum_loss_sum += bv_loss.item()
                cum_loss_sum += fg_loss.item()
            av_loss_sum += av_loss.item()
            bv_loss_sum += bv_loss.item()
            fg_loss_sum += fg_loss.item()
            # dist losses may come back as plain floats (e.g. 0.0) when disabled
            av_dist_loss_sum += av_dist_loss.item() if type(av_dist_loss) != float else av_dist_loss
            bv_dist_loss_sum += bv_dist_loss.item() if type(bv_dist_loss) != float else bv_dist_loss
            fg_dist_loss_sum += fg_dist_loss.item() if type(fg_dist_loss) != float else fg_dist_loss
            cum_iter_count += 1
            self.n_iter += self.args.batch_size
            # Debug only.
            # if i % 50 == 0:
            #     print(f"epoch: {epoch}, batch_id: {i}, av_loss: {av_loss}, bv_loss: {bv_loss}, "
            #           f"fg_loss: {fg_loss}, av_dist_loss: {av_dist_loss}, bv_dist_loss: {bv_dist_loss}, "
            #           f"fg_dist_loss: {fg_dist_loss}")
        # average every accumulated term over the number of processed batches
        cum_loss_sum /= cum_iter_count
        av_loss_sum /= cum_iter_count
        bv_loss_sum /= cum_iter_count
        fg_loss_sum /= cum_iter_count
        av_dist_loss_sum /= cum_iter_count
        bv_dist_loss_sum /= cum_iter_count
        fg_dist_loss_sum /= cum_iter_count
        return self.n_iter, cum_loss_sum, (av_loss_sum, bv_loss_sum, fg_loss_sum, av_dist_loss_sum,
                                           bv_dist_loss_sum, fg_dist_loss_sum)
def save(self, epoch, file_path, name=None) -> str:
    """
    Save the intermediate models during training.
    :param epoch: the epoch number.
    :param file_path: the file_path to save the model.
    :param name: optional checkpoint name; when None a timestamp suffix is
        generated so successive saves never overwrite each other.
    :return: the output path.
    """
    # Timestamp in the file name distinguishes different saved models.
    if name is None:
        name = time.strftime("_%Y_%m_%d_%H_%M_%S", time.localtime())
    output_path = file_path + name + ".ep%d" % epoch
    # Scalers are not used during pretraining; kept as None so the checkpoint
    # layout stays compatible with loaders that expect these keys.
    scaler = None
    features_scaler = None
    state = {
        'args': self.args,
        'state_dict': self.model.state_dict(),
        'optimizer': self.optimizer.state_dict(),
        'scheduler_step': self.scheduler.current_step,
        "epoch": epoch,
        'data_scaler': {
            'means': scaler.means,
            'stds': scaler.stds
        } if scaler is not None else None,
        'features_scaler': {
            'means': features_scaler.means,
            'stds': features_scaler.stds
        } if features_scaler is not None else None
    }
    torch.save(state, output_path)
    print("EP:%d Model Saved on:" % epoch, output_path)
    return output_path
def save_tmp(self, epoch, file_path, rank=0):
    """
    Save the models for auto-restore during training.

    The checkpoint lives in ``file_path/tmp`` and is overwritten on every
    epoch, so at most one restore point per rank exists at a time.
    :param epoch: the epoch number.
    :param file_path: the file_path to store the model.
    :param rank: the current rank (decrypted).
    :return:
    """
    tmp_dir = os.path.join(file_path, "tmp")
    if not os.path.exists(tmp_dir):
        os.makedirs(tmp_dir, exist_ok=True)
    checkpoint = {
        'args': self.args,
        'state_dict': self.model.state_dict(),
        'optimizer': self.optimizer.state_dict(),
        'scheduler_step': self.scheduler.current_step,
        "epoch": epoch
    }
    torch.save(checkpoint, os.path.join(tmp_dir, "model.%d" % rank))
def restore(self, file_path, rank=0) -> Tuple[int, int]:
    """
    Restore the training state saved by save_tmp.
    :param file_path: the file_path to store the model.
    :param rank: the current rank (decrypted).
    :return: the restored epoch number and the scheduler_step in scheduler,
        or (0, 0) when no checkpoint exists.
    """
    cpt_path = os.path.join(file_path, "tmp", "model.%d" % rank)
    if not os.path.exists(cpt_path):
        # Bug fix: the message was "No checkpoint found %d" with no format
        # argument, which printed the literal "%d"; report the path instead.
        print("No checkpoint found at %s" % cpt_path)
        return 0, 0
    cpt = torch.load(cpt_path)
    self.model.load_state_dict(cpt["state_dict"])
    self.optimizer.load_state_dict(cpt["optimizer"])
    epoch = cpt["epoch"]
    scheduler_step = cpt["scheduler_step"]
    self.scheduler.current_step = scheduler_step
    print("Restore checkpoint, current epoch: %d" % epoch)
    return epoch, scheduler_step
| 40.417857 | 118 | 0.596183 |
2325a7a4697dd349c33b99c59c43e630fb1581b7 | 20,530 | py | Python | materials/code/python/planet_client.py | agroimpacts/nmeo | 6cf657aa2e47223eff5b90ba175db14e03a07b43 | [
"Apache-2.0"
] | null | null | null | materials/code/python/planet_client.py | agroimpacts/nmeo | 6cf657aa2e47223eff5b90ba175db14e03a07b43 | [
"Apache-2.0"
] | null | null | null | materials/code/python/planet_client.py | agroimpacts/nmeo | 6cf657aa2e47223eff5b90ba175db14e03a07b43 | [
"Apache-2.0"
] | 1 | 2022-01-25T22:47:37.000Z | 2022-01-25T22:47:37.000Z | from planet import api
from planet.api import filters
from geo_utils import GeoUtils
from pprint import pprint
from requests.auth import HTTPBasicAuth
from fixed_thread_pool_executor import FixedThreadPoolExecutor
import os
import ssl
import requests
import time
import urllib.request
import shutil
import boto3
from boto3.s3.transfer import S3Transfer, TransferConfig
import botocore
import concurrent
import logging
import configparser
import json
import multiprocessing
from retry import retry
# PClientV1, class to simplify querying & downloading planet scenes using planet API V1
# We need to consider usage of a new API
class PClientV1():
    """Client for querying and downloading Planet scenes via the Data API v1.

    Wraps ``planet.api.ClientV1`` and adds catalog bookkeeping: assets can be
    mirrored on the local filesystem, on S3, or both; secondary products are
    fanned out to a fixed-size thread pool.
    """

    def __init__(self, api_key, config):
        """Build a client from a Planet API key and a parsed config mapping.

        :param api_key: Planet API key string.
        :param config: ``configparser``-style mapping with an ``imagery`` section.
        """
        imagery_config = config['imagery']
        self.api_key = api_key
        # Scene-quality thresholds used when building search filters.
        self.max_clouds_initial = float(imagery_config['max_clouds_initial'])
        self.max_clouds = float(imagery_config['max_clouds'])  # max proportion of pixels that are clouds
        self.max_bad_pixels = float(imagery_config['max_bad_pixels'])  # max proportion of bad pixels (transmission errors, etc.)
        self.max_nodata = float(imagery_config['max_nodata'])  # max nodata values per cellgrid
        self.maximgs = int(imagery_config['maximgs'])  # 15 #10 #20
        self.output_encoding = imagery_config['output_encoding']
        # Bug fix: the assignments below had been commented out while methods
        # such as upload_s3_csv, download_localfs_s3_product, download_localfs_s3
        # and close still read them, raising AttributeError at runtime.
        self.output_filename = imagery_config['output_filename']
        self.output_filename_csv = imagery_config['output_filename_csv']
        self.catalog_path = imagery_config['catalog_path']
        self.s3_catalog_bucket = imagery_config['s3_catalog_bucket']
        self.s3_catalog_prefix = imagery_config['s3_catalog_prefix']
        # Product type -> Planet item/asset type and file extension.
        self.products = {
            'analytic_sr': {
                'item_type': 'PSScene4Band',
                'asset_type': 'analytic_sr',
                'ext': 'tif'
            },
            'analytic': {
                'item_type': 'PSScene4Band',
                'asset_type': 'analytic',
                'ext': 'tif'
            },
            'analytic_xml': {
                'item_type': 'PSScene4Band',
                'asset_type': 'analytic_xml',
                'ext': 'xml'
            },
            'visual': {
                'item_type': 'PSScene3Band',
                'asset_type': 'visual',
                'ext': 'tif'
            }
        }
        self.client = api.ClientV1(api_key=self.api_key)
        self.s3client = boto3.client('s3')
        # Which secondary products to mirror alongside analytic_sr; config
        # values are strings like "True"/"False", parsed via json.loads.
        self.with_analytic = json.loads(imagery_config['with_analytic'].lower())
        self.with_analytic_xml = json.loads(imagery_config['with_analytic_xml'].lower())
        self.with_visual = json.loads(imagery_config['with_visual'].lower())
        self.with_immediate_cleanup = json.loads(imagery_config['with_immediate_cleanup'].lower())
        self.local_mode = json.loads(imagery_config['local_mode'].lower())
        self.s3_only = json.loads(imagery_config['s3_only'].lower())
        self.transfer = S3Transfer(self.s3client, TransferConfig(use_threads=False))
        self.logger = logging.getLogger(__name__)
        self.logger.setLevel(logging.INFO)
        # planet has limitation 5 sec per key (search queries)
        threads_number = imagery_config['threads']
        if threads_number == 'default':
            threads_number = multiprocessing.cpu_count() * 2 + 1
        else:
            threads_number = int(threads_number)
        self.secondary_uploads_executor = FixedThreadPoolExecutor(size=threads_number)

    # there are start_date and end_date present as it should be the part of a
    # row retrieved from psql / tiff file
    def set_filters_sr(self, aoi, start_date='2017-12-15T00:00:00.000Z',
                       end_date='2018-03-15T00:00:00.000Z', id=''):
        """Build a quick-search filter for downloadable analytic_sr scenes
        intersecting ``aoi`` within the date range; optionally pin a scene id."""
        date_filter = {
            'type': 'DateRangeFilter',
            'field_name': 'acquired',
            'config': {
                'gte': start_date,
                'lte': end_date
            }
        }
        cloud_filter = {
            'type': 'RangeFilter',
            'field_name': 'cloud_cover',
            'config': {
                'lte': self.max_clouds_initial
            }
        }
        bad_pixel_filter = {
            'type': 'RangeFilter',
            'field_name': 'anomalous_pixels',
            'config': {
                'lte': self.max_bad_pixels
            }
        }
        geometry_filter = {
            "type": "GeometryFilter",
            "field_name": "geometry",
            "config": aoi
        }
        # only scenes that have an analytic_sr asset available for download
        asset_filter = {
            "type": "PermissionFilter",
            "config": ["assets.analytic_sr:download"]
        }
        string_filter = {
            "type": "StringInFilter",
            "field_name": "id",
            "config": [id]
        }
        filters_list = [date_filter, cloud_filter, geometry_filter,
                        bad_pixel_filter, asset_filter]
        if (id != ''):
            filters_list.append(string_filter)
        # combine filters:
        query = {
            'type': 'AndFilter',
            'config': filters_list
        }
        return query

    def set_filters_id(self, id=''):
        """Build a quick-search filter selecting a single scene by id."""
        asset_filter = {
            "type": "PermissionFilter",
            "config": ["assets.analytic_sr:download"]
        }
        string_filter = {
            "type": "StringInFilter",
            "field_name": "id",
            "config": [id]
        }
        filters_list = [asset_filter, string_filter]
        # combine filters:
        query = {
            'type': 'AndFilter',
            'config': filters_list
        }
        return query

    @retry(tries=10, delay=2, backoff=2)
    def request_intersecting_scenes(self, query):
        """Run a quick search for the query; retried with backoff because the
        Planet API rate-limits search requests per key."""
        item_types = ['PSScene4Band']
        request = api.filters.build_search_request(query, item_types)
        # post the request
        results = self.client.quick_search(request)
        return results

    # returns a full URI here
    def download_localfs_generic(self, scene_id, season='', asset_type='analytic_sr',
                                 ext='tif', item_type='PSScene4Band'):
        """Activate and download a single asset into the local catalog; no-op
        when the target file already exists. Returns the local path."""
        output_file = "{}{}/{}/{}.{}".format(self.catalog_path, asset_type, season, scene_id, ext)
        if not os.path.exists(output_file):
            # activation & download
            session = requests.Session()
            session.auth = (self.api_key, '')
            assets_uri = ("https://api.planet.com/data/v1/item-types/{}/items/{}/assets/").format(item_type, scene_id)
            assets_query_result = session.get(assets_uri)
            self.logger.info(assets_query_result.status_code)
            item_activation_json = assets_query_result.json()
            item_activation_url = item_activation_json[asset_type]["_links"]["activate"]
            response = session.post(item_activation_url)
            self.logger.info(response.status_code)
            # Poll until Planet reports the asset as active (HTTP 204).
            while response.status_code != 204:
                time.sleep(30)
                response = session.post(item_activation_url)
                self.logger.info(response.status_code)
            item_url = 'https://api.planet.com/data/v1/item-types/{}/items/{}/assets/'.format(item_type, scene_id)
            result = requests.get(item_url, auth=HTTPBasicAuth(self.api_key, ''))
            if result.status_code != 200:
                self.logger.info(result.content.decode('utf-8'))
            download_url = result.json()[asset_type]['location']
            # download
            with urllib.request.urlopen(download_url) as response, open(output_file, 'wb') as out_file:
                shutil.copyfileobj(response, out_file)
        return output_file

    # returns a full S3 URI here
    def download_s3_generic(self, scene_id, season='', asset_type='analytic_sr',
                            ext='tif', item_type='PSScene4Band'):
        """Activate and download a single asset straight into the S3 catalog;
        skipped when the key already exists. Returns the s3:// URI."""
        output_key = "{}/{}/{}/{}.{}".format(self.s3_catalog_prefix, asset_type, season, scene_id, ext)
        result_path = 's3://{}/{}'.format(self.s3_catalog_bucket, output_key)
        try:
            self.s3client.head_object(Bucket=self.s3_catalog_bucket, Key=output_key)
        except botocore.exceptions.ClientError:
            # head_object failing means the key is absent: fetch from Planet.
            self.logger.exception('Error Encountered')
            self.logger.info("Downloading {}...".format(scene_id))
            # activation & download
            session = requests.Session()
            session.auth = (self.api_key, '')
            assets_uri = ("https://api.planet.com/data/v1/item-types/{}/items/{}/assets/").format(item_type, scene_id)
            assets_query_result = session.get(assets_uri)
            self.logger.info(assets_query_result.status_code)
            item_activation_json = assets_query_result.json()
            item_activation_url = item_activation_json[asset_type]["_links"]["activate"]
            response = session.post(item_activation_url)
            self.logger.info(response.status_code)
            # Poll until Planet reports the asset as active (HTTP 204).
            while response.status_code != 204:
                time.sleep(30)
                response = session.post(item_activation_url)
                self.logger.info(response.status_code)
            item_url = 'https://api.planet.com/data/v1/item-types/{}/items/{}/assets/'.format(item_type, scene_id)
            result = requests.get(item_url, auth=HTTPBasicAuth(self.api_key, ''))
            if result.status_code != 200:
                self.logger.info(result.content.decode('utf-8'))
            download_url = result.json()[asset_type]['location']
            # upload on s3 directly from the response
            with urllib.request.urlopen(download_url) as response:
                self.s3client.put_object(Body=response.read(), Bucket=self.s3_catalog_bucket, Key=output_key)
            # finished
            self.logger.info("Downloaded {}".format(scene_id))
        return result_path

    # returns a full URI here
    def download_localfs_product(self, product_type, scene_id, season=''):
        cfg = self.products[product_type]
        return self.download_localfs_generic(
            scene_id=scene_id,
            season=season,
            asset_type=cfg['asset_type'],
            ext=cfg['ext'],
            item_type=cfg['item_type']
        )

    # returns a full URI here
    def download_s3_product(self, product_type, scene_id, season=''):
        cfg = self.products[product_type]
        return self.download_s3_generic(
            scene_id=scene_id,
            season=season,
            asset_type=cfg['asset_type'],
            ext=cfg['ext'],
            item_type=cfg['item_type']
        )

    def download_localfs_analytic_sr(self, scene_id, season=''):
        return self.download_localfs_product('analytic_sr', scene_id, season)

    def download_s3_analytic_sr(self, scene_id, season=''):
        return self.download_s3_product('analytic_sr', scene_id, season)

    def download_localfs_analytic(self, scene_id, season=''):
        return self.download_localfs_product('analytic', scene_id, season)

    def download_s3_analytic(self, scene_id, season=''):
        return self.download_s3_product('analytic', scene_id, season)

    def download_localfs_analytic_xml(self, scene_id, season=''):
        return self.download_localfs_product('analytic_xml', scene_id, season)

    def download_s3_analytic_xml(self, scene_id, season=''):
        return self.download_s3_product('analytic_xml', scene_id, season)

    def download_localfs_visual(self, scene_id, season=''):
        return self.download_localfs_product('visual', scene_id, season)

    def download_s3_visual(self, scene_id, season=''):
        return self.download_s3_product('visual', scene_id, season)

    def upload_s3_csv(self):
        """Upload the CSV report to S3 unless running in local mode; returns
        the report's URI (s3:// or local path)."""
        result = ''
        if not self.local_mode:
            output_key = "{}/{}".format(self.s3_catalog_prefix, self.output_filename.split('/')[-1])
            result = 's3://{}/{}'.format(self.s3_catalog_bucket, output_key)
            self.transfer.upload_file(self.output_filename, self.s3_catalog_bucket, output_key)
        else:
            result = self.output_filename
        return result

    def upload_s3_csv_csv(self):
        """Upload the secondary CSV report to S3; returns its s3:// URI."""
        output_key = "{}/{}".format(self.s3_catalog_prefix, self.output_filename_csv.split('/')[-1])
        result = 's3://{}/{}'.format(self.s3_catalog_bucket, output_key)
        self.transfer.upload_file(self.output_filename_csv, self.s3_catalog_bucket, output_key)
        return result

    def download_localfs_s3_product(self, scene_id, season='', product_type='analytic_sr'):
        """Ensure the asset is present in the configured catalogs.

        Depending on ``local_mode`` / ``s3_only`` the asset ends up on the
        local filesystem, on S3, or both. Returns ``(filepath, s3_result)``.
        """
        cfg = self.products[product_type]
        asset_type = cfg['asset_type']
        ext = cfg['ext']
        item_type = cfg['item_type']
        filepath = ''
        output_key = "{}/{}/{}/{}.{}".format(self.s3_catalog_prefix, asset_type, season, scene_id, ext)
        s3_result = 's3://{}/{}'.format(self.s3_catalog_bucket, output_key)
        local_result = "{}{}/{}/{}.{}".format(self.catalog_path, asset_type, season, scene_id, ext)
        if not self.s3_only:
            if not os.path.exists(local_result):
                if not self.local_mode:
                    try:
                        # if we have the file in our s3 bucket, prefer it (faster)
                        self.s3client.head_object(Bucket=self.s3_catalog_bucket, Key=output_key)
                        filepath = s3_result
                    except botocore.exceptions.ClientError:
                        self.logger.exception('Error Encountered')
                        filepath = self.download_localfs_product(product_type, scene_id, season)
                        self.logger.info("Uploading {}...".format(scene_id))
                        # Close the file handle after the upload (the previous
                        # revision leaked it).
                        with open(filepath, 'rb') as body:
                            self.s3client.put_object(Bucket=self.s3_catalog_bucket, Key=output_key, Body=body)
                else:
                    filepath = self.download_localfs_product(product_type, scene_id, season)
                    s3_result = local_result
            else:
                filepath = local_result
                if self.local_mode:
                    s3_result = local_result
                else:
                    try:
                        self.s3client.head_object(Bucket=self.s3_catalog_bucket, Key=output_key)
                    except botocore.exceptions.ClientError:
                        self.logger.exception('Error Encountered')
                        self.logger.info("Uploading {}...".format(scene_id))
                        with open(filepath, 'rb') as body:
                            self.s3client.put_object(Bucket=self.s3_catalog_bucket, Key=output_key, Body=body)
        else:
            s3_result = self.download_s3_product('analytic_sr', scene_id, season)
            filepath = s3_result
        return filepath, s3_result

    def download_localfs_s3(self, scene_id, season=''):
        """Download the primary analytic_sr product synchronously and fan out
        the configured secondary products to the background thread pool."""
        # Bug fix: the previous revision queued keys like 'with_analytic',
        # which are not in self.products and raised KeyError in the workers.
        sub_products = []
        if self.with_analytic:
            sub_products.append('analytic')
        if self.with_analytic_xml:
            sub_products.append('analytic_xml')
        if self.with_visual:
            sub_products.append('visual')
        for sub_product in sub_products:
            self.secondary_uploads_executor.submit(
                self.download_localfs_s3_product, scene_id, season, sub_product)
        return self.download_localfs_s3_product(scene_id, season)

    def drain(self):
        """Block until all queued secondary downloads have finished."""
        self.secondary_uploads_executor.drain()

    def cleanup_catalog(self):
        """Remove and recreate the per-product/per-season local catalog
        folders when immediate cleanup is configured (restored: close() calls
        this method, but it had been commented out)."""
        self.logger.info("Catalog cleanup...")
        if self.with_immediate_cleanup and not self.s3_only:
            for product_type in ['analytic', 'analytic_sr', 'analytic_xml', 'visual']:
                for season in ['OS', 'GS']:
                    lpath = "{}{}/{}".format(self.catalog_path, product_type, season)
                    try:
                        shutil.rmtree(lpath, ignore_errors=False)
                        os.makedirs(lpath)
                    except OSError:
                        self.logger.exception('Error Encountered')
                        self.logger.info("Could not remove a folder: {}".format(lpath))

    def close(self):
        """Shut down the thread pool and clean up the local catalog."""
        self.secondary_uploads_executor.close()
        self.cleanup_catalog()
if __name__ == "__main__":
    # disable ssl
    # NOTE(review): globally disabling certificate verification affects every
    # HTTPS request made by urllib in this process — confirm this is intended.
    ssl._create_default_https_context = ssl._create_unverified_context

    # read config
    config = configparser.ConfigParser()
    config.read('cfg/config.ini')
    planet_config = config['planet']
    imagery_config = config['imagery']

    # logger
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.INFO)
    logging.basicConfig(format = '%(message)s', datefmt = '%m-%d %H:%M')

    api_key = planet_config['api_key']

    # pclient init
    pclient = PClientV1(api_key, config)

    # Example usage kept below for reference: query a single scene by id,
    # then activate and download its assets.
    # planet_filters = pclient.set_filters_id('20170828_092138_0f4b')
    # res = pclient.request_intersecting_scenes(planet_filters)
    # pick up scene id and its geometry
    # for item in res.items_iter(1):
    # each item is a GeoJSON feature
    # scene_id = item["id"]
    # print(item)
    # activation & download
    # it should be sync, to allow async check of neighbours
    # output_localfile = pclient.download_localfsV2(scene_id, asset_type = 'visual', item_type='PSScene3Band')
    # output_localfile = pclient.download_localfsV2(scene_id, asset_type = 'visual_xml', item_type='PSScene3Band', ext = 'xml')
    # output_localfile = pclient.download_localfsV2(scene_id, asset_type = 'analytic')
    # output_localfile = pclient.download_localfsV2(scene_id, asset_type = 'analytic_xml', ext = 'xml')
    # use custom cloud detection function to calculate clouds and shadows
b9d5a48e01de306f7a57a1d019b6ff94c0ef3701 | 2,965 | py | Python | examples/mnist/mnist_mlp_candle.py | vgutta/Benchmarks | f739c1fb2b02dd8fb310e2182fa8c4baaaea7caf | [
"MIT"
] | 51 | 2017-01-24T20:57:27.000Z | 2022-02-15T00:33:45.000Z | examples/mnist/mnist_mlp_candle.py | vgutta/Benchmarks | f739c1fb2b02dd8fb310e2182fa8c4baaaea7caf | [
"MIT"
] | 59 | 2017-08-21T22:19:44.000Z | 2021-11-01T16:05:35.000Z | examples/mnist/mnist_mlp_candle.py | vgutta/Benchmarks | f739c1fb2b02dd8fb310e2182fa8c4baaaea7caf | [
"MIT"
] | 90 | 2016-11-22T03:57:07.000Z | 2022-01-11T04:43:23.000Z | # from tensorflow.keras.callbacks import CSVLogger
from tensorflow.keras import backend as K
import mnist
import candle
def initialize_parameters():
    """Construct the CANDLE MNIST benchmark and finalize its parameters."""
    benchmark = mnist.MNIST(
        mnist.file_path,
        'mnist_params.txt',
        'keras',
        prog='mnist_mlp',
        desc='MNIST example',
    )
    # Merge defaults, the parameter file and command-line overrides.
    params = candle.finalize_parameters(benchmark)
    return params
def run(gParameters):
    """Train a simple fully-connected network on MNIST.

    Gets to 98.40% test accuracy after 20 epochs (there is *a lot* of margin
    for parameter tuning); about 2 seconds per epoch on a K520 GPU.
    Hyper-parameters (batch size, epochs, activation, optimizer) come from
    ``gParameters``. Returns the Keras ``History`` of the training run.
    """
    from tensorflow import keras
    from tensorflow.keras.datasets import mnist
    from tensorflow.keras.models import Sequential
    from tensorflow.keras.layers import Dense, Dropout

    batch_size = gParameters['batch_size']
    epochs = gParameters['epochs']
    activation = gParameters['activation']
    optimizer = gParameters['optimizer']
    num_classes = 10

    # Load the train/test split, flatten the 28x28 images into 784-long
    # vectors and scale pixel values into [0, 1].
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train = x_train.reshape(60000, 784).astype('float32') / 255
    x_test = x_test.reshape(10000, 784).astype('float32') / 255
    print(x_train.shape[0], 'train samples')
    print(x_test.shape[0], 'test samples')

    # convert class vectors to binary class matrices
    y_train = keras.utils.to_categorical(y_train, num_classes)
    y_test = keras.utils.to_categorical(y_test, num_classes)

    model = Sequential([
        Dense(512, activation=activation, input_shape=(784,)),
        Dropout(0.2),
        Dense(512, activation=activation),
        Dropout(0.2),
        Dense(num_classes, activation='softmax'),
    ])
    model.summary()

    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])

    history = model.fit(x_train, y_train,
                        batch_size=batch_size,
                        epochs=epochs,
                        verbose=1,
                        validation_data=(x_test, y_test))

    score = model.evaluate(x_test, y_test, verbose=0)
    print('Test loss:', score[0])
    print('Test accuracy:', score[1])
    return history
def main():
    """Script entry point: resolve parameters, then run training."""
    params = initialize_parameters()
    run(params)
if __name__ == '__main__':
    main()
    # Release Keras/TensorFlow session state; some backends do not implement
    # clear_session, in which case the AttributeError is deliberately ignored.
    try:
        K.clear_session()
    except AttributeError:
        pass
4f3daf9c48e96e2148c76b3072bd3ca82daeb14e | 1,355 | py | Python | app/core/tests/test_models.py | devmansurov/recipe-app-api | 7adb702ae0b75981359d3f598742a1375eee7fde | [
"MIT"
] | null | null | null | app/core/tests/test_models.py | devmansurov/recipe-app-api | 7adb702ae0b75981359d3f598742a1375eee7fde | [
"MIT"
] | null | null | null | app/core/tests/test_models.py | devmansurov/recipe-app-api | 7adb702ae0b75981359d3f598742a1375eee7fde | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
from django.test import TestCase
from django.contrib.auth import get_user_model
class ModelTest(TestCase):
def test_create_user_with_email_successful(self):
"""Test creating a new user with an email is successful"""
email = 'test@gmail.com'
password = 'secret'
user = get_user_model().objects.create_user(
email=email,
password=password
)
self.assertEqual(user.email, email)
self.assertTrue(user.check_password(password))
def test_new_user_email_normalized(self):
"""Test the email for a new user is normalized"""
email = 'test@GMAIL.COM'
user = get_user_model().objects.create_user(email, 'test456')
self.assertEqual(user.email, email.lower())
def test_new_user_invalid_email(self):
"""Test creating user with no email raises error"""
with self.assertRaises(ValueError):
get_user_model().objects.create_user(None, 'test456')
def test_create_new_superuser(self):
"""Test creating a new superuser"""
user = \
get_user_model().objects.create_superuser(
'test_superuser@gmail.com',
'secret'
)
self.assertTrue(user.is_superuser)
self.assertTrue(user.is_staff)
| 28.829787 | 69 | 0.635424 |
1b0279bb4fd9a9661da48a5261787f6c594fdc8a | 4,066 | py | Python | official/mnist/dataset.py | jieming2002/models-quiz8 | 421dc407a10444cab4bd88c25599077acca96bdb | [
"Apache-2.0"
] | 7 | 2018-04-29T03:54:00.000Z | 2021-08-17T13:27:28.000Z | official/mnist/dataset.py | jieming2002/models-quiz8 | 421dc407a10444cab4bd88c25599077acca96bdb | [
"Apache-2.0"
] | null | null | null | official/mnist/dataset.py | jieming2002/models-quiz8 | 421dc407a10444cab4bd88c25599077acca96bdb | [
"Apache-2.0"
] | 3 | 2018-10-23T01:46:50.000Z | 2019-03-19T02:51:51.000Z | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""tf.data.Dataset interface to the MNIST dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import shutil
import numpy as np
from six.moves import urllib
import tensorflow as tf
def read32(bytestream):
"""Read 4 bytes from bytestream as an unsigned 32-bit integer."""
dt = np.dtype(np.uint32).newbyteorder('>')
return np.frombuffer(bytestream.read(4), dtype=dt)[0]
def check_image_file_header(filename):
"""Validate that filename corresponds to images for the MNIST dataset."""
with tf.gfile.Open(filename, 'rb') as f:
magic = read32(f)
read32(f) # num_images, unused
rows = read32(f)
cols = read32(f)
if magic != 2051:
raise ValueError('Invalid magic number %d in MNIST file %s' % (magic,
f.name))
if rows != 28 or cols != 28:
raise ValueError(
'Invalid MNIST file %s: Expected 28x28 images, found %dx%d' %
(f.name, rows, cols))
def check_labels_file_header(filename):
"""Validate that filename corresponds to labels for the MNIST dataset."""
with tf.gfile.Open(filename, 'rb') as f:
magic = read32(f)
read32(f) # num_items, unused
if magic != 2049:
raise ValueError('Invalid magic number %d in MNIST file %s' % (magic,
f.name))
def download(directory, filename):
"""Download (and unzip) a file from the MNIST dataset if not already done."""
filepath = os.path.join(directory, filename)
if tf.gfile.Exists(filepath):
return filepath
if not tf.gfile.Exists(directory):
tf.gfile.MakeDirs(directory)
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
url = 'https://storage.googleapis.com/cvdf-datasets/mnist/' + filename + '.gz'
zipped_filepath = filepath + '.gz'
print('Downloading %s to %s' % (url, zipped_filepath))
urllib.request.urlretrieve(url, zipped_filepath)
with gzip.open(zipped_filepath, 'rb') as f_in, open(filepath, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
os.remove(zipped_filepath)
return filepath
def dataset(directory, images_file, labels_file):
"""Download and parse MNIST dataset."""
images_file = download(directory, images_file)
labels_file = download(directory, labels_file)
check_image_file_header(images_file)
check_labels_file_header(labels_file)
def decode_image(image):
# Normalize from [0, 255] to [0.0, 1.0]
image = tf.decode_raw(image, tf.uint8)
image = tf.cast(image, tf.float32)
image = tf.reshape(image, [784])
return image / 255.0
def decode_label(label):
label = tf.decode_raw(label, tf.uint8) # tf.string -> [tf.uint8]
label = tf.reshape(label, []) # label is a scalar
return tf.to_int32(label)
images = tf.data.FixedLengthRecordDataset(
images_file, 28 * 28, header_bytes=16).map(decode_image)
labels = tf.data.FixedLengthRecordDataset(
labels_file, 1, header_bytes=8).map(decode_label)
return tf.data.Dataset.zip((images, labels))
def train(directory):
"""tf.data.Dataset object for MNIST training data."""
return dataset(directory, 'train-images-idx3-ubyte',
'train-labels-idx1-ubyte')
def test(directory):
"""tf.data.Dataset object for MNIST test data."""
return dataset(directory, 't10k-images-idx3-ubyte', 't10k-labels-idx1-ubyte')
| 35.051724 | 80 | 0.686178 |
c328caacc8465f0095141c79008ed9f9f82e2122 | 3,948 | py | Python | PyInquirer/prompts/editor.py | e3rd/PyInquirer | 99a6d28d937c456526d93387fac706158f99775f | [
"MIT"
] | null | null | null | PyInquirer/prompts/editor.py | e3rd/PyInquirer | 99a6d28d937c456526d93387fac706158f99775f | [
"MIT"
] | null | null | null | PyInquirer/prompts/editor.py | e3rd/PyInquirer | 99a6d28d937c456526d93387fac706158f99775f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
`input` type question
"""
from __future__ import print_function, unicode_literals

import os
import sys

from prompt_toolkit.token import Token
from prompt_toolkit.shortcuts import create_prompt_application
from prompt_toolkit.validation import Validator, ValidationError
from prompt_toolkit.layout.lexers import SimpleLexer

from .common import default_style
# use std prompt-toolkit control
WIN = sys.platform.startswith('win')
class Editor(object):
    """Open an external text editor on a temporary file and collect the result."""

    def __init__(self, editor=None, env=None, require_save=True,
                 extension='.txt'):
        self.editor = editor              # explicit editor command; overrides env lookup
        self.env = env                    # extra environment variables for the editor process
        self.require_save = require_save  # when True, return None if the user did not save
        self.extension = extension        # suffix for the temporary file

    def get_editor(self):
        """Resolve the editor command: explicit > $VISUAL/$EDITOR > platform default."""
        if self.editor is not None:
            return self.editor
        for var in ('VISUAL', 'EDITOR'):
            candidate = os.environ.get(var)
            if candidate:
                return candidate
        if WIN:
            return 'notepad'
        for known in ('vim', 'nano'):
            if os.system('which %s >/dev/null 2>&1' % known) == 0:
                return known
        return 'vi'

    def edit_file(self, filename):
        """Run the editor on ``filename``; raise when it cannot be launched
        or exits with a non-zero status."""
        import subprocess
        editor = self.get_editor()
        env_vars = None
        if self.env:
            env_vars = os.environ.copy()
            env_vars.update(self.env)
        try:
            proc = subprocess.Popen('%s "%s"' % (editor, filename),
                                    env=env_vars, shell=True)
            if proc.wait() != 0:
                raise Exception('%s: Editing failed!' % editor)
        except OSError as e:
            raise Exception('%s: Editing failed: %s' % (editor, e))

    def edit(self, text):
        """Let the user edit ``text``; return the edited text, or None when
        ``require_save`` is set and the file was left untouched."""
        import tempfile

        text = text or ''
        if text and not text.endswith('\n'):
            text += '\n'

        fd, name = tempfile.mkstemp(prefix='editor-', suffix=self.extension)
        try:
            # Windows editors expect CRLF and a UTF-8 BOM.
            if WIN:
                encoding = 'utf-8-sig'
                text = text.replace('\n', '\r\n')
            else:
                encoding = 'utf-8'
            with os.fdopen(fd, 'wb') as handle:
                handle.write(text.encode(encoding))

            timestamp = os.path.getmtime(name)
            self.edit_file(name)
            # Unchanged mtime means the user never saved the file.
            if self.require_save and os.path.getmtime(name) == timestamp:
                return None

            with open(name, 'rb') as handle:
                edited = handle.read()
            return edited.decode('utf-8-sig').replace('\r\n', '\n')
        finally:
            os.unlink(name)
def question(message, **kwargs):
    """Build a text-input prompt application.

    :param message: the question shown to the user.
    :param kwargs: may contain ``default`` (initial value), ``validate``
        (a ``Validator`` subclass or a callable returning True / False / an
        error-message string), ``style`` and ``qmark``; anything else is
        passed through to ``create_prompt_application``.
    """
    default = kwargs.pop('default', '')
    validate_prompt = kwargs.pop('validate', None)
    if validate_prompt:
        # Bug fix: ``issubclass`` raises TypeError when handed a non-class,
        # so plain callables passed as ``validate`` used to crash here.
        # Check for a class first; callables fall through to the wrapper.
        if isinstance(validate_prompt, type) and issubclass(validate_prompt, Validator):
            kwargs['validator'] = validate_prompt()
        elif callable(validate_prompt):
            class _InputValidator(Validator):
                def validate(self, document):
                    verdict = validate_prompt(document.text)
                    if verdict != True:  # noqa: E712 -- verdict may be a message string
                        if verdict == False:
                            verdict = 'invalid input'
                        raise ValidationError(
                            message=verdict,
                            cursor_position=len(document.text))
            kwargs['validator'] = _InputValidator()

    # TODO style defaults on detail level
    kwargs['style'] = kwargs.pop('style', default_style)
    qmark = kwargs.pop('qmark', '?')

    def _get_prompt_tokens(cli):
        return [
            (Token.QuestionMark, qmark),
            (Token.Question, ' %s ' % message)
        ]

    return create_prompt_application(
        get_prompt_tokens=_get_prompt_tokens,
        lexer=SimpleLexer(Token.Answer),
        default=default,
        **kwargs
    )
cb1d42d54dff017d017d58ae2e983e58a574e1d8 | 794 | py | Python | mail_multi_website/wizard/mail_compose_message.py | brain-tec/mail-addons | 92efb62ad5c4d9843654ae3e49b120a8759ff2bf | [
"MIT"
] | null | null | null | mail_multi_website/wizard/mail_compose_message.py | brain-tec/mail-addons | 92efb62ad5c4d9843654ae3e49b120a8759ff2bf | [
"MIT"
] | 1 | 2019-03-15T14:45:46.000Z | 2019-03-15T14:45:46.000Z | mail_multi_website/wizard/mail_compose_message.py | brain-tec/mail-addons | 92efb62ad5c4d9843654ae3e49b120a8759ff2bf | [
"MIT"
] | 1 | 2021-08-28T11:18:33.000Z | 2021-08-28T11:18:33.000Z | # Copyright 2018 Ivan Yelizariev <https://it-projects.info/team/yelizariev>
# License MIT (https://opensource.org/licenses/MIT).
from odoo import api, models
from odoo.http import request
class MailComposer(models.TransientModel):
    """Mail composer that pins the current website into the context."""

    _inherit = "mail.compose.message"

    @api.model
    def create(self, vals):
        """Workaround for https://github.com/odoo/odoo/pull/26589"""
        if "website_id" not in self.env.context:
            # Prefer the website attached to the current HTTP request, if any.
            website = (
                request and hasattr(request, "website") and request.website or None
            )
            if not website:
                website = self.env["website"].get_current_website()
            if website:
                # Re-bind the recordset so downstream code sees website_id.
                self = self.with_context(website_id=website.id)
        return super(MailComposer, self).create(vals)
| 34.521739 | 83 | 0.639798 |
e854a95db23e8f335ecab2141cb6cb668d593f0a | 1,870 | py | Python | hl/hlist.py | DmitryOlshansky/hl | 5f652ee16dbd3813ad997d860b2a0a60869d5d30 | [
"MIT"
] | null | null | null | hl/hlist.py | DmitryOlshansky/hl | 5f652ee16dbd3813ad997d860b2a0a60869d5d30 | [
"MIT"
] | null | null | null | hl/hlist.py | DmitryOlshansky/hl | 5f652ee16dbd3813ad997d860b2a0a60869d5d30 | [
"MIT"
] | null | null | null | import json
from hl import search
def ensure(cond, err):
    """Raise ``Exception(err)`` unless *cond* is truthy."""
    if cond:
        return
    raise Exception(err)
def check_host(h):
    """Validate a host record: 'host' is str, 'tags' is list, 'kv' is dict."""
    expectations = (
        ('host', str, "host name must be string"),
        ('tags', list, "tags is a list"),
        ('kv', dict, "key-value pairs is object"),
    )
    for key, expected_type, message in expectations:
        ensure(type(h[key]) is expected_type, message)
class HList(object):
    """In-memory list of host records.

    Each record is a dict of the shape validated by ``check_host``:
    ``{"host": str, "tags": list, "kv": dict}``.
    """

    def __init__(self, nd_json=None):
        """Parse newline-delimited JSON: one host object per non-blank line."""
        if not nd_json:
            self.hosts = []
        else:
            self.hosts = [json.loads(line) for line in nd_json.split("\n") if line.strip() != '']
            for h in self.hosts: check_host(h)

    def __str__(self):
        # Serialize back to newline-delimited JSON, one record per line.
        return "\n".join([json.dumps(h) for h in self.hosts])

    def add(self, host):
        """Adds or replace one properly structured host entry"""
        check_host(host)
        # NOTE(review): this compares whole records, so "replace" only ever
        # overwrites an already-identical entry; matching on host['host']
        # may have been intended — confirm before changing.
        for i in range(len(self.hosts)):
            if self.hosts[i] == host:
                self.hosts[i] = host
                return
        self.hosts.append(host)

    def remove_by_query(self, query):
        """Remove hosts matching query"""
        to_remove = set(self.select(query))
        to_keep = [self.hosts[i] for i in range(len(self.hosts)) if i not in to_remove]
        self.hosts = to_keep

    def list(self, query):
        """List hosts matching query"""
        return [self.hosts[i] for i in self.select(query)]

    def select(self, query):
        """Return indices of best matching host entries"""
        if query == "":
            # Empty query matches everything.
            return range(len(self.hosts))
        if len(self.hosts) == 0:
            return []
        qt = search.terms(query)
        hts = [search.terms(h['host']) for h in self.hosts]
        scores = [search.score(qt, ht) for ht in hts]
        best = max(scores)
        if best == 0:
            # No host scored at all: no matches.
            return []
        else:
            # All hosts tied at the best score are returned.
            return [pair[0] for pair in enumerate(scores) if pair[1] == best]
2711ae5601ccf6278ba86408420b2431f45ea3ad | 4,096 | py | Python | src/modelling/LoNGAE/utils_gcn.py | 1997alireza/Movie-Casting-Problems | df555e57401ec1b120d8e9d3c2d51b1d3a070f21 | [
"MIT"
] | 3 | 2021-04-20T06:02:34.000Z | 2021-04-24T04:16:45.000Z | src/modelling/LoNGAE/utils_gcn.py | 1997alireza/Movie-Casting-Problems | df555e57401ec1b120d8e9d3c2d51b1d3a070f21 | [
"MIT"
] | null | null | null | src/modelling/LoNGAE/utils_gcn.py | 1997alireza/Movie-Casting-Problems | df555e57401ec1b120d8e9d3c2d51b1d3a070f21 | [
"MIT"
] | null | null | null | """
The MIT License
Copyright (c) 2017 Thomas Kipf
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
Modified from https://github.com/tkipf/gae to work with citation network data.
"""
import numpy as np
import scipy.sparse as sp
# Fix the RNG so the random edge splits below are reproducible.
np.random.seed(1982)
def sparse_to_tuple(sparse_mx):
    """Decompose a scipy sparse matrix into ``(coords, values, shape)``.

    ``coords`` is an (nnz, 2) array of row/col indices, ``values`` the
    nonzero data, ``shape`` the matrix shape.
    """
    coo = sparse_mx if sp.isspmatrix_coo(sparse_mx) else sparse_mx.tocoo()
    coords = np.vstack((coo.row, coo.col)).T
    return coords, coo.data, coo.shape
def split_adjacency_data(adj):
    """
    Function to build test set with 10% positive links and
    the same number of randomly sampled negative links.

    NOTE: Splits are randomized and results might slightly deviate
    from reported numbers in the paper.
    """
    # Remove diagonal elements
    adj = sp.csr_matrix(adj)
    adj = adj - sp.dia_matrix((adj.diagonal()[np.newaxis, :], [0]), shape=adj.shape)
    # Check that diag is zero:
    assert np.diag(adj.todense()).sum() == 0

    # Work on the upper triangle so each undirected edge is counted once.
    adj_triu = sp.triu(adj)
    adj_tuple = sparse_to_tuple(adj_triu)
    edges = adj_tuple[0]
    edges_all = sparse_to_tuple(adj)[0]
    # 10% of edges for test, 5% for validation.
    num_test = int(np.floor(edges.shape[0] / 10.))
    num_val = int(np.floor(edges.shape[0] / 20.))

    # Shuffle edge indices and carve out the validation/test positives.
    all_edge_idx = list(range(edges.shape[0]))
    np.random.shuffle(all_edge_idx)
    val_edge_idx = all_edge_idx[:num_val]
    test_edge_idx = all_edge_idx[num_val:(num_val + num_test)]
    test_edges = edges[test_edge_idx]
    val_edges = edges[val_edge_idx]
    train_edges = np.delete(edges, np.hstack([test_edge_idx, val_edge_idx]), axis=0)

    def ismember(a, b, tol=5):
        # Row-wise membership test of a in b, comparing coordinates rounded
        # to `tol` decimal places.
        rows_close = np.all(np.round(a - b[:, None], tol) == 0, axis=-1)
        return (np.all(np.any(rows_close, axis=-1), axis=-1) and
                np.all(np.any(rows_close, axis=0), axis=0))

    # Sample negative test edges: random node pairs that are not edges at
    # all (either direction) and not already sampled.
    test_edges_false = []
    while len(test_edges_false) < len(test_edges):
        idx_i = np.random.randint(0, adj.shape[0])
        idx_j = np.random.randint(0, adj.shape[0])
        if idx_i == idx_j:
            continue
        if ismember([idx_i, idx_j], edges_all):
            continue
        if test_edges_false:
            if ismember([idx_j, idx_i], np.array(test_edges_false)):
                continue
            if ismember([idx_i, idx_j], np.array(test_edges_false)):
                continue
        test_edges_false.append([idx_i, idx_j])

    # Sample negative validation edges, additionally avoiding the
    # train/validation positives in either direction.
    val_edges_false = []
    while len(val_edges_false) < len(val_edges):
        idx_i = np.random.randint(0, adj.shape[0])
        idx_j = np.random.randint(0, adj.shape[0])
        if idx_i == idx_j:
            continue
        if ismember([idx_i, idx_j], train_edges):
            continue
        if ismember([idx_j, idx_i], train_edges):
            continue
        if ismember([idx_i, idx_j], val_edges):
            continue
        if ismember([idx_j, idx_i], val_edges):
            continue
        if val_edges_false:
            if ismember([idx_j, idx_i], np.array(val_edges_false)):
                continue
            if ismember([idx_i, idx_j], np.array(val_edges_false)):
                continue
        val_edges_false.append([idx_i, idx_j])

    # Sanity checks: negatives are really non-edges, splits do not overlap.
    assert ~ismember(test_edges_false, edges_all)
    assert ~ismember(val_edges_false, edges_all)
    assert ~ismember(val_edges, train_edges)
    assert ~ismember(test_edges, train_edges)
    assert ~ismember(val_edges, test_edges)

    # data = np.ones(train_edges.shape[0])

    # NOTE: the edge list only contains single direction of edge!
    # Only the test positives followed by the sampled test negatives are
    # returned; the validation splits computed above are unused here.
    return np.concatenate([test_edges, np.asarray(test_edges_false)], axis=0)
| 35.617391 | 84 | 0.664063 |
6c78b646b05fca7225c4bdd2844f7e13e0034b50 | 919 | py | Python | first_app/views/unequip.py | CONSOLNY/rglk | bc3df20f32a29ecdd66534d4cd698c1601669bbd | [
"MIT"
] | null | null | null | first_app/views/unequip.py | CONSOLNY/rglk | bc3df20f32a29ecdd66534d4cd698c1601669bbd | [
"MIT"
] | 1 | 2016-06-02T04:50:48.000Z | 2017-02-06T03:16:27.000Z | first_app/views/unequip.py | CONSOLNY/rglk | bc3df20f32a29ecdd66534d4cd698c1601669bbd | [
"MIT"
] | null | null | null | from django.shortcuts import render
from django.utils.http import urlquote
from django.utils.http import urlencode
from django.http import HttpResponse
from django.db.models import Max
from math import sqrt
from first_app.models import *
from first_app.utils import *
from first_app.views.draw_world import draw_world
def unequip(request):
    """Django view: unequip the item posted as "Equipped" and redraw the world.

    Removes the item from its character slot and subtracts the item's stats
    from the (single) character's attack or defense, depending on slot type.
    """
    item_id = int(request.POST.get("Equipped", -1))
    if item_id == -1:
        # Nothing selected: just redraw.
        return draw_world(request)
    char = Character.objects.first()
    slot = SlotCharacter.objects.get(slot=InventoryCharacter.objects.get(id=item_id))
    if slot.slot_type.item == 'Weapon':
        attack_stats = slot.slot.content.item_stats
        char.attack -= attack_stats
    elif slot.slot_type.item == 'Armor':
        defense_stats = slot.slot.content.item_stats
        char.defense -= defense_stats
    # Empty the slot and persist both records.
    slot.slot = None
    slot.save()
    char.save()
    return draw_world(request)
| 34.037037 | 85 | 0.725789 |
4d46e1c65c380acb4123345dc4a12942b32d5ce3 | 6,733 | py | Python | erdantic/base.py | adsharma/erdantic | 722a98eda6c7f3f69189407a949d071467a2db97 | [
"MIT"
] | null | null | null | erdantic/base.py | adsharma/erdantic | 722a98eda6c7f3f69189407a949d071467a2db97 | [
"MIT"
] | null | null | null | erdantic/base.py | adsharma/erdantic | 722a98eda6c7f3f69189407a949d071467a2db97 | [
"MIT"
] | null | null | null | import inspect
from abc import ABC, abstractmethod
from typing import Any, Callable, Dict, Generic, List, Type, TypeVar, Union
from erdantic.etyping import Final, GenericAlias, repr_type, repr_type_with_mro
_row_template = """<tr><td>{name}</td><td port="{name}">{type_name}</td></tr>"""
FT = TypeVar("FT", bound=Any, covariant=True)
"""Type variable for a field object adapted by adapter class
[`Field`][erdantic.base.Field]."""
class Field(ABC, Generic[FT]):
    """Abstract base class that adapts a field object of a data model class to work with erdantic.
    Concrete implementations should subclass and implement abstract methods.

    Attributes:
        field (FT): Field object on a data model class associated with this adapter
    """

    @abstractmethod
    def __init__(self, field: FT):
        """Initialize Field adapter instance.

        Args:
            field: Field object to associate with this adapter instance
        """
        self.field: Final[FT] = field

    @property
    @abstractmethod
    def name(self) -> str:  # pragma: no cover
        """Name of this field on the parent data model."""

    @property
    @abstractmethod
    def type_obj(self) -> Union[type, GenericAlias]:
        """Python type object for this field."""
        pass

    @abstractmethod
    def is_many(self) -> bool:  # pragma: no cover
        """Check whether this field represents a one-to-one or one-to-many relationship.

        Returns:
            bool: True if one-to-many relationship, else False.
        """
        pass

    @abstractmethod
    def is_nullable(self) -> bool:  # pragma: no cover
        """Check whether this field is nullable, i.e., can be `None`.

        Returns:
            bool: True if nullable, else False.
        """
        pass

    @property
    def type_name(self) -> str:  # pragma: no cover
        """String representation of the Python type annotation for this field."""
        return repr_type(self.type_obj)

    def dot_row(self) -> str:
        """Returns the DOT language "HTML-like" syntax specification of a row detailing this field
        that is part of a table describing the field's parent data model. It is used as part the
        `label` attribute of data model's node in the graph's DOT representation.

        Returns:
            str: DOT language for table row
        """
        return _row_template.format(name=self.name, type_name=self.type_name)

    def __eq__(self, other: Any) -> bool:
        # Equality is delegated to the identity-based hash below.
        return isinstance(other, type(self)) and hash(self) == hash(other)

    def __hash__(self) -> int:
        # Hash on the identity of the wrapped field object.
        return id(self.field)

    def __repr__(self) -> str:
        return f"<{type(self).__name__}: '{self.name}', {self.type_name}>"
_table_template = """
<<table border="0" cellborder="1" cellspacing="0">
<tr><td port="_root" colspan="2"><b>{name}</b></td></tr>
{rows}
</table>>
"""
MT = TypeVar("MT", bound=type, covariant=True)
"""Type variable for a data model class adapted by adapter class
[`Model`][erdantic.base.Model]. Bounded by `type`."""
class Model(ABC, Generic[MT]):
    """Abstract base class that adapts a data model class to work with erdantic. Instances
    represent a node in our entity relationship diagram graph. Concrete implementations should
    subclass and implement abstract methods.

    Attributes:
        model (MT): Data model class associated with this adapter
    """

    @abstractmethod
    def __init__(self, model: MT):
        """Initialize model adapter instance.

        Args:
            model: Data model class to associate with this adapter instance
        """
        self.model: Final[MT] = model

    @property
    @abstractmethod
    def fields(self) -> List[Field]:  # pragma: no cover
        """List of fields defined on this data model."""
        pass

    @staticmethod
    @abstractmethod
    def is_model_type(obj: Any) -> bool:  # pragma: no cover
        """Check if object is the type of data model class that this model adapter works with."""
        pass

    @property
    def name(self) -> str:  # pragma: no cover
        """Name of this data model."""
        return self.model.__name__

    @property
    def docstring(self) -> str:
        """Docstring for this data model."""
        # Fully qualified name, then the class docstring if one exists.
        out = f"{self.model.__module__}.{self.model.__qualname__}"
        docstring = inspect.getdoc(self.model)
        if docstring:
            out += "\n\n" + docstring
        return out

    @property
    def key(self) -> str:
        """Human-readable unique identifier for this data model. Should be stable across
        sessions."""
        return f"{self.model.__module__}.{self.model.__qualname__}"

    def dot_label(self) -> str:
        """Returns the DOT language "HTML-like" syntax specification of a table for this data
        model. It is used as the `label` attribute of data model's node in the graph's DOT
        representation.

        Returns:
            str: DOT language for table
        """
        rows = "\n".join(field.dot_row() for field in self.fields)
        return _table_template.format(name=self.name, rows=rows).replace("\n", "")

    def __eq__(self, other) -> bool:
        # Equality is delegated to the key-based hash below.
        return isinstance(other, type(self)) and hash(self) == hash(other)

    def __hash__(self) -> int:
        return hash(self.key)

    def __lt__(self, other) -> bool:
        # Models order lexicographically by their fully qualified key.
        if not isinstance(other, Model):
            raise ValueError(
                f"Can only compare between instances of Model. Given: {repr_type_with_mro(other)}"
            )
        return self.key < other.key

    def __repr__(self) -> str:
        return f"{type(self).__name__}({self.name})"
model_adapter_registry: Dict[str, Type[Model]] = {}
"""Registry of concrete [`Model`][erdantic.base.Model] adapter subclasses. A concrete `Model`
subclass must be registered for it to be available to the diagram creation workflow."""
def register_model_adapter(type_name: str) -> Callable[[Type[Model]], Type[Model]]:
    """Create decorator to register a concrete [`Model`][erdantic.base.Model] adapter subclass
    that will be identified under the key `type_name`. A concrete `Model` subclass must be
    registered for it to be available to the diagram creation workflow.

    Args:
        type_name (str): Key used to identify concrete `Model` adapter subclass

    Returns:
        Callable[[Type[Model]], Type[Model]]: A registration decorator for a concrete `Model`
            adapter subclass
    """

    def decorator(cls: type) -> type:
        if issubclass(cls, Model):
            model_adapter_registry[type_name] = cls
            return cls
        raise ValueError("Only subclasses of Model can be registered.")

    return decorator
| 33.167488 | 98 | 0.643844 |
8d5f344c5c4f4bf52110ad4fd6e014af6edb2d68 | 1,277 | py | Python | handlers/gcast.py | BROKENCOBRA/VOICE-CHAT-DIRECT-PLAY | f549495e8ad070636df29a6f80f4adffdd9fb036 | [
"Apache-2.0"
] | null | null | null | handlers/gcast.py | BROKENCOBRA/VOICE-CHAT-DIRECT-PLAY | f549495e8ad070636df29a6f80f4adffdd9fb036 | [
"Apache-2.0"
] | null | null | null | handlers/gcast.py | BROKENCOBRA/VOICE-CHAT-DIRECT-PLAY | f549495e8ad070636df29a6f80f4adffdd9fb036 | [
"Apache-2.0"
] | null | null | null |
import asyncio
from pyrogram import Client, filters
from pyrogram.types import Message
from config import SUDO_USERS
from callsmusic.callsmusic import client as USER
@Client.on_message(filters.command(["broadcast"]))
async def broadcast(_, message: Message):
    """/broadcast — send the replied-to message text to every dialog of USER.

    Restricted to SUDO_USERS. Edits a status message with a running
    sent/failed count and reports a summary when finished.
    """
    sent = 0
    failed = 0
    if message.from_user.id not in SUDO_USERS:
        return
    else:
        wtf = await message.reply("`Starting a broadcast...`")
    if not message.reply_to_message:
        await wtf.edit("Please Reply to a Message to broadcast!")
        return
    lmao = message.reply_to_message.text
    async for dialog in USER.iter_dialogs():
        try:
            await USER.send_message(dialog.chat.id, lmao)
            sent = sent + 1
            await wtf.edit(
                f"`broadcasting...` \n\n**Sent to:** `{sent}` Chats \n**Failed in:** {failed} Chats"
            )
            # Throttle between sends (presumably to avoid Telegram flood
            # limits — confirm).
            await asyncio.sleep(3)
        except:
            # NOTE(review): bare except silently counts any error (including
            # cancellation) as a failed chat; consider `except Exception`.
            failed = failed + 1
    # await wtf.edit(f"`broadcasting...` \n\n**Sent to:** `{sent}` Chats \n**Failed in:** {failed} Chats")
    await message.reply_text(
        f"`Broadcast Finished ` \n\n**Sent to:** `{sent}` Chats \n**Failed in:** {failed} Chats"
    )
0fdaf5f6da13800f3292a9594607cb058f70e077 | 2,958 | py | Python | code_pack/geometry/obj_processor.py | DedaoLiu/Postprocessing-3D | e89822a1110f20f02b652d5a8a2ef4ec08697aac | [
"MIT"
] | null | null | null | code_pack/geometry/obj_processor.py | DedaoLiu/Postprocessing-3D | e89822a1110f20f02b652d5a8a2ef4ec08697aac | [
"MIT"
] | null | null | null | code_pack/geometry/obj_processor.py | DedaoLiu/Postprocessing-3D | e89822a1110f20f02b652d5a8a2ef4ec08697aac | [
"MIT"
] | 1 | 2020-08-27T17:07:27.000Z | 2020-08-27T17:07:27.000Z | from shutil import copyfile
from math import ceil
import numpy as np
import time
def vertex_on_surface(vertex, nodes, ap):
    """Return True when *vertex* lies on the meshed surface.

    A vertex counts as "on the surface" when its Euclidean distance to the
    nearest entry of *nodes* is below 0.1% of the element size.
    (Improvement: the dead commented-out profiling code was removed.)

    :param vertex: point coordinates [x, y, z]
    :param nodes: array-like of node coordinates, shape (n, 3)
    :param ap: analysis parameters; only ``ap['element_size']`` is read
    :return: bool
    """
    print("checking vertex", vertex)
    # Tolerance: one thousandth of the element size.
    gap = ap['element_size'] * 0.001
    vertex = np.array(vertex)
    dst = np.linalg.norm(np.subtract(nodes, vertex), axis=1)
    dst_min = np.amin(dst)
    if dst_min < gap:
        return True
    else:
        return False
def vertex_on_surface_1(vertex, nodes, ap):
    """Surface test variant that also converts *nodes* to an ndarray.

    Returns True when the closest node is nearer to *vertex* than 0.1% of
    the element size, i.e. the vertex sits on the surface.

    :param vertex: point coordinates [x, y, z]
    :param nodes: list/array of node coordinates, shape (n, 3)
    :param ap: analysis parameters; only ``ap['element_size']`` is read
    :return: bool
    """
    print("checking vertex", vertex)
    tolerance = ap['element_size'] * 0.001
    node_array = np.array(nodes)
    point = np.array(vertex)
    nearest = np.amin(np.linalg.norm(node_array - point, axis=1))
    return True if nearest < tolerance else False
def add_color(ap, surf_nodes):
    """Write a colored copy of the OBJ: surface vertices red, others black.

    Reads ``<test_dir_path><inp_name>.obj`` and writes
    ``<test_dir_path><inp_name>_c.obj``, appending per-vertex RGB to each
    ``v`` line: ``1 0 0`` when the vertex matches a surface node, else
    ``0 0 0``.
    """
    surf_nodes = np.array(surf_nodes)
    # obj_color_path = ap['test_dir_path']+ap['inp_name']+'c.obj'
    # copyfile(ap['obj_path'], obj_color_path)
    test_path = ap['test_dir_path']
    with open(test_path+ap['inp_name']+'.obj','r') as f:
        lines = f.readlines()
        f.close()
    with open(test_path+ap['inp_name']+'_c.obj','w') as f:
        for idx in range(len(lines)):
            line = lines[idx]
            if line[:2] == "v ":
                # Vertex line "v x y z": parse the three coordinates.
                vertex = line.strip("v").strip().split()
                vertex = [float(vertex[0]), float(vertex[1]), float(vertex[2])]
                if vertex_on_surface(vertex, surf_nodes, ap):
                    lines[idx]=line.strip()+" 1 0 0"+"\n"
                else:
                    lines[idx]=line.strip()+" 0 0 0"+"\n"
            else:
                # Non-vertex lines are copied through unchanged.
                pass
        for line in lines:
            f.write(line)
        f.close()
| 30.494845 | 88 | 0.588911 |
72479158c4169991579fa9823273482c564c1e60 | 1,929 | py | Python | assignment2.py | seraphinatarrant/edinburgh_coursework | 4d2ddc10204d6605abf755df263c4821bd3344cc | [
"CC0-1.0"
] | null | null | null | assignment2.py | seraphinatarrant/edinburgh_coursework | 4d2ddc10204d6605abf755df263c4821bd3344cc | [
"CC0-1.0"
] | null | null | null | assignment2.py | seraphinatarrant/edinburgh_coursework | 4d2ddc10204d6605abf755df263c4821bd3344cc | [
"CC0-1.0"
] | null | null | null | import argparse
import numpy as np
import spacy
from spacy.language import Language
from spacy.pipeline import Pipe
### GLOBALS ###
# Choices of combination function: average, sum, first, last, maxpool
COMBINATION_FUNCTION = "average"
class ContextualVectors(Pipe):
    """spaCy pipe component that hooks ``token.vector`` to transformer output.

    On call it stashes the doc's transformer alignment lengths and tensors,
    then installs :meth:`vector` as the doc's per-token vector hook. Each
    token's wordpiece vectors are combined per ``self.combination_function``.
    """

    def __init__(self, nlp):
        self._nlp = nlp
        self.combination_function = COMBINATION_FUNCTION  ### modify this here for different versions of part 3

    def __call__(self, doc):
        # Accept raw text as well as a pre-processed Doc.
        if type(doc) == str:
            doc = self._nlp(doc)
        self.lengths = doc._.trf_data.align.lengths
        self.tensors = doc._.trf_data.tensors
        doc.user_token_hooks["vector"] = self.vector
        return doc

    ### HERE is where vectors are set
    def vector(self, token):
        """Collect the wordpiece vectors for *token* and combine them."""
        # NOTE(review): this indexes the tensor by token.i + offset, which
        # assumes token and wordpiece indices line up — confirm against
        # doc._.trf_data.align semantics.
        trf_vector = []
        for len_idx in range(self.lengths[token.i]):
            try:
                trf_vector.append(self.tensors[0][0][token.i+len_idx])
            except IndexError:
                print("Error")
                print(token)
                return None
        trf_vector = np.array(trf_vector)
        return self.combine_vectors(trf_vector)

    def combine_vectors(self, trf_vector):
        """Reduce an (n_pieces, dim) array to one vector per the configured mode."""
        if self.combination_function == "first":
            return trf_vector[0]
        if self.combination_function == "last":
            return trf_vector[-1]
        if self.combination_function == "maxpool":
            # BUG FIX: np.maximum is a two-operand ufunc and raises TypeError
            # when called as np.maximum(arr, axis=0); element-wise max over
            # the pieces axis is np.max(..., axis=0).
            return np.max(trf_vector, axis=0)
        if self.combination_function == "average":
            return np.average(trf_vector, axis=0)
        if self.combination_function == "sum":
            return np.sum(trf_vector, axis=0)
@Language.factory("trf_vector_hook", assigns=["doc.user_token_hooks"])
def create_contextual_hook(nlp, name):
    """spaCy factory that builds the ContextualVectors component."""
    return ContextualVectors(nlp)
def setup_argparse():
    """Build the (currently option-less) CLI parser and parse sys.argv."""
    parser = argparse.ArgumentParser()
    return parser.parse_args()
if __name__ == "__main__":
    # Script entry point: currently only parses command-line arguments.
    args = setup_argparse()
| 29.676923 | 110 | 0.639191 |
4c3d911ece42b44ade148f99eb3aa2e58eb57f71 | 1,088 | py | Python | cava/nightwatch/indent.py | arthurp/lapis2 | 1c0644cf1eeb2ddc5a735ca002d561e308ed6014 | [
"BSD-2-Clause"
] | null | null | null | cava/nightwatch/indent.py | arthurp/lapis2 | 1c0644cf1eeb2ddc5a735ca002d561e308ed6014 | [
"BSD-2-Clause"
] | null | null | null | cava/nightwatch/indent.py | arthurp/lapis2 | 1c0644cf1eeb2ddc5a735ca002d561e308ed6014 | [
"BSD-2-Clause"
] | null | null | null | import subprocess
# GNU indent command-line flags defining the project's C formatting style;
# see indent(1) for the meaning of each flag.
_indent_options = ("-nbad -bap -bc -bbo -hnl -br -brs -c50 -cd50 -ncdb -ce -ci4 -cli0 -d0 -di1 -nfc1 "
                   "-i4 -ip0 -l120 -nlp -npcs -nprs -psl -sai -saf -saw -ncs -nsc -sob -nfca -cp50 -ss "
                   "-ts8 -il1 -cbi0 -nut".strip().split())
def indent_c(code):
    """Format C source *code* (str or bytes) with GNU indent.

    Falls back to returning *code* unchanged when the ``indent`` binary is
    not installed.
    """
    is_text = hasattr(code, "encode")
    try:
        proc = subprocess.Popen(
            ["indent"] + _indent_options,
            encoding="utf-8" if is_text else None,
            stdout=subprocess.PIPE,
            stdin=subprocess.PIPE,
        )
    except FileNotFoundError:
        # Couldn't find indent, to just continue with raw code
        return code
    with proc:
        formatted, _ = proc.communicate(code)
    return formatted
def write_file_c(filename, data, indent=True, filename_prefix=""):
    """Write C source *data* to ``filename_prefix + filename``.

    Runs the text through :func:`indent_c` first unless *indent* is False.
    Text opens the file in "w" mode, bytes in "wb".
    """
    payload = indent_c(data) if indent else data
    mode = "w" if hasattr(data, "encode") else "wb"
    with open(filename_prefix + filename, mode) as out:
        out.write(payload)
def write_file_py(filename, data, filename_prefix=""):
    """Write Python source *data* (str or bytes) to filename_prefix+filename.

    Bytes payloads are written in binary mode; no reformatting is applied.
    """
    mode = "w" if hasattr(data, "encode") else "wb"
    with open(filename_prefix + filename, mode) as handle:
        handle.write(data)
| 40.296296 | 114 | 0.617647 |
133aad08134d7a9512553a9292d46e1c2d7253e7 | 260,797 | py | Python | instances/passenger_demand/pas-20210422-1717-int14000000000000001e/39.py | LHcau/scheduling-shared-passenger-and-freight-transport-on-a-fixed-infrastructure | bba1e6af5bc8d9deaa2dc3b83f6fe9ddf15d2a11 | [
"BSD-3-Clause"
] | null | null | null | instances/passenger_demand/pas-20210422-1717-int14000000000000001e/39.py | LHcau/scheduling-shared-passenger-and-freight-transport-on-a-fixed-infrastructure | bba1e6af5bc8d9deaa2dc3b83f6fe9ddf15d2a11 | [
"BSD-3-Clause"
] | null | null | null | instances/passenger_demand/pas-20210422-1717-int14000000000000001e/39.py | LHcau/scheduling-shared-passenger-and-freight-transport-on-a-fixed-infrastructure | bba1e6af5bc8d9deaa2dc3b83f6fe9ddf15d2a11 | [
"BSD-3-Clause"
] | null | null | null |
"""
PASSENGERS
"""
numPassengers = 26581
passenger_arriving = (
(7, 9, 6, 6, 5, 3, 4, 0, 7, 0, 0, 2, 0, 10, 9, 1, 7, 8, 3, 1, 2, 1, 3, 2, 1, 0), # 0
(8, 10, 11, 8, 8, 2, 1, 1, 4, 1, 1, 1, 0, 5, 7, 3, 6, 5, 4, 4, 2, 1, 1, 1, 2, 0), # 1
(6, 7, 5, 8, 6, 4, 3, 3, 3, 1, 0, 0, 0, 13, 4, 3, 4, 3, 6, 3, 2, 4, 0, 0, 0, 0), # 2
(9, 9, 8, 8, 8, 1, 4, 4, 4, 3, 2, 2, 0, 5, 3, 9, 6, 7, 9, 8, 4, 4, 0, 0, 0, 0), # 3
(15, 12, 6, 13, 7, 1, 3, 3, 6, 3, 1, 0, 0, 15, 13, 6, 8, 6, 3, 2, 4, 7, 5, 3, 0, 0), # 4
(12, 9, 5, 5, 12, 3, 3, 2, 0, 1, 1, 0, 0, 8, 7, 3, 2, 6, 3, 7, 1, 5, 2, 2, 1, 0), # 5
(10, 6, 4, 11, 8, 3, 2, 2, 3, 3, 0, 0, 0, 12, 10, 9, 4, 9, 3, 5, 5, 4, 3, 3, 1, 0), # 6
(8, 9, 5, 6, 7, 4, 0, 0, 4, 3, 2, 0, 0, 11, 9, 10, 6, 4, 1, 4, 2, 3, 3, 0, 2, 0), # 7
(8, 14, 6, 14, 8, 8, 5, 4, 7, 2, 0, 0, 0, 13, 19, 8, 6, 6, 4, 3, 1, 3, 2, 3, 1, 0), # 8
(9, 8, 8, 12, 6, 2, 4, 5, 5, 1, 3, 1, 0, 10, 16, 9, 6, 13, 4, 1, 3, 3, 4, 1, 0, 0), # 9
(11, 5, 8, 18, 9, 4, 2, 3, 4, 2, 0, 0, 0, 10, 9, 5, 3, 6, 7, 6, 2, 6, 4, 0, 3, 0), # 10
(6, 7, 13, 8, 7, 10, 4, 3, 3, 2, 2, 0, 0, 9, 10, 4, 6, 7, 3, 3, 5, 6, 2, 2, 2, 0), # 11
(6, 11, 14, 9, 9, 1, 1, 2, 5, 5, 2, 0, 0, 9, 11, 8, 10, 9, 9, 6, 7, 5, 2, 2, 2, 0), # 12
(11, 11, 16, 13, 8, 4, 6, 4, 4, 2, 2, 0, 0, 13, 20, 13, 5, 17, 6, 10, 3, 7, 4, 0, 0, 0), # 13
(16, 11, 13, 9, 10, 4, 5, 7, 7, 3, 1, 1, 0, 18, 11, 11, 2, 6, 12, 4, 6, 8, 2, 4, 2, 0), # 14
(11, 13, 12, 20, 13, 2, 6, 6, 5, 4, 2, 0, 0, 15, 14, 12, 5, 15, 10, 6, 7, 5, 1, 0, 1, 0), # 15
(8, 16, 11, 12, 11, 4, 3, 9, 11, 3, 2, 0, 0, 10, 13, 6, 9, 16, 7, 10, 2, 6, 3, 0, 4, 0), # 16
(11, 8, 14, 10, 11, 3, 8, 7, 6, 1, 0, 1, 0, 12, 11, 9, 9, 7, 8, 4, 8, 7, 5, 0, 0, 0), # 17
(20, 13, 9, 6, 12, 5, 9, 3, 12, 7, 3, 0, 0, 15, 10, 12, 11, 4, 9, 6, 2, 5, 6, 3, 2, 0), # 18
(9, 11, 8, 13, 6, 8, 3, 2, 5, 1, 0, 1, 0, 18, 11, 11, 5, 12, 6, 8, 2, 7, 0, 2, 1, 0), # 19
(16, 12, 8, 12, 2, 4, 3, 7, 5, 1, 2, 0, 0, 14, 7, 5, 6, 10, 5, 8, 3, 3, 4, 0, 0, 0), # 20
(17, 17, 16, 12, 8, 4, 4, 7, 4, 3, 2, 2, 0, 12, 19, 11, 15, 9, 4, 4, 5, 9, 3, 3, 1, 0), # 21
(18, 18, 13, 7, 8, 6, 3, 4, 6, 0, 4, 1, 0, 19, 13, 14, 8, 5, 7, 6, 7, 8, 3, 2, 1, 0), # 22
(7, 15, 10, 19, 10, 5, 4, 6, 2, 0, 3, 0, 0, 19, 16, 12, 7, 13, 9, 5, 5, 2, 5, 4, 1, 0), # 23
(16, 10, 13, 12, 10, 6, 4, 2, 5, 0, 0, 1, 0, 11, 15, 16, 12, 14, 11, 7, 3, 4, 5, 3, 1, 0), # 24
(12, 15, 15, 9, 9, 6, 6, 12, 4, 0, 3, 0, 0, 10, 11, 11, 8, 8, 5, 2, 6, 6, 4, 5, 3, 0), # 25
(16, 19, 16, 13, 12, 6, 6, 4, 6, 6, 3, 0, 0, 17, 12, 9, 6, 13, 7, 5, 6, 5, 3, 2, 0, 0), # 26
(16, 17, 8, 6, 14, 10, 3, 15, 5, 2, 1, 1, 0, 7, 10, 10, 7, 13, 7, 5, 5, 4, 3, 4, 1, 0), # 27
(12, 25, 14, 10, 15, 5, 8, 2, 8, 3, 1, 0, 0, 13, 8, 10, 5, 12, 8, 4, 5, 3, 2, 3, 1, 0), # 28
(15, 16, 15, 10, 10, 2, 8, 3, 9, 1, 3, 2, 0, 9, 17, 10, 3, 11, 8, 4, 2, 9, 1, 5, 1, 0), # 29
(23, 13, 16, 15, 9, 0, 4, 3, 3, 2, 0, 2, 0, 23, 12, 5, 8, 6, 10, 11, 4, 7, 6, 3, 1, 0), # 30
(16, 13, 19, 15, 8, 3, 10, 7, 12, 3, 0, 0, 0, 12, 18, 8, 7, 9, 11, 7, 4, 4, 5, 1, 0, 0), # 31
(18, 15, 7, 9, 12, 4, 3, 4, 5, 7, 1, 1, 0, 11, 13, 4, 10, 8, 9, 6, 4, 1, 4, 3, 0, 0), # 32
(17, 12, 17, 16, 10, 7, 8, 7, 3, 2, 2, 1, 0, 8, 15, 9, 5, 13, 10, 4, 2, 8, 4, 5, 1, 0), # 33
(18, 9, 18, 10, 8, 6, 11, 9, 4, 3, 0, 1, 0, 9, 18, 11, 12, 11, 9, 6, 1, 1, 1, 1, 1, 0), # 34
(15, 8, 16, 14, 14, 9, 3, 6, 5, 2, 2, 1, 0, 12, 14, 10, 7, 16, 5, 3, 5, 8, 5, 1, 1, 0), # 35
(9, 9, 11, 9, 15, 10, 3, 4, 4, 3, 1, 0, 0, 12, 15, 10, 8, 9, 3, 6, 1, 4, 4, 2, 0, 0), # 36
(25, 7, 12, 14, 5, 7, 5, 8, 7, 2, 1, 4, 0, 14, 12, 7, 6, 11, 5, 6, 2, 5, 7, 2, 2, 0), # 37
(14, 13, 13, 14, 9, 4, 5, 6, 2, 0, 2, 1, 0, 15, 9, 14, 7, 9, 2, 3, 7, 5, 5, 1, 1, 0), # 38
(7, 11, 16, 17, 13, 11, 5, 6, 7, 3, 1, 1, 0, 25, 8, 13, 9, 4, 3, 2, 1, 6, 2, 3, 2, 0), # 39
(21, 15, 13, 13, 8, 4, 5, 7, 3, 2, 3, 1, 0, 9, 11, 9, 7, 11, 8, 5, 5, 5, 4, 3, 1, 0), # 40
(20, 15, 8, 21, 7, 5, 3, 6, 5, 2, 1, 3, 0, 17, 5, 12, 9, 9, 4, 5, 2, 5, 3, 1, 5, 0), # 41
(11, 13, 11, 15, 11, 4, 5, 4, 3, 1, 2, 0, 0, 17, 13, 7, 9, 7, 4, 4, 5, 3, 6, 4, 0, 0), # 42
(13, 17, 13, 15, 6, 5, 7, 7, 5, 4, 5, 2, 0, 20, 8, 15, 6, 6, 7, 6, 4, 3, 6, 2, 3, 0), # 43
(17, 14, 18, 11, 15, 4, 5, 3, 14, 0, 1, 1, 0, 14, 10, 11, 6, 8, 15, 7, 3, 7, 4, 1, 3, 0), # 44
(20, 14, 10, 14, 9, 4, 8, 5, 4, 1, 1, 1, 0, 14, 13, 14, 3, 13, 8, 6, 1, 6, 3, 2, 1, 0), # 45
(15, 14, 12, 17, 13, 3, 3, 4, 6, 3, 1, 0, 0, 12, 14, 16, 9, 13, 6, 4, 4, 10, 3, 0, 1, 0), # 46
(20, 18, 9, 23, 14, 5, 10, 6, 12, 0, 0, 0, 0, 18, 11, 10, 6, 16, 5, 7, 5, 5, 3, 2, 0, 0), # 47
(14, 9, 11, 19, 11, 7, 7, 6, 6, 2, 1, 1, 0, 15, 13, 7, 8, 9, 9, 5, 5, 6, 3, 1, 5, 0), # 48
(12, 8, 13, 10, 13, 5, 10, 4, 2, 3, 2, 3, 0, 15, 14, 6, 9, 12, 9, 4, 5, 6, 3, 3, 2, 0), # 49
(15, 16, 7, 9, 11, 2, 3, 4, 7, 2, 1, 2, 0, 21, 14, 12, 6, 14, 4, 5, 1, 5, 8, 6, 2, 0), # 50
(18, 12, 12, 12, 10, 8, 4, 4, 2, 3, 5, 2, 0, 21, 16, 11, 10, 15, 9, 7, 5, 5, 3, 4, 1, 0), # 51
(15, 12, 14, 11, 12, 6, 10, 2, 2, 1, 0, 0, 0, 8, 7, 8, 7, 6, 5, 5, 2, 3, 2, 0, 1, 0), # 52
(10, 14, 12, 21, 8, 7, 4, 4, 6, 4, 1, 3, 0, 20, 10, 9, 11, 16, 13, 4, 6, 6, 6, 2, 0, 0), # 53
(17, 11, 12, 11, 12, 3, 6, 5, 4, 2, 1, 1, 0, 13, 17, 14, 7, 13, 3, 11, 3, 5, 3, 0, 1, 0), # 54
(18, 20, 18, 10, 13, 5, 4, 5, 3, 1, 3, 2, 0, 20, 14, 6, 9, 4, 7, 5, 2, 6, 3, 2, 0, 0), # 55
(16, 16, 3, 18, 17, 3, 7, 6, 8, 1, 1, 0, 0, 20, 12, 12, 13, 2, 7, 6, 8, 3, 6, 3, 0, 0), # 56
(14, 14, 14, 11, 10, 4, 7, 4, 7, 3, 0, 0, 0, 12, 16, 11, 8, 13, 4, 7, 4, 7, 3, 3, 2, 0), # 57
(20, 14, 15, 13, 10, 5, 6, 5, 3, 1, 3, 0, 0, 14, 11, 2, 3, 13, 6, 5, 0, 4, 4, 4, 0, 0), # 58
(14, 15, 9, 14, 13, 5, 3, 5, 3, 1, 2, 1, 0, 13, 11, 8, 9, 15, 8, 7, 4, 3, 3, 3, 3, 0), # 59
(11, 14, 8, 10, 12, 5, 9, 3, 8, 3, 2, 0, 0, 11, 9, 8, 6, 9, 5, 8, 4, 3, 1, 1, 1, 0), # 60
(15, 10, 8, 10, 7, 10, 8, 5, 2, 3, 2, 0, 0, 9, 10, 11, 4, 10, 4, 2, 2, 5, 5, 0, 3, 0), # 61
(14, 21, 12, 13, 6, 7, 3, 2, 7, 0, 2, 1, 0, 13, 13, 4, 12, 12, 6, 5, 3, 8, 4, 1, 2, 0), # 62
(12, 11, 7, 9, 7, 6, 12, 5, 4, 3, 2, 0, 0, 7, 11, 8, 9, 9, 1, 6, 4, 5, 5, 2, 1, 0), # 63
(20, 11, 13, 8, 13, 3, 5, 3, 3, 3, 2, 1, 0, 16, 13, 10, 5, 8, 3, 4, 1, 4, 2, 2, 0, 0), # 64
(11, 14, 16, 13, 9, 4, 7, 3, 8, 3, 2, 0, 0, 21, 14, 7, 7, 12, 5, 3, 2, 7, 3, 1, 1, 0), # 65
(18, 7, 19, 12, 7, 5, 8, 7, 5, 2, 6, 1, 0, 24, 13, 9, 10, 19, 3, 9, 4, 6, 6, 1, 0, 0), # 66
(15, 11, 16, 12, 5, 4, 4, 6, 10, 1, 4, 0, 0, 11, 10, 8, 6, 16, 5, 3, 3, 5, 5, 3, 0, 0), # 67
(8, 11, 12, 13, 7, 6, 3, 6, 5, 3, 0, 1, 0, 8, 10, 11, 10, 7, 5, 7, 1, 3, 5, 2, 2, 0), # 68
(19, 10, 15, 10, 14, 7, 7, 1, 4, 2, 3, 2, 0, 16, 16, 4, 12, 15, 3, 5, 2, 5, 7, 2, 1, 0), # 69
(22, 21, 7, 15, 10, 8, 6, 3, 3, 2, 2, 2, 0, 17, 16, 8, 3, 8, 8, 3, 4, 12, 7, 4, 3, 0), # 70
(15, 14, 8, 11, 9, 8, 4, 9, 7, 1, 2, 1, 0, 12, 14, 12, 10, 12, 4, 3, 0, 5, 6, 1, 0, 0), # 71
(16, 12, 9, 12, 12, 6, 5, 8, 6, 2, 1, 2, 0, 14, 13, 7, 10, 14, 4, 4, 5, 4, 2, 3, 3, 0), # 72
(16, 8, 19, 15, 13, 5, 5, 2, 10, 2, 6, 1, 0, 18, 8, 11, 6, 8, 4, 6, 4, 3, 4, 3, 4, 0), # 73
(18, 15, 12, 15, 11, 3, 6, 4, 2, 3, 2, 0, 0, 14, 12, 8, 6, 14, 4, 4, 2, 5, 3, 2, 1, 0), # 74
(17, 13, 15, 8, 7, 6, 11, 4, 6, 3, 3, 1, 0, 12, 13, 11, 11, 7, 3, 4, 1, 11, 2, 4, 1, 0), # 75
(6, 14, 9, 12, 9, 4, 2, 0, 4, 2, 1, 1, 0, 10, 17, 6, 7, 11, 7, 6, 3, 8, 5, 6, 0, 0), # 76
(20, 13, 11, 14, 15, 9, 9, 0, 4, 3, 0, 1, 0, 11, 10, 11, 7, 2, 4, 2, 4, 4, 4, 2, 1, 0), # 77
(12, 7, 17, 14, 13, 6, 5, 5, 5, 1, 2, 3, 0, 11, 16, 9, 6, 7, 4, 6, 2, 5, 6, 5, 0, 0), # 78
(21, 12, 6, 9, 13, 10, 5, 4, 6, 4, 2, 0, 0, 14, 11, 8, 9, 9, 5, 3, 2, 4, 3, 0, 1, 0), # 79
(16, 13, 10, 18, 14, 9, 2, 6, 7, 1, 0, 4, 0, 14, 13, 9, 9, 19, 2, 7, 4, 5, 3, 3, 0, 0), # 80
(13, 12, 11, 9, 13, 6, 3, 2, 4, 3, 0, 3, 0, 12, 14, 10, 5, 13, 5, 5, 7, 7, 3, 4, 0, 0), # 81
(19, 15, 10, 11, 12, 7, 5, 2, 3, 2, 2, 2, 0, 16, 11, 8, 4, 10, 7, 4, 2, 9, 1, 3, 3, 0), # 82
(20, 14, 9, 16, 10, 2, 6, 3, 5, 2, 5, 2, 0, 22, 10, 7, 9, 12, 4, 6, 3, 7, 5, 1, 1, 0), # 83
(12, 14, 15, 9, 5, 4, 6, 5, 7, 3, 1, 0, 0, 14, 11, 11, 7, 6, 5, 5, 4, 5, 3, 0, 2, 0), # 84
(14, 10, 10, 14, 12, 6, 7, 4, 6, 1, 0, 0, 0, 12, 9, 10, 8, 14, 4, 9, 4, 6, 1, 2, 1, 0), # 85
(15, 15, 11, 10, 10, 9, 7, 3, 4, 3, 3, 1, 0, 15, 10, 8, 3, 18, 7, 3, 7, 5, 3, 4, 3, 0), # 86
(9, 15, 13, 12, 9, 3, 5, 3, 8, 0, 1, 1, 0, 12, 12, 7, 10, 17, 8, 3, 1, 2, 3, 3, 0, 0), # 87
(10, 14, 16, 10, 14, 7, 3, 3, 3, 2, 1, 2, 0, 18, 9, 13, 3, 12, 3, 4, 2, 6, 6, 3, 2, 0), # 88
(26, 9, 19, 15, 7, 5, 7, 3, 2, 1, 1, 0, 0, 13, 14, 13, 8, 14, 6, 6, 4, 3, 4, 3, 1, 0), # 89
(11, 12, 13, 16, 9, 6, 4, 1, 6, 2, 0, 2, 0, 15, 9, 11, 2, 7, 8, 2, 2, 9, 5, 2, 3, 0), # 90
(11, 13, 12, 14, 9, 5, 4, 2, 1, 1, 1, 0, 0, 14, 10, 8, 4, 9, 6, 4, 4, 5, 5, 3, 2, 0), # 91
(6, 13, 3, 8, 11, 5, 4, 3, 5, 1, 3, 0, 0, 10, 16, 10, 7, 8, 8, 6, 4, 7, 3, 2, 3, 0), # 92
(15, 13, 11, 12, 5, 5, 5, 2, 3, 3, 1, 1, 0, 15, 13, 9, 9, 9, 6, 8, 7, 9, 3, 1, 2, 0), # 93
(12, 12, 5, 10, 6, 3, 6, 0, 5, 2, 0, 0, 0, 11, 11, 11, 5, 6, 9, 4, 1, 8, 4, 1, 0, 0), # 94
(16, 10, 10, 12, 12, 10, 6, 8, 2, 1, 0, 2, 0, 7, 11, 4, 5, 12, 5, 8, 2, 4, 6, 2, 1, 0), # 95
(11, 11, 11, 13, 6, 3, 6, 3, 5, 0, 2, 4, 0, 16, 13, 8, 7, 17, 6, 2, 4, 2, 6, 2, 1, 0), # 96
(11, 11, 11, 10, 11, 2, 7, 6, 8, 2, 1, 1, 0, 15, 7, 7, 5, 8, 4, 6, 2, 4, 8, 0, 0, 0), # 97
(21, 9, 10, 14, 12, 6, 2, 1, 5, 1, 0, 1, 0, 18, 10, 11, 2, 14, 4, 3, 4, 4, 2, 1, 0, 0), # 98
(11, 9, 8, 12, 10, 4, 4, 3, 4, 3, 1, 0, 0, 12, 10, 8, 7, 15, 10, 7, 2, 9, 6, 3, 3, 0), # 99
(11, 14, 11, 8, 4, 7, 3, 4, 9, 2, 3, 0, 0, 17, 12, 8, 10, 10, 2, 4, 2, 3, 4, 0, 0, 0), # 100
(11, 11, 10, 13, 13, 6, 9, 1, 8, 5, 1, 4, 0, 16, 11, 10, 6, 12, 10, 5, 1, 4, 4, 4, 2, 0), # 101
(16, 5, 14, 12, 21, 7, 8, 6, 4, 4, 1, 0, 0, 16, 9, 5, 5, 8, 4, 4, 1, 4, 3, 0, 0, 0), # 102
(17, 12, 16, 7, 16, 3, 3, 3, 9, 2, 1, 1, 0, 26, 10, 7, 3, 9, 2, 2, 3, 4, 6, 4, 2, 0), # 103
(12, 11, 17, 15, 11, 3, 2, 5, 5, 3, 2, 0, 0, 14, 13, 9, 8, 8, 4, 3, 3, 6, 2, 3, 3, 0), # 104
(12, 18, 8, 16, 12, 0, 4, 4, 2, 1, 1, 2, 0, 14, 9, 10, 6, 9, 8, 6, 8, 5, 4, 5, 0, 0), # 105
(13, 12, 9, 16, 5, 3, 9, 2, 5, 5, 0, 3, 0, 19, 8, 6, 5, 7, 3, 2, 10, 4, 3, 2, 2, 0), # 106
(12, 11, 15, 12, 8, 6, 6, 3, 1, 3, 2, 1, 0, 11, 11, 8, 6, 7, 2, 6, 5, 6, 1, 1, 0, 0), # 107
(9, 6, 7, 14, 10, 5, 3, 2, 12, 1, 1, 0, 0, 9, 5, 8, 5, 10, 7, 2, 5, 7, 5, 1, 0, 0), # 108
(11, 7, 10, 9, 13, 3, 5, 5, 8, 0, 2, 1, 0, 14, 12, 10, 5, 11, 9, 5, 5, 4, 2, 2, 1, 0), # 109
(11, 11, 14, 14, 6, 6, 6, 3, 3, 1, 1, 3, 0, 13, 10, 10, 4, 9, 2, 4, 4, 3, 6, 1, 1, 0), # 110
(14, 8, 13, 7, 12, 2, 2, 3, 3, 1, 1, 1, 0, 13, 13, 17, 6, 10, 4, 4, 5, 4, 5, 2, 0, 0), # 111
(12, 10, 15, 23, 12, 1, 6, 3, 5, 0, 1, 0, 0, 11, 6, 5, 11, 12, 5, 3, 2, 2, 4, 5, 0, 0), # 112
(11, 9, 9, 7, 10, 3, 2, 4, 4, 4, 1, 1, 0, 6, 10, 3, 12, 8, 6, 1, 7, 4, 4, 1, 2, 0), # 113
(10, 12, 9, 10, 7, 3, 4, 2, 4, 2, 0, 1, 0, 10, 9, 6, 8, 16, 4, 6, 2, 2, 4, 7, 0, 0), # 114
(10, 9, 5, 13, 15, 4, 2, 3, 2, 1, 4, 1, 0, 8, 10, 10, 6, 13, 4, 7, 3, 5, 2, 2, 1, 0), # 115
(16, 12, 4, 16, 5, 3, 1, 5, 6, 2, 2, 2, 0, 9, 12, 6, 7, 10, 6, 4, 5, 8, 6, 2, 2, 0), # 116
(17, 13, 12, 7, 7, 8, 4, 2, 4, 2, 2, 2, 0, 11, 11, 3, 6, 9, 4, 3, 1, 4, 5, 4, 0, 0), # 117
(12, 3, 8, 8, 6, 3, 1, 6, 6, 4, 0, 1, 0, 16, 11, 7, 8, 12, 2, 4, 3, 5, 2, 3, 2, 0), # 118
(13, 12, 10, 7, 12, 6, 8, 0, 3, 2, 2, 3, 0, 9, 13, 11, 6, 9, 2, 8, 4, 5, 3, 2, 1, 0), # 119
(8, 10, 7, 11, 16, 3, 3, 2, 2, 3, 3, 2, 0, 8, 8, 2, 6, 18, 6, 4, 5, 6, 6, 1, 1, 0), # 120
(16, 11, 16, 11, 7, 2, 3, 6, 3, 1, 1, 0, 0, 14, 14, 6, 6, 9, 5, 3, 4, 3, 3, 3, 4, 0), # 121
(14, 10, 9, 6, 14, 6, 4, 5, 6, 2, 2, 3, 0, 16, 10, 10, 8, 9, 3, 5, 4, 2, 5, 2, 1, 0), # 122
(17, 11, 18, 10, 6, 2, 3, 3, 1, 2, 1, 1, 0, 11, 9, 8, 3, 11, 5, 4, 3, 3, 5, 0, 1, 0), # 123
(14, 8, 6, 10, 6, 11, 6, 2, 6, 0, 0, 0, 0, 8, 9, 9, 4, 9, 2, 3, 5, 7, 2, 3, 0, 0), # 124
(7, 8, 10, 12, 9, 6, 2, 2, 4, 0, 3, 0, 0, 12, 12, 11, 8, 15, 4, 5, 7, 2, 3, 2, 2, 0), # 125
(10, 11, 6, 11, 4, 6, 5, 3, 4, 0, 4, 0, 0, 7, 13, 9, 8, 6, 4, 2, 5, 0, 4, 2, 1, 0), # 126
(9, 7, 10, 12, 11, 2, 2, 3, 5, 3, 2, 2, 0, 7, 9, 7, 6, 9, 6, 1, 3, 5, 2, 0, 1, 0), # 127
(14, 11, 16, 3, 5, 4, 5, 2, 6, 4, 1, 1, 0, 19, 15, 10, 1, 19, 5, 8, 4, 10, 2, 1, 0, 0), # 128
(8, 3, 16, 10, 3, 4, 8, 4, 4, 1, 3, 2, 0, 6, 11, 18, 3, 10, 5, 3, 3, 8, 2, 0, 0, 0), # 129
(11, 9, 11, 6, 11, 4, 4, 4, 4, 3, 2, 0, 0, 12, 8, 10, 10, 4, 4, 4, 3, 5, 7, 3, 0, 0), # 130
(14, 9, 10, 11, 9, 6, 2, 3, 5, 1, 3, 0, 0, 10, 8, 8, 11, 15, 4, 8, 4, 2, 5, 2, 0, 0), # 131
(11, 2, 12, 10, 11, 8, 2, 5, 7, 4, 1, 2, 0, 12, 10, 7, 6, 8, 2, 4, 4, 5, 1, 1, 1, 0), # 132
(9, 5, 10, 17, 13, 7, 3, 4, 7, 0, 2, 0, 0, 15, 14, 8, 6, 17, 4, 3, 2, 4, 1, 0, 1, 0), # 133
(11, 13, 11, 8, 7, 6, 6, 2, 3, 3, 1, 0, 0, 11, 10, 7, 7, 4, 6, 1, 2, 4, 5, 2, 2, 0), # 134
(8, 12, 21, 5, 11, 6, 6, 6, 8, 4, 4, 2, 0, 10, 6, 12, 4, 12, 4, 2, 5, 5, 4, 2, 0, 0), # 135
(16, 8, 8, 15, 5, 3, 8, 2, 7, 0, 0, 2, 0, 14, 6, 9, 4, 11, 1, 4, 3, 5, 4, 2, 1, 0), # 136
(11, 10, 12, 14, 11, 2, 3, 6, 4, 2, 0, 0, 0, 16, 10, 7, 5, 9, 2, 3, 2, 3, 4, 3, 0, 0), # 137
(17, 6, 7, 6, 9, 4, 2, 2, 5, 0, 3, 1, 0, 8, 12, 7, 7, 10, 4, 2, 3, 2, 2, 1, 2, 0), # 138
(9, 8, 10, 11, 9, 6, 1, 3, 10, 3, 1, 0, 0, 11, 14, 3, 4, 11, 6, 4, 2, 4, 3, 2, 0, 0), # 139
(12, 9, 8, 12, 7, 2, 6, 5, 10, 1, 1, 1, 0, 11, 11, 9, 3, 11, 2, 9, 3, 2, 1, 3, 1, 0), # 140
(9, 7, 11, 10, 7, 7, 2, 9, 5, 1, 1, 0, 0, 13, 5, 8, 6, 7, 7, 2, 5, 5, 6, 1, 2, 0), # 141
(7, 4, 12, 10, 7, 4, 2, 3, 3, 0, 2, 2, 0, 14, 12, 13, 7, 11, 4, 3, 3, 3, 7, 4, 1, 0), # 142
(9, 4, 5, 9, 10, 1, 4, 3, 3, 1, 1, 1, 0, 23, 5, 10, 6, 4, 6, 2, 7, 5, 1, 2, 0, 0), # 143
(11, 5, 9, 10, 8, 6, 5, 2, 5, 1, 2, 0, 0, 15, 12, 3, 9, 9, 5, 4, 3, 3, 2, 3, 0, 0), # 144
(13, 11, 5, 16, 8, 5, 3, 1, 5, 2, 0, 0, 0, 11, 7, 6, 4, 12, 1, 8, 0, 2, 0, 2, 0, 0), # 145
(10, 11, 3, 10, 12, 5, 2, 2, 6, 0, 2, 1, 0, 14, 5, 11, 2, 12, 6, 3, 1, 11, 5, 1, 1, 0), # 146
(12, 11, 7, 11, 7, 3, 2, 2, 3, 1, 0, 0, 0, 8, 17, 9, 3, 12, 7, 9, 0, 3, 6, 0, 2, 0), # 147
(8, 9, 10, 10, 12, 5, 3, 2, 3, 0, 1, 0, 0, 11, 14, 6, 4, 10, 5, 2, 2, 3, 2, 2, 1, 0), # 148
(8, 7, 10, 14, 14, 3, 4, 3, 5, 1, 0, 1, 0, 9, 7, 8, 8, 11, 8, 9, 1, 5, 3, 2, 1, 0), # 149
(11, 9, 6, 11, 6, 4, 4, 2, 4, 2, 1, 0, 0, 18, 12, 9, 3, 4, 4, 0, 4, 2, 4, 1, 1, 0), # 150
(14, 5, 11, 16, 7, 4, 4, 2, 4, 1, 2, 0, 0, 11, 11, 2, 5, 7, 8, 2, 5, 5, 2, 5, 1, 0), # 151
(10, 9, 6, 11, 10, 5, 1, 3, 2, 1, 1, 0, 0, 17, 5, 8, 6, 9, 6, 4, 2, 5, 5, 2, 0, 0), # 152
(15, 6, 12, 7, 10, 1, 5, 2, 7, 3, 0, 2, 0, 11, 13, 1, 4, 11, 6, 4, 5, 6, 3, 2, 0, 0), # 153
(14, 6, 10, 9, 8, 3, 3, 4, 1, 3, 1, 0, 0, 12, 13, 4, 3, 10, 4, 2, 0, 1, 4, 0, 0, 0), # 154
(11, 6, 8, 7, 8, 8, 1, 3, 8, 1, 2, 1, 0, 16, 12, 9, 2, 6, 4, 4, 2, 6, 5, 1, 0, 0), # 155
(13, 10, 16, 12, 11, 2, 4, 2, 7, 0, 1, 3, 0, 7, 9, 8, 9, 7, 5, 4, 3, 6, 2, 1, 0, 0), # 156
(13, 3, 9, 7, 11, 2, 6, 1, 5, 2, 1, 0, 0, 7, 10, 7, 4, 5, 4, 5, 1, 4, 4, 1, 1, 0), # 157
(9, 7, 6, 7, 3, 4, 1, 3, 4, 3, 0, 0, 0, 11, 2, 7, 7, 8, 6, 1, 3, 3, 1, 1, 3, 0), # 158
(14, 5, 12, 13, 8, 2, 4, 3, 0, 3, 2, 0, 0, 16, 8, 4, 4, 6, 2, 2, 2, 3, 4, 5, 0, 0), # 159
(8, 6, 11, 9, 13, 8, 5, 2, 2, 1, 0, 0, 0, 10, 5, 2, 6, 5, 5, 1, 1, 5, 5, 3, 0, 0), # 160
(9, 5, 10, 6, 10, 3, 3, 2, 5, 2, 0, 0, 0, 4, 6, 14, 6, 9, 7, 6, 2, 3, 4, 2, 2, 0), # 161
(9, 6, 9, 14, 5, 2, 6, 5, 5, 0, 1, 1, 0, 11, 6, 7, 4, 6, 7, 2, 0, 4, 0, 5, 3, 0), # 162
(10, 6, 3, 7, 7, 7, 6, 3, 5, 6, 0, 2, 0, 11, 8, 9, 3, 6, 5, 2, 3, 6, 4, 2, 0, 0), # 163
(4, 5, 4, 2, 13, 3, 6, 4, 4, 1, 0, 0, 0, 9, 12, 2, 3, 13, 4, 5, 4, 8, 2, 0, 0, 0), # 164
(9, 7, 10, 6, 11, 3, 3, 3, 7, 0, 1, 1, 0, 10, 10, 4, 3, 6, 1, 1, 1, 1, 3, 2, 0, 0), # 165
(4, 5, 10, 4, 7, 7, 3, 3, 6, 0, 1, 3, 0, 3, 9, 7, 4, 5, 3, 5, 2, 7, 4, 2, 0, 0), # 166
(5, 4, 6, 10, 5, 4, 4, 2, 4, 0, 2, 0, 0, 6, 6, 14, 5, 8, 4, 1, 3, 8, 3, 1, 2, 0), # 167
(11, 9, 6, 8, 8, 6, 2, 2, 3, 1, 2, 1, 0, 11, 12, 9, 2, 4, 3, 0, 5, 4, 0, 0, 0, 0), # 168
(11, 6, 5, 3, 11, 0, 1, 4, 4, 0, 0, 0, 0, 11, 10, 7, 2, 8, 3, 3, 3, 2, 5, 0, 0, 0), # 169
(6, 3, 7, 10, 9, 4, 3, 3, 5, 1, 0, 2, 0, 10, 6, 3, 4, 4, 5, 3, 2, 1, 1, 2, 1, 0), # 170
(9, 9, 10, 8, 3, 1, 1, 4, 0, 0, 1, 0, 0, 8, 10, 3, 3, 9, 4, 2, 5, 3, 3, 1, 0, 0), # 171
(4, 5, 5, 7, 8, 2, 4, 0, 6, 0, 0, 0, 0, 7, 8, 6, 2, 9, 4, 1, 1, 1, 0, 0, 0, 0), # 172
(4, 3, 4, 8, 4, 2, 4, 0, 3, 0, 0, 0, 0, 6, 7, 4, 0, 5, 2, 2, 4, 2, 1, 2, 2, 0), # 173
(11, 7, 2, 13, 7, 6, 1, 1, 2, 2, 2, 0, 0, 6, 4, 2, 6, 6, 6, 1, 2, 6, 2, 1, 0, 0), # 174
(5, 5, 10, 9, 1, 2, 0, 1, 4, 0, 3, 0, 0, 5, 3, 2, 4, 5, 6, 3, 0, 4, 4, 1, 1, 0), # 175
(7, 1, 3, 5, 7, 2, 0, 1, 3, 1, 1, 1, 0, 5, 1, 10, 5, 3, 5, 1, 2, 2, 0, 1, 0, 0), # 176
(6, 7, 3, 10, 1, 4, 3, 0, 3, 3, 2, 0, 0, 8, 3, 3, 5, 2, 3, 1, 3, 6, 1, 0, 1, 0), # 177
(4, 3, 5, 3, 5, 2, 0, 3, 6, 0, 0, 0, 0, 9, 4, 4, 3, 4, 1, 3, 3, 4, 1, 1, 1, 0), # 178
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), # 179
)
station_arriving_intensity = (
(7.029211809720476, 7.735403983570434, 7.29579652145751, 8.700534883408807, 7.776559850653457, 4.394116904852274, 5.804449861523481, 6.514446642171193, 8.52613868703521, 5.541221021731318, 5.887371229439844, 6.857081109628643, 7.117432297609708), # 0
(7.496058012827964, 8.246084971802663, 7.777485227862214, 9.275201954587263, 8.291486472463932, 4.684377017659578, 6.187256517769172, 6.943319212067992, 9.089143456866074, 5.90657296918801, 6.2763345903385845, 7.309703325140097, 7.587708306415797), # 1
(7.9614122125716245, 8.754739239247371, 8.257259199766379, 9.847582786530712, 8.804548163249642, 4.9734791603174235, 6.568545911144986, 7.370475347066188, 9.64990152962857, 6.270479285028765, 6.663752408286839, 7.760525712874277, 8.056110759493567), # 2
(8.423460910405188, 9.259348702711026, 8.733215217047796, 10.415406970544904, 9.313726346402664, 5.260276871619158, 6.946805098307138, 7.79422162049231, 10.206189225289531, 6.631495777796654, 7.0480877765583365, 8.207759958902646, 8.520781928755916), # 3
(8.880390607782374, 9.757895279000085, 9.203450059584252, 10.976404097935598, 9.81700244531509, 5.543623690358135, 7.320521135911843, 8.212864605672882, 10.75578286381579, 6.988178256034751, 7.4278037884268056, 8.64961774929667, 8.979864086115745), # 4
(9.330387806156915, 10.248360884921025, 9.666060507253526, 11.528303760008551, 10.312357883378994, 5.822373155327701, 7.688181080615314, 8.62471087593443, 11.296458765174183, 7.339082528286129, 7.801363537165986, 9.084310770127807, 9.43149950348596), # 5
(9.771639006982534, 10.728727437280302, 10.119143339933412, 12.068835548069513, 10.79777408398646, 6.09537880532121, 8.048271989073768, 9.028067004603484, 11.825993249331543, 7.682764403093862, 8.167230116049597, 9.510050707467531, 9.87383045277945), # 6
(10.202330711712957, 11.196976852884385, 10.56079533750169, 12.595729053424249, 11.271232470529577, 6.36149417913201, 8.39928091794342, 9.421239565006573, 12.342162636254702, 8.017779689001022, 8.523866618351377, 9.925049247387301, 10.304999205909127), # 7
(10.62064942180191, 11.651091048539739, 10.989113279836156, 13.1067138673785, 11.730714466400421, 6.619572815553446, 8.739694923880478, 9.802535130470215, 12.842743245910489, 8.342684194550685, 8.86973613734505, 10.327518075958585, 10.723148034787885), # 8
(11.02478163870312, 12.089051941052832, 11.402193946814586, 13.599519581238038, 12.174201494991074, 6.868468253378878, 9.068001063541168, 10.170260274320949, 13.325511398265744, 8.65603372828592, 9.20330176630435, 10.71566887925284, 11.126419211328628), # 9
(11.412913863870306, 12.508841447230123, 11.798134118314776, 14.071875786308604, 12.599674979693622, 7.107034031401651, 9.382686393581697, 10.522721569885295, 13.7882434132873, 8.956384098749801, 9.523026598503003, 11.087713343341534, 11.512955007444255), # 10
(11.783232598757209, 12.90844148387809, 12.175030574214501, 14.521512073895957, 13.005116343900148, 7.334123688415116, 9.682237970658283, 10.85822559048978, 14.228715610941991, 9.242291114485408, 9.82737372721475, 11.441863154296136, 11.880897695047656), # 11
(12.133924344817538, 13.285833967803178, 12.530980094391557, 14.946158035305858, 13.38850701100273, 7.5485907632126175, 9.965142851427137, 11.17507890946093, 14.644704311196652, 9.512310584035802, 10.114806245713309, 11.776329998188096, 12.22838954605175), # 12
(12.463175603505027, 13.639000815811869, 12.864079458723728, 15.343543261844063, 13.747828404393443, 7.749288794587514, 10.22988809254448, 11.471588100125276, 15.033985834018106, 9.764998315944066, 10.383787247272418, 12.08932556108889, 12.55357283236943), # 13
(12.769172876273403, 13.965923944710624, 13.172425447088806, 15.71139734481631, 14.081061947464386, 7.935071321333148, 10.474960750666526, 11.746059735809345, 15.39433649937319, 9.998910118753269, 10.6327798251658, 12.379061529069986, 12.85458982591359), # 14
(13.050102664576398, 14.264585271305906, 13.45411483936456, 16.047449875528383, 14.386189063607633, 8.104791882242878, 10.698847882449478, 11.99680038983966, 15.723532627228748, 10.212601801006487, 10.860247072667189, 12.64374958820284, 13.129582798597134), # 15
(13.30415146986772, 14.532966712404187, 13.707244415428796, 16.349430445286004, 14.661191176215267, 8.257304016110044, 10.900036544549568, 12.222116635542745, 16.019350537551603, 10.404629171246796, 11.06465208305032, 12.881601424558916, 13.376694022332964), # 16
(13.529505793601107, 14.769050184811926, 13.929910955159293, 16.61506864539496, 14.904049708679375, 8.391461261728, 11.077013793622996, 12.420315046245145, 16.27956655030858, 10.573548038017254, 11.24445794958892, 13.090828724209679, 13.594065769033982), # 17
(13.724352137230287, 14.970817605335585, 14.120211238433834, 16.842094067160993, 15.112746084392025, 8.506117157890104, 11.228266686325993, 12.589702195273366, 16.501956985466535, 10.717914209860952, 11.398127765556712, 13.269643173226603, 13.779840310613086), # 18
(13.88687700220898, 15.136250890781643, 14.27624204513021, 17.02823630188984, 15.285261726745313, 8.600125243389693, 11.352282279314753, 12.728584655953943, 16.68429816299229, 10.83628349532096, 11.52412462422743, 13.416256457681136, 13.932159918983176), # 19
(14.015266889990915, 15.263331957956549, 14.396100155126206, 17.171224940887296, 15.419578059131322, 8.672339057020126, 11.44754762924551, 12.835269001613405, 16.82436640285268, 10.927211702940342, 11.62091161887481, 13.528880263644748, 14.049166866057154), # 20
(14.107708302029813, 15.350042723666784, 14.477882348299607, 17.26878957545908, 15.513676504942126, 8.72161213757475, 11.512549792774463, 12.908061805578273, 16.91993802501453, 10.989254641262178, 11.686951842772585, 13.60572627718891, 14.12900342374791), # 21
(14.162387739779412, 15.394365104718803, 14.5196854045282, 17.31865979691097, 15.565538487569807, 8.746798023846914, 11.54577582655784, 12.945269641175082, 16.968789349444684, 11.02096811882954, 11.720708389194478, 13.645006184385087, 14.16981186396836), # 22
(14.182550708679697, 15.39961303155007, 14.524892455418383, 17.324903137860087, 15.578824878445637, 8.75, 11.549725603163076, 12.949291358024693, 16.974896728395063, 11.024709181527207, 11.724941252026436, 13.649856607224509, 14.175), # 23
(14.197417378247815, 15.396551851851854, 14.524040740740743, 17.324134722222226, 15.586350659060795, 8.75, 11.547555337690634, 12.943700000000002, 16.974078333333335, 11.02241086419753, 11.724474410774413, 13.648720987654322, 14.175), # 24
(14.211970122296213, 15.390517832647463, 14.522359396433473, 17.322614454732513, 15.593710923832306, 8.75, 11.543278463648836, 12.932716049382718, 16.97246141975309, 11.01788637402835, 11.723548759196907, 13.646479195244629, 14.175), # 25
(14.226207826667249, 15.381603155006863, 14.519871467764064, 17.320359619341563, 15.600905415789548, 8.75, 11.53696140563221, 12.916546913580248, 16.97006672839506, 11.011210992226795, 11.722172677391198, 13.643161957018751, 14.175), # 26
(14.240129377203292, 15.3699, 14.5166, 17.3173875, 15.607933877961901, 8.75, 11.528670588235297, 12.895400000000002, 16.966915, 11.00246, 11.720354545454546, 13.638800000000003, 14.175), # 27
(14.253733659746702, 15.355500548696845, 14.51256803840878, 17.313715380658437, 15.614796053378763, 8.75, 11.518472436052612, 12.869482716049385, 16.963026975308644, 10.9917086785551, 11.718102743484225, 13.633424051211708, 14.175), # 28
(14.26701956013985, 15.338496982167355, 14.50779862825789, 17.30936054526749, 15.62149168506951, 8.75, 11.506433373678693, 12.839002469135803, 16.95842339506173, 10.979032309099225, 11.715425651577503, 13.627064837677183, 14.175), # 29
(14.279985964225098, 15.318981481481483, 14.502314814814815, 17.30434027777778, 15.628020516063533, 8.75, 11.492619825708061, 12.804166666666665, 16.953125, 10.964506172839508, 11.71233164983165, 13.619753086419752, 14.175), # 30
(14.292631757844802, 15.297046227709194, 14.496139643347053, 17.29867186213992, 15.634382289390214, 8.75, 11.477098216735257, 12.765182716049384, 16.947152530864198, 10.948205550983083, 11.708829118343933, 13.611519524462738, 14.175), # 31
(14.304955826841338, 15.27278340192044, 14.489296159122084, 17.29237258230453, 15.640576748078935, 8.75, 11.4599349713548, 12.72225802469136, 16.940526728395064, 10.930205724737084, 11.704926437211622, 13.602394878829449, 14.175), # 32
(14.316957057057056, 15.246285185185185, 14.481807407407409, 17.28545972222222, 15.646603635159089, 8.75, 11.441196514161222, 12.675600000000001, 16.933268333333334, 10.910581975308643, 11.700631986531986, 13.59240987654321, 14.175), # 33
(14.328634334334335, 15.217643758573388, 14.473696433470508, 17.27795056584362, 15.652462693660054, 8.75, 11.420949269749054, 12.625416049382716, 16.925398086419758, 10.889409583904893, 11.695954146402293, 13.581595244627344, 14.175), # 34
(14.339986544515531, 15.186951303155007, 14.464986282578877, 17.26986239711934, 15.65815366661122, 8.75, 11.399259662712824, 12.571913580246914, 16.916936728395065, 10.866763831732968, 11.690901296919815, 13.569981710105168, 14.175), # 35
(14.35101257344301, 15.1543, 14.455700000000002, 17.2612125, 15.663676297041972, 8.75, 11.37619411764706, 12.515300000000002, 16.907905, 10.84272, 11.685481818181819, 13.557600000000003, 14.175), # 36
(14.361711306959135, 15.119782030178326, 14.445860631001374, 17.252018158436215, 15.669030327981691, 8.75, 11.351819059146292, 12.455782716049384, 16.89832364197531, 10.817353369913125, 11.679704090285574, 13.544480841335163, 14.175), # 37
(14.372081630906267, 15.083489574759948, 14.43549122085048, 17.242296656378603, 15.674215502459768, 8.75, 11.326200911805053, 12.393569135802473, 16.88821339506173, 10.790739222679472, 11.673576493328346, 13.530654961133976, 14.175), # 38
(14.382122431126781, 15.045514814814815, 14.424614814814818, 17.232065277777778, 15.679231563505585, 8.75, 11.299406100217867, 12.328866666666666, 16.877595000000003, 10.762952839506175, 11.667107407407409, 13.516153086419752, 14.175), # 39
(14.39183259346303, 15.005949931412895, 14.413254458161866, 17.221341306584364, 15.684078254148528, 8.75, 11.271501048979264, 12.261882716049385, 16.866489197530868, 10.734069501600368, 11.660305212620028, 13.501005944215823, 14.175), # 40
(14.40121100375738, 14.964887105624143, 14.401433196159124, 17.210142026748972, 15.688755317417984, 8.75, 11.242552182683774, 12.192824691358027, 16.85491672839506, 10.704164490169182, 11.653178289063476, 13.485244261545498, 14.175), # 41
(14.410256547852201, 14.922418518518521, 14.389174074074077, 17.198484722222226, 15.693262496343333, 8.75, 11.212625925925927, 12.121900000000002, 16.842898333333338, 10.673313086419753, 11.645735016835017, 13.4688987654321, 14.175), # 42
(14.418968111589852, 14.878636351165984, 14.376500137174213, 17.186386676954736, 15.697599533953966, 8.75, 11.181788703300251, 12.049316049382718, 16.83045475308642, 10.641590571559215, 11.637983776031925, 13.452000182898951, 14.175), # 43
(14.427344580812699, 14.83363278463649, 14.363434430727025, 17.173865174897124, 15.701766173279264, 8.75, 11.150106939401276, 11.975280246913583, 16.817606728395063, 10.609072226794698, 11.629932946751465, 13.434579240969367, 14.175), # 44
(14.435384841363105, 14.787500000000001, 14.350000000000001, 17.160937500000003, 15.705762157348616, 8.75, 11.11764705882353, 11.9, 16.804375, 10.575833333333335, 11.62159090909091, 13.416666666666666, 14.175), # 45
(14.443087779083434, 14.740330178326476, 14.336219890260631, 17.147620936213993, 15.709587229191404, 8.75, 11.084475486161544, 11.823682716049385, 16.790780308641974, 10.541949172382258, 11.612966043147525, 13.398293187014175, 14.175), # 46
(14.45045227981605, 14.692215500685872, 14.322117146776408, 17.133932767489714, 15.713241131837016, 8.75, 11.050658646009847, 11.746535802469136, 16.776843395061732, 10.507495025148607, 11.604066729018582, 13.37948952903521, 14.175), # 47
(14.457477229403315, 14.64324814814815, 14.307714814814817, 17.11989027777778, 15.716723608314837, 8.75, 11.016262962962964, 11.668766666666668, 16.762585, 10.472546172839506, 11.594901346801347, 13.360286419753088, 14.175), # 48
(14.464161513687602, 14.593520301783265, 14.29303593964335, 17.10551075102881, 15.720034401654251, 8.75, 10.981354861615428, 11.590582716049383, 16.748025864197533, 10.437177896662096, 11.585478276593093, 13.340714586191131, 14.175), # 49
(14.470504018511264, 14.543124142661183, 14.278103566529495, 17.090811471193415, 15.723173254884642, 8.75, 10.94600076656177, 11.512191358024692, 16.73318672839506, 10.401465477823503, 11.575805898491085, 13.32080475537266, 14.175), # 50
(14.476503629716676, 14.492151851851853, 14.262940740740742, 17.075809722222225, 15.726139911035398, 8.75, 10.910267102396515, 11.433800000000002, 16.718088333333338, 10.365484197530865, 11.565892592592595, 13.30058765432099, 14.175), # 51
(14.482159233146191, 14.440695610425243, 14.247570507544584, 17.060522788065846, 15.728934113135901, 8.75, 10.874220293714194, 11.355616049382716, 16.70275141975309, 10.329309336991313, 11.555746738994888, 13.280094010059445, 14.175), # 52
(14.487469714642183, 14.388847599451307, 14.232015912208508, 17.0449679526749, 15.731555604215542, 8.75, 10.837926765109337, 11.277846913580248, 16.687196728395065, 10.293016177411982, 11.545376717795238, 13.259354549611341, 14.175), # 53
(14.492433960047004, 14.336700000000002, 14.2163, 17.0291625, 15.734004127303704, 8.75, 10.801452941176471, 11.2007, 16.671445000000002, 10.256680000000001, 11.534790909090908, 13.2384, 14.175), # 54
(14.497050855203032, 14.284344993141291, 14.200445816186559, 17.01312371399177, 15.736279425429768, 8.75, 10.764865246510128, 11.124382716049384, 16.655516975308643, 10.220376085962506, 11.523997692979176, 13.217261088248744, 14.175), # 55
(14.501319285952622, 14.231874759945132, 14.184476406035667, 16.996868878600825, 15.738381241623124, 8.75, 10.728230105704835, 11.049102469135804, 16.63943339506173, 10.184179716506632, 11.513005449557303, 13.195968541380887, 14.175), # 56
(14.505238138138138, 14.179381481481483, 14.168414814814819, 16.98041527777778, 15.740309318913155, 8.75, 10.69161394335512, 10.975066666666669, 16.623215000000002, 10.148166172839508, 11.50182255892256, 13.174553086419753, 14.175), # 57
(14.508806297601952, 14.126957338820304, 14.152284087791497, 16.96378019547325, 15.742063400329245, 8.75, 10.655083184055517, 10.902482716049382, 16.606882530864198, 10.112410736168268, 11.490457401172218, 13.153045450388662, 14.175), # 58
(14.51202265018642, 14.07469451303155, 14.136107270233198, 16.946980915637862, 15.743643228900785, 8.75, 10.61870425240055, 10.83155802469136, 16.590456728395065, 10.076988687700048, 11.478918356403542, 13.131476360310929, 14.175), # 59
(14.51488608173391, 14.022685185185187, 14.119907407407407, 16.930034722222224, 15.745048547657152, 8.75, 10.582543572984749, 10.762500000000001, 16.573958333333337, 10.041975308641977, 11.467213804713806, 13.109876543209879, 14.175), # 60
(14.517395478086781, 13.971021536351168, 14.10370754458162, 16.912958899176957, 15.746279099627737, 8.75, 10.546667570402647, 10.695516049382718, 16.557408086419755, 10.00744588020119, 11.455352126200275, 13.088276726108827, 14.175), # 61
(14.519549725087407, 13.919795747599453, 14.087530727023323, 16.89577073045268, 15.74733462784193, 8.75, 10.51114266924877, 10.630813580246915, 16.540826728395064, 9.973475683584821, 11.44334170096022, 13.066707636031095, 14.175), # 62
(14.521347708578144, 13.869100000000001, 14.071400000000002, 16.878487500000002, 15.7482148753291, 8.75, 10.476035294117647, 10.568600000000002, 16.524235, 9.94014, 11.43119090909091, 13.045200000000001, 14.175), # 63
(14.522788314401359, 13.819026474622772, 14.05533840877915, 16.86112649176955, 15.74891958511865, 8.75, 10.44141186960381, 10.509082716049384, 16.50765364197531, 9.907514110653864, 11.41890813068961, 13.023784545038868, 14.175), # 64
(14.523870428399414, 13.769667352537724, 14.03936899862826, 16.843704989711934, 15.749448500239955, 8.75, 10.407338820301785, 10.45246913580247, 16.49110339506173, 9.875673296753543, 11.4065017458536, 13.00249199817101, 14.175), # 65
(14.524592936414676, 13.721114814814818, 14.023514814814817, 16.826240277777778, 15.749801363722403, 8.75, 10.373882570806101, 10.398966666666668, 16.474605000000004, 9.844692839506173, 11.393980134680135, 12.981353086419755, 14.175), # 66
(14.524954724289511, 13.673461042524005, 14.00779890260631, 16.808749639917696, 15.749977918595382, 8.75, 10.341109545711289, 10.348782716049385, 16.458179197530864, 9.814648020118886, 11.381351677266494, 12.960398536808412, 14.175), # 67
(14.524708260273156, 13.626548095048452, 13.99216832990398, 16.7910984366613, 15.749829137416285, 8.74983761621704, 10.308921272761506, 10.301681390032009, 16.44172298811157, 9.785468618306034, 11.368400383956526, 12.939542030659641, 14.174825210048013), # 68
(14.522398389694043, 13.578943727598569, 13.976183796296295, 16.772396920289854, 15.748474945533768, 8.748553909465022, 10.27637545388526, 10.25513827160494, 16.424516975308645, 9.756328946986201, 11.35380797448166, 12.918106562703056, 14.17344039351852), # 69
(14.517840102582454, 13.5304294437807, 13.95977580589849, 16.752521973966722, 15.74579903978052, 8.746025758268557, 10.243324188385918, 10.208733424782809, 16.40646404892547, 9.727087334247829, 11.337408441136512, 12.895991865809934, 14.170705268347055), # 70
(14.511097524900102, 13.481034236028144, 13.942950120027435, 16.731502905260335, 15.74183531025579, 8.742294131992075, 10.209782323354585, 10.162482213077277, 16.387591095107457, 9.697744503079695, 11.319262319097408, 12.873214112097802, 14.166655842764062), # 71
(14.502234782608697, 13.430787096774193, 13.9257125, 16.709369021739132, 15.736617647058825, 8.737400000000001, 10.175764705882354, 10.1164, 16.367925000000003, 9.668301176470589, 11.299430143540672, 12.849789473684211, 14.161328125), # 72
(14.491316001669949, 13.379717018452144, 13.90806870713306, 16.686149630971553, 15.730179940288872, 8.73138433165676, 10.141286183060329, 10.070502149062644, 16.347492649748517, 9.63875807740929, 11.277972449642624, 12.825734122686688, 14.154758123285324), # 73
(14.478405308045566, 13.32785299349529, 13.890024502743485, 16.661874040526033, 15.722556080045187, 8.72428809632678, 10.106361601979613, 10.024804023776863, 16.3263209304984, 9.609115928884586, 11.254949772579598, 12.801064231222776, 14.146981845850483), # 74
(14.463566827697262, 13.275224014336917, 13.871585648148148, 16.636571557971017, 15.713779956427018, 8.716152263374488, 10.0710058097313, 9.979320987654322, 16.30443672839506, 9.579375453885259, 11.23042264752791, 12.775795971410007, 14.138035300925928), # 75
(14.44686468658675, 13.22185907341033, 13.852757904663925, 16.610271490874936, 15.703885459533609, 8.707017802164305, 10.035233653406493, 9.934068404206677, 16.281866929583906, 9.549537375400092, 11.20445160966389, 12.749945515365916, 14.127954496742113), # 76
(14.428363010675731, 13.167787163148816, 13.833547033607681, 16.583003146806227, 15.692906479464213, 8.696925682060662, 9.999059980096293, 9.88906163694559, 16.258638420210335, 9.519602416417872, 11.177097194163862, 12.723529035208049, 14.116775441529496), # 77
(14.408125925925928, 13.113037275985667, 13.813958796296298, 16.554795833333333, 15.680876906318085, 8.685916872427983, 9.962499636891796, 9.844316049382718, 16.23477808641975, 9.489571299927379, 11.148419936204148, 12.696562703053933, 14.10453414351852), # 78
(14.386217558299041, 13.057638404354178, 13.793998954046641, 16.525678858024694, 15.667830630194468, 8.674032342630696, 9.925567470884102, 9.799847005029722, 16.210312814357568, 9.4594447489174, 11.118480370961072, 12.669062691021107, 14.091266610939643), # 79
(14.362702033756786, 13.001619540687642, 13.773673268175584, 16.495681528448742, 15.653801541192612, 8.661313062033226, 9.888278329164315, 9.755669867398264, 16.185269490169183, 9.429223486376719, 11.087339033610965, 12.64104517122711, 14.07700885202332), # 80
(14.337643478260873, 12.945009677419357, 13.752987500000001, 16.464833152173917, 15.638823529411765, 8.6478, 9.85064705882353, 9.711800000000002, 16.159675, 9.398908235294119, 11.055056459330146, 12.612526315789475, 14.061796875), # 81
(14.311106017773009, 12.887837806982612, 13.731947410836765, 16.433163036768654, 15.622930484951183, 8.633534125895444, 9.812688506952853, 9.668252766346594, 16.133556229995428, 9.368499718658382, 11.02169318329494, 12.583522296825743, 14.045666688100141), # 82
(14.283153778254908, 12.8301329218107, 13.710558762002744, 16.400700489801395, 15.606156297910111, 8.618556409083983, 9.774417520643375, 9.625043529949703, 16.10694006630087, 9.337998659458297, 10.987309740681672, 12.554049286453447, 14.028654299554185), # 83
(14.253850885668278, 12.77192401433692, 13.688827314814816, 16.36747481884058, 15.588534858387801, 8.602907818930042, 9.735848946986202, 9.582187654320988, 16.07985339506173, 9.307405780682645, 10.951966666666667, 12.524123456790125, 14.010795717592593), # 84
(14.223261465974833, 12.713240076994557, 13.666758830589849, 16.333515331454645, 15.5701000564835, 8.58662932479805, 9.696997633072435, 9.53970050297211, 16.05232310242341, 9.276721805320209, 10.915724496426252, 12.493760979953313, 13.992126950445819), # 85
(14.191449645136279, 12.654110102216913, 13.644359070644722, 16.298851335212028, 15.550885782296458, 8.569761896052432, 9.65787842599317, 9.497597439414724, 16.024376074531325, 9.245947456359774, 10.878643765136749, 12.462978028060553, 13.97268400634431), # 86
(14.15847954911433, 12.594563082437277, 13.621633796296296, 16.26351213768116, 15.53092592592593, 8.552346502057613, 9.618506172839506, 9.455893827160494, 15.996039197530868, 9.215083456790124, 10.840785007974482, 12.43179077322937, 13.95250289351852), # 87
(14.124415303870702, 12.534628010088941, 13.598588768861456, 16.22752704643049, 15.510254377471155, 8.534424112178023, 9.578895720702548, 9.414605029721079, 15.967339357567447, 9.184130529600042, 10.802208760115779, 12.400215387577312, 13.931619620198905), # 88
(14.089321035367092, 12.474333877605204, 13.575229749657066, 16.19092536902845, 15.488905027031391, 8.516035695778085, 9.539061916673392, 9.37374641060814, 15.938303440786468, 9.153089397778317, 10.762975556736963, 12.36826804322191, 13.910070194615912), # 89
(14.053260869565218, 12.413709677419357, 13.551562500000001, 16.153736413043482, 15.466911764705886, 8.497222222222224, 9.499019607843138, 9.333333333333334, 15.908958333333336, 9.121960784313726, 10.723145933014354, 12.335964912280703, 13.887890625), # 90
(14.016298932426789, 12.352784401964689, 13.527592781207133, 16.11598948604402, 15.444308480593882, 8.478024660874867, 9.458783641302887, 9.293381161408323, 15.879330921353455, 9.090745412195057, 10.682780424124285, 12.303322166871226, 13.865116919581618), # 91
(13.978499349913523, 12.2915870436745, 13.503326354595337, 16.0777138955985, 15.421129064794641, 8.458483981100443, 9.418368864143739, 9.253905258344766, 15.84944809099223, 9.059444004411093, 10.641939565243074, 12.270355979111017, 13.841785086591221), # 92
(13.939926247987117, 12.230146594982081, 13.478768981481483, 16.038938949275366, 15.397407407407409, 8.438641152263374, 9.37779012345679, 9.214920987654322, 15.819336728395063, 9.028057283950616, 10.600683891547051, 12.23708252111761, 13.81793113425926), # 93
(13.900643752609293, 12.168492048320722, 13.453926423182445, 15.999693954643051, 15.37317739853143, 8.418537143728091, 9.337062266333147, 9.176443712848654, 15.789023719707364, 8.996585973802416, 10.559073938212535, 12.203517965008546, 13.793591070816188), # 94
(13.860715989741754, 12.106652396123724, 13.42880444101509, 15.960008219269996, 15.34847292826596, 8.398212924859017, 9.296200139863902, 9.138488797439416, 15.758535951074533, 8.96503079695527, 10.517170240415854, 12.169678482901354, 13.768800904492457), # 95
(13.820207085346219, 12.044656630824377, 13.403408796296299, 15.91991105072464, 15.32332788671024, 8.377709465020576, 9.25521859114016, 9.101071604938273, 15.727900308641976, 8.933392476397968, 10.475033333333334, 12.135580246913582, 13.74359664351852), # 96
(13.779181165384388, 11.98253374485597, 13.377745250342937, 15.879431756575416, 15.297776163963531, 8.357067733577198, 9.21413246725302, 9.064207498856883, 15.6971436785551, 8.901671735119288, 10.432723752141296, 12.101239429162758, 13.718014296124831), # 97
(13.737702355817978, 11.9203127306518, 13.35181956447188, 15.83859964439077, 15.271851650125074, 8.336328699893311, 9.17295661529358, 9.027911842706905, 15.666292946959304, 8.86986929610802, 10.390302032016068, 12.066672201766417, 13.69208987054184), # 98
(13.695834782608697, 11.858022580645162, 13.325637500000003, 15.797444021739132, 15.24558823529412, 8.315533333333335, 9.131705882352943, 8.9922, 15.635375000000002, 8.83798588235294, 10.347828708133973, 12.031894736842107, 13.665859375000002), # 99
(13.653642571718258, 11.795692287269347, 13.29920481824417, 15.755994196188944, 15.21901980956992, 8.294722603261699, 9.090395115522204, 8.957087334247829, 15.60441672382259, 8.806022216842843, 10.305364315671335, 11.996923206507354, 13.639358817729768), # 100
(13.611189849108369, 11.733350842957654, 13.272527280521263, 15.714279475308645, 15.192180263051725, 8.273937479042829, 9.049039161892468, 8.922589208962048, 15.573445004572475, 8.773979022566504, 10.262969389804478, 11.961773782879694, 13.612624206961591), # 101
(13.568540740740744, 11.67102724014337, 13.245610648148148, 15.67232916666667, 15.165103485838781, 8.253218930041154, 9.00765286855483, 8.888720987654322, 15.542486728395062, 8.741857022512711, 10.22070446570973, 11.926462638076675, 13.585691550925928), # 102
(13.525759372577088, 11.60875047125979, 13.218460682441702, 15.630172577831457, 15.137823368030341, 8.232607925621096, 8.966251082600394, 8.855498033836307, 15.511568781435757, 8.709656939670245, 10.178630078563414, 11.891005944215824, 13.558596857853223), # 103
(13.482909870579116, 11.546549528740211, 13.191083144718794, 15.587839016371445, 15.110373799725652, 8.212145435147082, 8.924848651120257, 8.822935711019662, 15.480718049839965, 8.677379497027893, 10.13680676354185, 11.855419873414677, 13.53137613597394), # 104
(13.440056360708535, 11.484453405017922, 13.163483796296298, 15.545357789855073, 15.082788671023966, 8.19187242798354, 8.883460421205521, 8.79104938271605, 15.449961419753087, 8.64502541757444, 10.095295055821373, 11.819720597790775, 13.50406539351852), # 105
(13.39726296892706, 11.42249109252622, 13.135668398491084, 15.50275820585078, 15.055101872024531, 8.171829873494895, 8.842101239947283, 8.759854412437129, 15.41932577732053, 8.612595424298663, 10.054155490578298, 11.783924289461654, 13.476700638717421), # 106
(13.3545938211964, 11.360691583698395, 13.10764271262003, 15.460069571927, 15.027347292826596, 8.152058741045574, 8.800785954436646, 8.72936616369456, 15.388838008687703, 8.580090240189355, 10.013448602988953, 11.748047120544847, 13.449317879801098), # 107
(13.312113043478263, 11.299083870967744, 13.079412500000002, 15.417321195652177, 14.999558823529412, 8.132600000000002, 8.759529411764706, 8.699600000000002, 15.358525000000002, 8.547510588235296, 9.973234928229665, 11.712105263157897, 13.421953125000002), # 108
(13.26988476173436, 11.237696946767558, 13.050983521947876, 15.374542384594738, 14.97177035423223, 8.113494619722603, 8.718346459022568, 8.670571284865114, 15.328413637402836, 8.514857191425268, 9.933575001476758, 11.676114889418335, 13.394642382544584), # 109
(13.227973101926404, 11.176559803531132, 13.022361539780524, 15.331762446323136, 14.944015775034297, 8.094783569577809, 8.677251943301325, 8.642295381801555, 15.29853080704161, 8.482130772748057, 9.894529357906551, 11.640092171443701, 13.367421660665297), # 110
(13.186442190016104, 11.11570143369176, 12.993552314814819, 15.2890106884058, 14.91632897603486, 8.076507818930043, 8.636260711692085, 8.614787654320988, 15.26890339506173, 8.449332055192448, 9.856158532695375, 11.60405328135153, 13.340326967592594), # 111
(13.14535615196517, 11.055150829682729, 12.96456160836763, 15.246316418411165, 14.888743847333174, 8.05870833714373, 8.595387611285942, 8.588063465935072, 15.239558287608595, 8.416461761747223, 9.818523061019553, 11.568014391259355, 13.313394311556928), # 112
(13.104705913184263, 10.995038066300333, 12.935464959552897, 15.203767435488858, 14.861245952243188, 8.04141767690032, 8.554736349119478, 8.562193596292849, 15.21059793576207, 8.383626631257822, 9.781693468614014, 11.5320701111062, 13.286621461180511), # 113
(13.064073257060091, 10.935956056935751, 12.906663945030267, 15.161705189788272, 14.833550696392859, 8.024596451941862, 8.514825491774811, 8.537495763307168, 15.182466649998286, 8.351441235077896, 9.745742071958476, 11.496677040958165, 13.25978557982405), # 114
(13.023338864205595, 10.877926078156266, 12.878175705790246, 15.120118307254492, 14.805570749044042, 8.008200917498272, 8.475683510268187, 8.513963715990194, 15.155174970136306, 8.319955459183308, 9.710616315997932, 11.461852615582393, 13.232809284324528), # 115
(12.982451822532688, 10.820863593808383, 12.849945065977423, 15.078932610372966, 14.777263936937292, 7.992192428201937, 8.43724674453905, 8.491532438058591, 15.128653874918964, 8.289110701829367, 9.676248303780074, 11.427532476482286, 13.205650163658248), # 116
(12.941361219953283, 10.76468406773861, 12.82191684973638, 15.038073921629142, 14.748588086813156, 7.976532338685248, 8.399451534526854, 8.47013691322902, 15.102834343089086, 8.258848361271381, 9.642570138352598, 11.39365226516125, 13.178265806801516), # 117
(12.900016144379297, 10.709302963793455, 12.794035881211714, 14.997468063508467, 14.71950102541218, 7.9611820035805945, 8.362234220171041, 8.449712125218136, 15.07764735338951, 8.229109835764664, 9.609513922763194, 11.36014762312269, 13.150613802730636), # 118
(12.858365683722639, 10.654635745819421, 12.766246984548014, 14.95704085849639, 14.689960579474912, 7.946102777520366, 8.325531141411059, 8.430193057742605, 15.053023884563062, 8.199836523564521, 9.577011760059559, 11.326954191870009, 13.122651740421906), # 119
(12.816358925895228, 10.600597877663022, 12.738494983889867, 14.916718129078353, 14.659924575741897, 7.931256015136952, 8.289278638186355, 8.41151469451908, 15.028894915352582, 8.170969822926269, 9.544995753289383, 11.294007612906617, 13.094337208851638), # 120
(12.773944958808976, 10.547104823170763, 12.710724703381864, 14.876425697739808, 14.629350840953688, 7.9166030710627435, 8.253413050436373, 8.39361201926423, 15.0051914245009, 8.142451132105215, 9.513398005500363, 11.261243527735912, 13.065627796996127), # 121
(12.731072870375797, 10.494072046189146, 12.682880967168597, 14.836089386966199, 14.598197201850828, 7.902105299930128, 8.217870718100565, 8.376420015694709, 14.981844390750846, 8.11422184935667, 9.482150619740192, 11.228597577861303, 13.036481093831679), # 122
(12.687691748507607, 10.441415010564684, 12.65490859939465, 14.795635019242972, 14.56642148517387, 7.887724056371495, 8.182587981118376, 8.359873667527177, 14.958784792845258, 8.086223372935942, 9.451185699056563, 11.19600540478619, 13.0068546883346), # 123
(12.643750681116316, 10.389049180143882, 12.62675242420462, 14.754988417055582, 14.533981517663353, 7.873420695019235, 8.147501179429248, 8.343907958478297, 14.935943609526962, 8.058397101098347, 9.420435346497168, 11.163402650013985, 12.976706169481197), # 124
(12.599198756113843, 10.33689001877325, 12.598357265743093, 14.714075402889465, 14.500835126059833, 7.859156570505739, 8.112546652972636, 8.328457872264728, 14.913251819538791, 8.030684432099187, 9.389831665109703, 11.130724955048088, 12.94599312624776), # 125
(12.553985061412101, 10.284852990299292, 12.56966794815466, 14.672821799230077, 14.466940137103851, 7.844893037463395, 8.077660741687978, 8.31345839260313, 14.890640401623585, 8.00302676419378, 9.359306757941859, 11.097907961391908, 12.91467314761061), # 126
(12.508058684923006, 10.232853558568515, 12.540629295583907, 14.63115342856286, 14.432254377535958, 7.830591450524592, 8.042779785514732, 8.298844503210164, 14.86804033452417, 7.975365495637434, 9.32879272804133, 11.064887310548842, 12.88270382254604), # 127
(12.461368714558466, 10.18080718742743, 12.51118613217543, 14.588996113373266, 14.396735674096707, 7.816213164321722, 8.007840124392336, 8.284551187802489, 14.845382596983379, 7.947642024685458, 9.298221678455814, 11.031598644022305, 12.850042740030352), # 128
(12.413864238230394, 10.128629340722538, 12.481283282073816, 14.546275676146736, 14.360341853526638, 7.801719533487173, 7.972778098260239, 8.270513430096765, 14.822598167744045, 7.919797749593164, 9.267525712233, 10.997977603315691, 12.816647489039854), # 129
(12.365494343850713, 10.076235482300353, 12.450865569423652, 14.502917939368722, 14.3230307425663, 7.7870719126533325, 7.937530047057888, 8.256666213809652, 14.799618025549002, 7.89177406861586, 9.236636932420582, 10.963959829932413, 12.78247565855085), # 130
(12.316208119331334, 10.023541076007378, 12.419877818369534, 14.458848725524668, 14.284760167956243, 7.772231656452593, 7.902032310724733, 8.24294452265781, 14.776373149141081, 7.86351238000886, 9.205487442066255, 10.929480965375875, 12.747484837539638), # 131
(12.265954652584163, 9.970461585690122, 12.388264853056045, 14.413993857100023, 14.245487956437017, 7.757160119517344, 7.8662212292002165, 8.229283340357902, 14.752794517263117, 7.834954082027471, 9.17400934421771, 10.894476651149478, 12.711632614982527), # 132
(12.21468303152113, 9.91691247519509, 12.355971497627777, 14.368279156580234, 14.205171934749162, 7.741818656479974, 7.830033142423786, 8.215617650626585, 14.728813108657938, 7.806040572927006, 9.142134741922645, 10.85888252875663, 12.674876579855821), # 133
(12.162342344054133, 9.862809208368793, 12.322942576229327, 14.321630446450746, 14.163769929633231, 7.726168621972872, 7.79340439033489, 8.201882437180522, 14.704359902068381, 7.776713250962773, 9.109795738228751, 10.822634239700733, 12.637174321135817), # 134
(12.108881678095097, 9.808067249057736, 12.289122913005274, 14.273973549197011, 14.12123976782977, 7.710171370628429, 7.756271312872975, 8.18801268373637, 14.679365876237274, 7.746913514390087, 9.07692443618372, 10.785667425485194, 12.59848342779883), # 135
(12.05425012155593, 9.752602061108423, 12.254457332100213, 14.225234287304469, 14.077539276079325, 7.693788257079036, 7.718570249977489, 8.173943374010788, 14.65376200990745, 7.716582761464252, 9.043452938835248, 10.747917727613418, 12.558761488821151), # 136
(11.998396762348548, 9.696329108367367, 12.218890657658735, 14.175338483258576, 14.032626281122448, 7.6769806359570785, 7.6802375415878785, 8.159609491720442, 14.627479281821747, 7.685662390440583, 9.009313349231029, 10.709320787588808, 12.517966093179089), # 137
(11.941270688384867, 9.639163854681073, 12.182367713825425, 14.12421195954477, 13.986458609699687, 7.6597098618949495, 7.6412095276435865, 8.144946020581987, 14.600448670722995, 7.654093799574386, 8.974437770418753, 10.66981224691477, 12.476054829848946), # 138
(11.882820987576796, 9.581021763896047, 12.144833324744877, 14.071780538648504, 13.938994088551583, 7.641937289525037, 7.601422548084064, 8.129887944312085, 14.572601155354022, 7.621818387120976, 8.938758305446116, 10.62932774709471, 12.432985287807028), # 139
(11.822996747836257, 9.521818299858795, 12.106232314561684, 14.017970043055223, 13.890190544418692, 7.623624273479732, 7.560812942848756, 8.114370246627395, 14.543867714457667, 7.588777551335661, 8.902207057360812, 10.58780292963203, 12.38871505602964), # 140
(11.761747057075162, 9.46146892641583, 12.066509507420426, 13.962706295250376, 13.840005804041555, 7.604732168391422, 7.519317051877113, 8.09832791124458, 14.514179326776754, 7.554912690473753, 8.864716129210535, 10.545173436030137, 12.34320172349308), # 141
(11.69902100320542, 9.399889107413653, 12.0256097274657, 13.90591511771941, 13.788397694160723, 7.585222328892499, 7.476871215108577, 8.081695921880296, 14.48346697105412, 7.52016520279056, 8.826217624042977, 10.501374907792433, 12.296402879173653), # 142
(11.634767674138946, 9.336994306698774, 11.983477798842097, 13.847522332947767, 13.735324041516742, 7.56505610961535, 7.4334117724825965, 8.064409262251205, 14.451661626032607, 7.484476486541395, 8.786643644905832, 10.456342986422326, 12.248276112047666), # 143
(11.56893615778766, 9.2726999881177, 11.9400585456942, 13.787453763420901, 13.680742672850162, 7.544194865192366, 7.3888750639386185, 8.04640291607397, 14.418694270455035, 7.4477879399815645, 8.745926294846791, 10.41001331342322, 12.198779011091421), # 144
(11.501475542063469, 9.20692161551694, 11.895296792166606, 13.725635231624254, 13.624611414901528, 7.5225999502559375, 7.343197429416091, 8.027611867065247, 14.384495883064238, 7.410040961366383, 8.703997676913554, 10.36232153029852, 12.14786916528122), # 145
(11.432334914878291, 9.139574652742999, 11.849137362403903, 13.661992560043277, 13.566888094411391, 7.500232719438453, 7.2963152088544625, 8.007971098941699, 14.34899744260305, 7.37117694895116, 8.660789894153808, 10.313203278551628, 12.095504163593366), # 146
(11.361463364144042, 9.070574563642383, 11.801525080550675, 13.596451571163414, 13.507530538120294, 7.477054527372301, 7.2481647421931745, 7.987415595419982, 14.312129927814308, 7.331137300991204, 8.616235049615252, 10.262594199685955, 12.041641595004167), # 147
(11.288809977772631, 8.999836812061604, 11.752404770751518, 13.528938087470117, 13.446496572768787, 7.453026728689875, 7.198682369371678, 7.965880340216761, 14.273824317440841, 7.289863415741826, 8.570265246345576, 10.210429935204898, 11.986239048489919), # 148
(11.214323843675977, 8.927276861847163, 11.701721257151021, 13.459377931448826, 13.38374402509742, 7.42811067802356, 7.147804430329418, 7.943300317048694, 14.234011590225474, 7.247296691458339, 8.522812587392474, 10.156646126611868, 11.929254113026934), # 149
(11.137954049765991, 8.852810176845571, 11.649419363893772, 13.387696925584994, 13.319230721846738, 7.402267730005749, 7.0954672650058415, 7.91961050963244, 14.192622724911054, 7.2033785263960475, 8.473809175803641, 10.101178415410269, 11.870644377591507), # 150
(11.059649683954586, 8.776352220903336, 11.59544391512436, 13.313820892364063, 13.252914489757288, 7.375459239268828, 7.041607213340397, 7.8947459016846615, 14.149588700240406, 7.15805031881027, 8.423187114626767, 10.043962443103501, 11.810367431159946), # 151
(10.979359834153682, 8.697818457866962, 11.539739734987382, 13.237675654271488, 13.184753155569618, 7.34764656044519, 6.986160615272531, 7.8686414769220185, 14.10484049495636, 7.11125346695631, 8.37087850690955, 9.984933851194974, 11.748380862708558), # 152
(10.897033588275185, 8.61712435158296, 11.482251647627416, 13.159187033792707, 13.11470454602428, 7.318791048167222, 6.929063810741687, 7.841232219061167, 14.058309087801755, 7.062929369089481, 8.316815455699683, 9.92402828118809, 11.68464226121364), # 153
(10.81262003423102, 8.534185365897834, 11.422924477189063, 13.078280853413174, 13.042726487861813, 7.288854057067317, 6.87025313968732, 7.8124531118187726, 14.009925457519413, 7.013019423465095, 8.260930064044857, 9.861181374586256, 11.6191092156515), # 154
(10.72606825993309, 8.448916964658093, 11.361703047816906, 12.99488293561833, 12.968776807822776, 7.257796941777861, 6.809664942048866, 7.782239138911491, 13.95962058285218, 6.9614650283384565, 8.203154434992767, 9.796328772892876, 11.551739314998438), # 155
(10.637327353293314, 8.361234611710243, 11.298532183655539, 12.908919102893627, 12.892813332647707, 7.225581056931246, 6.74723555776578, 7.750525284055986, 13.907325442542877, 6.9082075819648825, 8.143420671591107, 9.729406117611353, 11.48249014823076), # 156
(10.546346402223609, 8.271053770900794, 11.233356708849547, 12.820315177724513, 12.81479388907716, 7.19216775715986, 6.6829013267775075, 7.717246530968915, 13.852971015334345, 6.853188482599679, 8.08166087688757, 9.660349050245092, 11.411319304324769), # 157
(10.450553324967336, 8.176634369081162, 11.163028735463298, 12.725677414311741, 12.731153548219398, 7.155434266843955, 6.615149409299001, 7.680115733289122, 13.792326928238738, 6.794712282807602, 8.01583405355452, 9.586639389872076, 11.335080203181485), # 158
(10.335201473769764, 8.06829144743927, 11.069432945764184, 12.605568022303835, 12.62126783369428, 7.103165507209945, 6.535497868740003, 7.626098945870136, 13.700998165711002, 6.723193391738244, 7.934383709866593, 9.493907533156353, 11.235598705688274), # 159
(10.198820932866035, 7.945135419957, 10.950689341138245, 12.458008514572404, 12.482988183885514, 7.034077814466758, 6.443141247737298, 7.553838865338286, 13.576395318120113, 6.637687912608051, 7.8361633120533565, 9.380702728442985, 11.110988852451014), # 160
(10.042510876420344, 7.8079692153126565, 10.808065760674433, 12.28440150525942, 12.317750373994958, 6.94900813819844, 6.338754024409627, 7.464240746353693, 13.420161673798626, 6.5389214704393135, 7.7220383164395905, 9.248074456470599, 10.962523662746737), # 161
(9.8673704785969, 7.657595762184535, 10.642830043461695, 12.086149608506858, 12.126990179224487, 6.848793427989039, 6.223010676875733, 7.358209843576484, 13.233940521079093, 6.427619690254325, 7.592874179350069, 9.09707219797781, 10.791476155852466), # 162
(9.674498913559898, 7.494817989250934, 10.456250028588983, 11.864655438456708, 11.912143374775964, 6.734270633422602, 6.096585683254362, 7.2366514116667755, 13.019375148294069, 6.304508197075376, 7.449536357109572, 8.928745433703247, 10.599119351045232), # 163
(9.464995355473539, 7.320438825190149, 10.249593555145248, 11.621321609250947, 11.674645735851264, 6.606276704083181, 5.960153521664253, 7.100470705284697, 12.778108843776113, 6.170312615924756, 7.292890306042875, 8.744143644385526, 10.386726267602059), # 164
(9.239958978502024, 7.135261198680485, 10.024128462219437, 11.357550735031554, 11.415933037652254, 6.465648589554821, 5.814388670224151, 6.950572979090365, 12.511784895857772, 6.02575857182476, 7.123801482474756, 8.544316310763268, 10.155569924799979), # 165
(9.000488956809557, 6.940088038400237, 9.7811225889005, 11.074745429940503, 11.137441055380801, 6.313223239421572, 5.659965607052801, 6.787863487743908, 12.222046592871603, 5.871571689797677, 6.943135342729992, 8.330312913575103, 9.906923341916015), # 166
(8.747684464560333, 6.735722273027703, 9.521843774277388, 10.774308308119782, 10.840605564238773, 6.149837603267482, 5.497558810268945, 6.613247485905448, 11.91053722315016, 5.7084775948658, 6.751757343133359, 8.103182933559642, 9.642059538227196), # 167
(8.482644675918554, 6.52296683124118, 9.247559857439049, 10.457641983711365, 10.526862339428039, 5.9763286306765995, 5.327842757991326, 6.427630228235103, 11.578900075025999, 5.5372019120514215, 6.550532940009634, 7.863975851455517, 9.362251533010546), # 168
(8.206468765048422, 6.302624641718972, 8.959538677474432, 10.126149070857236, 10.197647156150468, 5.793533271232973, 5.151491928338689, 6.231916969393004, 11.228778436831673, 5.358470266376831, 6.3403275896835956, 7.613741148001342, 9.0687723455431), # 169
(7.9202559061141375, 6.0754986331393726, 8.659048073472489, 9.781232183699368, 9.854395789607928, 5.60228847452065, 4.9691807994297745, 6.027012964039266, 10.861815596899735, 5.173008282864322, 6.122006748480023, 7.353528303935743, 8.762894995101878), # 170
(7.6251052732799005, 5.842391734180682, 8.34735588452217, 9.424293936379751, 9.498544015002288, 5.403431190123678, 4.781583849383328, 5.813823466834017, 10.47965484356274, 4.981541586536184, 5.896435872723688, 7.0843867999973416, 8.445892500963913), # 171
(7.322116040709912, 5.604106873521197, 8.025729949712423, 9.056736943040356, 9.131527607535416, 5.197798367626108, 4.5893755563180925, 5.593253732437379, 10.083939465153241, 4.784795802414712, 5.664480418739371, 6.80736611692476, 8.119037882406225), # 172
(7.012387382568372, 5.3614469798392195, 7.695438108132197, 8.679963817823166, 8.754782342409182, 4.9862269566119855, 4.39323039835281, 5.366209015509473, 9.676312750003792, 4.583496555522195, 5.427005842851849, 6.523515735456615, 7.783604158705848), # 173
(6.697018473019482, 5.115214981813045, 7.357748198870443, 8.295377174870158, 8.369743994825454, 4.76955390666536, 4.193822853606226, 5.133594570710425, 9.25841798644695, 4.3783694708809255, 5.1848776013858995, 6.233885136331535, 7.440864349139807), # 174
(6.377108486227438, 4.866213808120973, 7.013928061016112, 7.904379628323315, 7.977848339986097, 4.54861616737028, 3.9918274001970815, 4.896315652700355, 8.831898462815268, 4.170140173513194, 4.938961150666297, 5.939523800288141, 7.092091472985131), # 175
(6.053756596356447, 4.615246387441302, 6.66524553365815, 7.508373792324615, 7.580531153092983, 4.324250688310793, 3.787918516244121, 4.655277516139389, 8.3983974674413, 3.959534288441294, 4.690121947017822, 5.641481208065051, 6.738558549518844), # 176
(5.7280619775707065, 4.363115648452332, 6.3129684558855095, 7.108762281016037, 7.179228209347984, 4.097294419070949, 3.582770679866088, 4.411385415687646, 7.959558288657599, 3.7472774406875144, 4.43922544676525, 5.340806840400891, 6.381538598017975), # 177
(5.401123804034416, 4.11062451983236, 5.95836466678714, 6.7069477085395635, 6.775375283952959, 3.8685843092347962, 3.3770583691817246, 4.165544606005252, 7.51702421479672, 3.5340952552741505, 4.187137106233358, 5.038550178034279, 6.022304637759553), # 178
(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0), # 179
)
passenger_arriving_acc = (
(7, 9, 6, 6, 5, 3, 4, 0, 7, 0, 0, 2, 0, 10, 9, 1, 7, 8, 3, 1, 2, 1, 3, 2, 1, 0), # 0
(15, 19, 17, 14, 13, 5, 5, 1, 11, 1, 1, 3, 0, 15, 16, 4, 13, 13, 7, 5, 4, 2, 4, 3, 3, 0), # 1
(21, 26, 22, 22, 19, 9, 8, 4, 14, 2, 1, 3, 0, 28, 20, 7, 17, 16, 13, 8, 6, 6, 4, 3, 3, 0), # 2
(30, 35, 30, 30, 27, 10, 12, 8, 18, 5, 3, 5, 0, 33, 23, 16, 23, 23, 22, 16, 10, 10, 4, 3, 3, 0), # 3
(45, 47, 36, 43, 34, 11, 15, 11, 24, 8, 4, 5, 0, 48, 36, 22, 31, 29, 25, 18, 14, 17, 9, 6, 3, 0), # 4
(57, 56, 41, 48, 46, 14, 18, 13, 24, 9, 5, 5, 0, 56, 43, 25, 33, 35, 28, 25, 15, 22, 11, 8, 4, 0), # 5
(67, 62, 45, 59, 54, 17, 20, 15, 27, 12, 5, 5, 0, 68, 53, 34, 37, 44, 31, 30, 20, 26, 14, 11, 5, 0), # 6
(75, 71, 50, 65, 61, 21, 20, 15, 31, 15, 7, 5, 0, 79, 62, 44, 43, 48, 32, 34, 22, 29, 17, 11, 7, 0), # 7
(83, 85, 56, 79, 69, 29, 25, 19, 38, 17, 7, 5, 0, 92, 81, 52, 49, 54, 36, 37, 23, 32, 19, 14, 8, 0), # 8
(92, 93, 64, 91, 75, 31, 29, 24, 43, 18, 10, 6, 0, 102, 97, 61, 55, 67, 40, 38, 26, 35, 23, 15, 8, 0), # 9
(103, 98, 72, 109, 84, 35, 31, 27, 47, 20, 10, 6, 0, 112, 106, 66, 58, 73, 47, 44, 28, 41, 27, 15, 11, 0), # 10
(109, 105, 85, 117, 91, 45, 35, 30, 50, 22, 12, 6, 0, 121, 116, 70, 64, 80, 50, 47, 33, 47, 29, 17, 13, 0), # 11
(115, 116, 99, 126, 100, 46, 36, 32, 55, 27, 14, 6, 0, 130, 127, 78, 74, 89, 59, 53, 40, 52, 31, 19, 15, 0), # 12
(126, 127, 115, 139, 108, 50, 42, 36, 59, 29, 16, 6, 0, 143, 147, 91, 79, 106, 65, 63, 43, 59, 35, 19, 15, 0), # 13
(142, 138, 128, 148, 118, 54, 47, 43, 66, 32, 17, 7, 0, 161, 158, 102, 81, 112, 77, 67, 49, 67, 37, 23, 17, 0), # 14
(153, 151, 140, 168, 131, 56, 53, 49, 71, 36, 19, 7, 0, 176, 172, 114, 86, 127, 87, 73, 56, 72, 38, 23, 18, 0), # 15
(161, 167, 151, 180, 142, 60, 56, 58, 82, 39, 21, 7, 0, 186, 185, 120, 95, 143, 94, 83, 58, 78, 41, 23, 22, 0), # 16
(172, 175, 165, 190, 153, 63, 64, 65, 88, 40, 21, 8, 0, 198, 196, 129, 104, 150, 102, 87, 66, 85, 46, 23, 22, 0), # 17
(192, 188, 174, 196, 165, 68, 73, 68, 100, 47, 24, 8, 0, 213, 206, 141, 115, 154, 111, 93, 68, 90, 52, 26, 24, 0), # 18
(201, 199, 182, 209, 171, 76, 76, 70, 105, 48, 24, 9, 0, 231, 217, 152, 120, 166, 117, 101, 70, 97, 52, 28, 25, 0), # 19
(217, 211, 190, 221, 173, 80, 79, 77, 110, 49, 26, 9, 0, 245, 224, 157, 126, 176, 122, 109, 73, 100, 56, 28, 25, 0), # 20
(234, 228, 206, 233, 181, 84, 83, 84, 114, 52, 28, 11, 0, 257, 243, 168, 141, 185, 126, 113, 78, 109, 59, 31, 26, 0), # 21
(252, 246, 219, 240, 189, 90, 86, 88, 120, 52, 32, 12, 0, 276, 256, 182, 149, 190, 133, 119, 85, 117, 62, 33, 27, 0), # 22
(259, 261, 229, 259, 199, 95, 90, 94, 122, 52, 35, 12, 0, 295, 272, 194, 156, 203, 142, 124, 90, 119, 67, 37, 28, 0), # 23
(275, 271, 242, 271, 209, 101, 94, 96, 127, 52, 35, 13, 0, 306, 287, 210, 168, 217, 153, 131, 93, 123, 72, 40, 29, 0), # 24
(287, 286, 257, 280, 218, 107, 100, 108, 131, 52, 38, 13, 0, 316, 298, 221, 176, 225, 158, 133, 99, 129, 76, 45, 32, 0), # 25
(303, 305, 273, 293, 230, 113, 106, 112, 137, 58, 41, 13, 0, 333, 310, 230, 182, 238, 165, 138, 105, 134, 79, 47, 32, 0), # 26
(319, 322, 281, 299, 244, 123, 109, 127, 142, 60, 42, 14, 0, 340, 320, 240, 189, 251, 172, 143, 110, 138, 82, 51, 33, 0), # 27
(331, 347, 295, 309, 259, 128, 117, 129, 150, 63, 43, 14, 0, 353, 328, 250, 194, 263, 180, 147, 115, 141, 84, 54, 34, 0), # 28
(346, 363, 310, 319, 269, 130, 125, 132, 159, 64, 46, 16, 0, 362, 345, 260, 197, 274, 188, 151, 117, 150, 85, 59, 35, 0), # 29
(369, 376, 326, 334, 278, 130, 129, 135, 162, 66, 46, 18, 0, 385, 357, 265, 205, 280, 198, 162, 121, 157, 91, 62, 36, 0), # 30
(385, 389, 345, 349, 286, 133, 139, 142, 174, 69, 46, 18, 0, 397, 375, 273, 212, 289, 209, 169, 125, 161, 96, 63, 36, 0), # 31
(403, 404, 352, 358, 298, 137, 142, 146, 179, 76, 47, 19, 0, 408, 388, 277, 222, 297, 218, 175, 129, 162, 100, 66, 36, 0), # 32
(420, 416, 369, 374, 308, 144, 150, 153, 182, 78, 49, 20, 0, 416, 403, 286, 227, 310, 228, 179, 131, 170, 104, 71, 37, 0), # 33
(438, 425, 387, 384, 316, 150, 161, 162, 186, 81, 49, 21, 0, 425, 421, 297, 239, 321, 237, 185, 132, 171, 105, 72, 38, 0), # 34
(453, 433, 403, 398, 330, 159, 164, 168, 191, 83, 51, 22, 0, 437, 435, 307, 246, 337, 242, 188, 137, 179, 110, 73, 39, 0), # 35
(462, 442, 414, 407, 345, 169, 167, 172, 195, 86, 52, 22, 0, 449, 450, 317, 254, 346, 245, 194, 138, 183, 114, 75, 39, 0), # 36
(487, 449, 426, 421, 350, 176, 172, 180, 202, 88, 53, 26, 0, 463, 462, 324, 260, 357, 250, 200, 140, 188, 121, 77, 41, 0), # 37
(501, 462, 439, 435, 359, 180, 177, 186, 204, 88, 55, 27, 0, 478, 471, 338, 267, 366, 252, 203, 147, 193, 126, 78, 42, 0), # 38
(508, 473, 455, 452, 372, 191, 182, 192, 211, 91, 56, 28, 0, 503, 479, 351, 276, 370, 255, 205, 148, 199, 128, 81, 44, 0), # 39
(529, 488, 468, 465, 380, 195, 187, 199, 214, 93, 59, 29, 0, 512, 490, 360, 283, 381, 263, 210, 153, 204, 132, 84, 45, 0), # 40
(549, 503, 476, 486, 387, 200, 190, 205, 219, 95, 60, 32, 0, 529, 495, 372, 292, 390, 267, 215, 155, 209, 135, 85, 50, 0), # 41
(560, 516, 487, 501, 398, 204, 195, 209, 222, 96, 62, 32, 0, 546, 508, 379, 301, 397, 271, 219, 160, 212, 141, 89, 50, 0), # 42
(573, 533, 500, 516, 404, 209, 202, 216, 227, 100, 67, 34, 0, 566, 516, 394, 307, 403, 278, 225, 164, 215, 147, 91, 53, 0), # 43
(590, 547, 518, 527, 419, 213, 207, 219, 241, 100, 68, 35, 0, 580, 526, 405, 313, 411, 293, 232, 167, 222, 151, 92, 56, 0), # 44
(610, 561, 528, 541, 428, 217, 215, 224, 245, 101, 69, 36, 0, 594, 539, 419, 316, 424, 301, 238, 168, 228, 154, 94, 57, 0), # 45
(625, 575, 540, 558, 441, 220, 218, 228, 251, 104, 70, 36, 0, 606, 553, 435, 325, 437, 307, 242, 172, 238, 157, 94, 58, 0), # 46
(645, 593, 549, 581, 455, 225, 228, 234, 263, 104, 70, 36, 0, 624, 564, 445, 331, 453, 312, 249, 177, 243, 160, 96, 58, 0), # 47
(659, 602, 560, 600, 466, 232, 235, 240, 269, 106, 71, 37, 0, 639, 577, 452, 339, 462, 321, 254, 182, 249, 163, 97, 63, 0), # 48
(671, 610, 573, 610, 479, 237, 245, 244, 271, 109, 73, 40, 0, 654, 591, 458, 348, 474, 330, 258, 187, 255, 166, 100, 65, 0), # 49
(686, 626, 580, 619, 490, 239, 248, 248, 278, 111, 74, 42, 0, 675, 605, 470, 354, 488, 334, 263, 188, 260, 174, 106, 67, 0), # 50
(704, 638, 592, 631, 500, 247, 252, 252, 280, 114, 79, 44, 0, 696, 621, 481, 364, 503, 343, 270, 193, 265, 177, 110, 68, 0), # 51
(719, 650, 606, 642, 512, 253, 262, 254, 282, 115, 79, 44, 0, 704, 628, 489, 371, 509, 348, 275, 195, 268, 179, 110, 69, 0), # 52
(729, 664, 618, 663, 520, 260, 266, 258, 288, 119, 80, 47, 0, 724, 638, 498, 382, 525, 361, 279, 201, 274, 185, 112, 69, 0), # 53
(746, 675, 630, 674, 532, 263, 272, 263, 292, 121, 81, 48, 0, 737, 655, 512, 389, 538, 364, 290, 204, 279, 188, 112, 70, 0), # 54
(764, 695, 648, 684, 545, 268, 276, 268, 295, 122, 84, 50, 0, 757, 669, 518, 398, 542, 371, 295, 206, 285, 191, 114, 70, 0), # 55
(780, 711, 651, 702, 562, 271, 283, 274, 303, 123, 85, 50, 0, 777, 681, 530, 411, 544, 378, 301, 214, 288, 197, 117, 70, 0), # 56
(794, 725, 665, 713, 572, 275, 290, 278, 310, 126, 85, 50, 0, 789, 697, 541, 419, 557, 382, 308, 218, 295, 200, 120, 72, 0), # 57
(814, 739, 680, 726, 582, 280, 296, 283, 313, 127, 88, 50, 0, 803, 708, 543, 422, 570, 388, 313, 218, 299, 204, 124, 72, 0), # 58
(828, 754, 689, 740, 595, 285, 299, 288, 316, 128, 90, 51, 0, 816, 719, 551, 431, 585, 396, 320, 222, 302, 207, 127, 75, 0), # 59
(839, 768, 697, 750, 607, 290, 308, 291, 324, 131, 92, 51, 0, 827, 728, 559, 437, 594, 401, 328, 226, 305, 208, 128, 76, 0), # 60
(854, 778, 705, 760, 614, 300, 316, 296, 326, 134, 94, 51, 0, 836, 738, 570, 441, 604, 405, 330, 228, 310, 213, 128, 79, 0), # 61
(868, 799, 717, 773, 620, 307, 319, 298, 333, 134, 96, 52, 0, 849, 751, 574, 453, 616, 411, 335, 231, 318, 217, 129, 81, 0), # 62
(880, 810, 724, 782, 627, 313, 331, 303, 337, 137, 98, 52, 0, 856, 762, 582, 462, 625, 412, 341, 235, 323, 222, 131, 82, 0), # 63
(900, 821, 737, 790, 640, 316, 336, 306, 340, 140, 100, 53, 0, 872, 775, 592, 467, 633, 415, 345, 236, 327, 224, 133, 82, 0), # 64
(911, 835, 753, 803, 649, 320, 343, 309, 348, 143, 102, 53, 0, 893, 789, 599, 474, 645, 420, 348, 238, 334, 227, 134, 83, 0), # 65
(929, 842, 772, 815, 656, 325, 351, 316, 353, 145, 108, 54, 0, 917, 802, 608, 484, 664, 423, 357, 242, 340, 233, 135, 83, 0), # 66
(944, 853, 788, 827, 661, 329, 355, 322, 363, 146, 112, 54, 0, 928, 812, 616, 490, 680, 428, 360, 245, 345, 238, 138, 83, 0), # 67
(952, 864, 800, 840, 668, 335, 358, 328, 368, 149, 112, 55, 0, 936, 822, 627, 500, 687, 433, 367, 246, 348, 243, 140, 85, 0), # 68
(971, 874, 815, 850, 682, 342, 365, 329, 372, 151, 115, 57, 0, 952, 838, 631, 512, 702, 436, 372, 248, 353, 250, 142, 86, 0), # 69
(993, 895, 822, 865, 692, 350, 371, 332, 375, 153, 117, 59, 0, 969, 854, 639, 515, 710, 444, 375, 252, 365, 257, 146, 89, 0), # 70
(1008, 909, 830, 876, 701, 358, 375, 341, 382, 154, 119, 60, 0, 981, 868, 651, 525, 722, 448, 378, 252, 370, 263, 147, 89, 0), # 71
(1024, 921, 839, 888, 713, 364, 380, 349, 388, 156, 120, 62, 0, 995, 881, 658, 535, 736, 452, 382, 257, 374, 265, 150, 92, 0), # 72
(1040, 929, 858, 903, 726, 369, 385, 351, 398, 158, 126, 63, 0, 1013, 889, 669, 541, 744, 456, 388, 261, 377, 269, 153, 96, 0), # 73
(1058, 944, 870, 918, 737, 372, 391, 355, 400, 161, 128, 63, 0, 1027, 901, 677, 547, 758, 460, 392, 263, 382, 272, 155, 97, 0), # 74
(1075, 957, 885, 926, 744, 378, 402, 359, 406, 164, 131, 64, 0, 1039, 914, 688, 558, 765, 463, 396, 264, 393, 274, 159, 98, 0), # 75
(1081, 971, 894, 938, 753, 382, 404, 359, 410, 166, 132, 65, 0, 1049, 931, 694, 565, 776, 470, 402, 267, 401, 279, 165, 98, 0), # 76
(1101, 984, 905, 952, 768, 391, 413, 359, 414, 169, 132, 66, 0, 1060, 941, 705, 572, 778, 474, 404, 271, 405, 283, 167, 99, 0), # 77
(1113, 991, 922, 966, 781, 397, 418, 364, 419, 170, 134, 69, 0, 1071, 957, 714, 578, 785, 478, 410, 273, 410, 289, 172, 99, 0), # 78
(1134, 1003, 928, 975, 794, 407, 423, 368, 425, 174, 136, 69, 0, 1085, 968, 722, 587, 794, 483, 413, 275, 414, 292, 172, 100, 0), # 79
(1150, 1016, 938, 993, 808, 416, 425, 374, 432, 175, 136, 73, 0, 1099, 981, 731, 596, 813, 485, 420, 279, 419, 295, 175, 100, 0), # 80
(1163, 1028, 949, 1002, 821, 422, 428, 376, 436, 178, 136, 76, 0, 1111, 995, 741, 601, 826, 490, 425, 286, 426, 298, 179, 100, 0), # 81
(1182, 1043, 959, 1013, 833, 429, 433, 378, 439, 180, 138, 78, 0, 1127, 1006, 749, 605, 836, 497, 429, 288, 435, 299, 182, 103, 0), # 82
(1202, 1057, 968, 1029, 843, 431, 439, 381, 444, 182, 143, 80, 0, 1149, 1016, 756, 614, 848, 501, 435, 291, 442, 304, 183, 104, 0), # 83
(1214, 1071, 983, 1038, 848, 435, 445, 386, 451, 185, 144, 80, 0, 1163, 1027, 767, 621, 854, 506, 440, 295, 447, 307, 183, 106, 0), # 84
(1228, 1081, 993, 1052, 860, 441, 452, 390, 457, 186, 144, 80, 0, 1175, 1036, 777, 629, 868, 510, 449, 299, 453, 308, 185, 107, 0), # 85
(1243, 1096, 1004, 1062, 870, 450, 459, 393, 461, 189, 147, 81, 0, 1190, 1046, 785, 632, 886, 517, 452, 306, 458, 311, 189, 110, 0), # 86
(1252, 1111, 1017, 1074, 879, 453, 464, 396, 469, 189, 148, 82, 0, 1202, 1058, 792, 642, 903, 525, 455, 307, 460, 314, 192, 110, 0), # 87
(1262, 1125, 1033, 1084, 893, 460, 467, 399, 472, 191, 149, 84, 0, 1220, 1067, 805, 645, 915, 528, 459, 309, 466, 320, 195, 112, 0), # 88
(1288, 1134, 1052, 1099, 900, 465, 474, 402, 474, 192, 150, 84, 0, 1233, 1081, 818, 653, 929, 534, 465, 313, 469, 324, 198, 113, 0), # 89
(1299, 1146, 1065, 1115, 909, 471, 478, 403, 480, 194, 150, 86, 0, 1248, 1090, 829, 655, 936, 542, 467, 315, 478, 329, 200, 116, 0), # 90
(1310, 1159, 1077, 1129, 918, 476, 482, 405, 481, 195, 151, 86, 0, 1262, 1100, 837, 659, 945, 548, 471, 319, 483, 334, 203, 118, 0), # 91
(1316, 1172, 1080, 1137, 929, 481, 486, 408, 486, 196, 154, 86, 0, 1272, 1116, 847, 666, 953, 556, 477, 323, 490, 337, 205, 121, 0), # 92
(1331, 1185, 1091, 1149, 934, 486, 491, 410, 489, 199, 155, 87, 0, 1287, 1129, 856, 675, 962, 562, 485, 330, 499, 340, 206, 123, 0), # 93
(1343, 1197, 1096, 1159, 940, 489, 497, 410, 494, 201, 155, 87, 0, 1298, 1140, 867, 680, 968, 571, 489, 331, 507, 344, 207, 123, 0), # 94
(1359, 1207, 1106, 1171, 952, 499, 503, 418, 496, 202, 155, 89, 0, 1305, 1151, 871, 685, 980, 576, 497, 333, 511, 350, 209, 124, 0), # 95
(1370, 1218, 1117, 1184, 958, 502, 509, 421, 501, 202, 157, 93, 0, 1321, 1164, 879, 692, 997, 582, 499, 337, 513, 356, 211, 125, 0), # 96
(1381, 1229, 1128, 1194, 969, 504, 516, 427, 509, 204, 158, 94, 0, 1336, 1171, 886, 697, 1005, 586, 505, 339, 517, 364, 211, 125, 0), # 97
(1402, 1238, 1138, 1208, 981, 510, 518, 428, 514, 205, 158, 95, 0, 1354, 1181, 897, 699, 1019, 590, 508, 343, 521, 366, 212, 125, 0), # 98
(1413, 1247, 1146, 1220, 991, 514, 522, 431, 518, 208, 159, 95, 0, 1366, 1191, 905, 706, 1034, 600, 515, 345, 530, 372, 215, 128, 0), # 99
(1424, 1261, 1157, 1228, 995, 521, 525, 435, 527, 210, 162, 95, 0, 1383, 1203, 913, 716, 1044, 602, 519, 347, 533, 376, 215, 128, 0), # 100
(1435, 1272, 1167, 1241, 1008, 527, 534, 436, 535, 215, 163, 99, 0, 1399, 1214, 923, 722, 1056, 612, 524, 348, 537, 380, 219, 130, 0), # 101
(1451, 1277, 1181, 1253, 1029, 534, 542, 442, 539, 219, 164, 99, 0, 1415, 1223, 928, 727, 1064, 616, 528, 349, 541, 383, 219, 130, 0), # 102
(1468, 1289, 1197, 1260, 1045, 537, 545, 445, 548, 221, 165, 100, 0, 1441, 1233, 935, 730, 1073, 618, 530, 352, 545, 389, 223, 132, 0), # 103
(1480, 1300, 1214, 1275, 1056, 540, 547, 450, 553, 224, 167, 100, 0, 1455, 1246, 944, 738, 1081, 622, 533, 355, 551, 391, 226, 135, 0), # 104
(1492, 1318, 1222, 1291, 1068, 540, 551, 454, 555, 225, 168, 102, 0, 1469, 1255, 954, 744, 1090, 630, 539, 363, 556, 395, 231, 135, 0), # 105
(1505, 1330, 1231, 1307, 1073, 543, 560, 456, 560, 230, 168, 105, 0, 1488, 1263, 960, 749, 1097, 633, 541, 373, 560, 398, 233, 137, 0), # 106
(1517, 1341, 1246, 1319, 1081, 549, 566, 459, 561, 233, 170, 106, 0, 1499, 1274, 968, 755, 1104, 635, 547, 378, 566, 399, 234, 137, 0), # 107
(1526, 1347, 1253, 1333, 1091, 554, 569, 461, 573, 234, 171, 106, 0, 1508, 1279, 976, 760, 1114, 642, 549, 383, 573, 404, 235, 137, 0), # 108
(1537, 1354, 1263, 1342, 1104, 557, 574, 466, 581, 234, 173, 107, 0, 1522, 1291, 986, 765, 1125, 651, 554, 388, 577, 406, 237, 138, 0), # 109
(1548, 1365, 1277, 1356, 1110, 563, 580, 469, 584, 235, 174, 110, 0, 1535, 1301, 996, 769, 1134, 653, 558, 392, 580, 412, 238, 139, 0), # 110
(1562, 1373, 1290, 1363, 1122, 565, 582, 472, 587, 236, 175, 111, 0, 1548, 1314, 1013, 775, 1144, 657, 562, 397, 584, 417, 240, 139, 0), # 111
(1574, 1383, 1305, 1386, 1134, 566, 588, 475, 592, 236, 176, 111, 0, 1559, 1320, 1018, 786, 1156, 662, 565, 399, 586, 421, 245, 139, 0), # 112
(1585, 1392, 1314, 1393, 1144, 569, 590, 479, 596, 240, 177, 112, 0, 1565, 1330, 1021, 798, 1164, 668, 566, 406, 590, 425, 246, 141, 0), # 113
(1595, 1404, 1323, 1403, 1151, 572, 594, 481, 600, 242, 177, 113, 0, 1575, 1339, 1027, 806, 1180, 672, 572, 408, 592, 429, 253, 141, 0), # 114
(1605, 1413, 1328, 1416, 1166, 576, 596, 484, 602, 243, 181, 114, 0, 1583, 1349, 1037, 812, 1193, 676, 579, 411, 597, 431, 255, 142, 0), # 115
(1621, 1425, 1332, 1432, 1171, 579, 597, 489, 608, 245, 183, 116, 0, 1592, 1361, 1043, 819, 1203, 682, 583, 416, 605, 437, 257, 144, 0), # 116
(1638, 1438, 1344, 1439, 1178, 587, 601, 491, 612, 247, 185, 118, 0, 1603, 1372, 1046, 825, 1212, 686, 586, 417, 609, 442, 261, 144, 0), # 117
(1650, 1441, 1352, 1447, 1184, 590, 602, 497, 618, 251, 185, 119, 0, 1619, 1383, 1053, 833, 1224, 688, 590, 420, 614, 444, 264, 146, 0), # 118
(1663, 1453, 1362, 1454, 1196, 596, 610, 497, 621, 253, 187, 122, 0, 1628, 1396, 1064, 839, 1233, 690, 598, 424, 619, 447, 266, 147, 0), # 119
(1671, 1463, 1369, 1465, 1212, 599, 613, 499, 623, 256, 190, 124, 0, 1636, 1404, 1066, 845, 1251, 696, 602, 429, 625, 453, 267, 148, 0), # 120
(1687, 1474, 1385, 1476, 1219, 601, 616, 505, 626, 257, 191, 124, 0, 1650, 1418, 1072, 851, 1260, 701, 605, 433, 628, 456, 270, 152, 0), # 121
(1701, 1484, 1394, 1482, 1233, 607, 620, 510, 632, 259, 193, 127, 0, 1666, 1428, 1082, 859, 1269, 704, 610, 437, 630, 461, 272, 153, 0), # 122
(1718, 1495, 1412, 1492, 1239, 609, 623, 513, 633, 261, 194, 128, 0, 1677, 1437, 1090, 862, 1280, 709, 614, 440, 633, 466, 272, 154, 0), # 123
(1732, 1503, 1418, 1502, 1245, 620, 629, 515, 639, 261, 194, 128, 0, 1685, 1446, 1099, 866, 1289, 711, 617, 445, 640, 468, 275, 154, 0), # 124
(1739, 1511, 1428, 1514, 1254, 626, 631, 517, 643, 261, 197, 128, 0, 1697, 1458, 1110, 874, 1304, 715, 622, 452, 642, 471, 277, 156, 0), # 125
(1749, 1522, 1434, 1525, 1258, 632, 636, 520, 647, 261, 201, 128, 0, 1704, 1471, 1119, 882, 1310, 719, 624, 457, 642, 475, 279, 157, 0), # 126
(1758, 1529, 1444, 1537, 1269, 634, 638, 523, 652, 264, 203, 130, 0, 1711, 1480, 1126, 888, 1319, 725, 625, 460, 647, 477, 279, 158, 0), # 127
(1772, 1540, 1460, 1540, 1274, 638, 643, 525, 658, 268, 204, 131, 0, 1730, 1495, 1136, 889, 1338, 730, 633, 464, 657, 479, 280, 158, 0), # 128
(1780, 1543, 1476, 1550, 1277, 642, 651, 529, 662, 269, 207, 133, 0, 1736, 1506, 1154, 892, 1348, 735, 636, 467, 665, 481, 280, 158, 0), # 129
(1791, 1552, 1487, 1556, 1288, 646, 655, 533, 666, 272, 209, 133, 0, 1748, 1514, 1164, 902, 1352, 739, 640, 470, 670, 488, 283, 158, 0), # 130
(1805, 1561, 1497, 1567, 1297, 652, 657, 536, 671, 273, 212, 133, 0, 1758, 1522, 1172, 913, 1367, 743, 648, 474, 672, 493, 285, 158, 0), # 131
(1816, 1563, 1509, 1577, 1308, 660, 659, 541, 678, 277, 213, 135, 0, 1770, 1532, 1179, 919, 1375, 745, 652, 478, 677, 494, 286, 159, 0), # 132
(1825, 1568, 1519, 1594, 1321, 667, 662, 545, 685, 277, 215, 135, 0, 1785, 1546, 1187, 925, 1392, 749, 655, 480, 681, 495, 286, 160, 0), # 133
(1836, 1581, 1530, 1602, 1328, 673, 668, 547, 688, 280, 216, 135, 0, 1796, 1556, 1194, 932, 1396, 755, 656, 482, 685, 500, 288, 162, 0), # 134
(1844, 1593, 1551, 1607, 1339, 679, 674, 553, 696, 284, 220, 137, 0, 1806, 1562, 1206, 936, 1408, 759, 658, 487, 690, 504, 290, 162, 0), # 135
(1860, 1601, 1559, 1622, 1344, 682, 682, 555, 703, 284, 220, 139, 0, 1820, 1568, 1215, 940, 1419, 760, 662, 490, 695, 508, 292, 163, 0), # 136
(1871, 1611, 1571, 1636, 1355, 684, 685, 561, 707, 286, 220, 139, 0, 1836, 1578, 1222, 945, 1428, 762, 665, 492, 698, 512, 295, 163, 0), # 137
(1888, 1617, 1578, 1642, 1364, 688, 687, 563, 712, 286, 223, 140, 0, 1844, 1590, 1229, 952, 1438, 766, 667, 495, 700, 514, 296, 165, 0), # 138
(1897, 1625, 1588, 1653, 1373, 694, 688, 566, 722, 289, 224, 140, 0, 1855, 1604, 1232, 956, 1449, 772, 671, 497, 704, 517, 298, 165, 0), # 139
(1909, 1634, 1596, 1665, 1380, 696, 694, 571, 732, 290, 225, 141, 0, 1866, 1615, 1241, 959, 1460, 774, 680, 500, 706, 518, 301, 166, 0), # 140
(1918, 1641, 1607, 1675, 1387, 703, 696, 580, 737, 291, 226, 141, 0, 1879, 1620, 1249, 965, 1467, 781, 682, 505, 711, 524, 302, 168, 0), # 141
(1925, 1645, 1619, 1685, 1394, 707, 698, 583, 740, 291, 228, 143, 0, 1893, 1632, 1262, 972, 1478, 785, 685, 508, 714, 531, 306, 169, 0), # 142
(1934, 1649, 1624, 1694, 1404, 708, 702, 586, 743, 292, 229, 144, 0, 1916, 1637, 1272, 978, 1482, 791, 687, 515, 719, 532, 308, 169, 0), # 143
(1945, 1654, 1633, 1704, 1412, 714, 707, 588, 748, 293, 231, 144, 0, 1931, 1649, 1275, 987, 1491, 796, 691, 518, 722, 534, 311, 169, 0), # 144
(1958, 1665, 1638, 1720, 1420, 719, 710, 589, 753, 295, 231, 144, 0, 1942, 1656, 1281, 991, 1503, 797, 699, 518, 724, 534, 313, 169, 0), # 145
(1968, 1676, 1641, 1730, 1432, 724, 712, 591, 759, 295, 233, 145, 0, 1956, 1661, 1292, 993, 1515, 803, 702, 519, 735, 539, 314, 170, 0), # 146
(1980, 1687, 1648, 1741, 1439, 727, 714, 593, 762, 296, 233, 145, 0, 1964, 1678, 1301, 996, 1527, 810, 711, 519, 738, 545, 314, 172, 0), # 147
(1988, 1696, 1658, 1751, 1451, 732, 717, 595, 765, 296, 234, 145, 0, 1975, 1692, 1307, 1000, 1537, 815, 713, 521, 741, 547, 316, 173, 0), # 148
(1996, 1703, 1668, 1765, 1465, 735, 721, 598, 770, 297, 234, 146, 0, 1984, 1699, 1315, 1008, 1548, 823, 722, 522, 746, 550, 318, 174, 0), # 149
(2007, 1712, 1674, 1776, 1471, 739, 725, 600, 774, 299, 235, 146, 0, 2002, 1711, 1324, 1011, 1552, 827, 722, 526, 748, 554, 319, 175, 0), # 150
(2021, 1717, 1685, 1792, 1478, 743, 729, 602, 778, 300, 237, 146, 0, 2013, 1722, 1326, 1016, 1559, 835, 724, 531, 753, 556, 324, 176, 0), # 151
(2031, 1726, 1691, 1803, 1488, 748, 730, 605, 780, 301, 238, 146, 0, 2030, 1727, 1334, 1022, 1568, 841, 728, 533, 758, 561, 326, 176, 0), # 152
(2046, 1732, 1703, 1810, 1498, 749, 735, 607, 787, 304, 238, 148, 0, 2041, 1740, 1335, 1026, 1579, 847, 732, 538, 764, 564, 328, 176, 0), # 153
(2060, 1738, 1713, 1819, 1506, 752, 738, 611, 788, 307, 239, 148, 0, 2053, 1753, 1339, 1029, 1589, 851, 734, 538, 765, 568, 328, 176, 0), # 154
(2071, 1744, 1721, 1826, 1514, 760, 739, 614, 796, 308, 241, 149, 0, 2069, 1765, 1348, 1031, 1595, 855, 738, 540, 771, 573, 329, 176, 0), # 155
(2084, 1754, 1737, 1838, 1525, 762, 743, 616, 803, 308, 242, 152, 0, 2076, 1774, 1356, 1040, 1602, 860, 742, 543, 777, 575, 330, 176, 0), # 156
(2097, 1757, 1746, 1845, 1536, 764, 749, 617, 808, 310, 243, 152, 0, 2083, 1784, 1363, 1044, 1607, 864, 747, 544, 781, 579, 331, 177, 0), # 157
(2106, 1764, 1752, 1852, 1539, 768, 750, 620, 812, 313, 243, 152, 0, 2094, 1786, 1370, 1051, 1615, 870, 748, 547, 784, 580, 332, 180, 0), # 158
(2120, 1769, 1764, 1865, 1547, 770, 754, 623, 812, 316, 245, 152, 0, 2110, 1794, 1374, 1055, 1621, 872, 750, 549, 787, 584, 337, 180, 0), # 159
(2128, 1775, 1775, 1874, 1560, 778, 759, 625, 814, 317, 245, 152, 0, 2120, 1799, 1376, 1061, 1626, 877, 751, 550, 792, 589, 340, 180, 0), # 160
(2137, 1780, 1785, 1880, 1570, 781, 762, 627, 819, 319, 245, 152, 0, 2124, 1805, 1390, 1067, 1635, 884, 757, 552, 795, 593, 342, 182, 0), # 161
(2146, 1786, 1794, 1894, 1575, 783, 768, 632, 824, 319, 246, 153, 0, 2135, 1811, 1397, 1071, 1641, 891, 759, 552, 799, 593, 347, 185, 0), # 162
(2156, 1792, 1797, 1901, 1582, 790, 774, 635, 829, 325, 246, 155, 0, 2146, 1819, 1406, 1074, 1647, 896, 761, 555, 805, 597, 349, 185, 0), # 163
(2160, 1797, 1801, 1903, 1595, 793, 780, 639, 833, 326, 246, 155, 0, 2155, 1831, 1408, 1077, 1660, 900, 766, 559, 813, 599, 349, 185, 0), # 164
(2169, 1804, 1811, 1909, 1606, 796, 783, 642, 840, 326, 247, 156, 0, 2165, 1841, 1412, 1080, 1666, 901, 767, 560, 814, 602, 351, 185, 0), # 165
(2173, 1809, 1821, 1913, 1613, 803, 786, 645, 846, 326, 248, 159, 0, 2168, 1850, 1419, 1084, 1671, 904, 772, 562, 821, 606, 353, 185, 0), # 166
(2178, 1813, 1827, 1923, 1618, 807, 790, 647, 850, 326, 250, 159, 0, 2174, 1856, 1433, 1089, 1679, 908, 773, 565, 829, 609, 354, 187, 0), # 167
(2189, 1822, 1833, 1931, 1626, 813, 792, 649, 853, 327, 252, 160, 0, 2185, 1868, 1442, 1091, 1683, 911, 773, 570, 833, 609, 354, 187, 0), # 168
(2200, 1828, 1838, 1934, 1637, 813, 793, 653, 857, 327, 252, 160, 0, 2196, 1878, 1449, 1093, 1691, 914, 776, 573, 835, 614, 354, 187, 0), # 169
(2206, 1831, 1845, 1944, 1646, 817, 796, 656, 862, 328, 252, 162, 0, 2206, 1884, 1452, 1097, 1695, 919, 779, 575, 836, 615, 356, 188, 0), # 170
(2215, 1840, 1855, 1952, 1649, 818, 797, 660, 862, 328, 253, 162, 0, 2214, 1894, 1455, 1100, 1704, 923, 781, 580, 839, 618, 357, 188, 0), # 171
(2219, 1845, 1860, 1959, 1657, 820, 801, 660, 868, 328, 253, 162, 0, 2221, 1902, 1461, 1102, 1713, 927, 782, 581, 840, 618, 357, 188, 0), # 172
(2223, 1848, 1864, 1967, 1661, 822, 805, 660, 871, 328, 253, 162, 0, 2227, 1909, 1465, 1102, 1718, 929, 784, 585, 842, 619, 359, 190, 0), # 173
(2234, 1855, 1866, 1980, 1668, 828, 806, 661, 873, 330, 255, 162, 0, 2233, 1913, 1467, 1108, 1724, 935, 785, 587, 848, 621, 360, 190, 0), # 174
(2239, 1860, 1876, 1989, 1669, 830, 806, 662, 877, 330, 258, 162, 0, 2238, 1916, 1469, 1112, 1729, 941, 788, 587, 852, 625, 361, 191, 0), # 175
(2246, 1861, 1879, 1994, 1676, 832, 806, 663, 880, 331, 259, 163, 0, 2243, 1917, 1479, 1117, 1732, 946, 789, 589, 854, 625, 362, 191, 0), # 176
(2252, 1868, 1882, 2004, 1677, 836, 809, 663, 883, 334, 261, 163, 0, 2251, 1920, 1482, 1122, 1734, 949, 790, 592, 860, 626, 362, 192, 0), # 177
(2256, 1871, 1887, 2007, 1682, 838, 809, 666, 889, 334, 261, 163, 0, 2260, 1924, 1486, 1125, 1738, 950, 793, 595, 864, 627, 363, 193, 0), # 178
(2256, 1871, 1887, 2007, 1682, 838, 809, 666, 889, 334, 261, 163, 0, 2260, 1924, 1486, 1125, 1738, 950, 793, 595, 864, 627, 363, 193, 0), # 179
)
passenger_arriving_rate = (
(7.029211809720476, 7.090786984939564, 6.079830434547925, 6.525401162556605, 5.184373233768971, 2.563234861163827, 2.9022249307617405, 2.7143527675713304, 2.8420462290117365, 1.3853052554328298, 0.9812285382399741, 0.571423425802387, 0.0, 7.117432297609708, 6.285657683826256, 4.90614269119987, 4.155915766298489, 5.684092458023473, 3.8000938745998627, 2.9022249307617405, 1.8308820436884476, 2.5921866168844856, 2.175133720852202, 1.2159660869095852, 0.6446169986308695, 0.0), # 0
(7.496058012827964, 7.558911224152441, 6.4812376898851785, 6.956401465940448, 5.527657648309288, 2.7325532603014207, 3.093628258884586, 2.893049671694997, 3.0297144856220246, 1.4766432422970026, 1.0460557650564308, 0.6091419437616749, 0.0, 7.587708306415797, 6.700561381378422, 5.230278825282154, 4.429929726891007, 6.059428971244049, 4.050269540372995, 3.093628258884586, 1.9518237573581576, 2.763828824154644, 2.3188004886468163, 1.2962475379770357, 0.687173747650222, 0.0), # 1
(7.9614122125716245, 8.025177635976757, 6.881049333138649, 7.385687089898034, 5.869698775499761, 2.9011961768518306, 3.284272955572493, 3.071031394610912, 3.2166338432095234, 1.5676198212571917, 1.1106254013811399, 0.6467104760728565, 0.0, 8.056110759493567, 7.113815236801421, 5.553127006905699, 4.702859463771574, 6.433267686419047, 4.2994439524552766, 3.284272955572493, 2.0722829834655934, 2.9348493877498805, 2.4618956966326784, 1.37620986662773, 0.7295616032706144, 0.0), # 2
(8.423460910405188, 8.487736310818441, 7.277679347539831, 7.811555227908678, 6.209150897601775, 3.0684948417778424, 3.473402549153569, 3.2475923418717962, 3.4020630750965104, 1.657873944449164, 1.1746812960930562, 0.6839799965752206, 0.0, 8.520781928755916, 7.523779962327425, 5.873406480465281, 4.97362183334749, 6.804126150193021, 4.5466292786205145, 3.473402549153569, 2.191782029841316, 3.1045754488008876, 2.6038517426362264, 1.455535869507966, 0.7716123918925856, 0.0), # 3
(8.880390607782374, 8.94473733908341, 7.669541716320211, 8.232303073451698, 6.5446682968767265, 3.233780486042246, 3.6602605679559215, 3.4220269190303676, 3.585260954605263, 1.7470445640086882, 1.2379672980711345, 0.7208014791080559, 0.0, 8.979864086115745, 7.928816270188614, 6.189836490355671, 5.241133692026064, 7.170521909210526, 4.790837686642515, 3.6602605679559215, 2.30984320431589, 3.2723341484383632, 2.7441010244839, 1.5339083432640421, 0.8131579399166738, 0.0), # 4
(9.330387806156915, 9.394330811177607, 8.055050422711272, 8.646227820006413, 6.874905255585995, 3.396384340607826, 3.844090540307657, 3.593629531639346, 3.765486255058061, 1.8347706320715327, 1.300227256194331, 0.7570258975106506, 0.0, 9.43149950348596, 8.327284872617156, 6.501136280971655, 5.504311896214597, 7.530972510116122, 5.031081344295084, 3.844090540307657, 2.4259888147198754, 3.4374526277929975, 2.8820759400021383, 1.6110100845422546, 0.8540300737434189, 0.0), # 5
(9.771639006982534, 9.834666817506942, 8.43261944994451, 9.051626661052135, 7.198516055990973, 3.5556376364373725, 4.024135994536884, 3.7616945852514516, 3.9419977497771805, 1.920691100773466, 1.3612050193415997, 0.7925042256222944, 0.0, 9.87383045277945, 8.717546481845236, 6.806025096707997, 5.762073302320396, 7.883995499554361, 5.266372419352033, 4.024135994536884, 2.5397411688838374, 3.5992580279954867, 3.017208887017379, 1.6865238899889023, 0.8940606197733586, 0.0), # 6
(10.202330711712957, 10.263895448477353, 8.800662781251408, 9.446796790068186, 7.514154980353052, 3.710871604493673, 4.19964045897171, 3.9255164854194056, 4.1140542120849, 2.004444922250256, 1.4206444363918964, 0.8270874372822752, 0.0, 10.304999205909127, 9.097961810105026, 7.103222181959481, 6.013334766750766, 8.2281084241698, 5.495723079587168, 4.19964045897171, 2.6506225746383376, 3.757077490176526, 3.148932263356063, 1.7601325562502819, 0.9330814044070321, 0.0), # 7
(10.62064942180191, 10.68016679449476, 9.157594399863463, 9.830035400533875, 7.820476310933614, 3.8614174757395103, 4.369847461940239, 4.0843896376959234, 4.280914415303496, 2.0856710486376717, 1.4782893562241752, 0.8606265063298821, 0.0, 10.723148034787885, 9.466891569628702, 7.391446781120876, 6.257013145913014, 8.561828830606991, 5.718145492774292, 4.369847461940239, 2.758155339813936, 3.910238155466807, 3.276678466844626, 1.831518879972693, 0.9709242540449783, 0.0), # 8
(11.02478163870312, 11.081630945965095, 9.501828289012156, 10.199639685928528, 8.116134329994049, 4.006606481137679, 4.534000531770584, 4.237608447633729, 4.441837132755248, 2.1640084320714803, 1.5338836277173917, 0.8929724066044035, 0.0, 11.126419211328628, 9.822696472648436, 7.669418138586958, 6.49202529621444, 8.883674265510496, 5.932651826687221, 4.534000531770584, 2.861861772241199, 4.058067164997024, 3.3998798953095104, 1.9003656578024313, 1.0074209950877362, 0.0), # 9
(11.412913863870306, 11.46643799329428, 9.83177843192898, 10.553906839731454, 8.399783319795748, 4.145769851650964, 4.691343196790848, 4.38446732078554, 4.596081137762433, 2.2390960246874507, 1.5871710997505006, 0.923976111945128, 0.0, 11.512955007444255, 10.163737231396405, 7.935855498752503, 6.717288074062351, 9.192162275524867, 6.138254249099756, 4.691343196790848, 2.961264179750688, 4.199891659897874, 3.517968946577152, 1.9663556863857963, 1.0424034539358438, 0.0), # 10
(11.783232598757209, 11.832738026888249, 10.145858811845418, 10.891134055421968, 8.670077562600099, 4.278238818242151, 4.841118985329142, 4.524260662704076, 4.7429052036473305, 2.3105727786213524, 1.6378956212024585, 0.9534885961913449, 0.0, 11.880897695047656, 10.488374558104791, 8.189478106012292, 6.931718335864056, 9.485810407294661, 6.333964927785706, 4.841118985329142, 3.055884870172965, 4.3350387813000495, 3.63037801847399, 2.0291717623690837, 1.075703456989841, 0.0), # 11
(12.133924344817538, 12.178681137152912, 10.442483411992965, 11.209618526479394, 8.925671340668487, 4.403344611874027, 4.9825714257135685, 4.656282878942054, 4.881568103732217, 2.378077646008951, 1.6858010409522184, 0.9813608331823415, 0.0, 12.22838954605175, 10.794969165005755, 8.429005204761092, 7.134232938026852, 9.763136207464434, 6.518796030518876, 4.9825714257135685, 3.1452461513385908, 4.462835670334243, 3.7365395088264655, 2.0884966823985933, 1.107152830650265, 0.0), # 12
(12.463175603505027, 12.502417414494213, 10.720066215603106, 11.507657446383048, 9.165218936262296, 4.520418463509383, 5.11494404627224, 4.779828375052198, 5.011328611339368, 2.441249578986017, 1.7306312078787365, 1.0074437967574077, 0.0, 12.55357283236943, 11.08188176433148, 8.653156039393682, 7.323748736958049, 10.022657222678736, 6.691759725073078, 5.11494404627224, 3.228870331078131, 4.582609468131148, 3.8358858154610167, 2.1440132431206216, 1.136583401317656, 0.0), # 13
(12.769172876273403, 12.802096949318072, 10.977021205907338, 11.783548008612232, 9.387374631642924, 4.6287916041110035, 5.237480375333263, 4.894191556587227, 5.131445499791063, 2.4997275296883177, 1.7721299708609668, 1.0315884607558323, 0.0, 12.85458982591359, 11.347473068314153, 8.860649854304834, 7.499182589064952, 10.262890999582126, 6.8518681792221185, 5.237480375333263, 3.306279717222145, 4.693687315821462, 3.9278493362040785, 2.195404241181468, 1.1638269953925522, 0.0), # 14
(13.050102664576398, 13.075869832030413, 11.211762366137135, 12.035587406646286, 9.590792709071755, 4.72779526464168, 5.349423941224739, 4.998666829099858, 5.241177542409583, 2.5531504502516222, 1.810041178777865, 1.0536457990169035, 0.0, 13.129582798597134, 11.590103789185937, 9.050205893889325, 7.659451350754866, 10.482355084819165, 6.998133560739801, 5.349423941224739, 3.3769966176011996, 4.795396354535877, 4.0118624688820965, 2.242352473227427, 1.1887154392754924, 0.0), # 15
(13.30415146986772, 13.321886153037171, 11.422703679523998, 12.262072833964503, 9.774127450810177, 4.816760676064193, 5.450018272274784, 5.092548598142811, 5.339783512517201, 2.6011572928116995, 1.8441086805083868, 1.0734667853799098, 0.0, 13.376694022332964, 11.808134639179006, 9.220543402541933, 7.803471878435097, 10.679567025034402, 7.1295680373999355, 5.450018272274784, 3.440543340045852, 4.887063725405088, 4.087357611321502, 2.2845407359047996, 1.2110805593670158, 0.0), # 16
(13.529505793601107, 13.538296002744264, 11.608259129299412, 12.46130148404622, 9.936033139119584, 4.895019069341334, 5.538506896811498, 5.17513126926881, 5.426522183436193, 2.643387009504314, 1.874076324931487, 1.09090239368414, 0.0, 13.594065769033982, 11.999926330525538, 9.370381624657433, 7.9301610285129405, 10.853044366872385, 7.245183776976335, 5.538506896811498, 3.496442192386667, 4.968016569559792, 4.153767161348741, 2.3216518258598824, 1.2307541820676606, 0.0), # 17
(13.724352137230287, 13.723249471557619, 11.766842698694862, 12.631570550370744, 10.07516405626135, 4.961901675435895, 5.6141333431629965, 5.245709248030569, 5.500652328488845, 2.6794785524652385, 1.8996879609261188, 1.1058035977688838, 0.0, 13.779840310613086, 12.163839575457718, 9.498439804630594, 8.038435657395715, 11.00130465697769, 7.343992947242797, 5.6141333431629965, 3.5442154824542103, 5.037582028130675, 4.210523516790249, 2.3533685397389728, 1.2475681337779656, 0.0), # 18
(13.88687700220898, 13.874896649883173, 11.896868370941842, 12.77117722641738, 10.190174484496875, 5.0167397253106545, 5.676141139657377, 5.30357693998081, 5.561432720997431, 2.7090708738302403, 1.9206874373712384, 1.1180213714734282, 0.0, 13.932159918983176, 12.298235086207708, 9.603437186856192, 8.12721262149072, 11.122865441994861, 7.425007715973134, 5.676141139657377, 3.5833855180790386, 5.095087242248438, 4.257059075472461, 2.379373674188369, 1.2613542408984704, 0.0), # 19
(14.015266889990915, 13.991387628126835, 11.996750129271838, 12.87841870566547, 10.279718706087547, 5.058864449928407, 5.723773814622755, 5.348028750672253, 5.608122134284226, 2.731802925735086, 1.936818603145802, 1.1274066886370624, 0.0, 14.049166866057154, 12.401473575007685, 9.68409301572901, 8.195408777205257, 11.216244268568452, 7.487240250941153, 5.723773814622755, 3.6134746070917196, 5.139859353043773, 4.292806235221825, 2.399350025854368, 1.2719443298297126, 0.0), # 20
(14.107708302029813, 14.070872496694552, 12.064901956916339, 12.951592181594311, 10.34245100329475, 5.087607080251938, 5.756274896387231, 5.378359085657614, 5.63997934167151, 2.747313660315545, 1.9478253071287643, 1.133810523099076, 0.0, 14.12900342374791, 12.471915754089835, 9.739126535643821, 8.241940980946634, 11.27995868334302, 7.529702719920659, 5.756274896387231, 3.634005057322813, 5.171225501647375, 4.317197393864771, 2.412980391383268, 1.279170226972232, 0.0), # 21
(14.162387739779412, 14.111501345992236, 12.099737837106835, 12.988994847683228, 10.377025658379871, 5.102298847244033, 5.77288791327892, 5.393862350489618, 5.656263116481561, 2.7552420297073854, 1.9534513981990798, 1.1370838486987573, 0.0, 14.16981186396836, 12.50792233568633, 9.7672569909954, 8.265726089122154, 11.312526232963123, 7.551407290685465, 5.77288791327892, 3.644499176602881, 5.188512829189936, 4.329664949227744, 2.419947567421367, 1.282863758726567, 0.0), # 22
(14.182550708679697, 14.116311945587563, 12.104077046181986, 12.993677353395064, 10.385883252297091, 5.104166666666667, 5.774862801581538, 5.395538065843622, 5.658298909465021, 2.7561772953818022, 1.9541568753377396, 1.1374880506020426, 0.0, 14.175, 12.512368556622466, 9.770784376688697, 8.268531886145405, 11.316597818930042, 7.553753292181072, 5.774862801581538, 3.6458333333333335, 5.192941626148546, 4.331225784465023, 2.4208154092363974, 1.283301085962506, 0.0), # 23
(14.197417378247815, 14.113505864197531, 12.10336728395062, 12.99310104166667, 10.390900439373862, 5.104166666666667, 5.773777668845317, 5.393208333333334, 5.658026111111111, 2.755602716049383, 1.9540790684624023, 1.1373934156378602, 0.0, 14.175, 12.51132757201646, 9.77039534231201, 8.266808148148147, 11.316052222222222, 7.550491666666668, 5.773777668845317, 3.6458333333333335, 5.195450219686931, 4.331033680555557, 2.4206734567901242, 1.2830459876543212, 0.0), # 24
(14.211970122296213, 14.10797467992684, 12.101966163694561, 12.991960841049384, 10.39580728255487, 5.104166666666667, 5.771639231824418, 5.388631687242799, 5.657487139917696, 2.754471593507088, 1.9539247931994848, 1.1372065996037193, 0.0, 14.175, 12.509272595640908, 9.769623965997424, 8.263414780521263, 11.314974279835392, 7.544084362139919, 5.771639231824418, 3.6458333333333335, 5.197903641277435, 4.330653613683129, 2.4203932327389124, 1.2825431527206221, 0.0), # 25
(14.226207826667249, 14.099802892089624, 12.099892889803387, 12.990269714506173, 10.400603610526364, 5.104166666666667, 5.768480702816105, 5.381894547325103, 5.65668890946502, 2.7528027480566992, 1.9536954462318665, 1.136930163084896, 0.0, 14.175, 12.506231793933855, 9.768477231159332, 8.258408244170097, 11.31337781893004, 7.534652366255146, 5.768480702816105, 3.6458333333333335, 5.200301805263182, 4.330089904835392, 2.4199785779606775, 1.2818002629172387, 0.0), # 26
(14.240129377203292, 14.089075, 12.097166666666668, 12.988040625, 10.405289251974601, 5.104166666666667, 5.7643352941176484, 5.3730833333333345, 5.655638333333333, 2.7506150000000003, 1.9533924242424245, 1.1365666666666672, 0.0, 14.175, 12.502233333333336, 9.766962121212122, 8.251845, 11.311276666666666, 7.5223166666666685, 5.7643352941176484, 3.6458333333333335, 5.2026446259873005, 4.329346875000001, 2.4194333333333335, 1.280825, 0.0), # 27
(14.253733659746702, 14.075875502972108, 12.093806698673983, 12.985286535493827, 10.40986403558584, 5.104166666666667, 5.759236218026306, 5.362284465020577, 5.654342325102881, 2.7479271696387753, 1.9530171239140377, 1.1361186709343092, 0.0, 14.175, 12.4973053802774, 9.765085619570188, 8.243781508916324, 11.308684650205763, 7.507198251028808, 5.759236218026306, 3.6458333333333335, 5.20493201779292, 4.32842884516461, 2.418761339734797, 1.2796250457247373, 0.0), # 28
(14.26701956013985, 14.060288900320074, 12.089832190214908, 12.982020408950618, 10.41432779004634, 5.104166666666667, 5.753216686839346, 5.349584362139918, 5.652807798353909, 2.7447580772748066, 1.952570941929584, 1.1355887364730988, 0.0, 14.175, 12.491476101204084, 9.76285470964792, 8.234274231824418, 11.305615596707819, 7.489418106995886, 5.753216686839346, 3.6458333333333335, 5.20716389502317, 4.327340136316874, 2.4179664380429817, 1.2782080818472796, 0.0), # 29
(14.279985964225098, 14.042399691358026, 12.085262345679013, 12.978255208333334, 10.418680344042354, 5.104166666666667, 5.746309912854031, 5.335069444444444, 5.651041666666666, 2.7411265432098775, 1.952055274971942, 1.1349794238683129, 0.0, 14.175, 12.48477366255144, 9.760276374859709, 8.223379629629632, 11.302083333333332, 7.469097222222222, 5.746309912854031, 3.6458333333333335, 5.209340172021177, 4.326085069444446, 2.4170524691358026, 1.276581790123457, 0.0), # 30
(14.292631757844802, 14.022292375400093, 12.080116369455878, 12.97400389660494, 10.422921526260142, 5.104166666666667, 5.7385491083676285, 5.318826131687244, 5.649050843621399, 2.737051387745771, 1.9514715197239891, 1.1342932937052284, 0.0, 14.175, 12.477226230757509, 9.757357598619945, 8.211154163237312, 11.298101687242799, 7.4463565843621415, 5.7385491083676285, 3.6458333333333335, 5.211460763130071, 4.324667965534981, 2.416023273891176, 1.2747538523090995, 0.0), # 31
(14.304955826841338, 14.000051451760402, 12.07441346593507, 12.969279436728398, 10.427051165385956, 5.104166666666667, 5.7299674856774, 5.3009408436214, 5.646842242798354, 2.7325514311842714, 1.950821072868604, 1.1335329065691209, 0.0, 14.175, 12.468861972260328, 9.754105364343019, 8.197654293552812, 11.293684485596708, 7.421317181069961, 5.7299674856774, 3.6458333333333335, 5.213525582692978, 4.3230931455761334, 2.4148826931870144, 1.272731950160037, 0.0), # 32
(14.316957057057056, 13.975761419753086, 12.068172839506175, 12.964094791666666, 10.431069090106059, 5.104166666666667, 5.720598257080611, 5.2815, 5.644422777777778, 2.7276454938271613, 1.9501053310886647, 1.1327008230452675, 0.0, 14.175, 12.459709053497942, 9.750526655443322, 8.182936481481482, 11.288845555555556, 7.394100000000001, 5.720598257080611, 3.6458333333333335, 5.215534545053029, 4.321364930555556, 2.413634567901235, 1.2705237654320989, 0.0), # 33
(14.328634334334335, 13.949506778692271, 12.061413694558757, 12.958462924382715, 10.434975129106702, 5.104166666666667, 5.710474634874527, 5.260590020576132, 5.641799362139919, 2.7223523959762237, 1.9493256910670491, 1.1317996037189455, 0.0, 14.175, 12.449795640908398, 9.746628455335244, 8.16705718792867, 11.283598724279837, 7.3648260288065845, 5.710474634874527, 3.6458333333333335, 5.217487564553351, 4.319487641460906, 2.4122827389117516, 1.2681369798811157, 0.0), # 34
(14.339986544515531, 13.92137202789209, 12.054155235482398, 12.952396797839505, 10.438769111074146, 5.104166666666667, 5.699629831356412, 5.238297325102881, 5.638978909465021, 2.7166909579332423, 1.9484835494866362, 1.1308318091754308, 0.0, 14.175, 12.439149900929737, 9.74241774743318, 8.150072873799726, 11.277957818930043, 7.333616255144034, 5.699629831356412, 3.6458333333333335, 5.219384555537073, 4.317465599279836, 2.41083104709648, 1.2655792752629174, 0.0), # 35
(14.35101257344301, 13.891441666666665, 12.04641666666667, 12.945909375, 10.442450864694647, 5.104166666666667, 5.68809705882353, 5.214708333333334, 5.635968333333333, 2.7106800000000004, 1.9475803030303034, 1.1298000000000004, 0.0, 14.175, 12.427800000000001, 9.737901515151515, 8.13204, 11.271936666666665, 7.300591666666668, 5.68809705882353, 3.6458333333333335, 5.221225432347324, 4.315303125000001, 2.409283333333334, 1.2628583333333334, 0.0), # 36
(14.361711306959135, 13.859800194330132, 12.038217192501145, 12.939013618827161, 10.44602021865446, 5.104166666666667, 5.675909529573146, 5.189909465020577, 5.632774547325103, 2.7043383424782816, 1.9466173483809293, 1.1287067367779304, 0.0, 14.175, 12.415774104557233, 9.733086741904645, 8.113015027434844, 11.265549094650206, 7.265873251028808, 5.675909529573146, 3.6458333333333335, 5.22301010932723, 4.313004539609055, 2.407643438500229, 1.259981835848194, 0.0), # 37
(14.372081630906267, 13.826532110196618, 12.029576017375401, 12.931722492283953, 10.449477001639845, 5.104166666666667, 5.663100455902526, 5.1639871399176975, 5.629404465020576, 2.6976848056698683, 1.9455960822213911, 1.1275545800944982, 0.0, 14.175, 12.403100381039478, 9.727980411106955, 8.093054417009604, 11.258808930041152, 7.229581995884776, 5.663100455902526, 3.6458333333333335, 5.224738500819923, 4.3105741640946516, 2.40591520347508, 1.2569574645633292, 0.0), # 38
(14.382122431126781, 13.791721913580247, 12.020512345679016, 12.924048958333334, 10.452821042337057, 5.104166666666667, 5.649703050108934, 5.137027777777778, 5.625865000000001, 2.690738209876544, 1.9445179012345684, 1.1263460905349796, 0.0, 14.175, 12.389806995884772, 9.722589506172842, 8.07221462962963, 11.251730000000002, 7.191838888888889, 5.649703050108934, 3.6458333333333335, 5.226410521168528, 4.308016319444445, 2.4041024691358035, 1.253792901234568, 0.0), # 39
(14.39183259346303, 13.755454103795152, 12.011045381801555, 12.916005979938273, 10.45605216943235, 5.104166666666667, 5.635750524489632, 5.1091177983539104, 5.622163065843623, 2.6835173754000925, 1.943384202103338, 1.125083828684652, 0.0, 14.175, 12.375922115531171, 9.71692101051669, 8.050552126200277, 11.244326131687245, 7.1527649176954755, 5.635750524489632, 3.6458333333333335, 5.228026084716175, 4.305335326646092, 2.4022090763603114, 1.2504958276177414, 0.0), # 40
(14.40121100375738, 13.717813180155463, 12.001194330132604, 12.90760652006173, 10.459170211611989, 5.104166666666667, 5.621276091341887, 5.080343621399178, 5.618305576131687, 2.676041122542296, 1.9421963815105796, 1.1237703551287916, 0.0, 14.175, 12.361473906416705, 9.710981907552897, 8.028123367626886, 11.236611152263373, 7.112481069958849, 5.621276091341887, 3.6458333333333335, 5.229585105805994, 4.302535506687244, 2.400238866026521, 1.2470739254686787, 0.0), # 41
(14.410256547852201, 13.678883641975311, 11.990978395061731, 12.89886354166667, 10.462174997562222, 5.104166666666667, 5.6063129629629636, 5.050791666666668, 5.614299444444446, 2.668328271604939, 1.9409558361391697, 1.122408230452675, 0.0, 14.175, 12.346490534979424, 9.704779180695848, 8.004984814814815, 11.228598888888891, 7.071108333333335, 5.6063129629629636, 3.6458333333333335, 5.231087498781111, 4.299621180555557, 2.3981956790123466, 1.2435348765432102, 0.0), # 42
(14.418968111589852, 13.638749988568819, 11.980416780978512, 12.889790007716051, 10.46506635596931, 5.104166666666667, 5.5908943516501255, 5.020548353909466, 5.61015158436214, 2.660397642889804, 1.9396639626719878, 1.1210000152415793, 0.0, 14.175, 12.331000167657372, 9.698319813359937, 7.981192928669412, 11.22030316872428, 7.0287676954732525, 5.5908943516501255, 3.6458333333333335, 5.232533177984655, 4.296596669238685, 2.3960833561957027, 1.2398863625971654, 0.0), # 43
(14.427344580812699, 13.597496719250115, 11.969528692272522, 12.880398881172843, 10.467844115519508, 5.104166666666667, 5.575053469700638, 4.98970010288066, 5.605868909465021, 2.652268056698675, 1.938322157791911, 1.1195482700807806, 0.0, 14.175, 12.315030970888586, 9.691610788959554, 7.9568041700960235, 11.211737818930041, 6.985580144032924, 5.575053469700638, 3.6458333333333335, 5.233922057759754, 4.293466293724282, 2.3939057384545044, 1.2361360653863744, 0.0), # 44
(14.435384841363105, 13.555208333333335, 11.958333333333336, 12.870703125000002, 10.470508104899077, 5.104166666666667, 5.558823529411765, 4.958333333333334, 5.601458333333333, 2.6439583333333343, 1.9369318181818187, 1.1180555555555556, 0.0, 14.175, 12.29861111111111, 9.684659090909092, 7.931875000000002, 11.202916666666667, 6.941666666666667, 5.558823529411765, 3.6458333333333335, 5.235254052449538, 4.290234375000002, 2.391666666666667, 1.232291666666667, 0.0), # 45
(14.443087779083434, 13.511969330132603, 11.946849908550526, 12.860715702160494, 10.47305815279427, 5.104166666666667, 5.542237743080772, 4.926534465020577, 5.596926769547324, 2.635487293095565, 1.9354943405245877, 1.1165244322511814, 0.0, 14.175, 12.281768754762993, 9.677471702622938, 7.906461879286693, 11.193853539094649, 6.897148251028808, 5.542237743080772, 3.6458333333333335, 5.236529076397135, 4.286905234053499, 2.3893699817101055, 1.228360848193873, 0.0), # 46
(14.45045227981605, 13.46786420896205, 11.935097622313673, 12.850449575617287, 10.475494087891343, 5.104166666666667, 5.525329323004923, 4.894389917695474, 5.592281131687244, 2.6268737562871523, 1.9340111215030973, 1.1149574607529342, 0.0, 14.175, 12.264532068282275, 9.670055607515485, 7.880621268861455, 11.184562263374488, 6.852145884773663, 5.525329323004923, 3.6458333333333335, 5.237747043945672, 4.283483191872429, 2.387019524462735, 1.2243512917238228, 0.0), # 47
(14.457477229403315, 13.422977469135803, 11.923095679012349, 12.839917708333335, 10.477815738876558, 5.104166666666667, 5.508131481481482, 4.861986111111112, 5.587528333333333, 2.618136543209877, 1.9324835578002246, 1.1133572016460909, 0.0, 14.175, 12.246929218106997, 9.662417789001124, 7.854409629629629, 11.175056666666666, 6.806780555555557, 5.508131481481482, 3.6458333333333335, 5.238907869438279, 4.279972569444446, 2.38461913580247, 1.2202706790123459, 0.0), # 48
(14.464161513687602, 13.377393609967992, 11.910863283036125, 12.829133063271607, 10.480022934436168, 5.104166666666667, 5.490677430807714, 4.829409465020577, 5.582675288065844, 2.6092944741655244, 1.930913046098849, 1.1117262155159278, 0.0, 14.175, 12.228988370675204, 9.654565230494246, 7.827883422496572, 11.165350576131688, 6.761173251028807, 5.490677430807714, 3.6458333333333335, 5.240011467218084, 4.276377687757203, 2.382172656607225, 1.2161266918152722, 0.0), # 49
(14.470504018511264, 13.33119713077275, 11.89841963877458, 12.81810860339506, 10.482115503256427, 5.104166666666667, 5.473000383280885, 4.796746399176955, 5.57772890946502, 2.6003663694558763, 1.9293009830818477, 1.1100670629477218, 0.0, 14.175, 12.210737692424937, 9.646504915409238, 7.8010991083676275, 11.15545781893004, 6.715444958847738, 5.473000383280885, 3.6458333333333335, 5.2410577516282135, 4.272702867798355, 2.379683927754916, 1.211927011888432, 0.0), # 50
(14.476503629716676, 13.284472530864198, 11.885783950617286, 12.806857291666669, 10.484093274023598, 5.104166666666667, 5.455133551198258, 4.764083333333335, 5.572696111111112, 2.5913710493827167, 1.9276487654320995, 1.1083823045267494, 0.0, 14.175, 12.192205349794241, 9.638243827160496, 7.774113148148149, 11.145392222222224, 6.669716666666668, 5.455133551198258, 3.6458333333333335, 5.242046637011799, 4.268952430555557, 2.377156790123457, 1.2076793209876546, 0.0), # 51
(14.482159233146191, 13.237304309556471, 11.87297542295382, 12.795392091049385, 10.485956075423934, 5.104166666666667, 5.437110146857097, 4.731506687242798, 5.567583806584363, 2.582327334247829, 1.9259577898324816, 1.1066745008382872, 0.0, 14.175, 12.173419509221157, 9.629788949162407, 7.746982002743485, 11.135167613168726, 6.624109362139918, 5.437110146857097, 3.6458333333333335, 5.242978037711967, 4.265130697016462, 2.3745950845907644, 1.2033913008687704, 0.0), # 52
(14.487469714642183, 13.189776966163697, 11.860013260173757, 12.783725964506175, 10.487703736143693, 5.104166666666667, 5.418963382554669, 4.699102880658437, 5.5623989094650215, 2.573254044352996, 1.9242294529658732, 1.104946212467612, 0.0, 14.175, 12.15440833714373, 9.621147264829364, 7.719762133058986, 11.124797818930043, 6.578744032921811, 5.418963382554669, 3.6458333333333335, 5.243851868071847, 4.261241988168726, 2.3720026520347517, 1.199070633287609, 0.0), # 53
(14.492433960047004, 13.141975000000002, 11.846916666666667, 12.771871875000002, 10.489336084869135, 5.104166666666667, 5.400726470588236, 4.6669583333333335, 5.557148333333334, 2.5641700000000007, 1.9224651515151516, 1.1032000000000002, 0.0, 14.175, 12.1352, 9.612325757575757, 7.69251, 11.114296666666668, 6.533741666666667, 5.400726470588236, 3.6458333333333335, 5.244668042434568, 4.257290625000001, 2.369383333333334, 1.1947250000000003, 0.0), # 54
(14.497050855203032, 13.093982910379516, 11.833704846822133, 12.759842785493827, 10.490852950286511, 5.104166666666667, 5.382432623255064, 4.6351594650205765, 5.551838991769547, 2.555094021490627, 1.9206662821631961, 1.101438424020729, 0.0, 14.175, 12.115822664228014, 9.603331410815981, 7.66528206447188, 11.103677983539095, 6.4892232510288075, 5.382432623255064, 3.6458333333333335, 5.2454264751432556, 4.253280928497944, 2.3667409693644266, 1.1903620827617745, 0.0), # 55
(14.501319285952622, 13.045885196616371, 11.820397005029724, 12.74765165895062, 10.492254161082082, 5.104166666666667, 5.3641150528524175, 4.603792695473252, 5.5464777983539095, 2.5460449291266585, 1.918834241592884, 1.099664045115074, 0.0, 14.175, 12.096304496265812, 9.59417120796442, 7.638134787379974, 11.092955596707819, 6.445309773662553, 5.3641150528524175, 3.6458333333333335, 5.246127080541041, 4.249217219650207, 2.3640794010059447, 1.1859895633287612, 0.0), # 56
(14.505238138138138, 12.997766358024693, 11.807012345679016, 12.735311458333335, 10.493539545942102, 5.104166666666667, 5.34580697167756, 4.572944444444445, 5.541071666666667, 2.5370415432098774, 1.9169704264870937, 1.097879423868313, 0.0, 14.175, 12.076673662551439, 9.584852132435467, 7.61112462962963, 11.082143333333335, 6.402122222222224, 5.34580697167756, 3.6458333333333335, 5.246769772971051, 4.245103819444446, 2.3614024691358035, 1.1816151234567904, 0.0), # 57
(14.508806297601952, 12.949710893918612, 11.79357007315958, 12.72283514660494, 10.494708933552829, 5.104166666666667, 5.3275415920277585, 4.5427011316872425, 5.535627510288066, 2.5281026840420675, 1.9150762335287033, 1.096087120865722, 0.0, 14.175, 12.05695832952294, 9.575381167643515, 7.584308052126201, 11.071255020576132, 6.35978158436214, 5.3275415920277585, 3.6458333333333335, 5.2473544667764145, 4.240945048868314, 2.3587140146319165, 1.1772464449016922, 0.0), # 58
(14.51202265018642, 12.901803303612255, 11.780089391860999, 12.710235686728396, 10.495762152600523, 5.104166666666667, 5.309352126200275, 4.513149176954733, 5.530152242798355, 2.5192471719250125, 1.9131530594005905, 1.0942896966925775, 0.0, 14.175, 12.037186663618352, 9.565765297002951, 7.557741515775036, 11.06030448559671, 6.3184088477366265, 5.309352126200275, 3.6458333333333335, 5.247881076300262, 4.2367452289094665, 2.3560178783722, 1.172891209419296, 0.0), # 59
(14.51488608173391, 12.854128086419754, 11.76658950617284, 12.697526041666668, 10.496699031771435, 5.104166666666667, 5.291271786492374, 4.484375000000001, 5.524652777777779, 2.5104938271604946, 1.9112023007856345, 1.0924897119341568, 0.0, 14.175, 12.017386831275722, 9.556011503928172, 7.5314814814814826, 11.049305555555557, 6.278125000000001, 5.291271786492374, 3.6458333333333335, 5.248349515885717, 4.232508680555557, 2.353317901234568, 1.1685570987654323, 0.0), # 60
(14.517395478086781, 12.806769741655238, 11.753089620484685, 12.684719174382717, 10.497519399751823, 5.104166666666667, 5.273333785201324, 4.4564650205761325, 5.519136028806585, 2.501861470050298, 1.9092253543667126, 1.0906897271757356, 0.0, 14.175, 11.997586998933091, 9.546126771833563, 7.5055844101508935, 11.03827205761317, 6.2390510288065855, 5.273333785201324, 3.6458333333333335, 5.248759699875912, 4.22823972479424, 2.350617924096937, 1.1642517946959308, 0.0), # 61
(14.519549725087407, 12.759812768632832, 11.739608939186102, 12.671828047839508, 10.498223085227952, 5.104166666666667, 5.255571334624385, 4.429505658436215, 5.513608909465021, 2.4933689208962058, 1.9072236168267036, 1.0888923030025914, 0.0, 14.175, 11.977815333028504, 9.536118084133516, 7.4801067626886155, 11.027217818930042, 6.201307921810701, 5.255571334624385, 3.6458333333333335, 5.249111542613976, 4.2239426826131705, 2.3479217878372207, 1.1599829789666212, 0.0), # 62
(14.521347708578144, 12.713341666666667, 11.72616666666667, 12.658865625, 10.498809916886067, 5.104166666666667, 5.238017647058824, 4.4035833333333345, 5.508078333333334, 2.4850350000000003, 1.9051984848484853, 1.0871000000000002, 0.0, 14.175, 11.9581, 9.525992424242425, 7.455105, 11.016156666666667, 6.165016666666668, 5.238017647058824, 3.6458333333333335, 5.249404958443034, 4.219621875000001, 2.345233333333334, 1.1557583333333337, 0.0), # 63
(14.522788314401359, 12.667440935070873, 11.712782007315958, 12.645844868827162, 10.499279723412432, 5.104166666666667, 5.220705934801905, 4.378784465020577, 5.50255121399177, 2.4768785276634664, 1.9031513551149353, 1.0853153787532392, 0.0, 14.175, 11.938469166285628, 9.515756775574676, 7.430635582990398, 11.00510242798354, 6.130298251028808, 5.220705934801905, 3.6458333333333335, 5.249639861706216, 4.215281622942388, 2.342556401463192, 1.151585539551898, 0.0), # 64
(14.523870428399414, 12.62219507315958, 11.69947416552355, 12.63277874228395, 10.499632333493302, 5.104166666666667, 5.2036694101508925, 4.35519547325103, 5.497034465020577, 2.4689183241883863, 1.9010836243089335, 1.0835409998475842, 0.0, 14.175, 11.918950998323425, 9.505418121544666, 7.406754972565158, 10.994068930041154, 6.097273662551442, 5.2036694101508925, 3.6458333333333335, 5.249816166746651, 4.2109262474279845, 2.3398948331047102, 1.1474722793781438, 0.0), # 65
(14.524592936414676, 12.577688580246916, 11.686262345679015, 12.619680208333333, 10.499867575814935, 5.104166666666667, 5.1869412854030505, 4.332902777777779, 5.491535000000001, 2.4611732098765438, 1.898996689113356, 1.0817794238683132, 0.0, 14.175, 11.899573662551441, 9.49498344556678, 7.38351962962963, 10.983070000000001, 6.06606388888889, 5.1869412854030505, 3.6458333333333335, 5.249933787907468, 4.206560069444445, 2.337252469135803, 1.1434262345679016, 0.0), # 66
(14.524954724289511, 12.534005955647004, 11.673165752171926, 12.606562229938273, 10.499985279063587, 5.104166666666667, 5.1705547728556445, 4.311992798353911, 5.486059732510288, 2.453662005029722, 1.8968919462110825, 1.0800332114007012, 0.0, 14.175, 11.88036532540771, 9.484459731055413, 7.360986015089164, 10.972119465020576, 6.036789917695475, 5.1705547728556445, 3.6458333333333335, 5.2499926395317935, 4.202187409979425, 2.3346331504343856, 1.1394550868770006, 0.0), # 67
(14.524708260273156, 12.491002420461081, 11.660140274919984, 12.593323827495976, 10.499886091610856, 5.104071942793273, 5.154460636380753, 4.292367245846671, 5.480574329370524, 2.446367154576509, 1.894733397326088, 1.078295169221637, 0.0, 14.174825210048013, 11.861246861438005, 9.47366698663044, 7.339101463729525, 10.961148658741047, 6.009314144185339, 5.154460636380753, 3.6457656734237665, 5.249943045805428, 4.197774609165326, 2.3320280549839967, 1.135545674587371, 0.0), # 68
(14.522398389694043, 12.44736508363202, 11.646819830246914, 12.579297690217391, 10.498983297022512, 5.1033231138545965, 5.13818772694263, 4.272974279835392, 5.474838991769548, 2.439082236746551, 1.8923013290802768, 1.0765088802252547, 0.0, 14.17344039351852, 11.8415976824778, 9.461506645401384, 7.317246710239651, 10.949677983539097, 5.982163991769549, 5.13818772694263, 3.6452307956104257, 5.249491648511256, 4.193099230072464, 2.329363966049383, 1.1315786439665476, 0.0), # 69
(14.517840102582454, 12.402893656798973, 11.633146504915409, 12.564391480475042, 10.49719935985368, 5.101848358989992, 5.121662094192959, 4.253638926992837, 5.468821349641823, 2.4317718335619576, 1.8895680735227522, 1.0746659888174948, 0.0, 14.170705268347055, 11.82132587699244, 9.447840367613761, 7.295315500685872, 10.937642699283646, 5.955094497789972, 5.121662094192959, 3.6441773992785653, 5.24859967992684, 4.188130493491681, 2.326629300983082, 1.127535786981725, 0.0), # 70
(14.511097524900102, 12.357614716359132, 11.619125100022863, 12.548627178945251, 10.49455687350386, 5.0996715769953775, 5.104891161677292, 4.234367588782199, 5.462530365035819, 2.4244361257699243, 1.8865437198495683, 1.072767842674817, 0.0, 14.166655842764062, 11.800446269422984, 9.43271859924784, 7.273308377309771, 10.925060730071637, 5.928114624295079, 5.104891161677292, 3.642622554996698, 5.24727843675193, 4.182875726315085, 2.323825020004573, 1.1234195196690122, 0.0), # 71
(14.502234782608697, 12.311554838709677, 11.604760416666666, 12.532026766304348, 10.49107843137255, 5.096816666666667, 5.087882352941177, 4.215166666666667, 5.4559750000000005, 2.4170752941176477, 1.8832383572567788, 1.0708157894736845, 0.0, 14.161328125, 11.778973684210527, 9.416191786283894, 7.251225882352942, 10.911950000000001, 5.901233333333334, 5.087882352941177, 3.6405833333333337, 5.245539215686275, 4.177342255434784, 2.3209520833333337, 1.1192322580645162, 0.0), # 72
(14.491316001669949, 12.264740600247798, 11.590057255944217, 12.514612223228664, 10.486786626859248, 5.0933075267997765, 5.070643091530164, 4.196042562109436, 5.4491642165828384, 2.409689519352323, 1.8796620749404376, 1.0688111768905575, 0.0, 14.154758123285324, 11.75692294579613, 9.398310374702186, 7.229068558056968, 10.898328433165677, 5.8744595869532095, 5.070643091530164, 3.638076804856983, 5.243393313429624, 4.171537407742889, 2.3180114511888434, 1.1149764182043456, 0.0), # 73
(14.478405308045566, 12.21719857737068, 11.575020418952905, 12.496405530394526, 10.481704053363458, 5.089168056190623, 5.053180800989806, 4.177001676573693, 5.4421069768328, 2.402278982221147, 1.8758249620965999, 1.0667553526018982, 0.0, 14.146981845850483, 11.734308878620878, 9.379124810482999, 7.20683694666344, 10.8842139536656, 5.84780234720317, 5.053180800989806, 3.635120040136159, 5.240852026681729, 4.165468510131509, 2.315004083790581, 1.1106544161246077, 0.0), # 74
(14.463566827697262, 12.168955346475506, 11.559654706790123, 12.477428668478263, 10.475853304284678, 5.084422153635118, 5.03550290486565, 4.158050411522635, 5.434812242798353, 2.394843863471315, 1.8717371079213185, 1.0646496642841674, 0.0, 14.138035300925928, 11.711146307125839, 9.358685539606592, 7.184531590413944, 10.869624485596706, 5.821270576131688, 5.03550290486565, 3.63173010973937, 5.237926652142339, 4.159142889492755, 2.311930941358025, 1.10626866786141, 0.0), # 75
(14.44686468658675, 12.12003748395947, 11.543964920553272, 12.457703618156202, 10.469256973022405, 5.079093717929179, 5.017616826703247, 4.139195168419449, 5.427288976527969, 2.3873843438500235, 1.8674086016106486, 1.0624954596138265, 0.0, 14.127954496742113, 11.68745005575209, 9.337043008053241, 7.162153031550069, 10.854577953055937, 5.794873235787229, 5.017616826703247, 3.6279240842351275, 5.234628486511203, 4.152567872718735, 2.3087929841106543, 1.101821589450861, 0.0), # 76
(14.428363010675731, 12.070471566219748, 11.527955861339734, 12.43725236010467, 10.461937652976141, 5.07320664786872, 4.9995299900481465, 4.120442348727329, 5.4195461400701115, 2.3799006041044684, 1.8628495323606438, 1.0602940862673376, 0.0, 14.116775441529496, 11.663234948940712, 9.314247661803218, 7.139701812313404, 10.839092280140223, 5.768619288218261, 4.9995299900481465, 3.623719034191943, 5.230968826488071, 4.145750786701558, 2.305591172267947, 1.0973155969290682, 0.0), # 77
(14.408125925925928, 12.020284169653527, 11.511632330246915, 12.416096875000001, 10.45391793754539, 5.066784842249657, 4.981249818445898, 4.101798353909466, 5.41159269547325, 2.372392824981845, 1.8580699893673582, 1.0580468919211612, 0.0, 14.10453414351852, 11.638515811132772, 9.29034994683679, 7.1171784749455345, 10.8231853909465, 5.742517695473253, 4.981249818445898, 3.6191320301783265, 5.226958968772695, 4.138698958333334, 2.3023264660493834, 1.092753106332139, 0.0), # 78
(14.386217558299041, 11.969501870657995, 11.494999128372202, 12.394259143518521, 10.445220420129644, 5.0598521998679065, 4.962783735442051, 4.0832695854290515, 5.403437604785855, 2.3648611872293506, 1.8530800618268455, 1.0557552242517592, 0.0, 14.091266610939643, 11.613307466769347, 9.265400309134227, 7.094583561688051, 10.80687520957171, 5.716577419600672, 4.962783735442051, 3.61418014276279, 5.222610210064822, 4.131419714506174, 2.2989998256744406, 1.0881365336961817, 0.0), # 79
(14.362702033756786, 11.918151245630337, 11.478061056812987, 12.371761146336556, 10.435867694128408, 5.052432619519382, 4.9441391645821575, 4.064862444749277, 5.395089830056394, 2.35730587159418, 1.847889838935161, 1.0534204309355928, 0.0, 14.07700885202332, 11.587624740291517, 9.239449194675805, 7.071917614782539, 10.790179660112788, 5.690807422648988, 4.9441391645821575, 3.6088804425138443, 5.217933847064204, 4.123920382112186, 2.2956122113625974, 1.0834682950573036, 0.0), # 80
(14.337643478260873, 11.866258870967743, 11.460822916666668, 12.348624864130437, 10.425882352941176, 5.04455, 4.925323529411765, 4.046583333333334, 5.386558333333333, 2.34972705882353, 1.8425094098883579, 1.0510438596491232, 0.0, 14.061796875, 11.561482456140352, 9.212547049441788, 7.049181176470589, 10.773116666666667, 5.665216666666669, 4.925323529411765, 3.60325, 5.212941176470588, 4.11620828804348, 2.2921645833333337, 1.0787508064516131, 0.0), # 81
(14.311106017773009, 11.813851323067393, 11.443289509030638, 12.32487227757649, 10.415286989967456, 5.036228240105676, 4.906344253476426, 4.0284386526444145, 5.3778520766651425, 2.342124929664596, 1.83694886388249, 1.048626858068812, 0.0, 14.045666688100141, 11.53489543875693, 9.18474431941245, 7.026374788993786, 10.755704153330285, 5.63981411370218, 4.906344253476426, 3.5973058857897686, 5.207643494983728, 4.1082907591921645, 2.2886579018061277, 1.0739864839152178, 0.0), # 82
(14.283153778254908, 11.760955178326475, 11.425465635002288, 12.300525367351046, 10.40410419860674, 5.027491238632323, 4.887208760321688, 4.01043480414571, 5.368980022100289, 2.3344996648645746, 1.8312182901136123, 1.0461707738711208, 0.0, 14.028654299554185, 11.507878512582325, 9.156091450568061, 7.0034989945937225, 10.737960044200578, 5.614608725803994, 4.887208760321688, 3.5910651704516594, 5.20205209930337, 4.1001751224503495, 2.2850931270004575, 1.0691777434842251, 0.0), # 83
(14.253850885668278, 11.707597013142175, 11.407356095679013, 12.275606114130436, 10.392356572258533, 5.0183628943758585, 4.867924473493101, 3.9925781893004118, 5.359951131687243, 2.3268514451706617, 1.825327777777778, 1.0436769547325107, 0.0, 14.010795717592593, 11.480446502057614, 9.12663888888889, 6.980554335511984, 10.719902263374486, 5.589609465020577, 4.867924473493101, 3.5845449245541845, 5.196178286129267, 4.091868704710146, 2.281471219135803, 1.0643270011947434, 0.0), # 84
(14.223261465974833, 11.653803403911677, 11.388965692158209, 12.250136498590983, 10.380066704322333, 5.008867106132196, 4.8484988165362175, 3.974875209571713, 5.35077436747447, 2.3191804513300527, 1.8192874160710422, 1.041146748329443, 0.0, 13.992126950445819, 11.452614231623869, 9.09643708035521, 6.957541353990157, 10.70154873494894, 5.564825293400398, 4.8484988165362175, 3.577762218665854, 5.190033352161167, 4.083378832863662, 2.2777931384316417, 1.05943667308288, 0.0), # 85
(14.191449645136279, 11.59960092703217, 11.370299225537268, 12.224138501409021, 10.367257188197637, 4.999027772697253, 4.828939212996585, 3.9573322664228017, 5.341458691510441, 2.311486864089944, 1.8131072941894584, 1.0385815023383795, 0.0, 13.97268400634431, 11.424396525722173, 9.065536470947292, 6.934460592269831, 10.682917383020882, 5.540265172991923, 4.828939212996585, 3.57073412335518, 5.183628594098819, 4.074712833803008, 2.274059845107454, 1.0545091751847429, 0.0), # 86
(14.15847954911433, 11.545016158900838, 11.35136149691358, 12.19763410326087, 10.353950617283953, 4.988868792866941, 4.809253086419753, 3.939955761316873, 5.332013065843622, 2.3037708641975314, 1.8067975013290805, 1.035982564435781, 0.0, 13.95250289351852, 11.39580820879359, 9.033987506645403, 6.9113125925925925, 10.664026131687244, 5.515938065843622, 4.809253086419753, 3.563477709190672, 5.1769753086419765, 4.065878034420291, 2.2702722993827162, 1.0495469235364399, 0.0), # 87
(14.124415303870702, 11.490075675914863, 11.332157307384547, 12.170645284822868, 10.340169584980769, 4.97841406543718, 4.789447860351274, 3.9227520957171165, 5.322446452522482, 2.296032632400011, 1.8003681266859632, 1.0333512822981095, 0.0, 13.931619620198905, 11.366864105279202, 9.001840633429817, 6.888097897200032, 10.644892905044964, 5.491852934003963, 4.789447860351274, 3.556010046740843, 5.1700847924903846, 4.056881761607624, 2.2664314614769094, 1.0445523341740786, 0.0), # 88
(14.089321035367092, 11.434806054471437, 11.312691458047555, 12.143194026771337, 10.325936684687594, 4.967687489203883, 4.769530958336696, 3.905727671086725, 5.312767813595489, 2.2882723494445796, 1.7938292594561607, 1.030689003601826, 0.0, 13.910070194615912, 11.337579039620083, 8.969146297280803, 6.864817048333737, 10.625535627190978, 5.4680187395214155, 4.769530958336696, 3.548348206574202, 5.162968342343797, 4.047731342257113, 2.2625382916095114, 1.0395278231337672, 0.0), # 89
(14.053260869565218, 11.379233870967743, 11.292968750000002, 12.115302309782612, 10.311274509803923, 4.956712962962964, 4.749509803921569, 3.8888888888888893, 5.302986111111112, 2.280490196078432, 1.787190988835726, 1.027997076023392, 0.0, 13.887890625, 11.30796783625731, 8.93595494417863, 6.841470588235294, 10.605972222222224, 5.4444444444444455, 4.749509803921569, 3.54050925925926, 5.155637254901961, 4.0384341032608715, 2.2585937500000006, 1.0344758064516133, 0.0), # 90
(14.016298932426789, 11.323385701800964, 11.272993984339278, 12.086992114533015, 10.296205653729254, 4.945514385510339, 4.729391820651443, 3.8722421505868017, 5.293110307117818, 2.2726863530487647, 1.7804634040207143, 1.025276847239269, 0.0, 13.865116919581618, 11.278045319631957, 8.902317020103572, 6.818059059146293, 10.586220614235636, 5.4211390108215225, 4.729391820651443, 3.5325102753645283, 5.148102826864627, 4.0289973715110055, 2.254598796867856, 1.0293987001637241, 0.0), # 91
(13.978499349913523, 11.267288123368292, 11.252771962162782, 12.058285421698875, 10.280752709863094, 4.934115655641925, 4.709184432071869, 3.8557938576436523, 5.2831493636640765, 2.2648610011027737, 1.7736565942071794, 1.0225296649259181, 0.0, 13.841785086591221, 11.247826314185097, 8.868282971035896, 6.79458300330832, 10.566298727328153, 5.398111400701113, 4.709184432071869, 3.524368325458518, 5.140376354931547, 4.019428473899626, 2.2505543924325564, 1.0242989203062085, 0.0), # 92
(13.939926247987117, 11.210967712066907, 11.232307484567903, 12.029204211956525, 10.264938271604938, 4.9225406721536356, 4.688895061728395, 3.839550411522634, 5.273112242798354, 2.2570143209876545, 1.7667806485911755, 1.019756876759801, 0.0, 13.81793113425926, 11.217325644357809, 8.833903242955877, 6.771042962962962, 10.546224485596708, 5.375370576131688, 4.688895061728395, 3.5161004801097393, 5.132469135802469, 4.009734737318842, 2.246461496913581, 1.0191788829151736, 0.0), # 93
(13.900643752609293, 11.154451044293994, 11.211605352652038, 11.999770465982289, 10.248784932354287, 4.910813333841387, 4.6685311331665735, 3.8235182136869392, 5.263007906569121, 2.2491464934506045, 1.7598456563687561, 1.016959830417379, 0.0, 13.793591070816188, 11.186558134591166, 8.79922828184378, 6.747439480351812, 10.526015813138242, 5.3529254991617155, 4.6685311331665735, 3.5077238098867047, 5.124392466177143, 3.9999234886607637, 2.2423210705304077, 1.014041004026727, 0.0), # 94
(13.860715989741754, 11.097764696446747, 11.190670367512576, 11.970006164452498, 10.232315285510639, 4.898957539501094, 4.648100069931951, 3.807703665599757, 5.252845317024844, 2.241257699238818, 1.752861706735976, 1.014139873575113, 0.0, 13.768800904492457, 11.155538609326241, 8.764308533679879, 6.723773097716453, 10.505690634049689, 5.33078513183966, 4.648100069931951, 3.499255385357924, 5.1161576427553195, 3.9900020548175, 2.2381340735025153, 1.0088876996769771, 0.0), # 95
(13.820207085346219, 11.040935244922345, 11.169507330246915, 11.93993328804348, 10.215551924473493, 4.88699718792867, 4.62760929557008, 3.7921131687242804, 5.242633436213992, 2.2333481190994924, 1.7458388888888892, 1.0112983539094653, 0.0, 13.74359664351852, 11.124281893004117, 8.729194444444445, 6.700044357298475, 10.485266872427983, 5.3089584362139925, 4.62760929557008, 3.490712277091907, 5.1077759622367465, 3.9799777626811608, 2.2339014660493834, 1.0037213859020315, 0.0), # 96
(13.779181165384388, 10.983989266117973, 11.148121041952448, 11.909573817431562, 10.198517442642354, 4.8749561779200326, 4.60706623362651, 3.7767531245237014, 5.2323812261850335, 2.2254179337798226, 1.7387872920235496, 1.0084366190968967, 0.0, 13.718014296124831, 11.09280281006586, 8.693936460117747, 6.676253801339467, 10.464762452370067, 5.287454374333182, 4.60706623362651, 3.482111555657166, 5.099258721321177, 3.969857939143855, 2.2296242083904896, 0.9985444787379977, 0.0), # 97
(13.737702355817978, 10.926953336430817, 11.126516303726566, 11.878949733293078, 10.181234433416716, 4.862858408271099, 4.58647830764679, 3.7616299344612103, 5.222097648986434, 2.2174673240270053, 1.7317170053360116, 1.0055560168138682, 0.0, 13.69208987054184, 11.06111618495255, 8.658585026680058, 6.652401972081014, 10.444195297972868, 5.266281908245695, 4.58647830764679, 3.4734702916222133, 5.090617216708358, 3.9596499110976935, 2.2253032607453136, 0.9933593942209834, 0.0), # 98
(13.695834782608697, 10.869854032258065, 11.10469791666667, 11.848083016304349, 10.163725490196079, 4.850727777777779, 4.5658529411764714, 3.7467500000000005, 5.211791666666667, 2.2094964705882356, 1.724638118022329, 1.0026578947368423, 0.0, 13.665859375000002, 11.029236842105265, 8.623190590111644, 6.628489411764706, 10.423583333333333, 5.245450000000001, 4.5658529411764714, 3.4648055555555564, 5.081862745098039, 3.949361005434784, 2.220939583333334, 0.988168548387097, 0.0), # 99
(13.653642571718258, 10.8127179299969, 11.082670681870143, 11.816995647141708, 10.146013206379946, 4.8385881852359915, 4.545197557761102, 3.732119722603262, 5.201472241274196, 2.201505554210711, 1.717560719278556, 0.9997436005422796, 0.0, 13.639358817729768, 10.997179605965075, 8.58780359639278, 6.6045166626321326, 10.402944482548392, 5.224967611644567, 4.545197557761102, 3.456134418025708, 5.073006603189973, 3.938998549047237, 2.2165341363740287, 0.9829743572724456, 0.0), # 100
(13.611189849108369, 10.755571606044516, 11.060439400434387, 11.785709606481484, 10.128120175367815, 4.82646352944165, 4.524519580946234, 3.7177455037341867, 5.191148334857491, 2.1934947556416264, 1.7104948983007466, 0.9968144819066413, 0.0, 13.612624206961591, 10.964959300973053, 8.552474491503732, 6.580484266924878, 10.382296669714982, 5.204843705227861, 4.524519580946234, 3.4474739496011786, 5.064060087683908, 3.928569868827162, 2.2120878800868775, 0.977779236913138, 0.0), # 101
(13.568540740740744, 10.698441636798089, 11.038008873456791, 11.754246875000002, 10.110068990559187, 4.814377709190674, 4.503826434277415, 3.7036337448559675, 5.180828909465021, 2.1854642556281783, 1.7034507442849551, 0.9938718865063897, 0.0, 13.585691550925928, 10.932590751570284, 8.517253721424776, 6.556392766884533, 10.361657818930041, 5.185087242798355, 4.503826434277415, 3.438841220850481, 5.055034495279593, 3.918082291666668, 2.207601774691358, 0.972585603345281, 0.0), # 102
(13.525759372577088, 10.641354598654807, 11.015383902034753, 11.722629433373593, 10.09188224535356, 4.802354623278973, 4.483125541300197, 3.689790847431795, 5.170522927145252, 2.1774142349175616, 1.696438346427236, 0.9909171620179854, 0.0, 13.558596857853223, 10.900088782197837, 8.482191732136178, 6.532242704752683, 10.341045854290504, 5.1657071864045125, 4.483125541300197, 3.4302533023421233, 5.04594112267678, 3.907543144457865, 2.2030767804069504, 0.9673958726049827, 0.0), # 103
(13.482909870579116, 10.58433706801186, 10.992569287265662, 11.690879262278584, 10.073582533150434, 4.790418170502465, 4.462424325560129, 3.6762232129248593, 5.160239349946655, 2.1693448742569736, 1.689467793923642, 0.9879516561178898, 0.0, 13.53137613597394, 10.867468217296787, 8.447338969618208, 6.50803462277092, 10.32047869989331, 5.146712498094804, 4.462424325560129, 3.421727264644618, 5.036791266575217, 3.896959754092862, 2.1985138574531327, 0.9622124607283511, 0.0), # 104
(13.440056360708535, 10.527415621266428, 10.969569830246915, 11.659018342391304, 10.05519244734931, 4.778592249657065, 4.441730210602761, 3.662937242798354, 5.1499871399176955, 2.1612563543936103, 1.682549175970229, 0.9849767164825647, 0.0, 13.50406539351852, 10.83474388130821, 8.412745879851144, 6.48376906318083, 10.299974279835391, 5.128112139917696, 4.441730210602761, 3.4132801783264752, 5.027596223674655, 3.886339447463769, 2.1939139660493834, 0.9570377837514936, 0.0), # 105
(13.39726296892706, 10.470616834815702, 10.946390332075904, 11.627068654388085, 10.036734581349688, 4.766900759538689, 4.4210506199736415, 3.6499393385154706, 5.139775259106843, 2.153148856074666, 1.67569258176305, 0.9819936907884712, 0.0, 13.476700638717421, 10.801930598673183, 8.378462908815248, 6.459446568223997, 10.279550518213686, 5.109915073921659, 4.4210506199736415, 3.4049291139562063, 5.018367290674844, 3.875689551462696, 2.189278066415181, 0.9518742577105185, 0.0), # 106
(13.3545938211964, 10.413967285056863, 10.923035593850026, 11.59505217894525, 10.018231528551063, 4.755367598943252, 4.400392977218323, 3.6372359015394005, 5.129612669562567, 2.145022560047339, 1.6689081004981592, 0.9790039267120707, 0.0, 13.449317879801098, 10.769043193832776, 8.344540502490794, 6.435067680142016, 10.259225339125134, 5.092130262155161, 4.400392977218323, 3.3966911421023225, 5.009115764275531, 3.865017392981751, 2.1846071187700056, 0.9467242986415331, 0.0), # 107
(13.312113043478263, 10.357493548387097, 10.899510416666669, 11.562990896739132, 9.999705882352941, 4.744016666666668, 4.379764705882353, 3.6248333333333345, 5.119508333333334, 2.1368776470588244, 1.662205821371611, 0.9760087719298248, 0.0, 13.421953125000002, 10.736096491228071, 8.311029106858054, 6.4106329411764715, 10.239016666666668, 5.074766666666668, 4.379764705882353, 3.3885833333333344, 4.999852941176471, 3.854330298913045, 2.179902083333334, 0.9415903225806455, 0.0), # 108
(13.26988476173436, 10.301222201203595, 10.87581960162323, 11.530906788446053, 9.98118023615482, 4.732871861504853, 4.359173229511284, 3.612738035360464, 5.109471212467612, 2.1287142978563174, 1.6555958335794598, 0.9730095741181947, 0.0, 13.394642382544584, 10.70310531530014, 8.277979167897298, 6.386142893568951, 10.218942424935223, 5.05783324950465, 4.359173229511284, 3.3806227582177515, 4.99059011807741, 3.8436355961486854, 2.1751639203246462, 0.9364747455639633, 0.0), # 109
(13.227973101926404, 10.245179819903537, 10.851967949817103, 11.498821834742351, 9.962677183356197, 4.721957082253722, 4.3386259716506625, 3.6009564090839814, 5.099510269013869, 2.1205326931870148, 1.6490882263177586, 0.9700076809536419, 0.0, 13.367421660665297, 10.670084490490058, 8.245441131588793, 6.361598079561043, 10.199020538027739, 5.041338972717574, 4.3386259716506625, 3.372826487324087, 4.981338591678099, 3.832940611580785, 2.170393589963421, 0.9313799836275944, 0.0), # 110
(13.186442190016104, 10.189392980884113, 10.827960262345682, 11.46675801630435, 9.944219317356573, 4.711296227709192, 4.318130355846042, 3.5894948559670787, 5.089634465020577, 2.1123330137981124, 1.6426930887825626, 0.9670044401126275, 0.0, 13.340326967592594, 10.6370488412389, 8.213465443912813, 6.336999041394336, 10.179268930041154, 5.02529279835391, 4.318130355846042, 3.3652115912208513, 4.972109658678287, 3.8222526721014507, 2.1655920524691368, 0.9263084528076467, 0.0), # 111
(13.14535615196517, 10.133888260542502, 10.803801340306359, 11.434737313808373, 9.925829231555449, 4.700913196667176, 4.297693805642971, 3.5783597774729468, 5.079852762536198, 2.1041154404368063, 1.6364205101699256, 0.9640011992716131, 0.0, 13.313394311556928, 10.604013191987741, 8.182102550849628, 6.312346321310418, 10.159705525072397, 5.0097036884621255, 4.297693805642971, 3.357795140476554, 4.962914615777724, 3.8115791046027923, 2.160760268061272, 0.9212625691402275, 0.0), # 112
(13.104705913184263, 10.078784894108638, 10.779554132960747, 11.402825576616644, 9.907497301495457, 4.690826978191853, 4.277368174559739, 3.5675806651220205, 5.07019931192069, 2.095906657814456, 1.6302822447690024, 0.9610058425921835, 0.0, 13.286621461180511, 10.571064268514016, 8.151411223845011, 6.287719973443367, 10.14039862384138, 4.9946129311708285, 4.277368174559739, 3.3505906987084666, 4.953748650747729, 3.8009418588722155, 2.15591082659215, 0.9162531721916946, 0.0), # 113
(13.064073257060091, 10.024626385524439, 10.755553287525224, 11.371278892341204, 9.88903379759524, 4.681014596966087, 4.257412745887406, 3.557289901377987, 5.060822216666095, 2.0878603087694745, 1.6242903453264128, 0.9580564200798471, 0.0, 13.25978557982405, 10.538620620878318, 8.121451726632063, 6.263580926308422, 10.12164443333219, 4.980205861929182, 4.257412745887406, 3.3435818549757763, 4.94451689879762, 3.790426297447069, 2.1511106575050447, 0.9113296714113127, 0.0), # 114
(13.023338864205595, 9.97143223830991, 10.731813088158539, 11.340088730440868, 9.870380499362694, 4.671450535207326, 4.2378417551340934, 3.547484881662581, 5.051724990045435, 2.0799888647958276, 1.6184360526663222, 0.9551543846318662, 0.0, 13.232809284324528, 10.506698230950526, 8.09218026333161, 6.239966594387481, 10.10344998009087, 4.966478834327614, 4.2378417551340934, 3.336750382290947, 4.935190249681347, 3.780029576813624, 2.146362617631708, 0.9064938398463556, 0.0), # 115
(12.982451822532688, 9.919124960991017, 10.708287554981187, 11.309199457779725, 9.851509291291528, 4.662112249784464, 4.218623372269525, 3.5381385158577467, 5.042884624972988, 2.072277675457342, 1.6127080506300124, 0.9522943730401906, 0.0, 13.205650163658248, 10.475238103442095, 8.063540253150062, 6.216833026372026, 10.085769249945976, 4.953393922200846, 4.218623372269525, 3.330080178417474, 4.925754645645764, 3.7697331525932425, 2.1416575109962372, 0.9017386328173653, 0.0), # 116
(12.941361219953283, 9.867627062093726, 10.68493070811365, 11.278555441221856, 9.832392057875436, 4.652977197566394, 4.199725767263427, 3.529223713845425, 5.034278114363028, 2.0647120903178457, 1.6070950230587664, 0.949471022096771, 0.0, 13.178265806801516, 10.44418124306448, 8.035475115293831, 6.1941362709535355, 10.068556228726056, 4.940913199383595, 4.199725767263427, 3.3235551411188533, 4.916196028937718, 3.7595184804072863, 2.1369861416227303, 0.8970570056448843, 0.0), # 117
(12.900016144379297, 9.816861050144, 10.66169656767643, 11.248101047631351, 9.81300068360812, 4.644022835422014, 4.181117110085521, 3.5207133855075567, 5.025882451129837, 2.0572774589411664, 1.6015856537938657, 0.9466789685935577, 0.0, 13.150613802730636, 10.413468654529133, 8.007928268969328, 6.171832376823498, 10.051764902259674, 4.92899873971058, 4.181117110085521, 3.317159168158581, 4.90650034180406, 3.7493670158771177, 2.132339313535286, 0.8924419136494547, 0.0), # 118
(12.858365683722639, 9.766749433667803, 10.638539153790012, 11.217780643872292, 9.793307052983273, 4.635226620220214, 4.162765570705529, 3.512580440726085, 5.017674628187687, 2.0499591308911307, 1.5961686266765933, 0.9439128493225009, 0.0, 13.122651740421906, 10.383041342547507, 7.980843133382966, 6.149877392673391, 10.035349256375374, 4.91761261701652, 4.162765570705529, 3.310876157300153, 4.896653526491637, 3.7392602146240983, 2.1277078307580024, 0.8878863121516185, 0.0), # 119
(12.816358925895228, 9.717214721191104, 10.61541248657489, 11.187538596808764, 9.773283050494598, 4.626566008829889, 4.144639319093177, 3.5047977893829505, 5.009631638450861, 2.0427424557315677, 1.5908326255482306, 0.9411673010755515, 0.0, 13.094337208851638, 10.352840311831065, 7.954163127741153, 6.128227367194702, 10.019263276901722, 4.906716905136131, 4.144639319093177, 3.3046900063070637, 4.886641525247299, 3.729179532269589, 2.1230824973149782, 0.8833831564719186, 0.0), # 120
(12.773944958808976, 9.668179421239865, 10.592270586151553, 11.157319273304857, 9.75290056063579, 4.618018458119934, 4.126706525218187, 3.4973383413600962, 5.001730474833633, 2.035612783026304, 1.5855663342500608, 0.9384369606446594, 0.0, 13.065627796996127, 10.322806567091252, 7.927831671250303, 6.106838349078911, 10.003460949667266, 4.8962736779041345, 4.126706525218187, 3.29858461294281, 4.876450280317895, 3.719106424434953, 2.118454117230311, 0.878925401930897, 0.0), # 121
(12.731072870375797, 9.61956604234005, 10.569067472640498, 11.127067040224649, 9.732131467900551, 4.609561424959241, 4.108935359050283, 3.490175006539462, 4.993948130250281, 2.0285554623391677, 1.5803584366233656, 0.9357164648217753, 0.0, 13.036481093831679, 10.292881113039527, 7.901792183116827, 6.085666387017502, 9.987896260500563, 4.886245009155247, 4.108935359050283, 3.2925438749708866, 4.8660657339502755, 3.7090223467415506, 2.1138134945280997, 0.8745060038490956, 0.0), # 122
(12.687691748507607, 9.571297093017627, 10.54575716616221, 11.09672626443223, 9.71094765678258, 4.601172366216706, 4.091293990559188, 3.4832806948029904, 4.986261597615085, 2.021555843233986, 1.5751976165094272, 0.9330004503988493, 0.0, 13.0068546883346, 10.263004954387341, 7.875988082547136, 6.064667529701957, 9.97252319523017, 4.876592972724187, 4.091293990559188, 3.28655169015479, 4.85547382839129, 3.698908754810744, 2.109151433232442, 0.8701179175470571, 0.0), # 123
(12.643750681116316, 9.523295081798558, 10.522293686837184, 11.066241312791686, 9.689321011775569, 4.592828738761221, 4.073750589714624, 3.476628316032624, 4.97864786984232, 2.014599275274587, 1.5700725577495283, 0.9302835541678323, 0.0, 12.976706169481197, 10.233119095846153, 7.85036278874764, 6.04379782582376, 9.95729573968464, 4.8672796424456735, 4.073750589714624, 3.280591956258015, 4.844660505887784, 3.6887471042638964, 2.104458737367437, 0.8657540983453236, 0.0), # 124
(12.599198756113843, 9.475482517208812, 10.498631054785912, 11.0355565521671, 9.667223417373222, 4.584507999461682, 4.056273326486318, 3.4701907801103036, 4.971083939846263, 2.0076711080247973, 1.5649719441849508, 0.927560412920674, 0.0, 12.94599312624776, 10.203164542127412, 7.824859720924753, 6.023013324074391, 9.942167879692526, 4.858267092154425, 4.056273326486318, 3.2746485710440583, 4.833611708686611, 3.678518850722367, 2.0997262109571824, 0.8614075015644376, 0.0), # 125
(12.553985061412101, 9.427781907774351, 10.474723290128884, 11.004616349422557, 9.644626758069233, 4.5761876051869805, 4.038830370843989, 3.463940996917971, 4.963546800541195, 2.0007566910484456, 1.5598844596569765, 0.9248256634493257, 0.0, 12.91467314761061, 10.173082297942582, 7.799422298284883, 6.002270073145335, 9.92709360108239, 4.849517395685159, 4.038830370843989, 3.268705432276415, 4.822313379034616, 3.66820544980752, 2.094944658025777, 0.8570710825249411, 0.0), # 126
(12.508058684923006, 9.380115762021138, 10.450524412986589, 10.973365071422144, 9.621502918357304, 4.567845012806012, 4.021389892757366, 3.4578518763375685, 4.95601344484139, 1.993841373909359, 1.5547987880068885, 0.9220739425457369, 0.0, 12.88270382254604, 10.142813368003106, 7.773993940034442, 5.981524121728076, 9.91202688968278, 4.8409926268725965, 4.021389892757366, 3.26274643771858, 4.810751459178652, 3.6577883571407157, 2.090104882597318, 0.8527377965473764, 0.0), # 127
(12.461368714558466, 9.332406588475143, 10.425988443479525, 10.941747085029949, 9.597823782731137, 4.5594576791876715, 4.003920062196168, 3.451896328251037, 4.948460865661126, 1.986910506171365, 1.5497036130759692, 0.9192998870018588, 0.0, 12.850042740030352, 10.112298757020445, 7.748518065379845, 5.960731518514094, 9.896921731322252, 4.832654859551452, 4.003920062196168, 3.2567554851340508, 4.798911891365568, 3.6472490283433174, 2.085197688695905, 0.8484005989522859, 0.0), # 128
(12.413864238230394, 9.284576895662326, 10.401069401728181, 10.909706757110053, 9.573561235684425, 4.551003061200851, 3.9863890491301195, 3.446047262540319, 4.9408660559146815, 1.9799494373982915, 1.5445876187055003, 0.916498133609641, 0.0, 12.816647489039854, 10.08147946970605, 7.7229380935275005, 5.939848312194873, 9.881732111829363, 4.824466167556446, 3.9863890491301195, 3.250716472286322, 4.786780617842212, 3.636568919036685, 2.0802138803456365, 0.8440524450602116, 0.0), # 129
(12.365494343850713, 9.236549192108656, 10.375721307853043, 10.877188454526541, 9.548687161710866, 4.542458615714445, 3.968765023528944, 3.440277589087355, 4.933206008516334, 1.9729435171539655, 1.539439488736764, 0.9136633191610346, 0.0, 12.78247565855085, 10.050296510771378, 7.697197443683819, 5.9188305514618955, 9.866412017032667, 4.816388624722297, 3.968765023528944, 3.244613296938889, 4.774343580855433, 3.6257294848421813, 2.075144261570609, 0.8396862901916962, 0.0), # 130
(12.316208119331334, 9.188245986340096, 10.349898181974611, 10.8441365441435, 9.523173445304161, 4.533801799597346, 3.9510161553623666, 3.4345602177740875, 4.92545771638036, 1.9658780950022154, 1.5342479070110426, 0.9107900804479897, 0.0, 12.747484837539638, 10.018690884927885, 7.671239535055213, 5.897634285006645, 9.85091543276072, 4.808384304883723, 3.9510161553623666, 3.238429856855247, 4.761586722652081, 3.614712181381168, 2.0699796363949226, 0.8352950896672816, 0.0), # 131
(12.265954652584163, 9.139589786882611, 10.32355404421337, 10.810495392825016, 9.49699197095801, 4.525010069718451, 3.9331106146001082, 3.4288680584824593, 4.917598172421039, 1.9587385205068681, 1.5290015573696185, 0.9078730542624567, 0.0, 12.711632614982527, 9.986603596887022, 7.645007786848092, 5.876215561520603, 9.835196344842078, 4.800415281875443, 3.9331106146001082, 3.2321500497988938, 4.748495985479005, 3.6034984642750065, 2.0647108088426744, 0.8308717988075103, 0.0), # 132
(12.21468303152113, 9.090503102262165, 10.296642914689816, 10.776209367435175, 9.470114623166108, 4.516060882946651, 3.915016571211893, 3.4231740210944106, 4.909604369552646, 1.9515101432317519, 1.5236891236537742, 0.904906877396386, 0.0, 12.674876579855821, 9.953975651360244, 7.618445618268871, 5.854530429695254, 9.819208739105292, 4.792443629532175, 3.915016571211893, 3.2257577735333225, 4.735057311583054, 3.5920697891450595, 2.059328582937963, 0.8264093729329243, 0.0), # 133
(12.162342344054133, 9.040908441004726, 10.26911881352444, 10.741222834838059, 9.442513286422153, 4.5069316961508425, 3.896702195167445, 3.4174510154918845, 4.90145330068946, 1.9441783127406937, 1.518299289704792, 0.9018861866417278, 0.0, 12.637174321135817, 9.920748053059004, 7.5914964485239596, 5.83253493822208, 9.80290660137892, 4.784431421688638, 3.896702195167445, 3.21923692582203, 4.721256643211077, 3.5804076116126873, 2.053823762704888, 0.8219007673640661, 0.0), # 134
(12.108881678095097, 8.990728311636257, 10.24093576083773, 10.705480161897759, 9.414159845219846, 4.4975999661999175, 3.8781356564364877, 3.4116719515568206, 4.893121958745757, 1.9367283785975222, 1.5128207393639534, 0.898805618790433, 0.0, 12.59848342779883, 9.88686180669476, 7.5641036968197675, 5.810185135792565, 9.786243917491515, 4.776340732179549, 3.8781356564364877, 3.212571404428512, 4.707079922609923, 3.5684933872992537, 2.048187152167546, 0.817338937421478, 0.0), # 135
(12.05425012155593, 8.93988522268272, 10.212047776750177, 10.668925715478352, 9.385026184052883, 4.488043149962771, 3.8592851249887445, 3.4058097391711617, 4.884587336635816, 1.9291456903660635, 1.5072421564725416, 0.8956598106344515, 0.0, 12.558761488821151, 9.852257916978965, 7.536210782362707, 5.787437071098189, 9.769174673271632, 4.768133634839627, 3.8592851249887445, 3.205745107116265, 4.6925130920264415, 3.556308571826118, 2.042409555350036, 0.812716838425702, 0.0), # 136
(11.998396762348548, 8.888301682670086, 10.18240888138228, 10.631503862443932, 9.355084187414965, 4.478238704308296, 3.8401187707939393, 3.399837288216851, 4.875826427273916, 1.9214155976101461, 1.5015522248718383, 0.8924433989657341, 0.0, 12.517966093179089, 9.816877388623073, 7.507761124359191, 5.764246792830437, 9.751652854547832, 4.759772203503592, 3.8401187707939393, 3.1987419316487826, 4.6775420937074825, 3.543834620814645, 2.036481776276456, 0.8080274256972807, 0.0), # 137
(11.941270688384867, 8.835900200124316, 10.15197309485452, 10.593158969658578, 9.32430573979979, 4.4681640861053875, 3.8206047638217933, 3.393727508575828, 4.8668162235743315, 1.913523449893597, 1.4957396284031257, 0.889151020576231, 0.0, 12.476054829848946, 9.78066122633854, 7.478698142015627, 5.740570349680789, 9.733632447148663, 4.751218512006159, 3.8206047638217933, 3.1915457757895624, 4.662152869899895, 3.5310529898861933, 2.0303946189709046, 0.8032636545567561, 0.0), # 138
(11.882820987576796, 8.782603283571376, 10.120694437287398, 10.553835403986378, 9.292662725701055, 4.457796752222938, 3.800711274042032, 3.3874533101300353, 4.85753371845134, 1.9054545967802445, 1.4897930509076862, 0.8857773122578926, 0.0, 12.432985287807028, 9.743550434836816, 7.448965254538431, 5.716363790340733, 9.71506743690268, 4.742434634182049, 3.800711274042032, 3.184140537302099, 4.646331362850527, 3.517945134662127, 2.0241388874574797, 0.7984184803246707, 0.0), # 139
(11.822996747836257, 8.72833344153723, 10.088526928801404, 10.513477532291418, 9.26012702961246, 4.447114159529844, 3.780406471424378, 3.3809876027614147, 4.847955904819222, 1.8971943878339157, 1.4837011762268022, 0.8823169108026693, 0.0, 12.38871505602964, 9.70548601882936, 7.41850588113401, 5.691583163501746, 9.695911809638444, 4.733382643865981, 3.780406471424378, 3.176510113949888, 4.63006351480623, 3.5044925107638067, 2.017705385760281, 0.7934848583215663, 0.0), # 140
(11.761747057075162, 8.673013182547843, 10.055424589517022, 10.472029721437782, 9.226670536027703, 4.436093764894997, 3.7596585259385567, 3.374303296351908, 4.838059775592251, 1.8887281726184386, 1.477452688201756, 0.8787644530025115, 0.0, 12.34320172349308, 9.666408983027624, 7.38726344100878, 5.6661845178553145, 9.676119551184502, 4.724024614892672, 3.7596585259385567, 3.168638403496426, 4.613335268013851, 3.490676573812595, 2.0110849179034047, 0.7884557438679859, 0.0), # 141
(11.69902100320542, 8.616565015129181, 10.02134143955475, 10.429436338289557, 9.192265129440482, 4.424713025187291, 3.7384356075542886, 3.367373300783457, 4.827822323684707, 1.8800413006976404, 1.4710362706738296, 0.8751145756493696, 0.0, 12.296402879173653, 9.626260332143064, 7.355181353369148, 5.64012390209292, 9.655644647369414, 4.71432262109684, 3.7384356075542886, 3.160509303705208, 4.596132564720241, 3.4764787794298533, 2.0042682879109504, 0.7833240922844712, 0.0), # 142
(11.634767674138946, 8.558911447807208, 9.986231499035082, 10.385641749710825, 9.156882694344494, 4.412949397275621, 3.7167058862412983, 3.360170525938002, 4.817220542010869, 1.871119121635349, 1.4644406074843055, 0.8713619155351939, 0.0, 12.248276112047666, 9.584981070887132, 7.322203037421526, 5.6133573649060455, 9.634441084021738, 4.704238736313203, 3.7167058862412983, 3.1521067123397293, 4.578441347172247, 3.4618805832369426, 1.9972462998070164, 0.7780828588915646, 0.0), # 143
(11.56893615778766, 8.499974989107892, 9.950048788078501, 10.340590322565676, 9.12049511523344, 4.400780338028881, 3.6944375319693092, 3.3526678816974873, 4.806231423485011, 1.8619469849953916, 1.4576543824744654, 0.867501109451935, 0.0, 12.198779011091421, 9.542512203971285, 7.288271912372326, 5.585840954986173, 9.612462846970022, 4.693735034376482, 3.6944375319693092, 3.1434145271634857, 4.56024755761672, 3.446863440855226, 1.9900097576157, 0.7727249990098085, 0.0), # 144
(11.501475542063469, 8.439678147557194, 9.912747326805505, 10.294226423718191, 9.083074276601018, 4.388183304315964, 3.6715987147080456, 3.344838277943853, 4.794831961021412, 1.8525102403415963, 1.4506662794855925, 0.8635267941915434, 0.0, 12.14786916528122, 9.498794736106976, 7.253331397427962, 5.557530721024787, 9.589663922042824, 4.682773589121394, 3.6715987147080456, 3.1344166459399743, 4.541537138300509, 3.4314088079060645, 1.9825494653611013, 0.7672434679597451, 0.0), # 145
(11.432334914878291, 8.377943431681082, 9.874281135336586, 10.246494420032459, 9.044592062940927, 4.375135753005765, 3.6481576044272312, 3.336654624559041, 4.782999147534349, 1.8427942372377903, 1.4434649823589683, 0.8594336065459691, 0.0, 12.095504163593366, 9.453769672005658, 7.21732491179484, 5.52838271171337, 9.565998295068699, 4.671316474382658, 3.6481576044272312, 3.125096966432689, 4.522296031470463, 3.41549814001082, 1.9748562270673173, 0.7616312210619166, 0.0), # 146
(11.361463364144042, 8.314693350005518, 9.83460423379223, 10.19733867837256, 9.005020358746862, 4.361615140967176, 3.6240823710965873, 3.3280898314249927, 4.770709975938102, 1.8327843252478015, 1.4360391749358754, 0.855216183307163, 0.0, 12.041641595004167, 9.407378016378791, 7.180195874679377, 5.498352975743403, 9.541419951876204, 4.65932576399499, 3.6240823710965873, 3.1154393864051255, 4.502510179373431, 3.3991128927908543, 1.966920846758446, 0.7558812136368653, 0.0), # 147
(11.288809977772631, 8.24985041105647, 9.793670642292932, 10.146703565602587, 8.964331048512523, 4.347598925069094, 3.599341184685839, 3.3191168084236504, 4.757941439146947, 1.822465853935457, 1.428377541057596, 0.8508691612670749, 0.0, 11.986239048489919, 9.359560773937822, 7.141887705287981, 5.4673975618063695, 9.515882878293894, 4.646763531793111, 3.599341184685839, 3.105427803620781, 4.482165524256262, 3.38223452186753, 1.9587341284585866, 0.7499864010051337, 0.0), # 148
(11.214323843675977, 8.1833371233599, 9.751434380959186, 10.094533448586619, 8.922496016731612, 4.33306456218041, 3.573902215164709, 3.3097084654369557, 4.744670530075158, 1.8118241728645852, 1.4204687645654126, 0.8463871772176558, 0.0, 11.929254113026934, 9.310258949394212, 7.102343822827062, 5.4354725185937545, 9.489341060150316, 4.6335918516117385, 3.573902215164709, 3.09504611584315, 4.461248008365806, 3.3648444828622073, 1.950286876191837, 0.7439397384872637, 0.0), # 149
(11.137954049765991, 8.115075995441773, 9.707849469911476, 10.040772694188746, 8.879487147897825, 4.317989509170021, 3.5477336325029207, 3.29983771234685, 4.730874241637018, 1.8008446315990123, 1.412301529300607, 0.8417648679508558, 0.0, 11.870644377591507, 9.259413547459413, 7.061507646503035, 5.402533894797036, 9.461748483274036, 4.61977279728559, 3.5477336325029207, 3.084278220835729, 4.439743573948912, 3.3469242313962493, 1.9415698939822956, 0.7377341814037977, 0.0), # 150
(11.059649683954586, 8.044989535828057, 9.6628699292703, 9.985365669273047, 8.835276326504857, 4.302351222906816, 3.5208036066701984, 3.2894774590352758, 4.716529566746802, 1.789512579702568, 1.4038645191044614, 0.8369968702586252, 0.0, 11.810367431159946, 9.206965572844876, 7.019322595522306, 5.368537739107703, 9.433059133493604, 4.605268442649386, 3.5208036066701984, 3.0731080163620117, 4.417638163252429, 3.3284552230910167, 1.9325739858540603, 0.731362685075278, 0.0), # 151
(10.979359834153682, 7.973000253044715, 9.616449779156152, 9.928256740703617, 8.789835437046412, 4.286127160259694, 3.4930803076362653, 3.2786006153841747, 4.701613498318786, 1.7778133667390779, 1.3951464178182584, 0.8320778209329146, 0.0, 11.748380862708558, 9.15285603026206, 6.975732089091292, 5.333440100217232, 9.403226996637573, 4.590040861537845, 3.4930803076362653, 3.061519400185496, 4.394917718523206, 3.309418913567873, 1.9232899558312306, 0.7248182048222469, 0.0), # 152
(10.897033588275185, 7.899030655617714, 9.568543039689514, 9.86939027534453, 8.743136364016186, 4.269294778097547, 3.4645319053708437, 3.2671800912754865, 4.686103029267251, 1.7657323422723707, 1.3861359092832806, 0.8270023567656742, 0.0, 11.68464226121364, 9.097025924422415, 6.930679546416402, 5.297197026817111, 9.372206058534502, 4.574052127785681, 3.4645319053708437, 3.049496270069676, 4.371568182008093, 3.2897967584481775, 1.9137086079379029, 0.7180936959652467, 0.0), # 153
(10.81262003423102, 7.823003252073014, 9.519103730990887, 9.80871064005988, 8.695150991907875, 4.251831533289268, 3.43512656984366, 3.2551887965911552, 4.6699751525064706, 1.7532548558662742, 1.3768216773408095, 0.8217651145488547, 0.0, 11.6191092156515, 9.0394162600374, 6.884108386704048, 5.259764567598821, 9.339950305012941, 4.557264315227617, 3.43512656984366, 3.037022523778049, 4.347575495953937, 3.2695702133532945, 1.9038207461981775, 0.7111821138248196, 0.0), # 154
(10.72606825993309, 7.744840550936584, 9.468085873180756, 9.746162201713748, 8.645851205215184, 4.233714882703753, 3.404832471024433, 3.2425996412131215, 4.653206860950727, 1.7403662570846146, 1.3671924058321279, 0.8163607310744064, 0.0, 11.551739314998438, 8.97996804181847, 6.8359620291606396, 5.221098771253843, 9.306413721901453, 4.53963949769837, 3.404832471024433, 3.0240820590741087, 4.322925602607592, 3.2487207339045834, 1.8936171746361512, 0.7040764137215078, 0.0), # 155
(10.637327353293314, 7.664465060734389, 9.415443486379615, 9.68168932717022, 8.595208888431804, 4.214922283209894, 3.37361777888289, 3.2293855350233276, 4.635775147514292, 1.727051895491221, 1.357236778598518, 0.8107838431342794, 0.0, 11.48249014823076, 8.918622274477073, 6.7861838929925895, 5.181155686473662, 9.271550295028584, 4.521139749032659, 3.37361777888289, 3.0106587737213526, 4.297604444215902, 3.2272297757234076, 1.8830886972759233, 0.6967695509758537, 0.0), # 156
(10.546346402223609, 7.581799289992394, 9.361130590707957, 9.615236383293386, 8.543195926051439, 4.195431191676585, 3.3414506633887537, 3.215519387903715, 4.6176570051114485, 1.7132971206499201, 1.3469434794812618, 0.8050290875204243, 0.0, 11.411319304324769, 8.855319962724668, 6.734717397406309, 5.1398913619497595, 9.235314010222897, 4.501727143065201, 3.3414506633887537, 2.996736565483275, 4.2715979630257195, 3.205078794431129, 1.8722261181415913, 0.6892544809083996, 0.0), # 157
(10.450553324967336, 7.495248171657732, 9.302523946219415, 9.544258060733807, 8.48743569881293, 4.174003322325641, 3.3075747046495003, 3.200048222203801, 4.597442309412912, 1.698678070701901, 1.335972342259087, 0.7988866158226731, 0.0, 11.335080203181485, 8.787752774049402, 6.679861711295434, 5.096034212105701, 9.194884618825824, 4.480067511085322, 3.3075747046495003, 2.9814309445183147, 4.243717849406465, 3.1814193535779363, 1.8605047892438833, 0.6813861974234302, 0.0), # 158
(10.335201473769764, 7.395933826819331, 9.224527454803487, 9.454176016727876, 8.414178555796186, 4.143513212539135, 3.2677489343700015, 3.17754122744589, 4.566999388570334, 1.6807983479345614, 1.3223972849777657, 0.7911589610963629, 0.0, 11.235598705688274, 8.70274857205999, 6.611986424888827, 5.042395043803683, 9.133998777140668, 4.448557718424246, 3.2677489343700015, 2.9596522946708106, 4.207089277898093, 3.1513920055759597, 1.8449054909606977, 0.6723576206199392, 0.0), # 159
(10.198820932866035, 7.28304080162725, 9.125574450948537, 9.343506385929302, 8.321992122590341, 4.103212058438943, 3.221570623868649, 3.147432860557619, 4.525465106040038, 1.6594219781520132, 1.3060272186755595, 0.7817252273702489, 0.0, 11.110988852451014, 8.598977501072737, 6.530136093377798, 4.978265934456038, 9.050930212080075, 4.406406004780667, 3.221570623868649, 2.9308657560278157, 4.160996061295171, 3.114502128643102, 1.8251148901897079, 0.6620946183297501, 0.0), # 160
(10.042510876420344, 7.1573051140366015, 9.006721467228694, 9.213301128944565, 8.211833582663305, 4.053588080615757, 3.1693770122048135, 3.1101003109807053, 4.473387224599541, 1.6347303676098288, 1.2870063860732652, 0.77067287137255, 0.0, 10.962523662746737, 8.477401585098049, 6.435031930366326, 4.904191102829485, 8.946774449199083, 4.354140435372988, 3.1693770122048135, 2.8954200575826836, 4.105916791331652, 3.071100376314856, 1.801344293445739, 0.6506641012760548, 0.0), # 161
(9.8673704785969, 7.01946278200249, 8.86902503621808, 9.064612206380144, 8.08466011948299, 3.9951294996602726, 3.1115053384378664, 3.0659207681568685, 4.411313507026364, 1.6069049225635816, 1.2654790298916783, 0.7580893498314843, 0.0, 10.791476155852466, 8.338982848146326, 6.3273951494583915, 4.820714767690744, 8.822627014052728, 4.292289075419616, 3.1115053384378664, 2.8536639283287664, 4.042330059741495, 3.0215374021267154, 1.773805007243616, 0.6381329801820447, 0.0), # 162
(9.674498913559898, 6.870249823480022, 8.71354169049082, 8.898491578842531, 7.941428916517308, 3.928324536163185, 3.048292841627181, 3.015271421527823, 4.339791716098023, 1.5761270492688444, 1.2415893928515955, 0.7440621194752707, 0.0, 10.599119351045232, 8.184683314227977, 6.207946964257977, 4.728381147806532, 8.679583432196045, 4.221379990138953, 3.048292841627181, 2.8059460972594175, 3.970714458258654, 2.9661638596141775, 1.742708338098164, 0.6245681657709112, 0.0), # 163
(9.464995355473539, 6.710402256424303, 8.54132796262104, 8.71599120693821, 7.783097157234176, 3.853661410715189, 2.9800767608321266, 2.9585294605352903, 4.259369614592037, 1.5425781539811894, 1.2154817176738126, 0.7286786370321272, 0.0, 10.386726267602059, 8.015465007353399, 6.077408588369063, 4.627734461943566, 8.518739229184074, 4.141941244749407, 2.9800767608321266, 2.752615293367992, 3.891548578617088, 2.905330402312737, 1.7082655925242083, 0.6100365687658459, 0.0), # 164
(9.239958978502024, 6.5406560987904445, 8.353440385182864, 8.518163051273666, 7.610622025101502, 3.771628343906979, 2.9071943351120755, 2.8960720746209856, 4.1705949652859235, 1.5064396429561904, 1.1873002470791263, 0.7120263592302724, 0.0, 10.155569924799979, 7.832289951532995, 5.936501235395631, 4.51931892886857, 8.341189930571847, 4.05450090446938, 2.9071943351120755, 2.694020245647842, 3.805311012550751, 2.839387683757889, 1.670688077036573, 0.5946050998900405, 0.0), # 165
(9.000488956809557, 6.361747368533551, 8.150935490750417, 8.306059072455376, 7.4249607035872005, 3.682713556329251, 2.8299828035264003, 2.8282764532266285, 4.074015530957201, 1.4678929224494195, 1.157189223788332, 0.6941927427979253, 0.0, 9.906923341916015, 7.636120170777177, 5.78594611894166, 4.403678767348258, 8.148031061914402, 3.95958703451728, 2.8299828035264003, 2.630509683092322, 3.7124803517936003, 2.768686357485126, 1.6301870981500834, 0.5783406698666865, 0.0), # 166
(8.747684464560333, 6.174412083608727, 7.934869811897824, 8.080731231089835, 7.2270703761591815, 3.5874052685726983, 2.7487794051344725, 2.7555197857939366, 3.9701790743833865, 1.4271193987164503, 1.1252928905222266, 0.6752652444633036, 0.0, 9.642059538227196, 7.427917689096338, 5.626464452611132, 4.28135819614935, 7.940358148766773, 3.8577277001115116, 2.7487794051344725, 2.562432334694784, 3.6135351880795907, 2.693577077029946, 1.5869739623795647, 0.5613101894189753, 0.0), # 167
(8.482644675918554, 5.979386261971081, 7.706299881199207, 7.843231487783524, 7.017908226285359, 3.4861917012280164, 2.663921378995663, 2.6781792617646265, 3.8596333583419993, 1.3843004780128556, 1.0917554900016058, 0.6553313209546264, 0.0, 9.362251533010546, 7.20864453050089, 5.458777450008029, 4.152901434038566, 7.7192667166839986, 3.7494509664704774, 2.663921378995663, 2.490136929448583, 3.5089541131426794, 2.614410495927842, 1.5412599762398416, 0.5435805692700985, 0.0), # 168
(8.206468765048422, 5.777405921575724, 7.466282231228694, 7.594611803142927, 6.798431437433646, 3.3795610748859013, 2.5757459641693443, 2.5966320705804184, 3.7429261456105576, 1.339617566594208, 1.0567212649472661, 0.6344784290001119, 0.0, 9.0687723455431, 6.9792627190012295, 5.28360632473633, 4.018852699782624, 7.485852291221115, 3.635284898812586, 2.5757459641693443, 2.413972196347072, 3.399215718716823, 2.5315372677143095, 1.493256446245739, 0.5252187201432478, 0.0), # 169
(7.9202559061141375, 5.569207080377758, 7.215873394560408, 7.335924137774526, 6.569597193071951, 3.268001610137046, 2.4845903997148873, 2.5112554016830275, 3.620605198966578, 1.2932520707160806, 1.020334458080004, 0.6127940253279787, 0.0, 8.762894995101878, 6.740734278607764, 5.101672290400019, 3.879756212148241, 7.241210397933156, 3.5157575623562387, 2.4845903997148873, 2.3342868643836043, 3.2847985965359756, 2.4453080459248424, 1.4431746789120816, 0.5062915527616144, 0.0), # 170
(7.6251052732799005, 5.355525756332291, 6.956129903768475, 7.068220452284813, 6.3323626766681915, 3.152001527572146, 2.390791924691664, 2.4224264445141737, 3.4932182811875796, 1.2453853966340462, 0.9827393121206148, 0.5903655666664452, 0.0, 8.445892500963913, 6.494021233330896, 4.913696560603074, 3.736156189902138, 6.986436562375159, 3.3913970223198433, 2.390791924691664, 2.2514296625515327, 3.1661813383340958, 2.356073484094938, 1.391225980753695, 0.4868659778483902, 0.0), # 171
(7.322116040709912, 5.137097967394431, 6.688108291427019, 6.792552707280267, 6.087685071690277, 3.0320490477818964, 2.2946877781590462, 2.3305223885155746, 3.3613131550510804, 1.1961989506036783, 0.9440800697898953, 0.56728050974373, 0.0, 8.119037882406225, 6.24008560718103, 4.720400348949476, 3.588596851811034, 6.722626310102161, 3.2627313439218044, 2.2946877781590462, 2.165749319844212, 3.0438425358451386, 2.2641842357600894, 1.337621658285404, 0.4670089061267665, 0.0), # 172
(7.012387382568372, 4.914659731519285, 6.412865090110164, 6.509972863367375, 5.836521561606121, 2.9086323913569916, 2.196615199176405, 2.235920423128947, 3.225437583334597, 1.145874138880549, 0.9045009738086416, 0.5436263112880514, 0.0, 7.783604158705848, 5.979889424168563, 4.522504869043208, 3.437622416641646, 6.450875166669194, 3.130288592380526, 2.196615199176405, 2.077594565254994, 2.9182607808030605, 2.169990954455792, 1.282573018022033, 0.446787248319935, 0.0), # 173
(6.697018473019482, 4.6889470666619575, 6.131456832392036, 6.221532881152618, 5.579829329883635, 2.7822397788881266, 2.096911426803113, 2.1389977377960108, 3.08613932881565, 1.0945923677202316, 0.8641462668976501, 0.519490428027628, 0.0, 7.440864349139807, 5.7143947083039075, 4.32073133448825, 3.283777103160694, 6.1722786576313, 2.994596832914415, 2.096911426803113, 1.9873141277772333, 2.7899146649418176, 2.07384429371754, 1.2262913664784072, 0.42626791515108714, 0.0), # 174
(6.377108486227438, 4.460695990777558, 5.84494005084676, 5.928284721242486, 5.318565559990731, 2.653359430965997, 1.9959137000985407, 2.040131521958481, 2.943966154271756, 1.0425350433782987, 0.8231601917777163, 0.49496031669067847, 0.0, 7.092091472985131, 5.444563483597462, 4.115800958888581, 3.1276051301348957, 5.887932308543512, 2.8561841307418736, 1.9959137000985407, 1.8952567364042836, 2.6592827799953653, 1.9760949070808291, 1.1689880101693522, 0.40551781734341447, 0.0), # 175
(6.053756596356447, 4.230642521821194, 5.554371278048459, 5.631280344243462, 5.053687435395322, 2.5224795681812964, 1.8939592581220606, 1.9396989650580787, 2.7994658224804327, 0.9898835721103237, 0.781686991169637, 0.470123434005421, 0.0, 6.738558549518844, 5.17135777405963, 3.9084349558481852, 2.9696507163309707, 5.5989316449608655, 2.71557855108131, 1.8939592581220606, 1.8017711201294973, 2.526843717697661, 1.8770934480811543, 1.1108742556096918, 0.38460386562010856, 0.0), # 176
(5.7280619775707065, 3.9995226777479713, 5.260807046571258, 5.331571710762027, 4.786152139565322, 2.3900884111247205, 1.791385339933044, 1.8380772565365193, 2.6531860962191995, 0.9368193601718788, 0.7398709077942084, 0.4450672367000743, 0.0, 6.381538598017975, 4.895739603700816, 3.699354538971042, 2.8104580805156356, 5.306372192438399, 2.5733081591511273, 1.791385339933044, 1.707206007946229, 2.393076069782661, 1.7771905702540096, 1.0521614093142517, 0.3635929707043611, 0.0), # 177
(5.401123804034416, 3.7680724765129963, 4.9653038889892835, 5.030210781404673, 4.516916855968639, 2.2566741803869648, 1.6885291845908623, 1.7356435858355217, 2.505674738265573, 0.8835238138185378, 0.6978561843722264, 0.41987918150285664, 0.0, 6.022304637759553, 4.618670996531422, 3.489280921861132, 2.6505714414556127, 5.011349476531146, 2.4299010201697304, 1.6885291845908623, 1.611910128847832, 2.2584584279843196, 1.6767369271348913, 0.9930607777978567, 0.34255204331936334, 0.0), # 178
(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0), # 179
)
passenger_allighting_rate = (
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 0
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 1
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 2
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 3
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 4
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 5
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 6
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 7
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 8
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 9
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 10
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 11
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 12
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 13
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 14
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 15
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 16
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 17
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 18
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 19
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 20
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 21
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 22
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 23
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 24
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 25
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 26
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 27
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 28
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 29
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 30
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 31
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 32
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 33
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 34
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 35
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 36
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 37
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 38
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 39
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 40
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 41
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 42
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 43
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 44
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 45
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 46
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 47
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 48
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 49
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 50
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 51
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 52
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 53
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 54
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 55
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 56
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 57
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 58
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 59
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 60
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 61
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 62
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 63
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 64
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 65
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 66
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 67
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 68
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 69
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 70
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 71
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 72
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 73
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 74
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 75
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 76
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 77
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 78
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 79
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 80
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 81
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 82
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 83
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 84
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 85
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 86
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 87
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 88
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 89
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 90
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 91
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 92
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 93
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 94
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 95
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 96
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 97
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 98
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 99
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 100
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 101
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 102
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 103
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 104
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 105
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 106
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 107
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 108
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 109
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 110
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 111
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 112
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 113
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 114
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 115
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 116
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 117
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 118
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 119
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 120
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 121
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 122
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 123
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 124
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 125
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 126
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 127
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 128
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 129
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 130
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 131
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 132
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 133
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 134
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 135
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 136
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 137
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 138
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 139
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 140
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 141
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 142
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 143
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 144
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 145
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 146
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 147
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 148
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 149
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 150
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 151
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 152
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 153
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 154
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 155
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 156
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 157
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 158
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 159
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 160
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 161
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 162
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 163
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 164
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 165
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 166
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 167
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 168
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 169
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 170
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 171
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 172
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 173
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 174
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 175
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 176
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 177
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 178
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 179
)
"""
parameters for reproducibility. More information: https://numpy.org/doc/stable/reference/random/parallel.html
"""
# Root entropy used to seed the random-number generation
# (see numpy.random.SeedSequence for how this value is consumed).
entropy = 8991598675325360468762009371570610170

# Indices of the SeedSequence children spawned from the root entropy;
# each index selects one independent child stream.
child_seed_index = (1, 38)
| 278.927273 | 492 | 0.771761 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.