| column | dtype | range / lengths | nullable |
|---|---|---|---|
| hexsha | string | lengths 40 to 40 | no |
| size | int64 | 1 to 1.03M | no |
| ext | string | 10 classes | no |
| lang | string | 1 class | no |
| max_stars_repo_path | string | lengths 3 to 239 | no |
| max_stars_repo_name | string | lengths 5 to 130 | no |
| max_stars_repo_head_hexsha | string | lengths 40 to 78 | no |
| max_stars_repo_licenses | list | lengths 1 to 10 | no |
| max_stars_count | int64 | 1 to 191k | yes (⌀) |
| max_stars_repo_stars_event_min_datetime | string | lengths 24 to 24 | yes (⌀) |
| max_stars_repo_stars_event_max_datetime | string | lengths 24 to 24 | yes (⌀) |
| max_issues_repo_path | string | lengths 3 to 239 | no |
| max_issues_repo_name | string | lengths 5 to 130 | no |
| max_issues_repo_head_hexsha | string | lengths 40 to 78 | no |
| max_issues_repo_licenses | list | lengths 1 to 10 | no |
| max_issues_count | int64 | 1 to 67k | yes (⌀) |
| max_issues_repo_issues_event_min_datetime | string | lengths 24 to 24 | yes (⌀) |
| max_issues_repo_issues_event_max_datetime | string | lengths 24 to 24 | yes (⌀) |
| max_forks_repo_path | string | lengths 3 to 239 | no |
| max_forks_repo_name | string | lengths 5 to 130 | no |
| max_forks_repo_head_hexsha | string | lengths 40 to 78 | no |
| max_forks_repo_licenses | list | lengths 1 to 10 | no |
| max_forks_count | int64 | 1 to 105k | yes (⌀) |
| max_forks_repo_forks_event_min_datetime | string | lengths 24 to 24 | yes (⌀) |
| max_forks_repo_forks_event_max_datetime | string | lengths 24 to 24 | yes (⌀) |
| content | string | lengths 1 to 1.03M | no |
| avg_line_length | float64 | 1 to 958k | no |
| max_line_length | int64 | 1 to 1.03M | no |
| alphanum_fraction | float64 | 0 to 1 | no |
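Rows with this schema can be consumed directly with the Hugging Face `datasets` library. A minimal sketch follows, assuming the dump comes from a Stack-style dataset on the Hub; the dataset name and `data_dir` are illustrative assumptions, not something this dump states.

```python
# Minimal sketch, assuming a Stack-style dataset on the Hugging Face Hub;
# the dataset name and data_dir are illustrative assumptions.
from datasets import load_dataset

ds = load_dataset("bigcode/the-stack", data_dir="data/python",
                  split="train", streaming=True)

for record in ds.take(3):
    # Each record carries the columns listed in the schema table above.
    print(record["hexsha"], record["size"], record["max_stars_repo_path"])
    print(record["content"][:200])  # first 200 characters of the source file
```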
hexsha: 4a176365969ad2eb5bafa9d7782f054b890effc0 | size: 803 | ext: py | lang: Python
max_stars: count=3, min=2019-03-15T23:43:26.000Z, max=2021-07-05T10:21:48.000Z, path=manage.py, repo=dumel93/project-, head=f9ad52d9c8449953e2151fd1c13b39631113eea7, licenses=["MIT"]
max_issues: count=null, min=null, max=null, path=manage.py, repo=dumel93/project-, head=f9ad52d9c8449953e2151fd1c13b39631113eea7, licenses=["MIT"]
max_forks: count=null, min=null, max=null, path=manage.py, repo=dumel93/project-, head=f9ad52d9c8449953e2151fd1c13b39631113eea7, licenses=["MIT"]
content:
```python
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "typer.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
```

avg_line_length: 34.913043 | max_line_length: 77 | alphanum_fraction: 0.641345
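The `manage.py` above only points `DJANGO_SETTINGS_MODULE` at `typer.settings` and hands `sys.argv` to `execute_from_command_line`. As a minimal sketch, the same dispatch can be done programmatically with Django's `call_command`; only the `typer.settings` module name comes from the record above, and `check` is just an example command.

```python
# Minimal sketch: run a management command programmatically, equivalent to
# `python manage.py check`. Assumes the `typer.settings` module from the
# record above is importable.
import os

import django
from django.core.management import call_command

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "typer.settings")
django.setup()         # load settings and populate the app registry
call_command("check")  # same dispatch as manage.py, from Python code
```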
hexsha: 4a17644a3fc03e3381aefecc3c3a08464b190760 | size: 6,759 | ext: py | lang: Python
max_stars: count=null, min=null, max=null, path=src/datadog_api_client/v2/model/relationship_to_user_data.py, repo=mrhwick/datadog-api-client-python, head=9f57bf378b80b7558070087182722f4ca88d630d, licenses=["Apache-2.0"]
max_issues: count=null, min=null, max=null, path=src/datadog_api_client/v2/model/relationship_to_user_data.py, repo=mrhwick/datadog-api-client-python, head=9f57bf378b80b7558070087182722f4ca88d630d, licenses=["Apache-2.0"]
max_forks: count=null, min=null, max=null, path=src/datadog_api_client/v2/model/relationship_to_user_data.py, repo=mrhwick/datadog-api-client-python, head=9f57bf378b80b7558070087182722f4ca88d630d, licenses=["Apache-2.0"]
content:
```python
# coding: utf-8
# Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-Present Datadog, Inc.
import re # noqa: F401
import sys # noqa: F401
import nulltype # noqa: F401
from datadog_api_client.v2.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
class RelationshipToUserData(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
          and for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
          and for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'id': (str,), # noqa: E501
'type': (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'id': 'id', # noqa: E501
'type': 'type', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""RelationshipToUserData - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
            id (str): ID of the user. [optional]  # noqa: E501
            type (str): User's type. [optional] If omitted, the server will use the default value of "users".  # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
```

avg_line_length: 39.994083 | max_line_length: 121 | alphanum_fraction: 0.586033
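Going by the `openapi_types` and `attribute_map` shown above, the generated model holds two optional string attributes. A minimal usage sketch; the UUID is a placeholder, and `to_dict` comes from the generated `ModelNormal` base class.

```python
# Minimal sketch based on the generated model above; the id value is a
# placeholder, and `type` defaults to "users" server-side when omitted.
from datadog_api_client.v2.model.relationship_to_user_data import RelationshipToUserData

data = RelationshipToUserData(
    id="00000000-0000-0000-0000-000000000000",
    type="users",
)
print(data.to_dict())  # {'id': '00000000-0000-0000-0000-000000000000', 'type': 'users'}
```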
hexsha: 4a1764654b2959aaa0d4e8dfe74ebd572de731f1 | size: 1,156 | ext: py | lang: Python
max_stars: count=6, min=2019-12-07T07:30:34.000Z, max=2022-01-20T14:26:44.000Z, path=environment/Lib/site-packages/openpyxl/chart/picture.py, repo=pumbas600/CriticalPath, head=31889c875dedf733aeb9a4ebeba8bf8930e86176, licenses=["MIT"]
max_issues: count=11, min=2020-06-06T01:28:35.000Z, max=2022-03-12T00:16:34.000Z, path=environment/Lib/site-packages/openpyxl/chart/picture.py, repo=pumbas600/CriticalPath, head=31889c875dedf733aeb9a4ebeba8bf8930e86176, licenses=["MIT"]
max_forks: count=1, min=2021-06-29T11:46:07.000Z, max=2021-06-29T11:46:07.000Z, path=environment/Lib/site-packages/openpyxl/chart/picture.py, repo=pumbas600/CriticalPath, head=31889c875dedf733aeb9a4ebeba8bf8930e86176, licenses=["MIT"]
content:
```python
# Copyright (c) 2010-2019 openpyxl
from openpyxl.descriptors.serialisable import Serialisable
from openpyxl.descriptors.nested import (
NestedBool,
NestedFloat,
NestedMinMax,
NestedNoneSet,
)
class PictureOptions(Serialisable):
tagname = "pictureOptions"
applyToFront = NestedBool(allow_none=True, nested=True)
applyToSides = NestedBool(allow_none=True, nested=True)
applyToEnd = NestedBool(allow_none=True, nested=True)
pictureFormat = NestedNoneSet(values=(['stretch', 'stack', 'stackScale']), nested=True)
pictureStackUnit = NestedFloat(allow_none=True, nested=True)
__elements__ = ('applyToFront', 'applyToSides', 'applyToEnd', 'pictureFormat', 'pictureStackUnit')
def __init__(self,
applyToFront=None,
applyToSides=None,
applyToEnd=None,
pictureFormat=None,
pictureStackUnit=None,
):
self.applyToFront = applyToFront
self.applyToSides = applyToSides
self.applyToEnd = applyToEnd
self.pictureFormat = pictureFormat
self.pictureStackUnit = pictureStackUnit
```

avg_line_length: 32.111111 | max_line_length: 102 | alphanum_fraction: 0.676471
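A minimal sketch of using the class above: openpyxl chart series expose a `pictureOptions` attribute, and `'stretch'` is one of the `pictureFormat` values allowed by the `NestedNoneSet` descriptor. The workbook contents and filename are illustrative.

```python
# Minimal sketch: build PictureOptions and attach it to a bar-chart series.
from openpyxl import Workbook
from openpyxl.chart import BarChart, Reference, Series
from openpyxl.chart.picture import PictureOptions

wb = Workbook()
ws = wb.active
for row in [["a", 1], ["b", 2], ["c", 3]]:
    ws.append(row)

chart = BarChart()
series = Series(Reference(ws, min_col=2, min_row=1, max_row=3), title="demo")
series.pictureOptions = PictureOptions(applyToFront=True, pictureFormat="stretch")
chart.series.append(series)
ws.add_chart(chart, "E1")
wb.save("picture_options_demo.xlsx")
```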
hexsha: 4a1764b56ddc665fae993e48f9aefbea177e6cf9 | size: 9,480 | ext: py | lang: Python
max_stars: count=null, min=null, max=null, path=tests/unit/python/foglamp/services/core/api/test_support.py, repo=ashwinscale/FogLAMP, head=dac6f286d31978b6ce00303df8398ea5b2031d79, licenses=["Apache-2.0"]
max_issues: count=1, min=2018-05-10T16:04:34.000Z, max=2018-05-10T16:04:34.000Z, path=tests/unit/python/foglamp/services/core/api/test_support.py, repo=ashwinscale/FogLAMP, head=dac6f286d31978b6ce00303df8398ea5b2031d79, licenses=["Apache-2.0"]
max_forks: count=null, min=null, max=null, path=tests/unit/python/foglamp/services/core/api/test_support.py, repo=ashwinscale/FogLAMP, head=dac6f286d31978b6ce00303df8398ea5b2031d79, licenses=["Apache-2.0"]
content:
```python
# -*- coding: utf-8 -*-
# FOGLAMP_BEGIN
# See: http://foglamp.readthedocs.io/
# FOGLAMP_END
import json
import pathlib
from pathlib import PosixPath
from unittest.mock import patch, mock_open, Mock, MagicMock
from aiohttp import web
import pytest
from foglamp.services.core import routes
from foglamp.services.core.api import support
from foglamp.services.core.support import *
__author__ = "Ashish Jabble"
__copyright__ = "Copyright (c) 2018 OSIsoft, LLC"
__license__ = "Apache 2.0"
__version__ = "${VERSION}"
@pytest.allure.feature("unit")
@pytest.allure.story("api", "bundle-support")
class TestBundleSupport:
@pytest.fixture
def client(self, loop, test_client):
app = web.Application(loop=loop)
# fill the routes table
routes.setup(app)
return loop.run_until_complete(test_client(app))
@pytest.fixture
def support_bundles_dir_path(self):
return pathlib.Path(__file__).parent
@pytest.mark.parametrize("data, expected_content, expected_count", [
(['support-180301-13-35-23.tar.gz', 'support-180301-13-13-13.tar.gz'], {'bundles': ['support-180301-13-35-23.tar.gz', 'support-180301-13-13-13.tar.gz']}, 2),
(['support-180301-15-25-02.tar.gz', 'foglamp.txt'], {'bundles': ['support-180301-15-25-02.tar.gz']}, 1),
(['foglamp.txt'], {'bundles': []}, 0),
([], {'bundles': []}, 0)
])
async def test_get_support_bundle(self, client, support_bundles_dir_path, data, expected_content, expected_count):
path = support_bundles_dir_path / 'support'
with patch.object(support, '_get_support_dir', return_value=path):
with patch('os.walk') as mockwalk:
mockwalk.return_value = [(path, [], data)]
resp = await client.get('/foglamp/support')
assert 200 == resp.status
res = await resp.text()
jdict = json.loads(res)
assert expected_count == len(jdict['bundles'])
assert expected_content == jdict
mockwalk.assert_called_once_with(path)
async def test_get_support_bundle_by_name(self, client, support_bundles_dir_path):
gz_filepath = Mock()
gz_filepath.open = mock_open()
gz_filepath.is_file.return_value = True
gz_filepath.stat.return_value = MagicMock()
gz_filepath.stat.st_size = 1024
bundle_name = 'support-180301-13-35-23.tar.gz'
filepath = Mock()
filepath.name = bundle_name
filepath.open = mock_open()
filepath.with_name.return_value = gz_filepath
with patch("aiohttp.web.FileResponse", return_value=web.FileResponse(path=filepath)) as f_res:
path = support_bundles_dir_path / 'support'
with patch.object(support, '_get_support_dir', return_value=path):
with patch('os.path.isdir', return_value=True):
with patch('os.walk') as mockwalk:
mockwalk.return_value = [(path, [], [bundle_name])]
resp = await client.get('/foglamp/support/{}'.format(bundle_name))
assert 200 == resp.status
assert 'OK' == resp.reason
mockwalk.assert_called_once_with(path)
args, kwargs = f_res.call_args
assert {'path': PosixPath(pathlib.Path(path) / str(bundle_name))} == kwargs
assert 1 == f_res.call_count
@pytest.mark.parametrize("data, request_bundle_name", [
(['support-180301-13-35-23.tar.gz'], 'xsupport-180301-01-15-13.tar.gz'),
([], 'support-180301-13-13-13.tar.gz')
])
async def test_get_support_bundle_by_name_not_found(self, client, support_bundles_dir_path, data, request_bundle_name):
path = support_bundles_dir_path / 'support'
with patch.object(support, '_get_support_dir', return_value=path):
with patch('os.path.isdir', return_value=True):
with patch('os.walk') as mockwalk:
mockwalk.return_value = [(path, [], data)]
resp = await client.get('/foglamp/support/{}'.format(request_bundle_name))
assert 404 == resp.status
assert '{} not found'.format(request_bundle_name) == resp.reason
mockwalk.assert_called_once_with(path)
async def test_get_support_bundle_by_name_bad_request(self, client):
resp = await client.get('/foglamp/support/support-180301-13-35-23.tar')
assert 400 == resp.status
assert 'Bundle file extension is invalid' == resp.reason
async def test_get_support_bundle_by_name_no_dir(self, client, support_bundles_dir_path):
path = support_bundles_dir_path / 'invalid'
with patch.object(support, '_get_support_dir', return_value=path):
with patch('os.path.isdir', return_value=False) as mockisdir:
resp = await client.get('/foglamp/support/bla.tar.gz')
assert 404 == resp.status
assert 'Support bundle directory does not exist' == resp.reason
mockisdir.assert_called_once_with(path)
async def test_create_support_bundle(self, client):
def mock_build():
return 'support-180301-13-35-23.tar.gz'
with patch.object(SupportBuilder, "__init__", return_value=None):
with patch.object(SupportBuilder, "build", return_value=mock_build()):
resp = await client.post('/foglamp/support')
res = await resp.text()
jdict = json.loads(res)
assert 200 == resp.status
assert {"bundle created": "support-180301-13-35-23.tar.gz"} == jdict
async def test_create_support_bundle_exception(self, client):
with patch.object(SupportBuilder, "__init__", return_value=None):
with patch.object(SupportBuilder, "build", side_effect=RuntimeError("blah")):
resp = await client.post('/foglamp/support')
res = await resp.text()
assert 500 == resp.status
assert "Support bundle could not be created. blah" == resp.reason
async def test_get_syslog_entries_all_ok(self, client):
def mock_syslog():
return """
echo "Mar 19 14:00:53 nerd51-ThinkPad FogLAMP[18809] INFO: server: foglamp.services.core.server: start core
Mar 19 14:00:53 nerd51-ThinkPad FogLAMP[18809] INFO: server: foglamp.services.core.server: Management API started on http://0.0.0.0:38311
Mar 19 14:00:53 nerd51-ThinkPad FogLAMP[18809] INFO: server: foglamp.services.core.server: start storage, from directory /home/asinha/Development/FogLAMP/scripts
Mar 19 14:00:54 nerd51-ThinkPad FogLAMP[18809] INFO: service_registry: foglamp.services.core.service_registry.service_registry: Registered service instance id=479a90ec-0d1d-4845-b2c5-f1d9ce72ac8e: <FogLAMP Storage, type=Storage, protocol=http, address=localhost, service port=33395, management port=45952, status=1>
Mar 19 14:00:58 nerd51-ThinkPad FogLAMP[18809] INFO: server: foglamp.services.core.server: start scheduler
Mar 19 14:00:58 nerd51-ThinkPad FogLAMP[18809] INFO: scheduler: foglamp.services.core.scheduler.scheduler: Starting
Mar 19 14:00:58 nerd51-ThinkPad FogLAMP[18809] INFO: scheduler: foglamp.services.core.scheduler.scheduler: Starting Scheduler: Management port received is 38311
Mar 19 14:00:58 nerd51-ThinkPad FogLAMP[18809] INFO: scheduler: foglamp.services.core.scheduler.scheduler: Scheduled task for schedule 'purge' to start at 2018-03-19 15:00:58.912532
Mar 19 14:00:58 nerd51-ThinkPad FogLAMP[18809] INFO: scheduler: foglamp.services.core.scheduler.scheduler: Scheduled task for schedule 'stats collection' to start at 2018-03-19 14:01:13.912532
Mar 19 14:00:58 nerd51-ThinkPad FogLAMP[18809] INFO: scheduler: foglamp.services.core.scheduler.scheduler: Scheduled task for schedule 'certificate checker' to start at 2018-03-19 15:05:00"
"""
with patch.object(support, "__GET_SYSLOG_CMD_TEMPLATE", mock_syslog()):
resp = await client.get('/foglamp/syslog')
res = await resp.text()
jdict = json.loads(res)
assert 200 == resp.status
async def test_get_syslog_entries_limit_exception(self, client):
with patch.object(support, "__DEFAULT_LIMIT", "garbage"):
resp = await client.get('/foglamp/syslog')
assert 400 == resp.status
assert 'Limit must be a positive integer' == resp.reason
async def test_get_syslog_entries_offset_exception(self, client):
with patch.object(support, "__DEFAULT_OFFSET", "garbage"):
resp = await client.get('/foglamp/syslog')
assert 400 == resp.status
assert 'Offset must be a positive integer OR Zero' == resp.reason
async def test_get_syslog_entries_search_exception(self, client):
with patch.object(support, "__DEFAULT_LOG_TYPE", "garbage"):
resp = await client.get('/foglamp/syslog')
assert 400 == resp.status
assert 'garbage is not a valid source' == resp.reason
async def test_get_syslog_entries_cmd_exception(self, client):
with patch.object(subprocess, "Popen", side_effect=Exception):
resp = await client.get('/foglamp/syslog')
assert 500 == resp.status
assert 'Internal Server Error' == resp.reason
```

avg_line_length: 52.087912 | max_line_length: 323 | alphanum_fraction: 0.65865
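The tests above lean on one pattern throughout: patch the support-directory lookup and `os.walk`, then assert on what the handler returns. A stripped-down sketch of that pattern in isolation; `list_bundles` is a hypothetical stand-in for the real route handler.

```python
# Stripped-down sketch of the mocking pattern used in the tests above.
# `list_bundles` is a hypothetical stand-in for the real route handler.
import os
from unittest.mock import patch

def list_bundles(root):
    return [f for _, _, files in os.walk(root) for f in files
            if f.endswith(".tar.gz")]

with patch("os.walk") as mockwalk:
    mockwalk.return_value = [("/support", [], ["support-180301-13-35-23.tar.gz",
                                               "foglamp.txt"])]
    assert list_bundles("/support") == ["support-180301-13-35-23.tar.gz"]
    mockwalk.assert_called_once_with("/support")
```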
hexsha: 4a176605da5dcd9578355543e7a4efc6fd622399 | size: 1,977 | ext: py | lang: Python
max_stars: count=1,520, min=2018-03-01T13:37:49.000Z, max=2022-03-25T11:40:20.000Z, path=pymagnitude/third_party/allennlp/tests/modules/seq2seq_encoders/multi_head_self_attention_test.py, repo=tpeng/magnitude, head=aec98628b5547773ca8c4114ec6d1ad51e21b230, licenses=["MIT"]
max_issues: count=87, min=2018-03-03T15:12:50.000Z, max=2022-02-21T15:24:12.000Z, path=pymagnitude/third_party/allennlp/tests/modules/seq2seq_encoders/multi_head_self_attention_test.py, repo=tpeng/magnitude, head=aec98628b5547773ca8c4114ec6d1ad51e21b230, licenses=["MIT"]
max_forks: count=121, min=2018-03-03T08:40:53.000Z, max=2022-03-16T05:19:38.000Z, path=pymagnitude/third_party/allennlp/tests/modules/seq2seq_encoders/multi_head_self_attention_test.py, repo=tpeng/magnitude, head=aec98628b5547773ca8c4114ec6d1ad51e21b230, licenses=["MIT"]
content:
```python
# pylint: disable=invalid-name,no-self-use,too-many-public-methods
from __future__ import absolute_import
import numpy
import torch
from allennlp.common.testing import AllenNlpTestCase
from allennlp.modules.seq2seq_encoders import MultiHeadSelfAttention
from allennlp.common.params import Params
class MultiHeadSelfAttentionTest(AllenNlpTestCase):
def test_multi_head_self_attention_can_build_from_params(self):
params = Params({u"num_heads": 3, u"input_dim": 2, u"attention_dim": 3, u"values_dim": 6})
encoder = MultiHeadSelfAttention.from_params(params)
assert isinstance(encoder, MultiHeadSelfAttention)
assert encoder.get_input_dim() == 2
assert encoder.get_output_dim() == 2
def test_multi_head_self_attention_runs_forward(self):
attention = MultiHeadSelfAttention(num_heads=3,
input_dim=5,
attention_dim=6,
values_dim=9)
inputs = torch.randn(2, 12, 5)
assert list(attention(inputs).size()) == [2, 12, 5]
def test_multi_head_self_attention_respects_masking(self):
attention = MultiHeadSelfAttention(num_heads=3,
input_dim=5,
attention_dim=6,
values_dim=9,
attention_dropout_prob=0.0)
tensor = torch.randn(2, 12, 5)
mask = torch.ones([2, 12])
mask[0, 6:] = 0
result = attention(tensor, mask)
# Compute the same function without a mask, but with
# only the unmasked elements - should be the same.
result_without_mask = attention(tensor[:, :6, :])
numpy.testing.assert_almost_equal(result[0, :6, :].detach().cpu().numpy(),
result_without_mask[0, :, :].detach().cpu().numpy())
```

avg_line_length: 43.933333 | max_line_length: 98 | alphanum_fraction: 0.592312
hexsha: 4a1768f440a49b2e56c80eb34d28246b74ff2fe6 | size: 21,788 | ext: py | lang: Python
max_stars: count=64, min=2020-06-12T07:05:07.000Z, max=2022-03-30T03:32:50.000Z, path=huaweicloud-sdk-aom/huaweicloudsdkaom/v2/aom_async_client.py, repo=huaweicloud/huaweicloud-sdk-python-v3, head=7a6270390fcbf192b3882bf763e7016e6026ef78, licenses=["Apache-2.0"]
max_issues: count=11, min=2020-07-06T07:56:54.000Z, max=2022-01-11T11:14:40.000Z, path=huaweicloud-sdk-aom/huaweicloudsdkaom/v2/aom_async_client.py, repo=huaweicloud/huaweicloud-sdk-python-v3, head=7a6270390fcbf192b3882bf763e7016e6026ef78, licenses=["Apache-2.0"]
max_forks: count=24, min=2020-06-08T11:42:13.000Z, max=2022-03-04T06:44:08.000Z, path=huaweicloud-sdk-aom/huaweicloudsdkaom/v2/aom_async_client.py, repo=huaweicloud/huaweicloud-sdk-python-v3, head=7a6270390fcbf192b3882bf763e7016e6026ef78, licenses=["Apache-2.0"]
content:
```python
# coding: utf-8
from __future__ import absolute_import
import datetime
import re
import importlib
import six
from huaweicloudsdkcore.client import Client, ClientBuilder
from huaweicloudsdkcore.exceptions import exceptions
from huaweicloudsdkcore.utils import http_utils
from huaweicloudsdkcore.sdk_stream_request import SdkStreamRequest
class AomAsyncClient(Client):
"""
    :param configuration: Configuration object for this client
:param pool_threads: The number of threads to use for async requests
to the API. More threads means more concurrent API requests.
"""
PRIMITIVE_TYPES = (float, bool, bytes, six.text_type) + six.integer_types
NATIVE_TYPES_MAPPING = {
'int': int,
'long': int if six.PY3 else long,
'float': float,
'str': str,
'bool': bool,
'date': datetime.date,
'datetime': datetime.datetime,
'object': object,
}
def __init__(self):
super(AomAsyncClient, self).__init__()
self.model_package = importlib.import_module("huaweicloudsdkaom.v2.model")
self.preset_headers = {'User-Agent': 'HuaweiCloud-SDK-Python'}
@classmethod
def new_builder(cls, clazz=None):
if clazz is None:
return ClientBuilder(cls)
if clazz.__name__ != "AomClient":
raise TypeError("client type error, support client type is AomClient")
return ClientBuilder(clazz)
def add_alarm_rule_async(self, request):
"""add_alarm_rule
        This API adds a threshold rule.
:param AddAlarmRuleRequest request
:return: AddAlarmRuleResponse
"""
return self.add_alarm_rule_with_http_info(request)
def add_alarm_rule_with_http_info(self, request):
"""add_alarm_rule
        This API adds a threshold rule.
:param AddAlarmRuleRequest request
:return: AddAlarmRuleResponse
"""
all_params = ['add_alarm_rule_body']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/{project_id}/alarm-rules',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='AddAlarmRuleResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
    def add_metric_data_async(self, request):
        """Add monitoring data
        This API adds one or more monitoring data points to the server.
:param AddMetricDataRequest request
:return: AddMetricDataResponse
"""
return self.add_metric_data_with_http_info(request)
    def add_metric_data_with_http_info(self, request):
        """Add monitoring data
        This API adds one or more monitoring data points to the server.
:param AddMetricDataRequest request
:return: AddMetricDataResponse
"""
all_params = ['metric_data_param']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v1/{project_id}/ams/report/metricdata',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='AddMetricDataResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def delete_alarm_rule_async(self, request):
"""delete_alarm_rule
        This API deletes a threshold rule.
:param DeleteAlarmRuleRequest request
:return: DeleteAlarmRuleResponse
"""
return self.delete_alarm_rule_with_http_info(request)
def delete_alarm_rule_with_http_info(self, request):
"""delete_alarm_rule
        This API deletes a threshold rule.
:param DeleteAlarmRuleRequest request
:return: DeleteAlarmRuleResponse
"""
all_params = ['alarm_rule_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'alarm_rule_id' in local_var_params:
path_params['alarm_rule_id'] = local_var_params['alarm_rule_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/{project_id}/alarm-rules/{alarm_rule_id}',
method='DELETE',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='DeleteAlarmRuleResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
    def deleteservice_discovery_rules_async(self, request):
        """Delete service discovery rules
        This API deletes service discovery rules.
:param DeleteserviceDiscoveryRulesRequest request
:return: DeleteserviceDiscoveryRulesResponse
"""
return self.deleteservice_discovery_rules_with_http_info(request)
    def deleteservice_discovery_rules_with_http_info(self, request):
        """Delete service discovery rules
        This API deletes service discovery rules.
:param DeleteserviceDiscoveryRulesRequest request
:return: DeleteserviceDiscoveryRulesResponse
"""
all_params = ['app_rules_ids']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
if 'app_rules_ids' in local_var_params:
query_params.append(('appRulesIds', local_var_params['app_rules_ids']))
collection_formats['appRulesIds'] = 'csv'
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v1/{project_id}/inv/servicediscoveryrules',
method='DELETE',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='DeleteserviceDiscoveryRulesResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def list_alarm_rule_async(self, request):
"""list_alarm_rule
        This API queries the list of threshold rules.
:param ListAlarmRuleRequest request
:return: ListAlarmRuleResponse
"""
return self.list_alarm_rule_with_http_info(request)
def list_alarm_rule_with_http_info(self, request):
"""list_alarm_rule
        This API queries the list of threshold rules.
:param ListAlarmRuleRequest request
:return: ListAlarmRuleResponse
"""
all_params = ['offset', 'limit']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
if 'offset' in local_var_params:
query_params.append(('offset', local_var_params['offset']))
if 'limit' in local_var_params:
query_params.append(('limit', local_var_params['limit']))
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/{project_id}/alarm-rules',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ListAlarmRuleResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
    def list_sample_async(self, request):
        """Query time-series data
        This API queries monitoring time-series data within a specified time range; parameters can select the data dimensions, data period, and so on.
:param ListSampleRequest request
:return: ListSampleResponse
"""
return self.list_sample_with_http_info(request)
    def list_sample_with_http_info(self, request):
        """Query time-series data
        This API queries monitoring time-series data within a specified time range; parameters can select the data dimensions, data period, and so on.
:param ListSampleRequest request
:return: ListSampleResponse
"""
all_params = ['list_sample_request_body', 'fill_value']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
if 'fill_value' in local_var_params:
query_params.append(('fill_value', local_var_params['fill_value']))
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/{project_id}/samples',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ListSampleResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
    def list_series_async(self, request):
        """Query time series
        This API queries the list of time series the system can currently monitor. You can specify the time-series namespace, name, dimensions, the owning resource ID (format: resType_resId), the pagination start position, and the maximum number of records to return.
:param ListSeriesRequest request
:return: ListSeriesResponse
"""
return self.list_series_with_http_info(request)
    def list_series_with_http_info(self, request):
        """Query time series
        This API queries the list of time series the system can currently monitor. You can specify the time-series namespace, name, dimensions, the owning resource ID (format: resType_resId), the pagination start position, and the maximum number of records to return.
:param ListSeriesRequest request
:return: ListSeriesResponse
"""
all_params = ['list_series_request_body', 'limit', 'offset']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
if 'limit' in local_var_params:
query_params.append(('limit', local_var_params['limit']))
if 'offset' in local_var_params:
query_params.append(('offset', local_var_params['offset']))
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/{project_id}/series',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ListSeriesResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
    def list_service_discovery_rules_async(self, request):
        """Query existing service discovery rules
        This API queries the service discovery rules that currently exist in the system.
:param ListServiceDiscoveryRulesRequest request
:return: ListServiceDiscoveryRulesResponse
"""
return self.list_service_discovery_rules_with_http_info(request)
    def list_service_discovery_rules_with_http_info(self, request):
        """Query existing service discovery rules
        This API queries the service discovery rules that currently exist in the system.
:param ListServiceDiscoveryRulesRequest request
:return: ListServiceDiscoveryRulesResponse
"""
all_params = ['id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
if 'id' in local_var_params:
query_params.append(('id', local_var_params['id']))
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v1/{project_id}/inv/servicediscoveryrules',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ListServiceDiscoveryRulesResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def show_alarm_rule_async(self, request):
"""show_alarm_rule
        Query a single threshold rule
:param ShowAlarmRuleRequest request
:return: ShowAlarmRuleResponse
"""
return self.show_alarm_rule_with_http_info(request)
def show_alarm_rule_with_http_info(self, request):
"""show_alarm_rule
        Query a single threshold rule
:param ShowAlarmRuleRequest request
:return: ShowAlarmRuleResponse
"""
all_params = ['alarm_rule_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'alarm_rule_id' in local_var_params:
path_params['alarm_rule_id'] = local_var_params['alarm_rule_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/{project_id}/alarm-rules/{alarm_rule_id}',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ShowAlarmRuleResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def update_alarm_rule_async(self, request):
"""update_alarm_rule
        Modify a threshold rule
:param UpdateAlarmRuleRequest request
:return: UpdateAlarmRuleResponse
"""
return self.update_alarm_rule_with_http_info(request)
def update_alarm_rule_with_http_info(self, request):
"""update_alarm_rule
        Modify a threshold rule
:param UpdateAlarmRuleRequest request
:return: UpdateAlarmRuleResponse
"""
all_params = ['update_alarm_rule_body']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/{project_id}/alarm-rules',
method='PUT',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='UpdateAlarmRuleResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def call_api(self, resource_path, method, path_params=None, query_params=None, header_params=None, body=None,
post_params=None, response_type=None, response_headers=None, auth_settings=None,
collection_formats=None, request_type=None):
"""Makes the HTTP request and returns deserialized data.
:param resource_path: Path to method endpoint.
:param method: Method to call.
:param path_params: Path parameters in the url.
:param query_params: Query parameters in the url.
:param header_params: Header parameters to be
placed in the request header.
:param body: Request body.
:param post_params dict: Request post form parameters,
for `application/x-www-form-urlencoded`, `multipart/form-data`.
:param auth_settings list: Auth Settings names for the request.
:param response_type: Response data type.
:param response_headers: Header should be added to response data.
:param collection_formats: dict of collection formats for path, query,
header, and post parameters.
:param request_type: Request data type.
:return:
Return the response directly.
"""
return self.do_http_request(
method=method,
resource_path=resource_path,
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body,
post_params=post_params,
response_type=response_type,
response_headers=response_headers,
collection_formats=collection_formats,
request_type=request_type,
async_request=True)
```

avg_line_length: 30.011019 | max_line_length: 113 | alphanum_fraction: 0.628006
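A minimal sketch of driving the client above via the standard Huawei Cloud SDK v3 builder pattern. The credentials and endpoint are placeholders; `list_alarm_rule_async` returns a future whose `result()` blocks, per the `async_request=True` flag passed by `call_api`.

```python
# Minimal sketch, assuming the standard huaweicloudsdkcore builder pattern;
# the AK/SK, project id, and endpoint below are placeholders.
from huaweicloudsdkcore.auth.credentials import BasicCredentials
from huaweicloudsdkaom.v2 import AomAsyncClient, ListAlarmRuleRequest

client = AomAsyncClient.new_builder() \
    .with_credentials(BasicCredentials("<ak>", "<sk>", "<project_id>")) \
    .with_endpoint("https://aom.cn-north-4.myhuaweicloud.com") \
    .build()

future = client.list_alarm_rule_async(ListAlarmRuleRequest(offset=0, limit=10))
print(future.result())  # block until the async request completes
```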
hexsha: 4a176b06145b76168b3068bf959fe1897e0d477a | size: 13,075 | ext: py | lang: Python
max_stars: count=2, min=2020-06-24T07:15:17.000Z, max=2020-06-24T07:15:20.000Z, path=tests/kafkatest/tests/core/security_rolling_upgrade_test.py, repo=cookingcodewithme/kafka, head=c534bf45cecb8c06abea699b62084d6aee74705c, licenses=["Apache-2.0"]
max_issues: count=4, min=2021-06-11T09:13:34.000Z, max=2022-01-31T09:37:19.000Z, path=tests/kafkatest/tests/core/security_rolling_upgrade_test.py, repo=cookingcodewithme/kafka, head=c534bf45cecb8c06abea699b62084d6aee74705c, licenses=["Apache-2.0"]
max_forks: count=3, min=2018-07-11T16:56:43.000Z, max=2019-04-19T23:58:24.000Z, path=tests/kafkatest/tests/core/security_rolling_upgrade_test.py, repo=cookingcodewithme/kafka, head=c534bf45cecb8c06abea699b62084d6aee74705c, licenses=["Apache-2.0"]
content:
```python
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from kafkatest.services.security.security_config import SecurityConfig
from kafkatest.services.zookeeper import ZookeeperService
from kafkatest.services.kafka import KafkaService
from kafkatest.services.verifiable_producer import VerifiableProducer
from kafkatest.services.console_consumer import ConsoleConsumer
from kafkatest.utils import is_int
from kafkatest.tests.produce_consume_validate import ProduceConsumeValidateTest
from ducktape.mark import matrix
from ducktape.mark.resource import cluster
from kafkatest.services.security.kafka_acls import ACLs
import time
class TestSecurityRollingUpgrade(ProduceConsumeValidateTest):
"""Tests a rolling upgrade from PLAINTEXT to a secured cluster
"""
def __init__(self, test_context):
super(TestSecurityRollingUpgrade, self).__init__(test_context=test_context)
def setUp(self):
self.acls = ACLs(self.test_context)
self.topic = "test_topic"
self.group = "group"
self.producer_throughput = 100
self.num_producers = 1
self.num_consumers = 1
self.zk = ZookeeperService(self.test_context, num_nodes=1)
self.kafka = KafkaService(self.test_context, num_nodes=3, zk=self.zk, topics={self.topic: {
"partitions": 3,
"replication-factor": 3,
'configs': {"min.insync.replicas": 2}}})
self.zk.start()
def create_producer_and_consumer(self):
self.producer = VerifiableProducer(
self.test_context, self.num_producers, self.kafka, self.topic,
throughput=self.producer_throughput)
self.consumer = ConsoleConsumer(
self.test_context, self.num_consumers, self.kafka, self.topic,
consumer_timeout_ms=60000, message_validator=is_int)
self.consumer.group_id = "group"
def bounce(self):
self.kafka.start_minikdc_if_necessary()
self.kafka.restart_cluster(after_each_broker_restart = lambda: time.sleep(10))
def roll_in_secured_settings(self, client_protocol, broker_protocol):
# Roll cluster to include inter broker security protocol.
self.kafka.setup_interbroker_listener(broker_protocol)
self.bounce()
# Roll cluster to disable PLAINTEXT port
self.kafka.close_port(SecurityConfig.PLAINTEXT)
self.set_authorizer_and_bounce(client_protocol, broker_protocol)
def set_authorizer_and_bounce(self, client_protocol, broker_protocol, authorizer_class_name = KafkaService.ACL_AUTHORIZER):
self.kafka.authorizer_class_name = authorizer_class_name
# Force use of direct ZooKeeper access due to SecurityDisabledException: No Authorizer is configured on the broker.
self.acls.set_acls(client_protocol, self.kafka, self.topic, self.group, force_use_zk_connection=True)
self.acls.set_acls(broker_protocol, self.kafka, self.topic, self.group, force_use_zk_connection=True)
self.bounce() # enables the authorizer
def open_secured_port(self, client_protocol):
self.kafka.security_protocol = client_protocol
self.kafka.open_port(client_protocol)
self.kafka.start_minikdc_if_necessary()
self.bounce()
def add_sasl_mechanism(self, new_client_sasl_mechanism):
self.kafka.client_sasl_mechanism = new_client_sasl_mechanism
self.kafka.start_minikdc_if_necessary()
self.bounce()
def roll_in_sasl_mechanism(self, security_protocol, new_sasl_mechanism):
# Roll cluster to update inter-broker SASL mechanism. This disables the old mechanism.
self.kafka.interbroker_sasl_mechanism = new_sasl_mechanism
self.bounce()
# Bounce again with ACLs for new mechanism. Use old SimpleAclAuthorizer here to ensure that is also tested.
self.set_authorizer_and_bounce(security_protocol, security_protocol, KafkaService.SIMPLE_AUTHORIZER)
def add_separate_broker_listener(self, broker_security_protocol, broker_sasl_mechanism):
# Enable the new internal listener on all brokers first
self.kafka.open_port(self.kafka.INTERBROKER_LISTENER_NAME)
self.kafka.port_mappings[self.kafka.INTERBROKER_LISTENER_NAME].security_protocol = broker_security_protocol
self.kafka.client_sasl_mechanism = broker_sasl_mechanism
self.bounce()
# Update inter-broker listener after all brokers have been updated to enable the new listener
self.kafka.setup_interbroker_listener(broker_security_protocol, True)
self.kafka.interbroker_sasl_mechanism = broker_sasl_mechanism
self.bounce()
def remove_separate_broker_listener(self, client_security_protocol, client_sasl_mechanism):
# separate interbroker listener port will be closed automatically in setup_interbroker_listener
# if not using separate interbroker listener
self.kafka.setup_interbroker_listener(client_security_protocol, False)
self.kafka.interbroker_sasl_mechanism = client_sasl_mechanism
self.bounce()
@cluster(num_nodes=8)
@matrix(client_protocol=[SecurityConfig.SSL])
@cluster(num_nodes=9)
@matrix(client_protocol=[SecurityConfig.SASL_PLAINTEXT, SecurityConfig.SASL_SSL])
def test_rolling_upgrade_phase_one(self, client_protocol):
"""
        Start with a PLAINTEXT cluster, open a SECURED port via a rolling upgrade, ensuring we can produce
        and consume throughout over PLAINTEXT. Finally, check that we can produce and consume over the new secured port.
"""
self.kafka.setup_interbroker_listener(SecurityConfig.PLAINTEXT)
self.kafka.security_protocol = SecurityConfig.PLAINTEXT
self.kafka.start()
# Create PLAINTEXT producer and consumer
self.create_producer_and_consumer()
# Rolling upgrade, opening a secure protocol, ensuring the Plaintext producer/consumer continues to run
self.run_produce_consume_validate(self.open_secured_port, client_protocol)
# Now we can produce and consume via the secured port
self.kafka.security_protocol = client_protocol
self.create_producer_and_consumer()
self.run_produce_consume_validate(lambda: time.sleep(1))
@cluster(num_nodes=8)
@matrix(client_protocol=[SecurityConfig.SASL_SSL, SecurityConfig.SSL, SecurityConfig.SASL_PLAINTEXT],
broker_protocol=[SecurityConfig.SASL_SSL, SecurityConfig.SSL, SecurityConfig.SASL_PLAINTEXT])
def test_rolling_upgrade_phase_two(self, client_protocol, broker_protocol):
"""
Start with a PLAINTEXT cluster with a second Secured port open (i.e. result of phase one).
A third secure port is also open if inter-broker and client protocols are different.
Start a Producer and Consumer via the SECURED client port
        Incrementally upgrade the inter-broker protocol to be the secure broker protocol
Incrementally upgrade again to add ACLs as well as disabling the PLAINTEXT port
Ensure the producer and consumer ran throughout
"""
#Given we have a broker that has both secure and PLAINTEXT ports open
self.kafka.security_protocol = client_protocol
self.kafka.setup_interbroker_listener(SecurityConfig.PLAINTEXT, use_separate_listener=False)
self.kafka.open_port(broker_protocol)
self.kafka.start()
#Create Secured Producer and Consumer
self.create_producer_and_consumer()
#Roll in the security protocol. Disable Plaintext. Ensure we can produce and Consume throughout
self.run_produce_consume_validate(self.roll_in_secured_settings, client_protocol, broker_protocol)
@cluster(num_nodes=9)
@matrix(new_client_sasl_mechanism=[SecurityConfig.SASL_MECHANISM_PLAIN])
def test_rolling_upgrade_sasl_mechanism_phase_one(self, new_client_sasl_mechanism):
"""
        Start with a SASL/GSSAPI cluster, add a new SASL mechanism via a rolling upgrade, ensuring we can produce
        and consume throughout over SASL/GSSAPI. Finally, check that we can produce and consume using the new mechanism.
"""
self.kafka.setup_interbroker_listener(SecurityConfig.SASL_SSL, use_separate_listener=False)
self.kafka.security_protocol = SecurityConfig.SASL_SSL
self.kafka.client_sasl_mechanism = SecurityConfig.SASL_MECHANISM_GSSAPI
self.kafka.interbroker_sasl_mechanism = SecurityConfig.SASL_MECHANISM_GSSAPI
self.kafka.start()
# Create SASL/GSSAPI producer and consumer
self.create_producer_and_consumer()
# Rolling upgrade, adding new SASL mechanism, ensuring the GSSAPI producer/consumer continues to run
self.run_produce_consume_validate(self.add_sasl_mechanism, new_client_sasl_mechanism)
# Now we can produce and consume using the new SASL mechanism
self.kafka.client_sasl_mechanism = new_client_sasl_mechanism
self.create_producer_and_consumer()
self.run_produce_consume_validate(lambda: time.sleep(1))
@cluster(num_nodes=8)
@matrix(new_sasl_mechanism=[SecurityConfig.SASL_MECHANISM_PLAIN])
def test_rolling_upgrade_sasl_mechanism_phase_two(self, new_sasl_mechanism):
"""
Start with a SASL cluster with GSSAPI for inter-broker and a second mechanism for clients (i.e. result of phase one).
Start Producer and Consumer using the second mechanism
Incrementally upgrade to set inter-broker to the second mechanism and disable GSSAPI
Incrementally upgrade again to add ACLs
Ensure the producer and consumer run throughout
"""
#Start with a broker that has GSSAPI for inter-broker and a second mechanism for clients
self.kafka.security_protocol = SecurityConfig.SASL_SSL
self.kafka.setup_interbroker_listener(SecurityConfig.SASL_SSL, use_separate_listener=False)
self.kafka.client_sasl_mechanism = new_sasl_mechanism
self.kafka.interbroker_sasl_mechanism = SecurityConfig.SASL_MECHANISM_GSSAPI
self.kafka.start()
#Create Producer and Consumer using second mechanism
self.create_producer_and_consumer()
#Roll in the second SASL mechanism for inter-broker, disabling first mechanism. Ensure we can produce and consume throughout
self.run_produce_consume_validate(self.roll_in_sasl_mechanism, self.kafka.security_protocol, new_sasl_mechanism)
@cluster(num_nodes=9)
def test_enable_separate_interbroker_listener(self):
"""
Start with a cluster that has a single PLAINTEXT listener.
Start producing/consuming on PLAINTEXT port.
While doing that, do a rolling restart to enable separate secured interbroker port
"""
self.kafka.security_protocol = SecurityConfig.PLAINTEXT
self.kafka.setup_interbroker_listener(SecurityConfig.PLAINTEXT, use_separate_listener=False)
self.kafka.start()
self.create_producer_and_consumer()
self.run_produce_consume_validate(self.add_separate_broker_listener, SecurityConfig.SASL_SSL,
SecurityConfig.SASL_MECHANISM_PLAIN)
@cluster(num_nodes=9)
def test_disable_separate_interbroker_listener(self):
"""
Start with a cluster that has two listeners, one on SSL (clients), another on SASL_SSL (broker-to-broker).
Start producer and consumer on SSL listener.
Close dedicated interbroker listener via rolling restart.
Ensure we can produce and consume via SSL listener throughout.
"""
client_protocol = SecurityConfig.SSL
client_sasl_mechanism = SecurityConfig.SASL_MECHANISM_GSSAPI
self.kafka.security_protocol = client_protocol
self.kafka.client_sasl_mechanism = client_sasl_mechanism
self.kafka.setup_interbroker_listener(SecurityConfig.SASL_SSL, use_separate_listener=True)
self.kafka.interbroker_sasl_mechanism = SecurityConfig.SASL_MECHANISM_GSSAPI
self.kafka.start()
# create producer and consumer via client security protocol
self.create_producer_and_consumer()
# run produce/consume/validate loop while disabling a separate interbroker listener via rolling restart
self.run_produce_consume_validate(
self.remove_separate_broker_listener, client_protocol, client_sasl_mechanism)
```

avg_line_length: 51.476378 | max_line_length: 132 | alphanum_fraction: 0.749369
hexsha: 4a176c8d07ccffce39b81f46dac6f2e489fb251d | size: 4,050 | ext: py | lang: Python
max_stars: count=5, min=2020-06-19T13:39:59.000Z, max=2022-03-04T13:05:58.000Z, path=data_process/handle_n2v.py, repo=MortonWang/geo_IF, head=4e27aeb9e005cdfb151777bc730de6d8372d1b7f, licenses=["MIT"]
max_issues: count=null, min=null, max=null, path=data_process/handle_n2v.py, repo=MortonWang/geo_IF, head=4e27aeb9e005cdfb151777bc730de6d8372d1b7f, licenses=["MIT"]
max_forks: count=null, min=null, max=null, path=data_process/handle_n2v.py, repo=MortonWang/geo_IF, head=4e27aeb9e005cdfb151777bc730de6d8372d1b7f, licenses=["MIT"]
content:
```python
'''
Reference implementation of node2vec.
Author: Aditya Grover
For more details, refer to the paper:
node2vec: Scalable Feature Learning for Networks
Aditya Grover and Jure Leskovec
Knowledge Discovery and Data Mining (KDD), 2016
'''
import argparse
import networkx as nx
import numpy as np
from gensim.models import Word2Vec
import node2vec as n2v
def parse_args():
'''
Parses the node2vec arguments.
'''
parser = argparse.ArgumentParser(description="Run node2vec.")
parser.add_argument('--input', nargs='?', default='graph/karate.edgelist', help='Input graph path')
parser.add_argument('--output', nargs='?', default='emb/karate.emb', help='Embeddings path')
parser.add_argument('--dimensions', type=int, default=128, help='Number of dimensions. Default is 128.')
parser.add_argument('--walk-length', type=int, default=80, help='Length of walk per source. Default is 80.')
parser.add_argument('--num-walks', type=int, default=10, help='Number of walks per source. Default is 10.')
parser.add_argument('--window-size', type=int, default=10, help='Context size for optimization. Default is 10.')
parser.add_argument('--iter', default=1, type=int, help='Number of epochs in SGD')
parser.add_argument('--workers', type=int, default=8, help='Number of parallel workers. Default is 8.')
parser.add_argument('--p', type=float, default=1, help='Return hyperparameter. Default is 1.')
parser.add_argument('--q', type=float, default=1, help='Inout hyperparameter. Default is 1.')
parser.add_argument('--weighted', dest='weighted', action='store_true', default=False,
help='Boolean specifying (un)weighted. Default is unweighted.')
parser.add_argument('--directed', dest='directed', action='store_true', default=False,
help='Graph is (un)directed. Default is undirected.')
return parser.parse_args()
def read_graph():
'''
Reads the input network in networkx.
'''
if args.weighted:
G = nx.read_edgelist(args.input, nodetype=int, data=(('weight', float),), create_using=nx.DiGraph())
else:
G = nx.read_edgelist(args.input, nodetype=int, create_using=nx.DiGraph())
for edge in G.edges():
G[edge[0]][edge[1]]['weight'] = 1
if not args.directed:
G = G.to_undirected()
return G
def learn_embeddings(walks):
'''
Learn embeddings by optimizing the Skipgram objective using SGD.
'''
walks = [list(map(str, walk)) for walk in walks]
model = Word2Vec(walks, size=args.dimensions, window=args.window_size, min_count=0, sg=1, workers=args.workers,
iter=args.iter)
model.wv.save_word2vec_format(args.output)
return
def main(args):
'''
Pipeline for representational learning for all nodes in a graph.
'''
nx_G = read_graph()
G = n2v.Graph(nx_G, args.directed, args.p, args.q)
G.preprocess_transition_probs()
walks = G.simulate_walks(args.num_walks, args.walk_length)
learn_embeddings(walks)
def convert_emd_to_npy_format(file_in, file_out):
    def str_to_num(str_temp):
        return float(str_temp)
def by_node_id(n):
return n[0]
def remove_node_id(n):
return n[1:]
all_nodes = list()
with open(file_in, 'r') as f:
for line in f:
line = line.split(' ')
line = list(map(str_to_num, line))
all_nodes.append(tuple(line))
all_nodes = all_nodes[1:] # remove the first line: num_of_nodes, dim_len
all_nodes_sorted = sorted(all_nodes, key=by_node_id)
all_nodes_remove_id = list(map(remove_node_id, all_nodes_sorted))
node_array = np.array(all_nodes_remove_id)
np.save(file_out, node_array)
print("done.")
args = parse_args()
main(args)
# --input ../dataset_cmu/edge/edge_pair.ungraph --output ../dataset_cmu/edge/out_of_order.emd --dimensions 128
convert_emd_to_npy_format(file_in='../dataset_cmu/edge/out_of_order.emd', file_out="../dataset_cmu/node2vec_dim128")
```

avg_line_length: 32.4 | max_line_length: 116 | alphanum_fraction: 0.678025
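A minimal sketch of consuming the array written by `convert_emd_to_npy_format` above. After the sort-and-strip step, row `i` holds the embedding of node id `i` (assuming node ids are contiguous from 0); `np.save` appends the `.npy` suffix to the output path used in the script.

```python
# Minimal sketch: load the embedding matrix saved by the script above.
# Row i is the 128-dimensional node2vec embedding of node id i.
import numpy as np

emb = np.load("../dataset_cmu/node2vec_dim128.npy")
print(emb.shape)   # (num_nodes, 128)
print(emb[0, :5])  # first five dimensions of node 0's embedding
```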
hexsha: 4a176c8d366f9fd00d1b4342ea323b9db685d11c | size: 6,931 | ext: py | lang: Python
max_stars: count=8, min=2016-10-18T15:17:09.000Z, max=2021-07-15T08:36:28.000Z, path=more_configs/config_google_and_zhwikipedia.py, repo=zxq2233/zmirror, head=a304e36e84948636215b7d8e2c921b94c4e60d26, licenses=["MIT"]
max_issues: count=null, min=null, max=null, path=more_configs/config_google_and_zhwikipedia.py, repo=zxq2233/zmirror, head=a304e36e84948636215b7d8e2c921b94c4e60d26, licenses=["MIT"]
max_forks: count=4, min=2017-12-20T16:16:16.000Z, max=2020-08-04T05:24:15.000Z, path=more_configs/config_google_and_zhwikipedia.py, repo=zxq2233/zmirror, head=a304e36e84948636215b7d8e2c921b94c4e60d26, licenses=["MIT"]
content:
```python
# coding=utf-8
# This is a sample configuration file for mirroring Google and Chinese Wikipedia (seamlessly integrated)
#
# Usage:
# Copy this file into the same directory as zmirror.py and rename it to config.py
#
# For a detailed description of every option, see the corresponding section of config_default.py
# This configuration file assumes your server itself is outside the GFW
# If the server is inside the GFW (or you are testing locally), adjust the settings under `Proxy Settings`
#
# Chinese Wikipedia often shows up in Google search results, so it is mirrored as well.
# Google and Chinese Wikipedia are separated using this program's mirror-isolation feature,
# which keeps the Chinese Wikipedia mirror working normally
#
# This configuration file tries to reproduce a fully functional Google.
# Due to limitations of the program itself, the entire [Google site family] still cannot be mirrored [completely]
# Later versions will keep adding usable sites
#
# The following Google services are fully usable:
# Google web search / Scholar / Images / News / Books / Video (search) / Finance / app search / Translate / cached pages / ...
# Google search seamlessly integrated with Chinese Wikipedia
# The following services are partially usable:
# Google Maps (the map itself works, the left sidebar renders incorrectly) / Google+ (cannot log in)
# The following services are not usable yet (the login problem cannot currently be solved):
# everything that requires login, Docs and the like
#
# However, because this tries to reverse-proxy all of Google, computation is somewhat slower
# This file also ships a lightweight configuration: comment out every line that ends with
# '# comment out this line for a lightweight Google mirror'
# Google features available in the lightweight configuration (fully working, no glitches):
# Google search / Scholar / image search / video search / seamless integration with Chinese Wikipedia
# Although lightweight, it is still the best Google mirror solution (features / integration / user experience / speed)
# to be found on today's internet
#
# Speed comparison: processing the Google homepage (169.41 KB) on a 256M Ramnode OpenVZ VPS (Intel E3 3.3GHz),
# averaged over 5 runs, excluding request time, measuring computation time only
# Full-featured: 0.167 s
# Lightweight: 0.045 s
# Github: https://github.com/Aploium/zmirror
# ############## Local Domain Settings ##############
my_host_name = '127.0.0.1'
my_host_scheme = 'http://'
# ############## Target Domain Settings ##############
target_domain = 'www.google.com.hk'
target_scheme = 'https://'
# Most of the domains below were collected automatically via `enable_automatic_domains_whitelist`; I just copy-pasted them here.
# When mirroring a new site in practice, only a handful of domains need to be added by hand.
# Automatic collection (if enabled) will keep reporting new domains to you.
external_domains = (
'www.google.com',
'webcache.googleusercontent.com', # Google网页快照
'images.google.com.hk',
'images.google.com',
'apis.google.com',
# Google学术
'scholar.google.com.hk',
'scholar.google.com',
# 中文维基百科
'zh.wikipedia.org',
'zh.m.wikipedia.org',
'upload.wikipedia.org',
'meta.wikimedia.org',
'login.wikimedia.org',
# Google静态资源域名
'ssl.gstatic.com',
'www.gstatic.com',
'encrypted-tbn0.gstatic.com',
'encrypted-tbn1.gstatic.com',
'encrypted-tbn2.gstatic.com',
'encrypted-tbn3.gstatic.com',
'csi.gstatic.com',
'fonts.googleapis.com',
    # Google login support; login is still full of bugs for now, so it is fine to comment these out
'accounts.google.com',
'accounts.youtube.com',
'accounts.google.com.hk',
'myaccount.google.com',
'myaccount.google.com.hk',
    # # For a lightweight Google mirror, comment out the whole pile of domains below... they slow down content rewriting
    'translate.google.com',  # comment out this line for a lightweight Google mirror
    'translate.google.com.hk',  # comment out this line for a lightweight Google mirror
    'video.google.com.hk',  # comment out this line for a lightweight Google mirror
    'books.google.com',  # comment out this line for a lightweight Google mirror
    'cloud.google.com',  # comment out this line for a lightweight Google mirror
    'analytics.google.com',  # comment out this line for a lightweight Google mirror
    'security.google.com',  # comment out this line for a lightweight Google mirror
    'investor.google.com',  # comment out this line for a lightweight Google mirror
    'families.google.com',  # comment out this line for a lightweight Google mirror
    'clients1.google.com',  # comment out this line for a lightweight Google mirror
    'clients2.google.com',  # comment out this line for a lightweight Google mirror
    'clients3.google.com',  # comment out this line for a lightweight Google mirror
    'clients4.google.com',  # comment out this line for a lightweight Google mirror
    'clients5.google.com',  # comment out this line for a lightweight Google mirror
    'talkgadget.google.com',  # comment out this line for a lightweight Google mirror
    'news.google.com.hk',  # comment out this line for a lightweight Google mirror
    'news.google.com',  # comment out this line for a lightweight Google mirror
    'support.google.com',  # comment out this line for a lightweight Google mirror
    'docs.google.com',  # comment out this line for a lightweight Google mirror
    'books.google.com.hk',  # comment out this line for a lightweight Google mirror
    'chrome.google.com',  # comment out this line for a lightweight Google mirror
    'profiles.google.com',  # comment out this line for a lightweight Google mirror
    'feedburner.google.com',  # comment out this line for a lightweight Google mirror
    'cse.google.com',  # comment out this line for a lightweight Google mirror
    'sites.google.com',  # comment out this line for a lightweight Google mirror
    'productforums.google.com',  # comment out this line for a lightweight Google mirror
    'encrypted.google.com',  # comment out this line for a lightweight Google mirror
    'm.google.com',  # comment out this line for a lightweight Google mirror
    'research.google.com',  # comment out this line for a lightweight Google mirror
    'maps.google.com.hk',  # comment out this line for a lightweight Google mirror
    'hangouts.google.com',  # comment out this line for a lightweight Google mirror
    'developers.google.com',  # comment out this line for a lightweight Google mirror
    'get.google.com',  # comment out this line for a lightweight Google mirror
    'afp.google.com',  # comment out this line for a lightweight Google mirror
    'groups.google.com',  # comment out this line for a lightweight Google mirror
    'payments.google.com',  # comment out this line for a lightweight Google mirror
    'photos.google.com',  # comment out this line for a lightweight Google mirror
    'play.google.com',  # comment out this line for a lightweight Google mirror
    'mail.google.com',  # comment out this line for a lightweight Google mirror
    'code.google.com',  # comment out this line for a lightweight Google mirror
    'tools.google.com',  # comment out this line for a lightweight Google mirror
    'drive.google.com',  # comment out this line for a lightweight Google mirror
    'script.google.com',  # comment out this line for a lightweight Google mirror
    'goto.google.com',  # comment out this line for a lightweight Google mirror
    'calendar.google.com',  # comment out this line for a lightweight Google mirror
    'wallet.google.com',  # comment out this line for a lightweight Google mirror
    'privacy.google.com',  # comment out this line for a lightweight Google mirror
    'ipv4.google.com',  # comment out this line for a lightweight Google mirror
    'video.google.com',  # comment out this line for a lightweight Google mirror
    'store.google.com',  # comment out this line for a lightweight Google mirror
    'fi.google.com',  # comment out this line for a lightweight Google mirror
    'apps.google.com',  # comment out this line for a lightweight Google mirror
    'events.google.com',  # comment out this line for a lightweight Google mirror
    'notifications.google.com',  # comment out this line for a lightweight Google mirror
    'plus.google.com',  # comment out this line for a lightweight Google mirror
    'scholar.googleusercontent.com',  # comment out this line for a lightweight Google mirror
    'translate.googleusercontent.com',  # comment out this line for a lightweight Google mirror
    't0.gstatic.com',  # comment out this line for a lightweight Google mirror
    't1.gstatic.com',  # comment out this line for a lightweight Google mirror
    't2.gstatic.com',  # comment out this line for a lightweight Google mirror
    't3.gstatic.com',  # comment out this line for a lightweight Google mirror
    's-v6exp1-ds.metric.gstatic.com',  # comment out this line for a lightweight Google mirror
    'ci4.googleusercontent.com',  # comment out this line for a lightweight Google mirror
    'gp3.googleusercontent.com',  # comment out this line for a lightweight Google mirror
    # For Google Map (optional)
    'maps-api-ssl.google.com',  # comment out this line for a lightweight Google mirror
    'maps.gstatic.com',  # comment out this line for a lightweight Google mirror
    'maps.google.com',  # comment out this line for a lightweight Google mirror
    'fonts.gstatic.com',  # comment out this line for a lightweight Google mirror
    'lh1.googleusercontent.com',  # comment out this line for a lightweight Google mirror
    'lh2.googleusercontent.com',  # comment out this line for a lightweight Google mirror
    'lh3.googleusercontent.com',  # comment out this line for a lightweight Google mirror
    'lh4.googleusercontent.com',  # comment out this line for a lightweight Google mirror
    'lh5.googleusercontent.com',  # comment out this line for a lightweight Google mirror
    'lh6.googleusercontent.com',  # comment out this line for a lightweight Google mirror
    # 'upload.wikimedia.org',
    'id.google.com.hk',  # comment out this line for a lightweight Google mirror
    'id.google.com',  # comment out this line for a lightweight Google mirror
)
force_https_domains = 'ALL'
# If you want a lightweight Google mirror, be sure to comment out the two lines below; otherwise large numbers
# of domains get added dynamically and things slow down quickly (though more Google services become supported)
enable_automatic_domains_whitelist = True  # comment out this line for a lightweight Google mirror
domains_whitelist_auto_add_glob_list = ('*.google.com', '*.gstatic.com', '*.google.com.hk')  # comment out this line for a lightweight Google mirror
# ############## Proxy Settings ##############
# If you use this configuration inside the GFW, specify an HTTP proxy located outside the GFW
is_use_proxy = False
requests_proxies = dict(
http='http://127.0.0.1:8123',
https='https://127.0.0.1:8123',
)
# ############## Sites Isolation ##############
enable_individual_sites_isolation = True
# Mirror isolation, used so that Google and Wikipedia can coexist
isolated_domains = {'zh.wikipedia.org', 'zh.m.wikipedia.org'}
# ############## URL Custom Redirect ##############
# A convenience setting: when you visit /wiki, the program automatically redirects to the long Wikipedia homepage URL below
url_custom_redirect_enable = True
url_custom_redirect_list = {'/wiki': '/extdomains/https-zh.wikipedia.org/'}
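# A hypothetical illustration (not part of the original config): further shortcuts can be
# added to the same dict by following the '/extdomains/...' pattern used above, e.g.:
# url_custom_redirect_list = {
#     '/wiki': '/extdomains/https-zh.wikipedia.org/',
#     '/scholar': '/extdomains/https-scholar.google.com/',
# }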
| 36.098958
| 114
| 0.700765
|
4a176ced3371416d03eb2716453db5c987880a3b
| 228
|
py
|
Python
|
Homework1/join_methods.py
|
Tavinson/BigDataClass_HW1
|
017ea807cb7698164c0c5178a1df17660ef383ac
|
[
"Apache-2.0"
] | null | null | null |
Homework1/join_methods.py
|
Tavinson/BigDataClass_HW1
|
017ea807cb7698164c0c5178a1df17660ef383ac
|
[
"Apache-2.0"
] | null | null | null |
Homework1/join_methods.py
|
Tavinson/BigDataClass_HW1
|
017ea807cb7698164c0c5178a1df17660ef383ac
|
[
"Apache-2.0"
] | null | null | null |
def join_dataframes_one_column(left, right, column, method):
    """Join two DataFrames on a single shared column.

    `method` is the join type (e.g. 'inner', 'left', 'outer'); the signature
    matches PySpark's DataFrame.join, which appears to be the intended target.
    """
    return left.join(right, column, how=method)

def join_dataframes_two_columns(left, right, column1, column2, method):
    """Join two DataFrames on a pair of shared columns."""
    return left.join(right, [column1, column2], how=method)
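# A hypothetical usage sketch (assumes PySpark, whose DataFrame.join matches this
# signature; not part of the original homework file):
#
# from pyspark.sql import SparkSession
# spark = SparkSession.builder.getOrCreate()
# left = spark.createDataFrame([(1, "a"), (2, "b")], ["id", "left_val"])
# right = spark.createDataFrame([(1, "x")], ["id", "right_val"])
# join_dataframes_one_column(left, right, "id", "inner").show()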
| 32.571429
| 67
| 0.811404
|
4a176d484449b6700f9d296a482f3b9be9991926
| 9,031
|
py
|
Python
|
contrib/testgen/gen_key_io_test_vectors.py
|
BlockMechanic/crown
|
e6b1873ca79c484a3621e503eb8ce464f85dd2c7
|
[
"MIT"
] | 1
|
2021-10-12T05:27:56.000Z
|
2021-10-12T05:27:56.000Z
|
contrib/testgen/gen_key_io_test_vectors.py
|
BlockMechanic/crown
|
e6b1873ca79c484a3621e503eb8ce464f85dd2c7
|
[
"MIT"
] | 15
|
2022-01-14T09:13:52.000Z
|
2022-03-21T09:40:29.000Z
|
contrib/testgen/gen_key_io_test_vectors.py
|
BlockMechanic/crown
|
e6b1873ca79c484a3621e503eb8ce464f85dd2c7
|
[
"MIT"
] | 2
|
2021-10-12T05:39:32.000Z
|
2022-01-03T10:41:04.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2012-2020 The Crown Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Generate valid and invalid base58 address and private key test vectors.
Usage:
PYTHONPATH=../../test/functional/test_framework ./gen_key_io_test_vectors.py valid 50 > ../../src/test/data/key_io_valid.json
PYTHONPATH=../../test/functional/test_framework ./gen_key_io_test_vectors.py invalid 50 > ../../src/test/data/key_io_invalid.json
'''
# 2012 Wladimir J. van der Laan
# Released under MIT License
import os
from itertools import islice
from base58 import b58encode_chk, b58decode_chk, b58chars
import random
from segwit_addr import bech32_encode, decode_segwit_address, convertbits, CHARSET
# key types
PUBKEY_ADDRESS = 0
SCRIPT_ADDRESS = 5
PUBKEY_ADDRESS_TEST = 111
SCRIPT_ADDRESS_TEST = 196
PUBKEY_ADDRESS_REGTEST = 111
SCRIPT_ADDRESS_REGTEST = 196
PRIVKEY = 128
PRIVKEY_TEST = 239
PRIVKEY_REGTEST = 239
# script
OP_0 = 0x00
OP_1 = 0x51
OP_2 = 0x52
OP_16 = 0x60
OP_DUP = 0x76
OP_EQUAL = 0x87
OP_EQUALVERIFY = 0x88
OP_HASH160 = 0xa9
OP_CHECKSIG = 0xac
pubkey_prefix = (OP_DUP, OP_HASH160, 20)
pubkey_suffix = (OP_EQUALVERIFY, OP_CHECKSIG)
script_prefix = (OP_HASH160, 20)
script_suffix = (OP_EQUAL,)
p2wpkh_prefix = (OP_0, 20)
p2wsh_prefix = (OP_0, 32)
metadata_keys = ['isPrivkey', 'chain', 'isCompressed', 'tryCaseFlip']
# templates for valid sequences
templates = [
# prefix, payload_size, suffix, metadata, output_prefix, output_suffix
# None = N/A
((PUBKEY_ADDRESS,), 20, (), (False, 'main', None, None), pubkey_prefix, pubkey_suffix),
((SCRIPT_ADDRESS,), 20, (), (False, 'main', None, None), script_prefix, script_suffix),
((PUBKEY_ADDRESS_TEST,), 20, (), (False, 'test', None, None), pubkey_prefix, pubkey_suffix),
((SCRIPT_ADDRESS_TEST,), 20, (), (False, 'test', None, None), script_prefix, script_suffix),
((PUBKEY_ADDRESS_REGTEST,), 20, (), (False, 'regtest', None, None), pubkey_prefix, pubkey_suffix),
((SCRIPT_ADDRESS_REGTEST,), 20, (), (False, 'regtest', None, None), script_prefix, script_suffix),
((PRIVKEY,), 32, (), (True, 'main', False, None), (), ()),
((PRIVKEY,), 32, (1,), (True, 'main', True, None), (), ()),
((PRIVKEY_TEST,), 32, (), (True, 'test', False, None), (), ()),
((PRIVKEY_TEST,), 32, (1,), (True, 'test', True, None), (), ()),
((PRIVKEY_REGTEST,), 32, (), (True, 'regtest', False, None), (), ()),
((PRIVKEY_REGTEST,), 32, (1,), (True, 'regtest', True, None), (), ())
]
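# As an illustrative reading of the first template above (commentary added here, not in
# the original file): a mainnet P2PKH address is the base58check encoding of version byte
# 0x00 followed by a 20-byte hash, and the expected scriptPubKey is
# OP_DUP OP_HASH160 <20-byte hash> OP_EQUALVERIFY OP_CHECKSIG.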
# templates for valid bech32 sequences
bech32_templates = [
# hrp, version, witprog_size, metadata, output_prefix
('bc', 0, 20, (False, 'main', None, True), p2wpkh_prefix),
('bc', 0, 32, (False, 'main', None, True), p2wsh_prefix),
('bc', 1, 2, (False, 'main', None, True), (OP_1, 2)),
('tb', 0, 20, (False, 'test', None, True), p2wpkh_prefix),
('tb', 0, 32, (False, 'test', None, True), p2wsh_prefix),
('tb', 2, 16, (False, 'test', None, True), (OP_2, 16)),
('bcrt', 0, 20, (False, 'regtest', None, True), p2wpkh_prefix),
('bcrt', 0, 32, (False, 'regtest', None, True), p2wsh_prefix),
('bcrt', 16, 40, (False, 'regtest', None, True), (OP_16, 40))
]
# templates for invalid bech32 sequences
bech32_ng_templates = [
# hrp, version, witprog_size, invalid_bech32, invalid_checksum, invalid_char
('tc', 0, 20, False, False, False),
('tb', 17, 32, False, False, False),
('bcrt', 3, 1, False, False, False),
('bc', 15, 41, False, False, False),
('tb', 0, 16, False, False, False),
('bcrt', 0, 32, True, False, False),
('bc', 0, 16, True, False, False),
('tb', 0, 32, False, True, False),
('bcrt', 0, 20, False, False, True)
]
def is_valid(v):
'''Check vector v for validity'''
if len(set(v) - set(b58chars)) > 0:
return is_valid_bech32(v)
result = b58decode_chk(v)
if result is None:
return is_valid_bech32(v)
for template in templates:
prefix = bytearray(template[0])
suffix = bytearray(template[2])
if result.startswith(prefix) and result.endswith(suffix):
if (len(result) - len(prefix) - len(suffix)) == template[1]:
return True
return is_valid_bech32(v)
def is_valid_bech32(v):
'''Check vector v for bech32 validity'''
for hrp in ['bc', 'tb', 'bcrt']:
if decode_segwit_address(hrp, v) != (None, None):
return True
return False
def gen_valid_base58_vector(template):
'''Generate valid base58 vector'''
prefix = bytearray(template[0])
payload = bytearray(os.urandom(template[1]))
suffix = bytearray(template[2])
dst_prefix = bytearray(template[4])
dst_suffix = bytearray(template[5])
rv = b58encode_chk(prefix + payload + suffix)
return rv, dst_prefix + payload + dst_suffix
def gen_valid_bech32_vector(template):
'''Generate valid bech32 vector'''
hrp = template[0]
witver = template[1]
witprog = bytearray(os.urandom(template[2]))
dst_prefix = bytearray(template[4])
rv = bech32_encode(hrp, [witver] + convertbits(witprog, 8, 5))
return rv, dst_prefix + witprog
def gen_valid_vectors():
'''Generate valid test vectors'''
glist = [gen_valid_base58_vector, gen_valid_bech32_vector]
tlist = [templates, bech32_templates]
while True:
for template, valid_vector_generator in [(t, g) for g, l in zip(glist, tlist) for t in l]:
rv, payload = valid_vector_generator(template)
assert is_valid(rv)
metadata = {x: y for x, y in zip(metadata_keys,template[3]) if y is not None}
hexrepr = payload.hex()
yield (rv, hexrepr, metadata)
def gen_invalid_base58_vector(template):
'''Generate possibly invalid vector'''
# kinds of invalid vectors:
# invalid prefix
# invalid payload length
# invalid (randomized) suffix (add random data)
# corrupt checksum
corrupt_prefix = randbool(0.2)
randomize_payload_size = randbool(0.2)
corrupt_suffix = randbool(0.2)
if corrupt_prefix:
prefix = os.urandom(1)
else:
prefix = bytearray(template[0])
if randomize_payload_size:
payload = os.urandom(max(int(random.expovariate(0.5)), 50))
else:
payload = os.urandom(template[1])
if corrupt_suffix:
suffix = os.urandom(len(template[2]))
else:
suffix = bytearray(template[2])
val = b58encode_chk(prefix + payload + suffix)
if random.randint(0,10)<1: # line corruption
if randbool(): # add random character to end
val += random.choice(b58chars)
else: # replace random character in the middle
n = random.randint(0, len(val))
val = val[0:n] + random.choice(b58chars) + val[n+1:]
return val
def gen_invalid_bech32_vector(template):
'''Generate possibly invalid bech32 vector'''
no_data = randbool(0.1)
to_upper = randbool(0.1)
hrp = template[0]
witver = template[1]
witprog = bytearray(os.urandom(template[2]))
if no_data:
rv = bech32_encode(hrp, [])
else:
data = [witver] + convertbits(witprog, 8, 5)
if template[3] and not no_data:
if template[2] % 5 in {2, 4}:
data[-1] |= 1
else:
data.append(0)
rv = bech32_encode(hrp, data)
if template[4]:
i = len(rv) - random.randrange(1, 7)
rv = rv[:i] + random.choice(CHARSET.replace(rv[i], '')) + rv[i + 1:]
if template[5]:
i = len(hrp) + 1 + random.randrange(0, len(rv) - len(hrp) - 4)
rv = rv[:i] + rv[i:i + 4].upper() + rv[i + 4:]
if to_upper:
rv = rv.swapcase()
return rv
def randbool(p = 0.5):
'''Return True with P(p)'''
return random.random() < p
def gen_invalid_vectors():
'''Generate invalid test vectors'''
# start with some manual edge-cases
yield "",
yield "x",
glist = [gen_invalid_base58_vector, gen_invalid_bech32_vector]
tlist = [templates, bech32_ng_templates]
while True:
for template, invalid_vector_generator in [(t, g) for g, l in zip(glist, tlist) for t in l]:
val = invalid_vector_generator(template)
if not is_valid(val):
yield val,
if __name__ == '__main__':
import sys
import json
iters = {'valid':gen_valid_vectors, 'invalid':gen_invalid_vectors}
try:
uiter = iters[sys.argv[1]]
except IndexError:
uiter = gen_valid_vectors
try:
count = int(sys.argv[2])
except IndexError:
count = 0
data = list(islice(uiter(), count))
json.dump(data, sys.stdout, sort_keys=True, indent=4)
sys.stdout.write('\n')
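# A hypothetical programmatic usage sketch (the script is normally driven via the CLI
# shown in the module docstring; the names below are from this file):
#
# for vector in islice(gen_valid_vectors(), 3):
#     print(vector)  # (address, script hex, metadata dict)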
| 36.562753
| 133
| 0.617318
|
4a176e955bdbaace6e952007047fad7773c4652b
| 4,617
|
py
|
Python
|
app.py
|
Jim-Shaddix/Neutron-Scattering-Dashboard
|
26f12e8f94db978d5102320a66e67448d03b2733
|
[
"MIT"
] | null | null | null |
app.py
|
Jim-Shaddix/Neutron-Scattering-Dashboard
|
26f12e8f94db978d5102320a66e67448d03b2733
|
[
"MIT"
] | null | null | null |
app.py
|
Jim-Shaddix/Neutron-Scattering-Dashboard
|
26f12e8f94db978d5102320a66e67448d03b2733
|
[
"MIT"
] | null | null | null |
import dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
from tabs import tabs
"""
This file initializes the application and formats
how the application gets displayed.
"""
# Support for Font Awesome (doesn't seem to work ... )
# html.Script(src="https://kit.fontawesome.com/925f94215c.js")
app = dash.Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP])
# Layout
app.layout = html.Div([
# title
html.Div([
        html.A(
            html.Img(src="assets/images/rosslab_logo.png", id="logo"),
            href="http://www.rosslabcsu.com/",
            target="_blank"
        ),
html.H1("Neutron Scattering", id="title"),
        html.A(
            dbc.Button("Github", outline=True, color="info", className="mr-1", id="github-text"),
            # support for Font Awesome doesn't seem to be working
            # html.I(className="fa fa-camera-retro fa-lg"),
            href="https://github.com/Jim-Shaddix/Neutron-Scattering-Dashboard",
            id="github-link",
            target="_blank"
        ),
html.Div(className="clr")
], id="banner"),
html.Div(className="clr"),
# All Content
dbc.Container([
dbc.Row([
# tabs
dbc.Col([
tabs
], width="auto", lg=5),
# plots
dbc.Col([
dbc.Row([
# heat map
dbc.Col([
# heat-map
dcc.Graph(
id="graph-heatmap"
),
], md=6, lg=12),
# cross section
dbc.Col([
dcc.Graph(id="graph-cross-section")
], md=6, lg=12)
])
], width="auto", lg=7)
]),
], fluid=True)
])
from dash.dependencies import Input, Output, State
import plotly.graph_objs as go
from __init__ import x_unique, y_unique, df
from layout import layout_heatmap, layout_cross, trace_heatmap
# Update the slider's range, default value, and marks when the axis dropdown changes
@app.callback([Output('slider-heatmap', 'min'),
Output('slider-heatmap', 'max'),
Output('slider-heatmap', 'value'),
Output('slider-heatmap', 'marks')],
[Input('dropdown-axis', 'value')])
def update_axis_dropdown(value):
if value == "x":
max_val = len(x_unique)-1
marks = [50,100,150,200,250,300,350]
default_value = 70
else:
max_val = len(y_unique)-1
marks = [10, 20, 30, 40, 50, 60, 70, 80, 90]
default_value = 15
marks = dict(zip(marks, [str(i) for i in marks]))
return 1, max_val, default_value, marks
@app.callback([Output('graph-heatmap', 'figure'),
Output('graph-cross-section', 'figure'),
Output('input-axis-value', 'value')],
[Input('slider-heatmap', 'value')],
[State('dropdown-axis', 'value')])
def update_slider(slider_value, dropdown_value):
    # scan-line overlay coordinates (the black line drawn on the heatmap)
if dropdown_value == "x":
x = [slider_value] * 2
y = [0, len(y_unique)-1]
else:
x = [0, len(x_unique)-1]
y = [slider_value] * 2
# Find cross section plot parameters
if dropdown_value == "x":
new_df = df[df.x == x_unique[slider_value]]
y_cross = new_df.z
x_cross = new_df.y
else:
new_df = df[df.y == y_unique[slider_value]]
y_cross = new_df.z
x_cross = new_df.x
# heatmap figure
figure_heatmap=go.Figure(
data=[trace_heatmap, go.Scatter(x=x, y=y, marker={"color": "black"}, line={"width": 4})],
layout=layout_heatmap
)
# cross-section figure
figure_cross=go.Figure(
data=[go.Scatter(x=x_cross, y=y_cross)],
layout=layout_cross
)
# Update x-axis of the cross section plot: based on scan direction
if dropdown_value == "x":
figure_cross.update_layout({"xaxis":{"title":"Energy Transfer (meV)"}})
figure_cross.update_yaxes(range=[-0.005, 0.05])
figure_cross.update_traces({"hovertemplate":"<b>Intensity</b>: %{y}<br>Energy Transfer (meV): %{x}<extra></extra>"})
else:
figure_cross.update_layout({"xaxis":{"title":"[1K0] (r.l.u.)"}})
figure_cross.update_yaxes(range=[-0.005, 0.05])
figure_cross.update_traces({"hovertemplate":"<b>Intensity</b>: %{y}<br>[1K0](r.l.u): %{x}<extra></extra>"})
return figure_heatmap, figure_cross, slider_value
if __name__ == '__main__':
app.run_server(debug=True)
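# Usage note (assumes stock Dash defaults, which are not stated in this file):
# running `python app.py` serves the dashboard on http://127.0.0.1:8050/ in debug mode.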
| 29.787097
| 124
| 0.557288
|
4a176ed8f1aea7c7442a248019a3ffc1ea330b15
| 116
|
py
|
Python
|
python basics and applications/3/3-3/3-3-6.py
|
DzmitrySakalenka/stepik_courses
|
7c43ac35cd921e8f6f96fb4f15f77ace38cc2d21
|
[
"MIT"
] | null | null | null |
python basics and applications/3/3-3/3-3-6.py
|
DzmitrySakalenka/stepik_courses
|
7c43ac35cd921e8f6f96fb4f15f77ace38cc2d21
|
[
"MIT"
] | null | null | null |
python basics and applications/3/3-3/3-3-6.py
|
DzmitrySakalenka/stepik_courses
|
7c43ac35cd921e8f6f96fb4f15f77ace38cc2d21
|
[
"MIT"
] | null | null | null |
import sys
import re
for line in sys.stdin:
line = line.rstrip()
print(re.sub(r'human', r'computer', line))
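# Example (illustrative, not part of the original solution):
#   echo "human rights" | python 3-3-6.py   ->   computer rights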
| 19.333333
| 46
| 0.663793
|
4a176ff54a1c0cd602a4a976cb461c88c364b58a
| 123,292
|
py
|
Python
|
o/soft_robot/derivation_of_dynamics/derived/ikko_dake/eqs/numpy_style/C0_1.py
|
YoshimitsuMatsutaIe/ctrlab2021_soudan
|
7841c981e6804cc92d34715a00e7c3efce41d1d0
|
[
"MIT"
] | null | null | null |
o/soft_robot/derivation_of_dynamics/derived/ikko_dake/eqs/numpy_style/C0_1.py
|
YoshimitsuMatsutaIe/ctrlab2021_soudan
|
7841c981e6804cc92d34715a00e7c3efce41d1d0
|
[
"MIT"
] | null | null | null |
o/soft_robot/derivation_of_dynamics/derived/ikko_dake/eqs/numpy_style/C0_1.py
|
YoshimitsuMatsutaIe/ctrlab2021_soudan
|
7841c981e6804cc92d34715a00e7c3efce41d1d0
|
[
"MIT"
] | null | null | null |
import numpy
def f(q, q_dot, xi):
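    # Machine-generated (SymPy-derived) closed-form expression; judging from the file
    # path, this appears to be one entry of the Coriolis/centrifugal term C0_1 of a
    # soft-robot dynamics model (an inference, not stated in the file).
    # q holds the three segment lengths as a 3x1 array; q_dot holds their time derivatives.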
l1, l2, l3 = q[0,0], q[1,0], q[2,0]
l1_dot, l2_dot, l3_dot = q_dot[0,0], q_dot[1,0], q_dot[2,0]
return l1_dot*(-2.4045095001e+18*l1**17 + 2.31139995715999e+19*l1**16*l2 + 1.65335189894899e+19*l1**16*l3 - 1.84371441091536e+17*l1**16 - 1.15408521784635e+20*l1**15*l2**2 - 1.29196727131924e+20*l1**15*l2*l3 + 1.47153336666065e+18*l1**15*l2 - 6.45983635659619e+19*l1**15*l3**2 + 9.21427435198725e+17*l1**15*l3 - 9.87284477294186e+16*l1**15 + 3.91435633435895e+20*l1**14*l2**3 + 5.16903864810132e+20*l1**14*l2**2*l3 - 5.98755924775589e+18*l1**14*l2**2 + 4.34777633458962e+20*l1**14*l2*l3**2 - 5.18818656610028e+18*l1**14*l2*l3 + 9.13743861789824e+17*l1**14*l2 + 1.72301288270044e+20*l1**14*l3**3 - 2.59409328305014e+18*l1**14*l3**2 + 5.92744288533555e+17*l1**14*l3 + 3.83421515731513e+15*l1**14 - 9.97726716488485e+20*l1**13*l2**4 - 1.38137123958205e+21*l1**13*l2**3*l3 + 1.61731143849806e+19*l1**13*l2**3 - 1.50294935219523e+21*l1**13*l2**2*l3**2 + 1.2996252630788e+19*l1**13*l2**2*l3 - 4.38534974621181e+18*l1**13*l2**2 - 1.00196623479682e+21*l1**13*l2*l3**3 + 1.12634189466829e+19*l1**13*l2*l3**2 - 4.20033288849909e+18*l1**13*l2*l3 - 2.67927473797752e+16*l1**13*l2 - 3.45342809895511e+20*l1**13*l3**4 + 4.33208421026266e+18*l1**13*l3**3 - 2.10016644424955e+18*l1**13*l3**2 - 1.53368606292605e+16*l1**13*l3 + 1.61120204790836e+15*l1**13 + 2.01520288240847e+21*l1**12*l2**5 + 2.68364491596603e+21*l1**12*l2**4*l3 - 3.16181979512921e+19*l1**12*l2**4 + 3.56003933373645e+21*l1**12*l2**3*l3**2 - 1.54751674844383e+19*l1**12*l2**3*l3 + 1.41885705844791e+19*l1**12*l2**3 + 2.91118527353702e+21*l1**12*l2**2*l3**3 - 1.87001635076338e+19*l1**12*l2**2*l3**2 + 1.50727397505586e+19*l1**12*l2**2*l3 + 9.43357204863813e+16*l1**12*l2**2 + 1.78001966686822e+21*l1**12*l2*l3**4 - 1.24667756717559e+19*l1**12*l2*l3**3 + 1.24615031453864e+19*l1**12*l2*l3**2 + 6.9623736210271e+16*l1**12*l2*l3 - 1.32366856382182e+16*l1**12*l2 + 5.36728983193205e+20*l1**12*l3**5 - 3.86879187110957e+18*l1**12*l3**4 + 5.02424658351953e+18*l1**12*l3**3 + 3.48118681051355e+16*l1**12*l3**2 - 7.99137983971431e+15*l1**12*l3 - 42365828268584.8*l1**12 - 3.32306033969879e+21*l1**11*l2**6 - 3.93505054371013e+21*l1**11*l2**5*l3 + 4.63533010498105e+19*l1**11*l2**5 - 6.29071698447244e+21*l1**11*l2**4*l3**2 - 7.94215438548155e+18*l1**11*l2**4*l3 - 3.4053528424063e+19*l1**11*l2**4 - 5.83007203011451e+21*l1**11*l2**3*l3**3 + 1.01081964906129e+19*l1**11*l2**3*l3**2 - 3.55007474803246e+19*l1**11*l2**3*l3 - 2.17802124424071e+17*l1**11*l2**3 - 4.37255402258588e+21*l1**11*l2**2*l3**4 - 6.93133473642026e+18*l1**11*l2**2*l3**3 - 3.75818312405967e+19*l1**11*l2**2*l3**2 - 1.1895418731963e+17*l1**11*l2**2*l3 + 5.60844735229346e+16*l1**11*l2**2 - 2.51628679378898e+21*l1**11*l2*l3**5 + 5.05409824530644e+18*l1**11*l2*l3**4 - 2.50545541603978e+19*l1**11*l2*l3**3 - 1.18393082662462e+17*l1**11*l2*l3**2 + 4.83736302218318e+16*l1**11*l2*l3 + 255352441362365.0*l1**11*l2 - 6.55841757285021e+20*l1**11*l3**6 - 1.58843087709631e+18*l1**11*l3**5 - 8.87518687008116e+18*l1**11*l3**4 - 3.96513957732101e+16*l1**11*l3**3 + 2.41868151109159e+16*l1**11*l3**2 + 127605355063783.0*l1**11*l3 - 45539308220349.6*l1**11 + 4.55605960516938e+21*l1**10*l2**7 + 4.32478796017064e+21*l1**10*l2**6*l3 - 5.046878104956e+19*l1**10*l2**6 + 8.90379068531806e+21*l1**10*l2**5*l3**2 + 7.06129726272814e+19*l1**10*l2**5*l3 + 6.37059596704034e+19*l1**10*l2**5 + 8.29008904063106e+21*l1**10*l2**4*l3**3 + 2.87000578929901e+19*l1**10*l2**4*l3**2 + 5.8422667680997e+19*l1**10*l2**4*l3 + 3.54548005248073e+17*l1**10*l2**4 + 7.86077867258601e+21*l1**10*l2**3*l3**4 + 7.09017782412989e+19*l1**10*l2**3*l3**3 + 
7.84595869237463e+19*l1**10*l2**3*l3**2 + 1.52433431863992e+16*l1**10*l2**3*l3 - 1.59195266898661e+17*l1**10*l2**3 + 4.97405342437864e+21*l1**10*l2**2*l3**5 + 5.31763336809742e+19*l1**10*l2**2*l3**4 + 5.95555510770225e+19*l1**10*l2**2*l3**3 + 9.67905533614916e+16*l1**10*l2**2*l3**2 - 1.44415260293569e+17*l1**10*l2**2*l3 - 760777835590842.0*l1**10*l2**2 + 2.96793022843935e+21*l1**10*l2*l3**6 + 1.14800231571961e+19*l1**10*l2*l3**5 + 3.92297934618731e+19*l1**10*l2*l3**4 + 6.45270355743278e+16*l1**10*l2*l3**3 - 1.23096624280662e+17*l1**10*l2*l3**2 - 437075506246793.0*l1**10*l2*l3 + 301425893705836.0*l1**10*l2 + 6.17826851452949e+20*l1**10*l3**7 + 1.17688287712136e+19*l1**10*l3**6 + 1.16845335361994e+19*l1**10*l3**5 + 3.8108357965998e+15*l1**10*l3**4 - 4.81384200978564e+16*l1**10*l3**3 - 218537753123397.0*l1**10*l3**2 + 199166415572844.0*l1**10*l3 - 51012171774.8208*l1**10 - 5.24624169583991e+21*l1**9*l2**8 - 3.33935575861832e+21*l1**9*l2**7*l3 + 3.76960089534284e+19*l1**9*l2**7 - 1.04639066815244e+22*l1**9*l2**6*l3**2 - 1.58313610750599e+20*l1**9*l2**6*l3 - 9.53785195400235e+19*l1**9*l2**6 - 8.90519768702738e+21*l1**9*l2**5*l3**3 - 6.91689445571938e+19*l1**9*l2**5*l3**2 - 6.75509847260171e+19*l1**9*l2**5*l3 - 4.14375789318618e+17*l1**9*l2**5 - 9.92785754345536e+21*l1**9*l2**4*l3**4 - 2.00479230397155e+20*l1**9*l2**4*l3**3 - 1.21826880450831e+20*l1**9*l2**4*l3**2 + 4.22698841733277e+17*l1**9*l2**4*l3 + 3.31363920807814e+17*l1**9*l2**4 - 7.94228603476429e+21*l1**9*l2**3*l3**5 - 1.22261043267413e+20*l1**9*l2**3*l3**4 - 9.88789475274014e+19*l1**9*l2**3*l3**3 + 9.7725727790105e+16*l1**9*l2**3*l3**2 + 2.76244945126514e+17*l1**9*l2**3*l3 + 1.46219390567349e+15*l1**9*l2**3 - 4.45259884351369e+21*l1**9*l2**2*l3**6 - 1.20287538238293e+20*l1**9*l2**2*l3**5 - 7.41592106455511e+19*l1**9*l2**2*l3**4 + 3.42273840872521e+17*l1**9*l2**2*l3**3 + 3.08688010391811e+17*l1**9*l2**2*l3**2 + 293737984120351.0*l1**9*l2**2*l3 - 1.00673560590287e+15*l1**9*l2**2 - 2.98968762329269e+21*l1**9*l2*l3**7 - 2.30563148523979e+19*l1**9*l2*l3**6 - 4.87307521803325e+19*l1**9*l2*l3**5 + 4.88628638950525e+16*l1**9*l2*l3**4 + 2.05792006927874e+17*l1**9*l2*l3**3 + 567869814093548.0*l1**9*l2*l3**2 - 994332458054359.0*l1**9*l2*l3 + 968290079740.516*l1**9*l2 - 4.1741946982729e+20*l1**9*l3**8 - 2.26162301072284e+19*l1**9*l3**7 - 1.12584974543362e+19*l1**9*l3**6 + 8.45397683466555e+16*l1**9*l3**5 + 6.90612362816284e+16*l1**9*l3**4 + 97912661373450.5*l1**9*l3**3 - 500434019809507.0*l1**9*l3**2 - 530451291739.658*l1**9*l3 + 263008303660.188*l1**9 + 5.09702458035085e+21*l1**8*l2**9 + 1.26397086121091e+21*l1**8*l2**8*l3 - 1.18474767286023e+19*l1**8*l2**8 + 1.07205648924521e+22*l1**8*l2**7*l3**2 + 2.24804227053987e+20*l1**8*l2**7*l3 + 1.15959485929895e+20*l1**8*l2**7 + 6.55299938205732e+21*l1**8*l2**6*l3**3 + 7.91688389425501e+19*l1**8*l2**6*l3**2 + 4.88467865833311e+19*l1**8*l2**6*l3 + 3.23476834857391e+17*l1**8*l2**6 + 1.073246093322e+22*l1**8*l2**5*l3**4 + 3.15159126296609e+20*l1**8*l2**5*l3**3 + 1.53766641535919e+20*l1**8*l2**5*l3**2 - 1.1017289943495e+18*l1**8*l2**5*l3 - 5.30075570590677e+17*l1**8*l2**5 + 7.43082034322747e+21*l1**8*l2**4*l3**5 + 2.1985327367083e+20*l1**8*l2**4*l3**4 + 1.08190579140322e+20*l1**8*l2**4*l3**3 - 4.55546843538325e+17*l1**8*l2**4*l3**2 - 3.44542222648232e+17*l1**8*l2**4*l3 - 1.8967182496939e+15*l1**8*l2**4 + 7.15497395547997e+21*l1**8*l2**3*l3**6 + 1.75882618936664e+20*l1**8*l2**3*l3**5 + 1.12620104377328e+20*l1**8*l2**3*l3**4 - 1.00016905140208e+18*l1**8*l2**3*l3**3 - 5.50285551923147e+17*l1**8*l2**3*l3**2 + 
1.11967675490991e+15*l1**8*l2**3*l3 + 2.20976855054334e+15*l1**8*l2**3 + 2.80842830659599e+21*l1**8*l2**2*l3**7 + 1.57579563148304e+20*l1**8*l2**2*l3**6 + 6.49143474841932e+19*l1**8*l2**2*l3**5 - 7.5012678855156e+17*l1**8*l2**2*l3**4 - 3.75482024752831e+17*l1**8*l2**2*l3**3 + 133404524754298.0*l1**8*l2**2*l3**2 + 2.40289857473884e+15*l1**8*l2**2*l3 - 4262434013538.93*l1**8*l2**2 + 2.68014122311302e+21*l1**8*l2*l3**8 + 2.26196682693e+19*l1**8*l2*l3**7 + 5.12555471786395e+19*l1**8*l2*l3**6 - 1.8221873741533e+17*l1**8*l2*l3**5 - 2.75142775961574e+17*l1**8*l2*l3**4 + 88936349836198.6*l1**8*l2*l3**3 + 2.07954860877798e+15*l1**8*l2*l3**2 + 2385336681662.5*l1**8*l2*l3 - 1238085532351.65*l1**8*l2 + 1.40441206801212e+20*l1**8*l3**9 + 2.81005283817484e+19*l1**8*l3**8 + 6.9781123690473e+18*l1**8*l3**7 - 1.8362149905825e+17*l1**8*l3**6 - 6.89084445296464e+16*l1**8*l3**5 + 279919188727477.0*l1**8*l3**4 + 808634958657498.0*l1**8*l3**3 + 232095969735.629*l1**8*l3**2 - 1086421100722.45*l1**8*l3 + 6385214980.1395*l1**8 - 4.17581151602659e+21*l1**7*l2**10 + 8.86337611867598e+20*l1**7*l2**9*l3 - 1.43371358387264e+19*l1**7*l2**9 - 9.81241934181011e+21*l1**7*l2**8*l3**2 - 2.30404993068684e+20*l1**7*l2**8*l3 - 1.15127229450823e+20*l1**7*l2**8 - 2.5058876758923e+21*l1**7*l2**7*l3**3 - 3.78747933811535e+19*l1**7*l2**7*l3**2 - 7.38748788688808e+18*l1**7*l2**7*l3 - 1.09415408147773e+17*l1**7*l2**7 - 9.37339601898269e+21*l1**7*l2**6*l3**4 - 3.91620412607744e+20*l1**7*l2**6*l3**3 - 1.63979743320967e+20*l1**7*l2**6*l3**2 + 1.66535862247482e+18*l1**7*l2**6*l3 + 6.66002689838007e+17*l1**7*l2**6 - 6.15259631667458e+21*l1**7*l2**5*l3**5 - 1.71550534726401e+20*l1**7*l2**5*l3**4 - 8.09514593797034e+19*l1**7*l2**5*l3**3 + 5.10044133365773e+17*l1**7*l2**5*l3**2 + 2.55436534408866e+17*l1**7*l2**5*l3 + 1.62721630672413e+15*l1**7*l2**5 - 5.12716359722882e+21*l1**7*l2**4*l3**6 - 3.11910063138912e+20*l1**7*l2**4*l3**5 - 1.11825206113006e+20*l1**7*l2**4*l3**4 + 2.01997676580504e+18*l1**7*l2**4*l3**3 + 7.26080016643432e+17*l1**7*l2**4*l3**2 - 4.0492613781995e+15*l1**7*l2**4*l3 - 3.49528440498367e+15*l1**7*l2**4 - 5.35622629656154e+21*l1**7*l2**3*l3**7 - 1.14367023150934e+20*l1**7*l2**3*l3**6 - 8.9460164890405e+19*l1**7*l2**3*l3**5 + 9.53877917185714e+17*l1**7*l2**3*l3**4 + 4.97151386919899e+17*l1**7*l2**3*l3**3 - 976528207763192.0*l1**7*l2**3*l3**2 - 3.63725689582089e+15*l1**7*l2**3*l3 + 8963083288671.18*l1**7*l2**3 - 9.39707878459614e+20*l1**7*l2**2*l3**8 - 1.67837319689033e+20*l1**7*l2**2*l3**7 - 4.04757296898517e+19*l1**7*l2**2*l3**6 + 1.21198605948302e+18*l1**7*l2**2*l3**5 + 3.72863540189924e+17*l1**7*l2**2*l3**4 - 3.14218147548767e+15*l1**7*l2**2*l3**3 - 4.1759933523844e+15*l1**7*l2**2*l3**2 - 6085319148104.88*l1**7*l2**2*l3 + 2749807021333.81*l1**7*l2**2 - 2.18053763151336e+21*l1**7*l2*l3**9 - 9.46869834528839e+18*l1**7*l2*l3**8 - 4.68513552345619e+19*l1**7*l2*l3**7 + 1.70014711121924e+17*l1**7*l2*l3**6 + 2.90432006657373e+17*l1**7*l2*l3**5 - 488264103881596.0*l1**7*l2*l3**4 - 2.76249139665643e+15*l1**7*l2*l3**3 - 460803677139.353*l1**7*l2*l3**2 + 4139527373174.42*l1**7*l2*l3 - 39831426445.6718*l1**7*l2 + 8.86337611867599e+19*l1**7*l3**10 - 2.56005547854094e+19*l1**7*l3**9 - 9.2343598586101e+17*l1**7*l3**8 + 2.3790837463926e+17*l1**7*l3**7 + 4.2572755734811e+16*l1**7*l3**6 - 809852275639900.0*l1**7*l3**5 - 924809877019810.0*l1**7*l3**4 + 1102314678647.06*l1**7*l3**3 + 2290202730723.26*l1**7*l3**2 + 4284604326.20109*l1**7*l3 + 493328671.232114*l1**7 + 2.86939485434628e+21*l1**6*l2**11 - 2.14028615717468e+21*l1**6*l2**10*l3 + 
2.88564942672496e+19*l1**6*l2**10 + 8.17766261018856e+21*l1**6*l2**9*l3**2 + 1.73524039755521e+20*l1**6*l2**9*l3 + 9.32106667201624e+19*l1**6*l2**9 - 1.66501990435211e+21*l1**6*l2**8*l3**3 - 1.24547421045052e+19*l1**6*l2**8*l3**2 - 3.37502607580881e+19*l1**6*l2**8*l3 - 1.11729964858591e+17*l1**6*l2**8 + 8.32135368826249e+21*l1**6*l2**7*l3**4 + 3.63606268048046e+20*l1**6*l2**7*l3**3 + 1.55165779063919e+20*l1**6*l2**7*l3**2 - 1.70744147176243e+18*l1**6*l2**7*l3 - 6.63512434799416e+17*l1**6*l2**7 + 1.60089641855604e+21*l1**6*l2**6*l3**5 + 9.60278666608223e+19*l1**6*l2**6*l3**4 + 1.99593001204164e+19*l1**6*l2**6*l3**3 - 2.72977415712266e+17*l1**6*l2**6*l3**2 - 2.1871516200211e+16*l1**6*l2**6*l3 - 665959639510496.0*l1**6*l2**6 + 5.85342332164106e+21*l1**6*l2**5*l3**6 + 2.48661633669077e+20*l1**6*l2**5*l3**5 + 1.07778732470679e+20*l1**6*l2**5*l3**4 - 2.36842275790641e+18*l1**6*l2**5*l3**3 - 8.06610097325136e+17*l1**6*l2**5*l3**2 + 6.54096735157844e+15*l1**6*l2**5*l3 + 4.16367903556313e+15*l1**6*l2**5 + 1.14349744182574e+21*l1**6*l2**4*l3**7 + 2.07218028057564e+20*l1**6*l2**4*l3**6 + 4.70221688594509e+19*l1**6*l2**4*l3**5 - 1.31579042105912e+18*l1**6*l2**4*l3**4 - 3.45870037207788e+17*l1**6*l2**4*l3**3 + 1.78492773824641e+15*l1**6*l2**4*l3**2 + 3.57158798373764e+15*l1**6*l2**4*l3 - 11551150999859.4*l1**6*l2**4 + 4.16067684413124e+21*l1**6*l2**3*l3**8 + 5.4873066663327e+19*l1**6*l2**3*l3**7 + 7.18524883137861e+19*l1**6*l2**3*l3**6 - 1.05263233684729e+18*l1**6*l2**3*l3**5 - 5.15250073824791e+17*l1**6*l2**3*l3**4 + 5.33688964634589e+15*l1**6*l2**3*l3**3 + 5.64558410419697e+15*l1**6*l2**3*l3**2 + 17508280889740.5*l1**6*l2**3*l3 - 3647739362464.16*l1**6*l2**3 - 5.55006634784036e+20*l1**6*l2**2*l3**9 + 1.36352350518017e+20*l1**6*l2**2*l3**8 + 8.55398576589273e+18*l1**6*l2**2*l3**7 - 1.1842113789532e+18*l1**6*l2**2*l3**6 - 2.07522022324673e+17*l1**6*l2**2*l3**5 + 4.00266723475942e+15*l1**6*l2**2*l3**4 + 4.07729527510811e+15*l1**6*l2**2*l3**3 - 9497299316335.21*l1**6*l2**2*l3**2 - 7610914660086.34*l1**6*l2**2*l3 + 104177460278.68*l1**6*l2**2 + 1.63553252203771e+21*l1**6*l2*l3**10 - 2.76772046766781e+18*l1**6*l2*l3**9 + 3.87914447659798e+19*l1**6*l2*l3**8 - 7.79935473463615e+16*l1**6*l2*l3**7 - 2.68870032441712e+17*l1**6*l2*l3**6 + 713971095298563.0*l1**6*l2*l3**5 + 2.80144599940747e+15*l1**6*l2*l3**4 + 3440215687662.86*l1**6*l2*l3**3 - 7034105699429.33*l1**6*l2*l3**2 - 47002366021.5592*l1**6*l2*l3 - 6155734523.22814*l1**6*l2 - 1.94571468834062e+20*l1**6*l3**11 + 1.73524039755521e+19*l1**6*l3**10 - 3.7500289731209e+18*l1**6*l3**9 - 2.13430183970304e+17*l1**6*l3**8 - 3.12450231431586e+15*l1**6*l3**7 + 1.09016122526307e+15*l1**6*l3**6 + 728390179632715.0*l1**6*l3**5 - 4540836235143.97*l1**6*l3**4 - 2970511936021.67*l1**6*l3**3 + 12866641135.2686*l1**6*l3**2 + 2009051912.85056*l1**6*l3 - 104007286.762917*l1**6 - 1.63480022290245e+21*l1**5*l2**12 + 2.21062184350019e+21*l1**5*l2**11*l3 - 2.85917557877336e+19*l1**5*l2**11 - 6.04995307355592e+21*l1**5*l2**10*l3**2 - 9.37174217486823e+19*l1**5*l2**10*l3 - 6.09886465761545e+19*l1**5*l2**10 + 3.89622223338111e+21*l1**5*l2**9*l3**3 + 3.82667438573202e+19*l1**5*l2**9*l3**2 + 5.2121861774447e+19*l1**5*l2**9*l3 + 2.24909450081533e+17*l1**5*l2**9 - 6.74333338960309e+21*l1**5*l2**8*l3**4 - 2.84617932614257e+20*l1**5*l2**8*l3**3 - 1.29151032294275e+20*l1**5*l2**8*l3**2 + 1.24396902494161e+18*l1**5*l2**8*l3 + 5.22901262942018e+17*l1**5*l2**8 + 8.70711899902023e+20*l1**5*l2**7*l3**5 + 5.02521768390469e+19*l1**5*l2**7*l3**4 + 3.37532353511669e+19*l1**5*l2**7*l3**3 - 
1.43081687577857e+17*l1**5*l2**7*l3**2 - 2.04588389976568e+17*l1**5*l2**7*l3 - 367934285537479.0*l1**5*l2**7 - 2.92671166082053e+21*l1**5*l2**6*l3**6 - 2.78986223140915e+20*l1**5*l2**6*l3**5 - 8.50139172349872e+19*l1**5*l2**6*l3**4 + 2.39591688610765e+18*l1**5*l2**6*l3**3 + 7.586451862698e+17*l1**5*l2**6*l3**2 - 6.90861817464632e+15*l1**5*l2**6*l3 - 3.79543172172898e+15*l1**5*l2**6 - 2.50860999498903e+21*l1**5*l2**5*l3**7 - 16384.0*l1**5*l2**5*l3**6 - 2.6956426883614e+19*l1**5*l2**5*l3**5 + 2.71013549412176e+17*l1**5*l2**5*l3**4 + 9.53421099237067e+16*l1**5*l2**5*l3**3 - 264470484134416.0*l1**5*l2**5*l3**2 - 2.15382602709947e+15*l1**5*l2**5*l3 + 8348678385818.74*l1**5*l2**5 + 5.44194937438765e+20*l1**5*l2**4*l3**8 - 1.99275873672082e+20*l1**5*l2**4*l3**7 - 2.24636890696784e+19*l1**5*l2**4*l3**6 + 1.8853116480847e+18*l1**5*l2**4*l3**5 + 3.84994258960893e+17*l1**5*l2**4*l3**4 - 8.77989566769386e+15*l1**5*l2**4*l3**3 - 5.4251266997965e+15*l1**5*l2**4*l3**2 - 28478344899677.9*l1**5*l2**4*l3 + 2905216900532.96*l1**5*l2**4 - 2.99703706204582e+21*l1**5*l2**3*l3**9 + 2.51260884195234e+19*l1**5*l2**3*l3**8 - 4.85793812771355e+19*l1**5*l2**3*l3**7 + 1.80675699608118e+17*l1**5*l2**3*l3**6 + 3.07995407168714e+17*l1**5*l2**3*l3**5 - 1.73195568924039e+15*l1**5*l2**3*l3**4 - 4.02571086347279e+15*l1**5*l2**3*l3**3 + 14379785336613.1*l1**5*l2**3*l3**2 + 9358190291196.16*l1**5*l2**3*l3 - 136106742218.544*l1**5*l2**3 + 1.16886667001433e+21*l1**5*l2**2*l3**10 - 9.48726442047523e+19*l1**5*l2**2*l3**9 + 1.26574632566876e+19*l1**5*l2**2*l3**8 + 1.02682152261756e+18*l1**5*l2**2*l3**7 + 4.76710549618534e+16*l1**5*l2**2*l3**6 - 5.26793740061632e+15*l1**5*l2**2*l3**5 - 3.09043665657464e+15*l1**5*l2**2*l3**4 + 3733865089761.5*l1**5*l2**2*l3**3 + 9291266798577.57*l1**5*l2**2*l3**2 + 148742476533.837*l1**5*l2**2*l3 + 22458601303.136*l1**5*l2**2 - 1.09999146791926e+21*l1**5*l2*l3**11 + 7.65334877146404e+18*l1**5*l2*l3**10 - 2.87002293987278e+19*l1**5*l2*l3**9 - 3.57704218944643e+16*l1**5*l2*l3**8 + 2.16755767505657e+17*l1**5*l2*l3**7 - 88156828044805.2*l1**5*l2*l3**6 - 2.12640986108364e+15*l1**5*l2*l3**5 - 636993318398.498*l1**5*l2*l3**4 + 7827766648100.87*l1**5*l2*l3**3 - 20820098267.7376*l1**5*l2*l3**2 - 4989961006.35615*l1**5*l2*l3 + 448925169.082043*l1**5*l2 + 1.84218486958349e+20*l1**5*l3**12 - 8.51976561351657e+18*l1**5*l3**11 + 5.2121861774447e+18*l1**5*l3**10 + 1.38218780549067e+17*l1**5*l3**9 - 2.5573548747071e+16*l1**5*l3**8 - 986945453520904.0*l1**5*l3**7 - 366876949957694.0*l1**5*l3**6 + 6400427544972.25*l1**5*l3**5 + 2460234870705.93*l1**5*l3**4 - 23154822780.8173*l1**5*l3**3 - 3748568380.93958*l1**5*l3**2 - 64917943.6697924*l1**5*l3 - 19145996.4901994*l1**5 + 7.57433863161779e+20*l1**4*l2**13 - 1.54279691595549e+21*l1**4*l2**12*l3 + 1.94763285953059e+19*l1**4*l2**12 + 3.83636295013389e+21*l1**4*l2**11*l3**2 + 3.18408189454306e+19*l1**4*l2**11*l3 + 3.1668603713435e+19*l1**4*l2**11 - 4.04429887818641e+21*l1**4*l2**10*l3**3 - 2.74004326299113e+19*l1**4*l2**10*l3**2 - 4.48104289122183e+19*l1**4*l2**10*l3 - 2.09853141780857e+17*l1**4*l2**10 + 5.51663977214389e+21*l1**4*l2**9*l3**4 + 1.65702221042547e+20*l1**4*l2**9*l3**3 + 9.1716602990835e+19*l1**4*l2**9*l3**2 - 6.09266140241659e+17*l1**4*l2**9*l3 - 3.21795525834839e+17*l1**4*l2**9 - 3.29925238972273e+21*l1**4*l2**8*l3**5 - 8.44756421001218e+19*l1**4*l2**8*l3**4 - 5.93159424002343e+19*l1**4*l2**8*l3**3 + 2.61965736815341e+17*l1**4*l2**8*l3**2 + 2.87613216226748e+17*l1**4*l2**8*l3 + 893969763492945.0*l1**4*l2**8 + 3.85824552934983e+21*l1**4*l2**7*l3**6 + 
1.66352033674086e+20*l1**4*l2**7*l3**5 + 7.36769001749573e+19*l1**4*l2**7*l3**4 - 1.65806426193164e+18*l1**4*l2**7*l3**3 - 6.23183581660006e+17*l1**4*l2**7*l3**2 + 4.73949249166928e+15*l1**4*l2**7*l3 + 2.6460310631083e+15*l1**4*l2**7 - 1.74752080721108e+21*l1**4*l2**6*l3**7 + 2.1227212630287e+19*l1**4*l2**6*l3**6 - 1.79897261479466e+19*l1**4*l2**6*l3**5 + 1.08012646504853e+17*l1**4*l2**6*l3**4 + 1.87914345957489e+17*l1**4*l2**6*l3**3 - 797415359786312.0*l1**4*l2**6*l3**2 + 441870003757485.0*l1**4*l2**6*l3 - 1960674469396.83*l1**4*l2**6 + 2.89368414701237e+21*l1**4*l2**5*l3**8 + 1.81947536831032e+19*l1**4*l2**5*l3**7 + 3.93929274155773e+19*l1**4*l2**5*l3**6 - 9.54439021842882e+17*l1**4*l2**5*l3**5 - 3.88922277563401e+17*l1**4*l2**5*l3**4 + 7.27612726647941e+15*l1**4*l2**5*l3**3 + 4.16828298771908e+15*l1**4*l2**5*l3**2 + 32798379372859.2*l1**4*l2**5*l3 - 1101036817771.22*l1**4*l2**5 - 1.83291799429041e+21*l1**4*l2**4*l3**9 + 1.03970021046304e+20*l1**4*l2**4*l3**8 - 1.28498043913904e+19*l1**4*l2**4*l3**7 - 7.95365851535735e+17*l1**4*l2**4*l3**6 + 7.11815247910274e+15*l1**4*l2**4*l3**5 + 2.39260040744593e+15*l1**4*l2**4*l3**4 + 2.03331785352342e+15*l1**4*l2**4*l3**3 - 27639749972531.0*l1**4*l2**4*l3**2 - 8408676636958.53*l1**4*l2**4*l3 + 91833566527.5212*l1**4*l2**4 + 2.20665590885755e+21*l1**4*l2**3*l3**10 - 3.75447298222764e+19*l1**4*l2**3*l3**9 + 3.68384500874786e+19*l1**4*l2**3*l3**8 + 6.17215122884872e+16*l1**4*l2**3*l3**7 - 2.59281518375601e+17*l1**4*l2**3*l3**6 + 1.91408032595675e+15*l1**4*l2**3*l3**5 + 3.03008685766922e+15*l1**4*l2**3*l3**4 + 18759679110991.5*l1**4*l2**3*l3**3 - 7642132528413.29*l1**4*l2**3*l3**2 - 319203022301.981*l1**4*l2**3*l3 - 42349809021.3927*l1**4*l2**3 - 1.10299060314175e+21*l1**4*l2**2*l3**11 + 4.97106663127641e+19*l1**4*l2**2*l3**10 - 1.97719808000781e+19*l1**4*l2**2*l3**9 - 6.21774098224364e+17*l1**4*l2**2*l3**8 + 8.05347196960667e+16*l1**4*l2**2*l3**7 + 3.63806363323971e+15*l1**4*l2**2*l3**6 + 1.2429970133477e+15*l1**4*l2**2*l3**5 - 22040646469054.8*l1**4*l2**2*l3**4 - 7528124326616.9*l1**4*l2**2*l3**3 + 141834514169.588*l1**4*l2**2*l3**2 + 9650821484.44502*l1**4*l2**2*l3 - 765839859.607975*l1**4*l2**2 + 6.39393825022314e+20*l1**4*l2*l3**12 - 4.98189684180206e+18*l1**4*l2*l3**11 + 1.8343320598167e+19*l1**4*l2*l3**10 + 5.8214608181187e+16*l1**4*l2*l3**9 - 1.55795895415002e+17*l1**4*l2*l3**8 - 227832959938946.0*l1**4*l2*l3**7 + 1.36254744807323e+15*l1**4*l2*l3**6 + 4384411457487.6*l1**4*l2*l3**5 - 6122822414656.53*l1**4*l2*l3**4 - 61305760703.3384*l1**4*l2*l3**3 + 4912541380.17864*l1**4*l2*l3**2 + 626538104.619987*l1**4*l2*l3 + 101166129.937225*l1**4*l2 - 1.1867668584273e+20*l1**4*l3**13 + 2.65340157878588e+18*l1**4*l3**12 - 4.07367535565621e+18*l1**4*l3**11 - 6.09266140241659e+16*l1**4*l3**10 + 3.19570240251942e+16*l1**4*l3**9 + 592436561458660.0*l1**4*l3**8 + 61780275526079.6*l1**4*l3**7 - 5906305954902.48*l1**4*l3**6 - 1168891280227.43*l1**4*l3**5 + 50084330660.391*l1**4*l3**4 + 4119822240.10752*l1**4*l3**3 - 220625070.629506*l1**4*l3**2 - 14955980.1740121*l1**4*l3 - 1427974.90316759*l1**4 - 2.76260730356127e+20*l1**3*l2**14 + 7.74056642128129e+20*l1**3*l2**13*l3 - 9.57871953158078e+18*l1**3*l2**13 - 1.95620343353916e+21*l1**3*l2**12*l3**2 - 3.1287274851897e+18*l1**3*l2**12*l3 - 1.26366854300275e+19*l1**3*l2**12 + 2.72827505132593e+21*l1**3*l2**11*l3**3 + 8.37536280650781e+18*l1**3*l2**11*l3**2 + 2.58205597343424e+19*l1**3*l2**11*l3 + 1.28212414162903e+17*l1**3*l2**11 - 3.58200213673406e+21*l1**3*l2**10*l3**4 - 7.41267742644944e+19*l1**3*l2**10*l3**3 - 
5.18230112122303e+19*l1**3*l2**10*l3**2 + 1.70762850664815e+17*l1**3*l2**10*l3 + 1.5029382952962e+17*l1**3*l2**10 + 2.85915500857293e+21*l1**3*l2**9*l3**5 + 8.13469146149322e+19*l1**3*l2**9*l3**4 + 5.00607543769384e+19*l1**3*l2**9*l3**3 - 1.77215554222248e+17*l1**3*l2**9*l3**2 - 2.21049066180828e+17*l1**3*l2**9*l3 - 793931133597510.0*l1**3*l2**9 - 2.24037252438007e+21*l1**3*l2**8*l3**6 - 1.36027444202248e+20*l1**3*l2**8*l3**5 - 5.17075790585189e+19*l1**3*l2**8*l3**4 + 9.67905533614916e+17*l1**3*l2**8*l3**3 + 4.12535361731611e+17*l1**3*l2**8*l3**2 - 2.08770108858518e+15*l1**3*l2**8*l3 - 1.38260347207473e+15*l1**3*l2**8 + 4.43730960125512e+20*l1**3*l2**7*l3**7 + 9.24177964856034e+19*l1**3*l2**7*l3**6 + 2.2922764483911e+19*l1**3*l2**7*l3**5 - 6.50881402314958e+17*l1**3*l2**7*l3**4 - 2.66077366904033e+17*l1**3*l2**7*l3**3 + 1.3974699751154e+15*l1**3*l2**7*l3**2 + 458030433979925.0*l1**3*l2**7*l3 - 2800963527709.75*l1**3*l2**7 + 3.88264590109823e+20*l1**3*l2**6*l3**8 - 1.16677468063074e+20*l1**3*l2**6*l3**7 - 1.3432101971206e+19*l1**3*l2**6*l3**6 + 1.18617524525329e+18*l1**3*l2**6*l3**5 + 2.6752702706167e+17*l1**3*l2**6*l3**4 - 6.11641143773886e+15*l1**3*l2**6*l3**3 - 2.55947581189709e+15*l1**3*l2**6*l3**2 - 21788785635716.3*l1**3*l2**6*l3 - 298477594124.879*l1**3*l2**6 - 1.49358168292005e+21*l1**3*l2**5*l3**9 + 6.93133473642026e+19*l1**3*l2**5*l3**8 - 1.15132302610337e+19*l1**3*l2**5*l3**7 - 5.42027098824353e+17*l1**3*l2**5*l3**6 - 6.51290316098542e+15*l1**3*l2**5*l3**5 + 2.53628044672314e+15*l1**3*l2**5*l3**4 - 390028284693248.0*l1**3*l2**5*l3**3 + 21928833812101.8*l1**3*l2**5*l3**2 + 6186033126168.44*l1**3*l2**5*l3 - 1346956449.85359*l1**3*l2**5 + 1.42957750428646e+21*l1**3*l2**4*l3**10 - 7.5570802334582e+19*l1**3*l2**4*l3**9 + 1.43267278024444e+19*l1**3*l2**4*l3**8 + 8.47268032323782e+17*l1**3*l2**4*l3**7 - 5.42741930082118e+15*l1**3*l2**4*l3**6 - 5.49846325401644e+15*l1**3*l2**4*l3**5 - 1.56940621558209e+15*l1**3*l2**4*l3**4 - 14174230755144.0*l1**3*l2**4*l3**3 + 2468106285763.61*l1**3*l2**4*l3**2 + 334750748180.292*l1**3*l2**4*l3 + 48846534008.6255*l1**3*l2**4 - 1.30254623153966e+21*l1**3*l2**3*l3**11 + 3.25387658459729e+19*l1**3*l2**3*l3**10 - 2.29811462482306e+19*l1**3*l2**3*l3**9 - 3.25440701157479e+17*l1**3*l2**3*l3**8 + 1.52872586892383e+17*l1**3*l2**3*l3**7 + 1.69085363114876e+15*l1**3*l2**3*l3**6 - 1.16318352971344e+15*l1**3*l2**3*l3**5 + 4020737967196.25*l1**3*l2**3*l3**4 + 6549832809600.45*l1**3*l2**3*l3**3 - 156990981269.608*l1**3*l2**3*l3**2 - 22841559842.938*l1**3*l2**3*l3 + 471801015.899017*l1**3*l2**3 + 6.82068762831483e+20*l1**3*l2**2*l3**12 - 2.02163929812258e+19*l1**3*l2**2*l3**11 + 1.50182263130815e+19*l1**3*l2**2*l3**10 + 3.22635177871639e+17*l1**3*l2**2*l3**9 - 9.97790125890122e+16*l1**3*l2**2*l3**8 - 2.62131918760237e+15*l1**3*l2**2*l3**7 - 232962680463982.0*l1**3*l2**2*l3**6 + 12699207219987.3*l1**3*l2**2*l3**5 + 2694514601341.34*l1**3*l2**2*l3**4 + 47258929154.8646*l1**3*l2**2*l3**3 + 10656711748.5358*l1**3*l2**2*l3**2 - 1291195514.09435*l1**3*l2**2*l3 - 219929029.244081*l1**3*l2**2 - 3.0095437439064e+20*l1**3*l2*l3**13 + 1.39589380108464e+18*l1**3*l2*l3**12 - 9.42236567495097e+18*l1**3*l2*l3**11 - 3.54431108444496e+16*l1**3*l2*l3**10 + 9.16745248292469e+16*l1**3*l2*l3**9 + 349367493778850.0*l1**3*l2*l3**8 - 714834436881648.0*l1**3*l2*l3**7 - 2936494020986.03*l1**3*l2*l3**6 + 3819699505997.66*l1**3*l2*l3**5 - 564438893.272064*l1**3*l2*l3**4 - 12991634676.9174*l1**3*l2*l3**3 + 298302748.055086*l1**3*l2*l3**2 + 45988545.3205603*l1**3*l2*l3 + 1619250.98719465*l1**3*l2 
+ 5.52897601520092e+19*l1**3*l3**14 - 2.40671345014593e+17*l1**3*l3**13 + 2.1517133111952e+18*l1**3*l3**12 + 1.55238955149832e+16*l1**3*l3**11 - 2.21049066180828e+16*l1**3*l3**10 - 231966787620576.0*l1**3*l3**9 + 61048658059226.4*l1**3*l3**8 + 3101389454472.17*l1**3*l3**7 + 101579914315.734*l1**3*l3**6 - 36957919352.6507*l1**3*l3**5 - 537126014.634714*l1**3*l3**4 + 200872533.74607*l1**3*l3**3 + 4862019.4283726*l1**3*l3**2 - 616700.477811405*l1**3*l3 - 355496.804929562*l1**3 + 7.50259858832303e+19*l1**2*l2**15 - 2.74431451818013e+20*l1**2*l2**14*l3 + 3.32642180430883e+18*l1**2*l2**14 + 7.50067674388298e+20*l1**2*l2**13*l3**2 - 2.96025754367948e+18*l1**2*l2**13*l3 + 3.66207957563922e+18*l1**2*l2**13 - 1.28991818549654e+21*l1**2*l2**12*l3**3 + 1.87723649111382e+18*l1**2*l2**12*l3**2 - 1.00515651640768e+19*l1**2*l2**12*l3 - 5.31880456273897e+16*l1**2*l2**12 + 1.85531688553711e+21*l1**2*l2**11*l3**4 + 1.90611705251557e+19*l1**2*l2**11*l3**3 + 2.15776162085184e+19*l1**2*l2**11*l3**2 - 280552328584020.0*l1**2*l2**11*l3 - 5.0515762063255e+16*l1**2*l2**11 - 1.97644245374086e+21*l1**2*l2**10*l3**5 - 3.29599406997484e+19*l1**2*l2**10*l3**4 - 2.72297644004769e+19*l1**2*l2**10*l3**3 + 2.93177183370314e+16*l1**2*l2**10*l3**2 + 1.07661400434595e+17*l1**2*l2**10*l3 + 430425949147486.0*l1**2*l2**10 + 1.92265549366016e+21*l1**2*l2**9*l3**6 + 5.23460175406738e+19*l1**2*l2**9*l3**5 + 2.99872233619165e+19*l1**2*l2**9*l3**4 - 3.24973113943172e+17*l1**2*l2**9*l3**3 - 2.05452703716175e+17*l1**2*l2**9*l3**2 + 384209755666871.0*l1**2*l2**9*l3 + 517132839656664.0*l1**2*l2**9 - 1.54493548215005e+21*l1**2*l2**8*l3**7 - 3.47649757873579e+19*l1**2*l2**8*l3**6 - 2.2517598507033e+19*l1**2*l2**8*l3**5 + 3.26142081978939e+17*l1**2*l2**8*l3**4 + 2.02730237815341e+17*l1**2*l2**8*l3**3 - 491630220708066.0*l1**2*l2**8*l3**2 - 485637681006743.0*l1**2*l2**8*l3 + 3112118951856.53*l1**2*l2**8 + 1.3939049565632e+21*l1**2*l2**7*l3**8 + 1.8442301352261e+19*l1**2*l2**7*l3**7 + 1.80944952197818e+19*l1**2*l2**7*l3**6 - 4.2082849287605e+17*l1**2*l2**7*l3**5 - 2.01837126201181e+17*l1**2*l2**7*l3**4 + 2.43919455088601e+15*l1**2*l2**7*l3**3 + 1.28690297066609e+15*l1**2*l2**7*l3**2 + 9419369282701.31*l1**2*l2**7*l3 + 587176888849.116*l1**2*l2**7 - 1.2016164861167e+21*l1**2*l2**6*l3**9 + 1.61370136832284e+19*l1**2*l2**6*l3**8 - 1.22855103212626e+19*l1**2*l2**6*l3**7 + 5.49882564024706e+16*l1**2*l2**6*l3**6 + 1.20506401829577e+17*l1**2*l2**6*l3**5 - 1.16201896130603e+15*l1**2*l2**6*l3**4 - 481351737047025.0*l1**2*l2**6*l3**3 - 17124277825457.7*l1**2*l2**6*l3**2 - 3504550738115.49*l1**2*l2**6*l3 - 43461794781.944*l1**2*l2**6 + 1.1535932961961e+21*l1**2*l2**5*l3**10 - 2.31766505249052e+19*l1**2*l2**5*l3**9 + 1.35708714148364e+19*l1**2*l2**5*l3**8 + 4.71327912021176e+16*l1**2*l2**5*l3**7 - 1.15842322868808e+17*l1**2*l2**5*l3**6 + 1.26686464224838e+15*l1**2*l2**5*l3**5 + 1.05428184203597e+15*l1**2*l2**5*l3**4 + 23839813767297.3*l1**2*l2**5*l3**3 + 511607615439.05*l1**2*l2**5*l3**2 - 216898472896.431*l1**2*l2**5*l3 - 34994514094.8028*l1**2*l2**5 - 8.98382933518573e+20*l1**2*l2**4*l3**11 + 2.61730087703369e+19*l1**2*l2**4*l3**10 - 1.25097769483517e+19*l1**2*l2**4*l3**9 - 2.63017808047531e+17*l1**2*l2**4*l3**8 + 8.60760013068406e+16*l1**2*l2**4*l3**7 + 1.05572053520698e+15*l1**2*l2**4*l3**6 - 142917883726187.0*l1**2*l2**4*l3**5 - 22709828279606.4*l1**2*l2**4*l3**4 - 2873296779821.76*l1**2*l2**4*l3**3 + 201369989253.119*l1**2*l2**4*l3**2 + 28825247647.9813*l1**2*l2**4*l3 + 58841879.9765607*l1**2*l2**4 + 6.18438961845702e+20*l1**2*l2**3*l3**12 - 
1.19854329817267e+19*l1**2*l2**3*l3**11 + 1.19948893447666e+19*l1**2*l2**3*l3**10 + 1.44952036435084e+17*l1**2*l2**3*l3**9 - 1.0091856310059e+17*l1**2*l2**3*l3**8 - 664010835032015.0*l1**2*l2**3*l3**7 + 691786237739750.0*l1**2*l2**3*l3**6 + 13424295359015.3*l1**2*l2**3*l3**5 - 2373422350597.8*l1**2*l2**3*l3**4 - 200606713931.535*l1**2*l2**3*l3**3 - 18126497568.434*l1**2*l2**3*l3**2 + 1578497388.93643*l1**2*l2**3*l3 + 244983905.428018*l1**2*l2**3 - 2.97673427422279e+20*l1**2*l2**2*l3**13 + 4.76529263128893e+18*l1**2*l2**2*l3**12 - 7.42629938194824e+18*l1**2*l2**2*l3**11 - 9.74919341829516e+16*l1**2*l2**2*l3**10 + 6.7576745938447e+16*l1**2*l2**2*l3**9 + 914697956582255.0*l1**2*l2**2*l3**8 - 206767958318049.0*l1**2*l2**2*l3**7 - 10635755459855.9*l1**2*l2**2*l3**6 - 29583822917.1765*l1**2*l2**2*l3**5 + 122303645646.71*l1**2*l2**2*l3**4 + 5545059398.05623*l1**2*l2**2*l3**3 - 866126889.828896*l1**2*l2**2*l3**2 - 72278997.9738157*l1**2*l2**2*l3 + 432844.586699181*l1**2*l2**2 + 1.07152524912614e+20*l1**2*l2*l3**14 + 2.8880561401751e+17*l1**2*l2*l3**13 + 3.59626936808639e+18*l1**2*l2*l3**12 + 5.33049424309662e+15*l1**2*l2*l3**11 - 4.1090540743235e+16*l1**2*l2*l3**10 - 109251160157348.0*l1**2*l2*l3**9 + 321251385940056.0*l1**2*l2*l3**8 + 1795779035910.69*l1**2*l2*l3**7 - 1931716786178.54*l1**2*l2*l3**6 - 28555476736.8971*l1**2*l2*l3**5 + 7486275953.67216*l1**2*l2*l3**4 + 309047787.007327*l1**2*l2*l3**3 + 14690298.4466565*l1**2*l2*l3**2 + 3418235.53610439*l1**2*l2*l3 + 684801.755829903*l1**2*l2 - 1.82954301212009e+19*l1**2*l3**15 - 2.11446967405678e+17*l1**2*l3**14 - 7.73197320313596e+17*l1**2*l3**13 - 23379360715335.0*l1**2*l3**12 + 9.7874000395086e+15*l1**2*l3**11 + 38420975566687.1*l1**2*l3**10 - 56647763784062.1*l1**2*l3**9 - 814877090823.624*l1**2*l3**8 + 249950997353.367*l1**2*l3**7 + 18401990736.3337*l1**2*l3**6 - 1494985603.13048*l1**2*l3**5 - 240004515.882656*l1**2*l3**4 - 5960746.09269768*l1**2*l3**3 + 1709117.7680522*l1**2*l3**2 + 376575.682278398*l1**2*l3 - 766.946502057613*l1**2 - 1.36055713534837e+19*l1*l2**16 + 6.26862165061207e+19*l1*l2**15*l3 - 7.42643007473599e+17*l1*l2**15 - 1.90557928870437e+20*l1*l2**14*l3**2 + 1.54717293223667e+18*l1*l2**14*l3 - 6.90341786667576e+17*l1*l2**14 + 3.84411791569547e+20*l1*l2**13*l3**3 - 2.16604210513133e+18*l1*l2**13*l3**2 + 2.43320813050035e+18*l1*l2**13*l3 + 1.38873402649097e+16*l1*l2**13 - 6.07507957337945e+20*l1*l2**12*l3**4 - 1.87723649111382e+18*l1*l2**12*l3**3 - 5.82102159951139e+18*l1*l2**12*l3**2 - 1.70201746007647e+16*l1*l2**12*l3 + 1.09723844994687e+16*l1*l2**12 + 7.27444567956927e+20*l1*l2**11*l3**5 + 8.66416842052532e+18*l1*l2**11*l3**4 + 8.92893450741564e+18*l1*l2**11*l3**3 + 1.34665117720336e+16*l1*l2**11*l3**2 - 3.15725160167863e+16*l1*l2**11*l3 - 140313922450735.0*l1*l2**11 - 6.9601330170203e+20*l1*l2**10*l3**6 - 1.90611705251557e+19*l1*l2**10*l3**5 - 1.08265315892923e+19*l1*l2**10*l3**4 + 6.78936635173361e+16*l1*l2**10*l3**3 + 6.62100344359388e+16*l1*l2**10*l3**2 + 66779033462664.6*l1*l2**10*l3 - 125131058351525.0*l1*l2**10 + 4.68354077758556e+20*l1*l2**9*l3**7 + 2.45484771914884e+19*l1*l2**9*l3**6 + 9.46209892861881e+18*l1*l2**9*l3**5 - 1.47056178899464e+17*l1*l2**9*l3**4 - 8.36109116275174e+16*l1*l2**9*l3**3 + 54330306672843.5*l1*l2**9*l3**2 + 207206394319647.0*l1*l2**9*l3 - 1475776482341.69*l1*l2**9 - 1.63872060924414e+20*l1*l2**8*l3**8 - 2.784911278026e+19*l1*l2**8*l3**7 - 6.4011188487615e+18*l1*l2**8*l3**6 + 2.57547037640143e+17*l1*l2**8*l3**5 + 8.52765692101619e+16*l1*l2**8*l3**4 - 889717826448980.0*l1*l2**8*l3**3 - 
451455601241927.0*l1*l2**8*l3**2 - 1399352343077.58*l1*l2**8*l3 - 294718295069.546*l1*l2**8 - 1.45664054155035e+20*l1*l2**7*l3**9 + 2.48785407503656e+19*l1*l2**7*l3**8 + 1.88760047920674e+18*l1*l2**7*l3**7 - 2.71013549412176e+17*l1*l2**7*l3**6 - 5.38330284568489e+16*l1*l2**7*l3**5 + 1.40597384920333e+15*l1*l2**7*l3**4 + 355041891527619.0*l1*l2**7*l3**3 + 4093020896943.59*l1*l2**7*l3**2 + 1400958119901.16*l1*l2**7*l3 + 32634830556.4538*l1*l2**7 + 3.27847854430989e+20*l1*l2**6*l3**10 - 2.16604210513133e+19*l1*l2**6*l3**9 + 1.65165041930589e+18*l1*l2**6*l3**8 + 2.78307909955361e+17*l1*l2**6*l3**7 + 2.0307676329147e+16*l1*l2**6*l3**6 - 2.09585063458611e+15*l1*l2**6*l3**5 - 389607513149753.0*l1*l2**6*l3**4 - 5376042899959.02*l1*l2**6*l3**3 - 1206337571037.25*l1*l2**6*l3**2 + 57739533150.3925*l1*l2**6*l3 + 14744290692.217*l1*l2**6 - 3.79643619110198e+20*l1*l2**5*l3**11 + 1.47290863148931e+19*l1*l2**5*l3**10 - 4.267412565841e+18*l1*l2**5*l3**9 - 2.03260162059132e+17*l1*l2**5*l3**8 + 1.74065797106974e+16*l1*l2**5*l3**7 + 1.80112053182216e+15*l1*l2**5*l3**6 + 47478786349505.1*l1*l2**5*l3**5 + 1246880538141.75*l1*l2**5*l3**4 + 1910738313865.94*l1*l2**5*l3**3 - 72389288062.134*l1*l2**5*l3**2 - 20175881845.6283*l1*l2**5*l3 - 254874882.289776*l1*l2**5 + 3.03101903315386e+20*l1*l2**4*l3**12 - 8.66416842052532e+18*l1*l2**4*l3**11 + 4.73104946430941e+18*l1*l2**4*l3**10 + 1.43081687577857e+17*l1*l2**4*l3**9 - 3.36456427855306e+16*l1*l2**4*l3**8 - 1.4970361675615e+15*l1*l2**4*l3**7 + 12685440791459.1*l1*l2**4*l3**6 + 4150621356586.04*l1*l2**4*l3**5 - 831189829767.978*l1*l2**4*l3**4 + 72350803592.1382*l1*l2**4*l3**3 + 19176569656.1911*l1*l2**4*l3**2 - 731366192.969534*l1*l2**4*l3 - 144716093.525965*l1*l2**4 - 1.86925525334752e+20*l1*l2**3*l3**13 + 2.88805614017511e+18*l1*l2**3*l3**12 - 3.93692057792449e+18*l1*l2**3*l3**11 - 5.88224715597857e+16*l1*l2**3*l3**10 + 3.79006974267386e+16*l1*l2**3*l3**9 + 702986924601663.0*l1*l2**3*l3**8 - 208718400680637.0*l1*l2**3*l3**7 - 4007184917868.63*l1*l2**3*l3**6 + 1596395593139.89*l1*l2**3*l3**5 - 13443908185.2058*l1*l2**3*l3**4 - 12526470652.8688*l1*l2**3*l3**3 + 431080729.393499*l1*l2**3*l3**2 + 81685235.022436*l1*l2**3*l3 - 1081699.23380824*l1*l2**3 + 8.23739553363315e+19*l1*l2**2*l3**14 - 4.33208421026267e+17*l1*l2**2*l3**13 + 2.23223362685391e+18*l1*l2**2*l3**12 + 1.85164536865462e+16*l1*l2**2*l3**11 - 2.50832734882552e+16*l1*l2**2*l3**10 - 296572608816327.0*l1*l2**2*l3**9 + 132191995869923.0*l1*l2**2*l3**8 + 2676727242206.5*l1*l2**2*l3**7 - 809374699195.268*l1*l2**2*l3**6 - 18934359237.9426*l1*l2**2*l3**5 + 6438335818.08812*l1*l2**2*l3**4 - 110392483.521243*l1*l2**2*l3**3 - 64479135.6387486*l1*l2**2*l3**2 - 4269084.32367307*l1*l2**2*l3 - 494270.465739107*l1*l2**2 - 2.54077238493916e+19*l1*l2*l3**15 - 3.09434586447333e+17*l1*l2*l3**14 - 8.95541784540215e+17*l1*l2*l3**13 + 2.24441862867227e+15*l1*l2*l3**12 + 1.20381880792616e+16*l1*l2*l3**11 + 10866061334568.8*l1*l2*l3**10 - 100745117366177.0*l1*l2*l3**9 + 27106098655.254*l1*l2*l3**8 + 784928285878.041*l1*l2*l3**7 + 1847254559.79927*l1*l2*l3**6 - 5551083529.58115*l1*l2*l3**5 + 14070884.342221*l1*l2*l3**4 + 29542922.5017978*l1*l2*l3**3 - 1642336.03181861*l1*l2*l3**2 - 369793.162192174*l1*l2*l3 + 1690.4126984127*l1*l2 + 3.91788853163254e+18*l1*l3**16 + 1.03144862149111e+17*l1*l3**15 + 1.73800580750025e+17*l1*l3**14 - 1.30924420005883e+15*l1*l3**13 - 2.63104300139886e+15*l1*l3**12 + 6070821223878.6*l1*l3**11 + 21353115067254.0*l1*l3**10 - 76424139264.1235*l1*l3**9 - 162389036875.681*l1*l3**8 - 2142302163.10055*l1*l3**7 
+ 1507223777.96932*l1*l3**6 + 75343189.7960744*l1*l3**5 - 6777071.09225231*l1*l3**4 - 1423028.10789102*l1*l3**3 - 191157.368867986*l1*l3**2 + 156.519694297472*l1*l3 + 25.5322751322752*l1 + 1.24258703589036e+18*l2**17 - 6.982628000489e+18*l2**16*l3 + 8.03670384245156e+16*l2**16 + 2.36698357731812e+19*l2**15*l3**2 - 2.5442399330114e+17*l2**15*l3 + 6.43136451193059e+16*l2**15 - 5.38557233221299e+19*l2**14*l3**3 + 4.38365664133722e+17*l2**14*l3**2 - 2.8605257047968e+17*l2**14*l3 - 1.75345205365021e+15*l2**14 + 9.24321956259035e+19*l2**13*l3**4 + 2.40671345014596e+16*l2**13*l3**3 + 7.99947406941558e+17*l2**13*l3**2 + 4.25504365019118e+15*l2**13*l3 - 1.17019834381695e+15*l2**13 - 1.20803644567047e+20*l2**12*l3**5 - 1.48614555546511e+18*l2**12*l3**4 - 1.53960630532292e+18*l2**12*l3**3 - 6.07863378598739e+15*l2**12*l3**2 + 4.39271252671007e+15*l2**12*l3 + 22877783483760.4*l2**12 + 1.2461838823653e+20*l2**11*l3**6 + 3.82667438573202e+18*l2**11*l3**5 + 2.38190941697179e+18*l2**11*l3**4 - 2.80552328584034e+15*l2**11*l3**3 - 1.07367875714804e+16*l2**11*l3**2 - 50031125883948.9*l2**11*l3 + 14854306608602.0*l2**11 - 1.01441061833611e+20*l2**10*l3**7 - 5.29476959032103e+18*l2**10*l3**6 - 3.04986289727359e+18*l2**10*l3**5 + 1.87736266544149e+16*l2**10*l3**4 + 1.74907803704335e+16*l2**10*l3**3 + 86656839143185.4*l2**10*l3**2 - 36803086980502.5*l2**10*l3 + 219484104389.083*l2**10 + 7.21536512533828e+19*l2**9*l3**8 + 4.67590041742636e+18*l2**9*l3**7 + 3.46582388612087e+18*l2**9*l3**6 - 3.6238009108771e+16*l2**9*l3**5 - 2.29031594430298e+16*l2**9*l3**4 - 47361854295239.4*l2**9*l3**3 + 81930681232172.9*l2**9*l3**2 + 455533046845.269*l2**9*l3 + 56144527963.9852*l2**9 - 5.52034387752107e+19*l2**8*l3**9 - 1.69802229312974e+18*l2**8*l3**8 - 3.51015179262684e+18*l2**8*l3**7 + 3.30350366907699e+16*l2**8*l3**6 + 2.41817116542052e+16*l2**8*l3**5 + 8503874087922.72*l2**8*l3**4 - 100656533031032.0*l2**8*l3**3 - 2407360386819.89*l2**8*l3**2 - 253877485351.616*l2**8*l3 - 6494254311.79432*l2**8 + 5.77229210027062e+19*l2**7*l3**10 - 1.50935314944866e+18*l2**7*l3**9 + 3.26251205236833e+18*l2**7*l3**8 - 1.40276164292017e+16*l2**7*l3**7 - 2.32159379686091e+16*l2**7*l3**6 + 48755544770760.6*l2**7*l3**5 + 121279251052781.0*l2**7*l3**4 + 5235994723573.54*l2**7*l3**3 + 264830028372.452*l2**7*l3**2 - 15060255925.0302*l2**7*l3 - 2719472839.89115*l2**7 - 6.45534029850251e+19*l2**6*l3**11 + 3.27313029219846e+18*l2**6*l3**10 - 2.73011806093199e+18*l2**6*l3**9 - 1.22741643755515e+16*l2**6*l3**8 + 2.03056288198976e+16*l2**6*l3**7 - 9673156775013.14*l2**6*l3**6 - 100302457314467.0*l2**6*l3**5 - 6324756352892.97*l2**6*l3**4 - 116847853763.691*l2**6*l3**3 + 44539359941.8269*l2**6*l3**2 + 4724308022.64595*l2**6*l3 + 64843325.3437352*l2**6 + 6.23091941182651e+19*l2**5*l3**12 - 2.88805614017511e+18*l2**5*l3**11 + 2.07949433167252e+18*l2**5*l3**10 + 2.20233577938466e+16*l2**5*l3**9 - 1.74119534764568e+16*l2**5*l3**8 - 8291277235725.36*l2**5*l3**7 + 97419013451136.0*l2**5*l3**6 + 4526718475427.7*l2**5*l3**5 - 415172294908.695*l2**5*l3**4 - 67655698252.6483*l2**5*l3**3 - 3168030515.48893*l2**5*l3**2 + 275085788.890421*l2**5*l3 + 33518792.9345054*l2**5 - 4.64629402180949e+19*l2**4*l3**13 + 1.59444766072167e+18*l2**4*l3**12 - 1.38630131694254e+18*l2**4*l3**11 - 1.81190045543855e+16*l2**4*l3**10 + 1.34342842523362e+16*l2**4*l3**9 + 30472215481725.4*l2**4*l3**8 - 72830504183644.0*l2**4*l3**7 - 1504953185755.33*l2**4*l3**6 + 645914508627.503*l2**4*l3**5 + 48378185823.9098*l2**4*l3**4 - 2003650359.95168*l2**4*l3**3 - 401180100.166279*l2**4*l3**2 - 
17161574.3825346*l2**4*l3 + 857444.514604093*l2**4 + 2.64091987502581e+19*l2**3*l3**14 - 4.57275555527726e+17*l2**3*l3**13 + 7.93969805657264e+17*l2**3*l3**12 + 6.82677332887815e+15*l2**3*l3**11 - 9.16126377721193e+15*l2**3*l3**10 + 3779499594632.32*l2**3*l3**9 + 61904576796969.3*l2**3*l3**8 - 720570455918.876*l2**3*l3**7 - 711399477758.57*l2**3*l3**6 - 13251485835.2267*l2**3*l3**5 + 5659372932.38041*l2**3*l3**4 + 314462945.526909*l2**3*l3**3 - 5804931.65064541*l2**3*l3**2 + 214361.128651023*l2**3*l3 + 181352.368759118*l2**3 - 1.0771144664426e+19*l2**2*l3**15 + 5.15724310745562e+15*l2**2*l3**14 - 3.55293762766828e+17*l2**2*l3**13 - 701380821460084.0*l2**2*l3**12 + 4.77021282830005e+15*l2**2*l3**11 - 14208556288571.8*l2**2*l3**10 - 34579950584355.7*l2**2*l3**9 + 1002925650244.46*l2**2*l3**8 + 415507466396.398*l2**2*l3**7 - 5253130154.42918*l2**2*l3**6 - 4798629535.60862*l2**2*l3**5 - 86503959.4220633*l2**2*l3**4 + 30172614.5631638*l2**2*l3**3 + 1293586.96482098*l2**2*l3**2 - 64812.5311908028*l2**2*l3 - 2253.8835978836*l2**2 + 2.95872947164765e+18*l2*l3**16 + 5.84487552178296e+16*l2*l3**15 + 1.14278200991651e+17*l2*l3**14 - 935174428613446.0*l2*l3**13 - 1.78946459524674e+15*l2*l3**12 + 15755788935124.6*l2*l3**11 + 16596961458197.7*l2*l3**10 - 590310592936.678*l2*l3**9 - 195717795698.425*l2*l3**8 + 7530127962.51509*l2*l3**7 + 2480734486.96218*l2*l3**6 - 39590351.8537946*l2*l3**5 - 21034719.0572302*l2*l3**4 + 862391.309880655*l2*l3**3 + 253882.970801489*l2*l3**2 + 1252.15755437978*l2*l3 - 107.434215167548*l2 - 4.10742823558176e+17*l3**17 - 1.59014995813213e+16*l3**16 - 1.9070171365312e+16*l3**15 + 303931689299370.0*l3**14 + 337900963593082.0*l3**13 - 4169260490329.08*l3**12 - 3372088331516.06*l3**11 + 128565731816.247*l3**10 + 38395179922.583*l3**9 - 1882531990.62877*l3**8 - 561221123.482208*l3**7 + 8911560.08340665*l3**6 + 5633811.73944685*l3**5 + 53590.2821627558*l3**4 - 21082.444749276*l3**3 - 78.2598471487361*l3**2 + 89.0920634920634*l3 + 1.07851851851852) + l2_dot*(1.24258703589036e+18*l1**17 - 1.36055713534837e+19*l1**16*l2 - 6.982628000489e+18*l1**16*l3 + 8.03670384245156e+16*l1**16 + 7.50259858832303e+19*l1**15*l2**2 + 6.26862165061207e+19*l1**15*l2*l3 - 7.42643007473599e+17*l1**15*l2 + 2.36698357731812e+19*l1**15*l3**2 - 2.54423993301141e+17*l1**15*l3 + 5.58690365223026e+16*l1**15 - 2.76260730356127e+20*l1**14*l2**3 - 2.74431451818013e+20*l1**14*l2**2*l3 + 3.32642180430883e+18*l1**14*l2**2 - 1.90557928870437e+20*l1**14*l2*l3**2 + 1.54717293223666e+18*l1**14*l2*l3 - 5.9391738625697e+17*l1**14*l2 - 5.38557233221299e+19*l1**14*l3**3 + 4.38365664133722e+17*l1**14*l3**2 - 2.55807841935237e+17*l1**14*l3 - 1.75345205365021e+15*l1**14 + 7.57433863161779e+20*l1**13*l2**4 + 7.74056642128129e+20*l1**13*l2**3*l3 - 9.57871953158077e+18*l1**13*l2**3 + 7.50067674388299e+20*l1**13*l2**2*l3**2 - 2.96025754367948e+18*l1**13*l2**2*l3 + 3.14624162371943e+18*l1**13*l2**2 + 3.84411791569547e+20*l1**13*l2*l3**3 - 2.16604210513133e+18*l1**13*l2*l3**2 + 2.11494242859146e+18*l1**13*l2*l3 + 1.38873402649097e+16*l1**13*l2 + 9.24321956259035e+19*l1**13*l3**4 + 2.40671345014598e+16*l1**13*l3**3 + 7.47367158084901e+17*l1**13*l3**2 + 4.25504365019118e+15*l1**13*l3 - 927278426024092.0*l1**13 - 1.63480022290245e+21*l1**12*l2**5 - 1.54279691595549e+21*l1**12*l2**4*l3 + 1.94763285953059e+19*l1**12*l2**4 - 1.95620343353916e+21*l1**12*l2**3*l3**2 - 3.1287274851897e+18*l1**12*l2**3*l3 - 1.10156772486624e+19*l1**12*l2**3 - 1.28991818549654e+21*l1**12*l2**2*l3**3 + 1.87723649111382e+18*l1**12*l2**2*l3**2 - 
8.20869633321447e+18*l1**12*l2**2*l3 - 5.31880456273897e+16*l1**12*l2**2 - 6.07507957337945e+20*l1**12*l2*l3**4 - 1.87723649111382e+18*l1**12*l2*l3**3 - 5.59516336796591e+18*l1**12*l2*l3**2 - 1.70201746007647e+16*l1**12*l2*l3 + 8.96310788345832e+15*l1**12*l2 - 1.20803644567047e+20*l1**12*l3**5 - 1.48614555546511e+18*l1**12*l3**4 - 1.38704463745924e+18*l1**12*l3**3 - 6.07863378598739e+15*l1**12*l3**2 + 3.24403021141329e+15*l1**12*l3 + 22877783483760.5*l1**12 + 2.86939485434628e+21*l1**11*l2**6 + 2.21062184350019e+21*l1**11*l2**5*l3 - 2.85917557877336e+19*l1**11*l2**5 + 3.83636295013389e+21*l1**11*l2**4*l3**2 + 3.18408189454306e+19*l1**11*l2**4*l3 + 2.83300233909575e+19*l1**11*l2**4 + 2.72827505132593e+21*l1**11*l2**3*l3**3 + 8.3753628065078e+18*l1**11*l2**3*l3**2 + 1.97227828478718e+19*l1**11*l2**3*l3 + 1.28212414162903e+17*l1**11*l2**3 + 1.85531688553711e+21*l1**11*l2**2*l3**4 + 1.90611705251557e+19*l1**11*l2**2*l3**3 + 1.96670685530505e+19*l1**11*l2**2*l3**2 - 280552328584057.0*l1**11*l2**2*l3 - 4.24321505068786e+16*l1**11*l2**2 + 7.27444567956927e+20*l1**11*l2*l3**5 + 8.66416842052532e+18*l1**11*l2*l3**4 + 9.29920001821226e+18*l1**11*l2*l3**3 + 1.34665117720336e+16*l1**11*l2*l3**2 - 2.36284197374141e+16*l1**11*l2*l3 - 140313922450735.0*l1**11*l2 + 1.2461838823653e+20*l1**11*l3**6 + 3.82667438573202e+18*l1**11*l3**5 + 1.83165803568159e+18*l1**11*l3**4 - 2.80552328584034e+15*l1**11*l3**3 - 7.81674181938581e+15*l1**11*l3**2 - 50031125883948.9*l1**11*l3 + 24096079032513.5*l1**11 - 4.17581151602659e+21*l1**10*l2**7 - 2.14028615717469e+21*l1**10*l2**6*l3 + 2.88564942672496e+19*l1**10*l2**6 - 6.04995307355592e+21*l1**10*l2**5*l3**2 - 9.37174217486822e+19*l1**10*l2**5*l3 - 5.64146678150083e+19*l1**10*l2**5 - 4.04429887818641e+21*l1**10*l2**4*l3**3 - 2.74004326299113e+19*l1**10*l2**4*l3**2 - 3.09559391706971e+19*l1**10*l2**4*l3 - 2.09853141780857e+17*l1**10*l2**4 - 3.58200213673406e+21*l1**10*l2**3*l3**4 - 7.41267742644944e+19*l1**10*l2**3*l3**3 - 4.59942178196846e+19*l1**10*l2**3*l3**2 + 1.70762850664815e+17*l1**10*l2**3*l3 + 1.3122520691396e+17*l1**10*l2**3 - 1.97644245374086e+21*l1**10*l2**2*l3**5 - 3.29599406997484e+19*l1**10*l2**2*l3**4 - 2.60532163896405e+19*l1**10*l2**2*l3**3 + 2.93177183370316e+16*l1**10*l2**2*l3**2 + 7.5947541161435e+16*l1**10*l2**2*l3 + 430425949147486.0*l1**10*l2**2 - 6.9601330170203e+20*l1**10*l2*l3**6 - 1.90611705251557e+19*l1**10*l2*l3**5 - 1.24330357494012e+19*l1**10*l2*l3**4 + 6.78936635173361e+16*l1**10*l2*l3**3 + 5.42313641725516e+16*l1**10*l2*l3**2 + 66779033462664.8*l1**10*l2*l3 - 189277056623699.0*l1**10*l2 - 1.01441061833611e+20*l1**10*l3**7 - 5.29476959032103e+18*l1**10*l3**6 - 1.51800902641336e+18*l1**10*l3**5 + 1.8773626654415e+16*l1**10*l3**4 + 1.07768360338823e+16*l1**10*l3**3 + 86656839143185.7*l1**10*l3**2 - 76345778034575.2*l1**10*l3 - 84894795093.8914*l1**10 + 5.09702458035085e+21*l1**9*l2**8 + 8.86337611867598e+20*l1**9*l2**7*l3 - 1.43371358387264e+19*l1**9*l2**7 + 8.17766261018856e+21*l1**9*l2**6*l3**2 + 1.73524039755521e+20*l1**9*l2**6*l3 + 8.9577677095679e+19*l1**9*l2**6 + 3.89622223338111e+21*l1**9*l2**5*l3**3 + 3.82667438573202e+19*l1**9*l2**5*l3**2 + 2.81800119098858e+19*l1**9*l2**5*l3 + 2.24909450081534e+17*l1**9*l2**5 + 5.51663977214389e+21*l1**9*l2**4*l3**4 + 1.65702221042547e+20*l1**9*l2**4*l3**3 + 8.22987789446323e+19*l1**9*l2**4*l3**2 - 6.0926614024166e+17*l1**9*l2**4*l3 - 2.94314395647032e+17*l1**9*l2**4 + 2.85915500857293e+21*l1**9*l2**3*l3**5 + 8.13469146149322e+19*l1**9*l2**3*l3**4 + 4.31885417967228e+19*l1**9*l2**3*l3**3 - 
1.77215554222248e+17*l1**9*l2**3*l3**2 - 1.40287360775454e+17*l1**9*l2**3*l3 - 793931133597510.0*l1**9*l2**3 + 1.92265549366016e+21*l1**9*l2**2*l3**6 + 5.23460175406738e+19*l1**9*l2**2*l3**5 + 3.22000127699872e+19*l1**9*l2**2*l3**4 - 3.24973113943172e+17*l1**9*l2**2*l3**3 - 1.68025965458437e+17*l1**9*l2**2*l3**2 + 384209755666871.0*l1**9*l2**2*l3 + 715839105486221.0*l1**9*l2**2 + 4.68354077758556e+20*l1**9*l2*l3**7 + 2.45484771914884e+19*l1**9*l2*l3**6 + 1.17899914856083e+19*l1**9*l2*l3**5 - 1.47056178899464e+17*l1**9*l2*l3**4 - 6.8633169588052e+16*l1**9*l2*l3**3 + 54330306672844.1*l1**9*l2*l3**2 + 459686853852791.0*l1**9*l2*l3 - 210825211763.099*l1**9*l2 + 7.21536512533827e+19*l1**9*l3**8 + 4.67590041742636e+18*l1**9*l3**7 + 5.24752008522266e+17*l1**9*l3**6 - 3.6238009108771e+16*l1**9*l3**5 - 9.86273411151814e+15*l1**9*l3**4 - 47361854295239.3*l1**9*l3**3 + 154589798552132.0*l1**9*l3**2 + 811300591695.499*l1**9*l3 - 129343977961.554*l1**9 - 5.24624169583991e+21*l1**8*l2**9 + 1.26397086121091e+21*l1**8*l2**8*l3 - 1.18474767286022e+19*l1**8*l2**8 - 9.81241934181011e+21*l1**8*l2**7*l3**2 - 2.30404993068684e+20*l1**8*l2**7*l3 - 1.1522431231756e+20*l1**8*l2**7 - 1.66501990435211e+21*l1**8*l2**6*l3**3 - 1.24547421045051e+19*l1**8*l2**6*l3**2 - 3.73774070585144e+17*l1**8*l2**6*l3 - 1.11729964858591e+17*l1**8*l2**6 - 6.74333338960309e+21*l1**8*l2**5*l3**4 - 2.84617932614257e+20*l1**8*l2**5*l3**3 - 1.21542167966259e+20*l1**8*l2**5*l3**2 + 1.2439690249416e+18*l1**8*l2**5*l3 + 5.0335219810234e+17*l1**8*l2**5 - 3.29925238972273e+21*l1**8*l2**4*l3**5 - 8.44756421001218e+19*l1**8*l2**4*l3**4 - 4.37439108083204e+19*l1**8*l2**4*l3**3 + 2.61965736815341e+17*l1**8*l2**4*l3**2 + 1.38028368734875e+17*l1**8*l2**4*l3 + 893969763492943.0*l1**8*l2**4 - 2.24037252438007e+21*l1**8*l2**3*l3**6 - 1.36027444202248e+20*l1**8*l2**3*l3**5 - 5.18171323449478e+19*l1**8*l2**3*l3**4 + 9.67905533614916e+17*l1**8*l2**3*l3**3 + 3.48277382391178e+17*l1**8*l2**3*l3**2 - 2.08770108858518e+15*l1**8*l2**3*l3 - 1.73669252389285e+15*l1**8*l2**3 - 1.54493548215005e+21*l1**8*l2**2*l3**7 - 3.47649757873579e+19*l1**8*l2**2*l3**6 - 2.64348874697032e+19*l1**8*l2**2*l3**5 + 3.26142081978939e+17*l1**8*l2**2*l3**4 + 1.5470800238256e+17*l1**8*l2**2*l3**3 - 491630220708067.0*l1**8*l2**2*l3**2 - 1.22097687418451e+15*l1**8*l2**2*l3 + 1724625526940.63*l1**8*l2**2 - 1.63872060924414e+20*l1**8*l2*l3**8 - 2.784911278026e+19*l1**8*l2*l3**7 - 8.58719469668894e+18*l1**8*l2*l3**6 + 2.57547037640143e+17*l1**8*l2*l3**5 + 7.55877673377551e+16*l1**8*l2*l3**4 - 889717826448979.0*l1**8*l2*l3**3 - 855361794685343.0*l1**8*l2*l3**2 - 2324347959688.17*l1**8*l2*l3 + 720054792214.688*l1**8*l2 - 5.52034387752107e+19*l1**8*l3**9 - 1.69802229312974e+18*l1**8*l3**8 + 5.83522885418147e+17*l1**8*l3**7 + 3.303503669077e+16*l1**8*l3**6 + 2.64670643196554e+15*l1**8*l3**5 + 8503874087922.86*l1**8*l3**4 - 180836108999991.0*l1**8*l3**3 - 984290207418.969*l1**8*l3**2 + 435382003690.239*l1**8*l3 - 1298850862.35886*l1**8 + 4.55605960516938e+21*l1**7*l2**10 - 3.33935575861832e+21*l1**7*l2**9*l3 + 3.76960089534284e+19*l1**7*l2**9 + 1.07205648924521e+22*l1**7*l2**8*l3**2 + 2.24804227053987e+20*l1**7*l2**8*l3 + 1.20911617048858e+20*l1**7*l2**8 - 2.5058876758923e+21*l1**7*l2**7*l3**3 - 3.78747933811535e+19*l1**7*l2**7*l3**2 - 4.6227873904705e+19*l1**7*l2**7*l3 - 1.09415408147773e+17*l1**7*l2**7 + 8.32135368826249e+21*l1**7*l2**6*l3**4 + 3.63606268048046e+20*l1**7*l2**6*l3**3 + 1.57601183376267e+20*l1**7*l2**6*l3**2 - 1.70744147176243e+18*l1**7*l2**6*l3 - 
6.73823684975541e+17*l1**7*l2**6 + 8.70711899902024e+20*l1**7*l2**5*l3**5 + 5.02521768390468e+19*l1**7*l2**5*l3**4 + 8.5921218517615e+18*l1**7*l2**5*l3**3 - 1.43081687577857e+17*l1**7*l2**5*l3**2 + 1.36716297976135e+16*l1**7*l2**5*l3 - 367934285537481.0*l1**7*l2**5 + 3.85824552934983e+21*l1**7*l2**4*l3**6 + 1.66352033674086e+20*l1**7*l2**4*l3**5 + 7.39842288653862e+19*l1**7*l2**4*l3**4 - 1.65806426193164e+18*l1**7*l2**4*l3**3 - 5.70494241127968e+17*l1**7*l2**4*l3**2 + 4.73949249166928e+15*l1**7*l2**4*l3 + 3.00146855538342e+15*l1**7*l2**4 + 4.43730960125512e+20*l1**7*l2**3*l3**7 + 9.24177964856034e+19*l1**7*l2**3*l3**6 + 2.28521867898541e+19*l1**7*l2**3*l3**5 - 6.50881402314957e+17*l1**7*l2**3*l3**4 - 1.6497520937226e+17*l1**7*l2**3*l3**3 + 1.3974699751154e+15*l1**7*l2**3*l3**2 + 1.86013822053033e+15*l1**7*l2**3*l3 - 4129162361817.26*l1**7*l2**3 + 1.3939049565632e+21*l1**7*l2**2*l3**8 + 1.8442301352261e+19*l1**7*l2**2*l3**7 + 2.33528360170371e+19*l1**7*l2**2*l3**6 - 4.2082849287605e+17*l1**7*l2**2*l3**5 - 1.81619273484449e+17*l1**7*l2**2*l3**4 + 2.43919455088601e+15*l1**7*l2**2*l3**3 + 2.11655964247515e+15*l1**7*l2**2*l3**2 + 6857842959779.66*l1**7*l2**2*l3 - 1765100197294.1*l1**7*l2**2 - 1.45664054155035e+20*l1**7*l2*l3**9 + 2.48785407503656e+19*l1**7*l2*l3**8 + 2.88358979190803e+18*l1**7*l2*l3**7 - 2.71013549412176e+17*l1**7*l2*l3**6 - 4.64180865476907e+16*l1**7*l2*l3**5 + 1.40597384920332e+15*l1**7*l2*l3**4 + 863841210923742.0*l1**7*l2*l3**3 - 2737715964180.83*l1**7*l2*l3**2 - 2120428922074.57*l1**7*l2*l3 + 16471353158.2102*l1**7*l2 + 5.77229210027062e+19*l1**7*l3**10 - 1.50935314944866e+18*l1**7*l3**9 - 9.55661289764318e+17*l1**7*l3**8 - 1.40276164292017e+16*l1**7*l3**7 + 4.26157867618413e+15*l1**7*l3**6 + 48755544770760.4*l1**7*l3**5 + 147797578971131.0*l1**7*l3**4 + 1251398221250.97*l1**7*l3**3 - 723817512807.944*l1**7*l3**2 - 12751187725.2811*l1**7*l3 - 494443643.750355*l1**7 - 3.32306033969879e+21*l1**6*l2**11 + 4.32478796017064e+21*l1**6*l2**10*l3 - 5.046878104956e+19*l1**6*l2**10 - 1.04639066815244e+22*l1**6*l2**9*l3**2 - 1.58313610750599e+20*l1**6*l2**9*l3 - 1.0344114963312e+20*l1**6*l2**9 + 6.55299938205732e+21*l1**6*l2**8*l3**3 + 7.91688389425502e+19*l1**6*l2**8*l3**2 + 8.67455395884542e+19*l1**6*l2**8*l3 + 3.23476834857391e+17*l1**6*l2**8 - 9.37339601898269e+21*l1**6*l2**7*l3**4 - 3.91620412607744e+20*l1**6*l2**7*l3**3 - 1.796334042791e+20*l1**6*l2**7*l3**2 + 1.66535862247482e+18*l1**6*l2**7*l3 + 7.13608858485956e+17*l1**6*l2**7 + 1.60089641855604e+21*l1**6*l2**6*l3**5 + 9.60278666608224e+19*l1**6*l2**6*l3**4 + 5.08018989605831e+19*l1**6*l2**6*l3**3 - 2.72977415712264e+17*l1**6*l2**6*l3**2 - 2.82935945502972e+17*l1**6*l2**6*l3 - 665959639510495.0*l1**6*l2**6 - 2.92671166082053e+21*l1**6*l2**5*l3**6 - 2.78986223140915e+20*l1**6*l2**5*l3**5 - 8.72458668712779e+19*l1**6*l2**5*l3**4 + 2.39591688610765e+18*l1**6*l2**5*l3**3 + 7.77928404968449e+17*l1**6*l2**5*l3**2 - 6.90861817464632e+15*l1**6*l2**5*l3 - 3.8731958924673e+15*l1**6*l2**5 - 1.74752080721108e+21*l1**6*l2**4*l3**7 + 2.1227212630287e+19*l1**6*l2**4*l3**6 - 1.61880366782563e+19*l1**6*l2**4*l3**5 + 1.08012646504853e+17*l1**6*l2**4*l3**4 + 3.28338535516524e+16*l1**6*l2**4*l3**3 - 797415359786308.0*l1**6*l2**4*l3**2 - 1.61586537491088e+15*l1**6*l2**4*l3 + 4265257565482.2*l1**6*l2**4 + 3.88264590109823e+20*l1**6*l2**3*l3**8 - 1.16677468063074e+20*l1**6*l2**3*l3**7 - 1.45508876412664e+19*l1**6*l2**3*l3**6 + 1.18617524525329e+18*l1**6*l2**3*l3**5 + 2.45678743786905e+17*l1**6*l2**3*l3**4 - 6.11641143773887e+15*l1**6*l2**3*l3**3 - 
3.33035249261339e+15*l1**6*l2**3*l3**2 - 15479841173705.5*l1**6*l2**3*l3 + 2435032755221.88*l1**6*l2**3 - 1.2016164861167e+21*l1**6*l2**2*l3**9 + 1.61370136832284e+19*l1**6*l2**2*l3**8 - 1.70643715456348e+19*l1**6*l2**2*l3**7 + 5.49882564024706e+16*l1**6*l2**2*l3**6 + 1.05310377991011e+17*l1**6*l2**2*l3**5 - 1.16201896130602e+15*l1**6*l2**2*l3**4 - 1.5832511792651e+15*l1**6*l2**2*l3**3 + 11265972253590.6*l1**6*l2**2*l3**2 + 4733918687849.68*l1**6*l2**2*l3 - 47502664131.505*l1**6*l2**2 + 3.27847854430989e+20*l1**6*l2*l3**10 - 2.16604210513133e+19*l1**6*l2*l3**9 + 1.97487507678531e+18*l1**6*l2*l3**8 + 2.78307909955361e+17*l1**6*l2*l3**7 + 1.67222520479842e+16*l1**6*l2*l3**6 - 2.09585063458611e+15*l1**6*l2*l3**5 - 715221196461978.0*l1**6*l2*l3**4 + 2925199813213.02*l1**6*l2*l3**3 + 2880047649912.61*l1**6*l2*l3**2 + 57739533150.3926*l1**6*l2*l3 + 5309643084.18287*l1**6*l2 - 6.45534029850251e+19*l1**6*l3**11 + 3.27313029219846e+18*l1**6*l3**10 + 5.14769576562361e+17*l1**6*l3**9 - 1.22741643755515e+16*l1**6*l3**8 - 6.65968435615806e+15*l1**6*l3**7 - 9673156775012.94*l1**6*l3**6 - 63782770552188.9*l1**6*l3**5 + 67200536249.489*l1**6*l3**4 + 729091195684.681*l1**6*l3**3 + 89797096.6569033*l1**6*l3**2 - 1572947227.02568*l1**6*l3 + 41338552.6357069*l1**6 + 2.01520288240847e+21*l1**5*l2**12 - 3.93505054371013e+21*l1**5*l2**11*l3 + 4.63533010498105e+19*l1**5*l2**11 + 8.90379068531806e+21*l1**5*l2**10*l3**2 + 7.06129726272814e+19*l1**5*l2**10*l3 + 7.15672292704455e+19*l1**5*l2**10 - 8.90519768702737e+21*l1**5*l2**9*l3**3 - 6.91689445571938e+19*l1**5*l2**9*l3**2 - 9.77879001678618e+19*l1**5*l2**9*l3 - 4.14375789318618e+17*l1**5*l2**9 + 1.073246093322e+22*l1**5*l2**8*l3**4 + 3.15159126296609e+20*l1**5*l2**8*l3**3 + 1.76136502008851e+20*l1**5*l2**8*l3**2 - 1.1017289943495e+18*l1**5*l2**8*l3 - 5.97802301637393e+17*l1**5*l2**8 - 6.15259631667458e+21*l1**5*l2**7*l3**5 - 1.71550534726401e+20*l1**5*l2**7*l3**4 - 1.09297098724589e+20*l1**5*l2**7*l3**3 + 5.10044133365773e+17*l1**5*l2**7*l3**2 + 5.11613370894903e+17*l1**5*l2**7*l3 + 1.62721630672413e+15*l1**5*l2**7 + 5.85342332164106e+21*l1**5*l2**6*l3**6 + 2.48661633669077e+20*l1**5*l2**6*l3**5 + 1.11119703063979e+20*l1**5*l2**6*l3**4 - 2.36842275790641e+18*l1**5*l2**6*l3**3 - 9.20035737117981e+17*l1**5*l2**6*l3**2 + 6.54096735157844e+15*l1**5*l2**6*l3 + 3.80995172016605e+15*l1**5*l2**6 - 2.50860999498902e+21*l1**5*l2**5*l3**7 - 16384.0*l1**5*l2**5*l3**6 - 2.82872520320255e+19*l1**5*l2**5*l3**5 + 2.71013549412176e+17*l1**5*l2**5*l3**4 + 2.83626952112099e+17*l1**5*l2**5*l3**3 - 264470484134419.0*l1**5*l2**5*l3**2 + 381994936348629.0*l1**5*l2**5*l3 + 379485381173.59*l1**5*l2**5 + 2.89368414701237e+21*l1**5*l2**4*l3**8 + 1.81947536831032e+19*l1**5*l2**4*l3**7 + 3.87002589028966e+19*l1**5*l2**4*l3**6 - 9.54439021842882e+17*l1**5*l2**4*l3**5 - 3.91657591690137e+17*l1**5*l2**4*l3**4 + 7.27612726647941e+15*l1**5*l2**4*l3**3 + 3.97608427351149e+15*l1**5*l2**4*l3**2 + 25042646895124.2*l1**5*l2**4*l3 - 1781287432278.39*l1**5*l2**4 - 1.49358168292005e+21*l1**5*l2**3*l3**9 + 6.93133473642026e+19*l1**5*l2**3*l3**8 - 1.01584605365929e+19*l1**5*l2**3*l3**7 - 5.42027098824353e+17*l1**5*l2**3*l3**6 + 2.18932880701207e+16*l1**5*l2**3*l3**5 + 2.53628044672314e+15*l1**5*l2**3*l3**4 + 1.29983669538166e+15*l1**5*l2**3*l3**3 - 26740166323409.7*l1**5*l2**3*l3**2 - 7124575599923.6*l1**5*l2**3*l3 + 54070680344.1245*l1**5*l2**3 + 1.1535932961961e+21*l1**5*l2**2*l3**10 - 2.31766505249052e+19*l1**5*l2**2*l3**9 + 1.66469786864502e+19*l1**5*l2**2*l3**8 + 4.71327912021176e+16*l1**5*l2**2*l3**7 
- 1.14849394645796e+17*l1**5*l2**2*l3**6 + 1.26686464224838e+15*l1**5*l2**2*l3**5 + 1.40534140763805e+15*l1**5*l2**2*l3**4 + 3205296165983.94*l1**5*l2**2*l3**3 - 4087798140334.72*l1**5*l2**2*l3**2 - 171871643001.324*l1**5*l2**2*l3 - 20881883577.9793*l1**5*l2**2 - 3.79643619110198e+20*l1**5*l2*l3**11 + 1.4729086314893e+19*l1**5*l2*l3**10 - 5.16647506451925e+18*l1**5*l2*l3**9 - 2.03260162059132e+17*l1**5*l2*l3**8 + 2.01961067451191e+16*l1**5*l2*l3**7 + 1.80112053182216e+15*l1**5*l2*l3**6 + 280714537930533.0*l1**5*l2*l3**5 - 11560751076466.5*l1**5*l2*l3**4 - 2903279652216.37*l1**5*l2*l3**3 + 59227599323.5642*l1**5*l2*l3**2 + 8475368742.73561*l1**5*l2*l3 - 214580986.218871*l1**5*l2 + 6.23091941182651e+19*l1**5*l3**12 - 2.88805614017511e+18*l1**5*l3**11 + 2.2246799904374e+17*l1**5*l3**10 + 2.20233577938466e+16*l1**5*l3**9 + 2.46334052628223e+15*l1**5*l3**8 - 8291277235725.25*l1**5*l3**7 + 16729717979805.2*l1**5*l3**6 - 240566625565.393*l1**5*l3**5 - 382440979071.341*l1**5*l3**4 - 8774459159.04654*l1**5*l3**3 + 928014485.183408*l1**5*l3**2 + 134057152.642251*l1**5*l3 + 14809413.5869989*l1**5 - 9.97726716488485e+20*l1**4*l2**13 + 2.68364491596603e+21*l1**4*l2**12*l3 - 3.16181979512921e+19*l1**4*l2**12 - 6.29071698447244e+21*l1**4*l2**11*l3**2 - 7.94215438548155e+18*l1**4*l2**11*l3 - 3.93454329246359e+19*l1**4*l2**11 + 8.29008904063106e+21*l1**4*l2**10*l3**3 + 2.87000578929901e+19*l1**4*l2**10*l3**2 + 7.73272691870872e+19*l1**4*l2**10*l3 + 3.54548005248072e+17*l1**4*l2**10 - 9.92785754345536e+21*l1**4*l2**9*l3**4 - 2.00479230397155e+20*l1**4*l2**9*l3**3 - 1.40757599376671e+20*l1**4*l2**9*l3**2 + 4.22698841733278e+17*l1**4*l2**9*l3 + 3.91331214376588e+17*l1**4*l2**9 + 7.43082034322747e+21*l1**4*l2**8*l3**5 + 2.1985327367083e+20*l1**4*l2**8*l3**4 + 1.2769963512962e+20*l1**4*l2**8*l3**3 - 4.55546843538325e+17*l1**4*l2**8*l3**2 - 5.45614209533623e+17*l1**4*l2**8*l3 - 1.8967182496939e+15*l1**4*l2**8 - 5.12716359722882e+21*l1**4*l2**7*l3**6 - 3.11910063138912e+20*l1**4*l2**7*l3**5 - 1.15411268910494e+20*l1**4*l2**7*l3**4 + 2.01997676580504e+18*l1**4*l2**7*l3**3 + 8.89925872969905e+17*l1**4*l2**7*l3**2 - 4.04926137819949e+15*l1**4*l2**7*l3 - 2.85814028263332e+15*l1**4*l2**7 + 1.14349744182574e+21*l1**4*l2**6*l3**7 + 2.07218028057564e+20*l1**4*l2**6*l3**6 + 4.87016861826338e+19*l1**4*l2**6*l3**5 - 1.31579042105912e+18*l1**4*l2**6*l3**4 - 5.39134302314818e+17*l1**4*l2**6*l3**3 + 1.78492773824641e+15*l1**4*l2**6*l3**2 + 907491216042442.0*l1**4*l2**6*l3 - 7459824234081.78*l1**4*l2**6 + 5.44194937438765e+20*l1**4*l2**5*l3**8 - 1.99275873672082e+20*l1**4*l2**5*l3**7 - 2.30341854358518e+19*l1**4*l2**5*l3**6 + 1.88531164808471e+18*l1**4*l2**5*l3**5 + 4.39534603885947e+17*l1**4*l2**5*l3**4 - 8.77989566769386e+15*l1**4*l2**5*l3**3 - 3.75934399535332e+15*l1**4*l2**5*l3**2 - 24564901906325.4*l1**4*l2**5*l3 + 131861346350.027*l1**4*l2**5 - 1.83291799429041e+21*l1**4*l2**4*l3**9 + 1.03970021046304e+20*l1**4*l2**4*l3**8 - 1.19475437636375e+19*l1**4*l2**4*l3**7 - 7.95365851535735e+17*l1**4*l2**4*l3**6 - 4.46868783192156e+16*l1**4*l2**4*l3**5 + 2.39260040744593e+15*l1**4*l2**4*l3**4 - 281139040139944.0*l1**4*l2**4*l3**3 + 36042640555660.2*l1**4*l2**4*l3**2 + 8291708274053.58*l1**4*l2**4*l3 + 6686676661.77355*l1**4*l2**4 + 1.42957750428646e+21*l1**4*l2**3*l3**10 - 7.5570802334582e+19*l1**4*l2**3*l3**9 + 1.30288664107925e+19*l1**4*l2**3*l3**8 + 8.47268032323782e+17*l1**4*l2**3*l3**7 + 5.43744187213598e+15*l1**4*l2**3*l3**6 - 5.49846325401643e+15*l1**4*l2**3*l3**5 - 1.36807114155648e+15*l1**4*l2**3*l3**4 - 
14648587481611.0*l1**4*l2**3*l3**3 + 2238025986480.96*l1**4*l2**3*l3**2 + 303001060433.742*l1**4*l2**3*l3 + 43492805777.6199*l1**4*l2**3 - 8.98382933518573e+20*l1**4*l2**2*l3**11 + 2.61730087703369e+19*l1**4*l2**2*l3**10 - 1.37861049686976e+19*l1**4*l2**2*l3**9 - 2.63017808047531e+17*l1**4*l2**2*l3**8 + 8.07103977877071e+16*l1**4*l2**2*l3**7 + 1.05572053520698e+15*l1**4*l2**2*l3**6 - 562362075469031.0*l1**4*l2**2*l3**5 + 16602485426344.1*l1**4*l2**2*l3**4 + 4156559204177.93*l1**4*l2**2*l3**3 - 188285269454.54*l1**4*l2**2*l3**2 - 23662025439.5741*l1**4*l2**2*l3 + 361046100.508353*l1**4*l2**2 + 3.03101903315386e+20*l1**4*l2*l3**12 - 8.66416842052532e+18*l1**4*l2*l3**11 + 5.43584631771772e+18*l1**4*l2*l3**10 + 1.43081687577857e+17*l1**4*l2*l3**9 - 3.40476963022608e+16*l1**4*l2*l3**8 - 1.4970361675615e+15*l1**4*l2*l3**7 - 39095874040725.5*l1**4*l2*l3**6 + 6640994170537.64*l1**4*l2*l3**5 + 1608867372091.85*l1**4*l2*l3**4 + 34828445346.2154*l1**4*l2*l3**3 + 35716272.8366294*l1**4*l2*l3**2 - 731366192.969533*l1**4*l2*l3 - 84275249.4196067*l1**4*l2 - 4.64629402180949e+19*l1**4*l3**13 + 1.59444766072167e+18*l1**4*l3**12 - 6.06270879693848e+17*l1**4*l3**11 - 1.81190045543855e+16*l1**4*l3**10 + 2.43712686378457e+15*l1**4*l3**9 + 30472215481725.5*l1**4*l3**8 - 5539120887776.59*l1**4*l3**7 + 866830446579.527*l1**4*l3**6 + 42430141927.1733*l1**4*l3**5 - 13678022044.347*l1**4*l3**4 - 1218905186.67639*l1**4*l3**3 + 152860970.808674*l1**4*l3**2 + 13058847.6706446*l1**4*l3 + 424599.927904912*l1**4 + 3.91435633435894e+20*l1**3*l2**14 - 1.38137123958205e+21*l1**3*l2**13*l3 + 1.61731143849806e+19*l1**3*l2**13 + 3.56003933373645e+21*l1**3*l2**12*l3**2 - 1.54751674844383e+19*l1**3*l2**12*l3 + 1.66562955705057e+19*l1**3*l2**12 - 5.83007203011451e+21*l1**3*l2**11*l3**3 + 1.01081964906129e+19*l1**3*l2**11*l3**2 - 4.39458293103517e+19*l1**3*l2**11*l3 - 2.17802124424071e+17*l1**3*l2**11 + 7.86077867258602e+21*l1**3*l2**10*l3**4 + 7.09017782412989e+19*l1**3*l2**10*l3**3 + 8.70983339767148e+19*l1**3*l2**10*l3**2 + 1.52433431863991e+16*l1**3*l2**10*l3 - 1.94910152737745e+17*l1**3*l2**10 - 7.94228603476429e+21*l1**3*l2**9*l3**5 - 1.22261043267413e+20*l1**3*l2**9*l3**4 - 1.02433812469511e+20*l1**3*l2**9*l3**3 + 9.7725727790105e+16*l1**3*l2**9*l3**2 + 3.93524629242251e+17*l1**3*l2**9*l3 + 1.46219390567349e+15*l1**3*l2**9 + 7.15497395547997e+21*l1**3*l2**8*l3**6 + 1.75882618936664e+20*l1**3*l2**8*l3**5 + 1.01109494507776e+20*l1**3*l2**8*l3**4 - 1.00016905140208e+18*l1**3*l2**8*l3**3 - 6.75900156673183e+17*l1**3*l2**8*l3**2 + 1.11967675490991e+15*l1**3*l2**8*l3 + 1.60828302009996e+15*l1**3*l2**8 - 5.35622629656153e+21*l1**3*l2**7*l3**7 - 1.14367023150934e+20*l1**3*l2**7*l3**6 - 6.8174338861132e+19*l1**3*l2**7*l3**5 + 9.53877917185715e+17*l1**3*l2**7*l3**4 + 6.13662524484697e+17*l1**3*l2**7*l3**3 - 976528207763192.0*l1**3*l2**7*l3**2 - 1.35940220206359e+15*l1**3*l2**7*l3 + 11145124230419.3*l1**3*l2**7 + 4.16067684413124e+21*l1**3*l2**6*l3**8 + 5.48730666633271e+19*l1**3*l2**6*l3**7 + 4.58993463975121e+19*l1**3*l2**6*l3**6 - 1.05263233684729e+18*l1**3*l2**6*l3**5 - 5.25880299456158e+17*l1**3*l2**6*l3**4 + 5.33688964634589e+15*l1**3*l2**6*l3**3 + 2.9821538234693e+15*l1**3*l2**6*l3**2 + 11768564499490.1*l1**3*l2**6*l3 + 1121483265302.29*l1**3*l2**6 - 2.99703706204582e+21*l1**3*l2**5*l3**9 + 2.51260884195234e+19*l1**3*l2**5*l3**8 - 2.60078331396588e+19*l1**3*l2**5*l3**7 + 1.80675699608117e+17*l1**3*l2**5*l3**6 + 2.77119401986312e+17*l1**3*l2**5*l3**5 - 1.73195568924039e+15*l1**3*l2**5*l3**4 - 
1.02109000958799e+15*l1**3*l2**5*l3**3 - 31158460404216.3*l1**3*l2**5*l3**2 - 7917422650697.63*l1**3*l2**5*l3 - 99161651022.5581*l1**3*l2**5 + 2.20665590885755e+21*l1**3*l2**4*l3**10 - 3.75447298222764e+19*l1**3*l2**4*l3**9 + 2.22801021876793e+19*l1**3*l2**4*l3**8 + 6.17215122884873e+16*l1**3*l2**4*l3**7 - 1.99014826858053e+17*l1**3*l2**4*l3**6 + 1.91408032595675e+15*l1**3*l2**4*l3**5 + 1.60616076394704e+15*l1**3*l2**4*l3**4 + 29195527093264.9*l1**3*l2**4*l3**3 + 2238492732286.94*l1**3*l2**4*l3**2 - 291494203904.992*l1**3*l2**4*l3 - 55827561188.1036*l1**3*l2**4 - 1.30254623153966e+21*l1**3*l2**3*l3**11 + 3.25387658459729e+19*l1**3*l2**3*l3**10 - 1.59339421186967e+19*l1**3*l2**3*l3**9 - 3.25440701157479e+17*l1**3*l2**3*l3**8 + 1.12225985354951e+17*l1**3*l2**3*l3**7 + 1.69085363114876e+15*l1**3*l2**3*l3**6 - 228751532791147.0*l1**3*l2**3*l3**5 - 28709876159024.9*l1**3*l2**3*l3**4 - 5306701727066.18*l1**3*l2**3*l3**3 + 297895454080.963*l1**3*l2**3*l3**2 + 49081766079.7185*l1**3*l2**3*l3 - 38588334.3324546*l1**3*l2**3 + 6.18438961845702e+20*l1**3*l2**2*l3**12 - 1.19854329817267e+19*l1**3*l2**2*l3**11 + 1.03912593140448e+19*l1**3*l2**2*l3**10 + 1.44952036435084e+17*l1**3*l2**2*l3**9 - 8.2993285764487e+16*l1**3*l2**2*l3**8 - 664010835032014.0*l1**3*l2**2*l3**7 + 494554447002339.0*l1**3*l2**2*l3**6 + 4743567264669.72*l1**3*l2**2*l3**5 - 426211598106.73*l1**3*l2**2*l3**4 - 149807213537.055*l1**3*l2**2*l3**3 - 21305566251.1142*l1**3*l2**2*l3**2 + 1497909596.79462*l1**3*l2**2*l3 + 214763483.374839*l1**3*l2**2 - 1.86925525334752e+20*l1**3*l2*l3**13 + 2.88805614017511e+18*l1**3*l2*l3**12 - 3.90164124630537e+18*l1**3*l2*l3**11 - 5.88224715597857e+16*l1**3*l2*l3**10 + 3.40959929150402e+16*l1**3*l2*l3**9 + 702986924601663.0*l1**3*l2*l3**8 - 126029869547331.0*l1**3*l2*l3**7 - 7422553348430.84*l1**3*l2*l3**6 - 1005226650158.45*l1**3*l2*l3**5 + 83536956204.256*l1**3*l2*l3**4 + 12606870557.8676*l1**3*l2*l3**3 - 697148360.59186*l1**3*l2*l3**2 - 95020288.3718481*l1**3*l2*l3 - 389147.89508955*l1**3*l2 + 2.64091987502581e+19*l1**3*l3**14 - 4.57275555527726e+17*l1**3*l3**13 + 5.31019715606107e+17*l1**3*l3**12 + 6.82677332887816e+15*l1**3*l3**11 - 4.38193037062142e+15*l1**3*l3**10 + 3779499594632.34*l1**3*l3**9 + 18318116029428.1*l1**3*l3**8 - 246213729451.904*l1**3*l3**7 + 137355297256.647*l1**3*l3**6 - 1706144836.48127*l1**3*l3**5 - 1789261006.47087*l1**3*l3**4 + 86130867.7917772*l1**3*l3**3 + 25569742.6337316*l1**3*l3**2 + 1253188.13672906*l1**3*l3 + 185526.227273717*l1**3 - 1.15408521784635e+20*l1**2*l2**15 + 5.16903864810132e+20*l1**2*l2**14*l3 - 5.98755924775589e+18*l1**2*l2**14 - 1.50294935219523e+21*l1**2*l2**13*l3**2 + 1.2996252630788e+19*l1**2*l2**13*l3 - 5.13264765114763e+18*l1**2*l2**13 + 2.91118527353702e+21*l1**2*l2**12*l3**3 - 1.87001635076338e+19*l1**2*l2**12*l3**2 + 1.73844375566447e+19*l1**2*l2**12*l3 + 9.43357204863813e+16*l1**2*l2**12 - 4.37255402258589e+21*l1**2*l2**11*l3**4 - 6.93133473642026e+18*l1**2*l2**11*l3**3 - 3.87843953320727e+19*l1**2*l2**11*l3**2 - 1.1895418731963e+17*l1**2*l2**11*l3 + 7.00938579862046e+16*l1**2*l2**11 + 4.97405342437864e+21*l1**2*l2**10*l3**5 + 5.31763336809742e+19*l1**2*l2**10*l3**4 + 5.53262056927996e+19*l1**2*l2**10*l3**3 + 9.67905533614915e+16*l1**2*l2**10*l3**2 - 1.91373831872289e+17*l1**2*l2**10*l3 - 760777835590842.0*l1**2*l2**10 - 4.45259884351369e+21*l1**2*l2**9*l3**6 - 1.20287538238293e+20*l1**2*l2**9*l3**5 - 6.09196984784117e+19*l1**2*l2**9*l3**4 + 3.42273840872521e+17*l1**2*l2**9*l3**3 + 3.67561342111803e+17*l1**2*l2**9*l3**2 + 
293737984120351.0*l1**2*l2**9*l3 - 649110351574482.0*l1**2*l2**9 + 2.80842830659599e+21*l1**2*l2**8*l3**7 + 1.57579563148304e+20*l1**2*l2**8*l3**6 + 4.79895915050733e+19*l1**2*l2**8*l3**5 - 7.5012678855156e+17*l1**2*l2**8*l3**4 - 4.26487415162772e+17*l1**2*l2**8*l3**3 + 133404524754300.0*l1**2*l2**8*l3**2 + 957657511529948.0*l1**2*l2**8*l3 - 8922988851076.96*l1**2*l2**8 - 9.39707878459614e+20*l1**2*l2**7*l3**8 - 1.67837319689033e+20*l1**2*l2**7*l3**7 - 2.85523013989951e+19*l1**2*l2**7*l3**6 + 1.21198605948302e+18*l1**2*l2**7*l3**5 + 3.8749096783621e+17*l1**2*l2**7*l3**4 - 3.14218147548767e+15*l1**2*l2**7*l3**3 - 1.78619587695555e+15*l1**2*l2**7*l3**2 + 1599259820660.08*l1**2*l2**7*l3 - 1179348051801.66*l1**2*l2**7 - 5.55006634784036e+20*l1**2*l2**6*l3**9 + 1.36352350518017e+20*l1**2*l2**6*l3**8 + 7.75333258201061e+18*l1**2*l2**6*l3**7 - 1.18421137895321e+18*l1**2*l2**6*l3**6 - 2.21622285650653e+17*l1**2*l2**6*l3**5 + 4.00266723475942e+15*l1**2*l2**6*l3**4 + 1.21686880532198e+15*l1**2*l2**6*l3**3 + 14054512152750.1*l1**2*l2**6*l3**2 + 5764455754043.13*l1**2*l2**6*l3 + 131020378100.763*l1**2*l2**6 + 1.16886667001433e+21*l1**2*l2**5*l3**10 - 9.48726442047523e+19*l1**2*l2**5*l3**9 + 4.79362259304541e+18*l1**2*l2**5*l3**8 + 1.02682152261756e+18*l1**2*l2**5*l3**7 + 7.72093208790341e+16*l1**2*l2**5*l3**6 - 5.26793740061631e+15*l1**2*l2**5*l3**5 - 1.10378158367778e+15*l1**2*l2**5*l3**4 - 26577529731478.1*l1**2*l2**5*l3**3 - 4996469747462.0*l1**2*l2**5*l3**2 + 126229061586.284*l1**2*l2**5*l3 + 45092248331.253*l1**2*l2**5 - 1.10299060314175e+21*l1**2*l2**4*l3**11 + 4.9710666312764e+19*l1**2*l2**4*l3**10 - 1.05503977981215e+19*l1**2*l2**4*l3**9 - 6.21774098224364e+17*l1**2*l2**4*l3**8 + 3.36073762477029e+16*l1**2*l2**4*l3**7 + 3.63806363323971e+15*l1**2*l2**4*l3**6 + 179242143658631.0*l1**2*l2**4*l3**5 + 29367763761803.5*l1**2*l2**4*l3**4 + 5478593152682.71*l1**2*l2**4*l3**3 - 274663662360.154*l1**2*l2**4*l3**2 - 61606714300.074*l1**2*l2**4*l3 - 544223431.217994*l1**2*l2**4 + 6.82068762831483e+20*l1**2*l2**3*l3**12 - 2.02163929812258e+19*l1**2*l2**3*l3**11 + 9.21543187343872e+18*l1**2*l2**3*l3**10 + 3.22635177871639e+17*l1**2*l2**3*l3**9 - 6.10728652882935e+16*l1**2*l2**3*l3**8 - 2.62131918760237e+15*l1**2*l2**3*l3**7 - 18679574842990.6*l1**2*l2**3*l3**6 - 12204520919528.8*l1**2*l2**3*l3**5 - 1038805722499.71*l1**2*l2**3*l3**4 + 275856680930.025*l1**2*l2**3*l3**3 + 44615229499.074*l1**2*l2**3*l3**2 - 1492664994.44888*l1**2*l2**3*l3 - 285700420.018153*l1**2*l2**3 - 2.97673427422279e+20*l1**2*l2**2*l3**13 + 4.76529263128893e+18*l1**2*l2**2*l3**12 - 5.40636543548517e+18*l1**2*l2**2*l3**11 - 9.74919341829517e+16*l1**2*l2**2*l3**10 + 4.86996043928396e+16*l1**2*l2**2*l3**9 + 914697956582255.0*l1**2*l2**2*l3**8 - 197947535996849.0*l1**2*l2**2*l3**7 + 6298779675015.02*l1**2*l2**2*l3**6 + 648385791835.593*l1**2*l2**2*l3**5 - 172968450396.205*l1**2*l2**2*l3**4 - 20130496167.4871*l1**2*l2**2*l3**3 + 1430625186.21273*l1**2*l2**2*l3**2 + 205308170.427158*l1**2*l2**2*l3 - 1125395.92541787*l1**2*l2**2 + 8.23739553363315e+19*l1**2*l2*l3**14 - 4.33208421026266e+17*l1**2*l2*l3**13 + 1.88675813620528e+18*l1**2*l2*l3**12 + 1.85164536865462e+16*l1**2*l2*l3**11 - 2.01664338256242e+16*l1**2*l2*l3**10 - 296572608816327.0*l1**2*l2*l3**9 + 97792799298465.8*l1**2*l2*l3**8 + 1253657062805.58*l1**2*l2*l3**7 + 313552138364.42*l1**2*l2*l3**6 + 24360669507.3529*l1**2*l2*l3**5 - 312613735.159134*l1**2*l2*l3**4 - 553625340.301205*l1**2*l2*l3**3 - 73545262.2547023*l1**2*l2*l3**2 - 3230257.31559504*l1**2*l2*l3 - 
491140.071853158*l1**2*l2 - 1.0771144664426e+19*l1**2*l3**15 + 5.15724310745562e+15*l1**2*l3**14 - 2.68037935012821e+17*l1**2*l3**13 - 701380821460085.0*l1**2*l3**12 + 3.01977283899073e+15*l1**2*l3**11 - 14208556288571.8*l1**2*l3**10 - 16782413557002.6*l1**2*l3**9 + 255813806058.975*l1**2*l3**8 - 109487366904.275*l1**2*l3**7 - 6119030729.33509*l1**2*l3**6 + 1275935413.46734*l1**2*l3**5 + 104892046.914739*l1**2*l3**4 - 2797026.44068661*l1**2*l3**3 - 784067.051335089*l1**2*l3**2 - 56986.5464759292*l1**2*l3 + 1267.80952380952*l1**2 + 2.31139995715999e+19*l1*l2**16 - 1.29196727131924e+20*l1*l2**15*l3 + 1.47153336666065e+18*l1*l2**15 + 4.34777633458962e+20*l1*l2**14*l3**2 - 5.18818656610028e+18*l1*l2**14*l3 + 1.03017060414577e+18*l1*l2**14 - 1.00196623479682e+21*l1*l2**13*l3**3 + 1.12634189466829e+19*l1*l2**13*l3**2 - 4.33571147161073e+18*l1*l2**13*l3 - 2.67927473797752e+16*l1*l2**13 + 1.78001966686823e+21*l1*l2**12*l3**4 - 1.24667756717559e+19*l1*l2**12*l3**3 + 1.1029766129526e+19*l1*l2**12*l3**2 + 6.9623736210271e+16*l1*l2**12*l3 - 1.63731235407023e+16*l1*l2**12 - 2.51628679378898e+21*l1*l2**11*l3**5 + 5.05409824530644e+18*l1*l2**11*l3**4 - 1.85258967026386e+19*l1*l2**11*l3**3 - 1.18393082662462e+17*l1*l2**11*l3**2 + 5.79921161251011e+16*l1*l2**11*l3 + 255352441362365.0*l1*l2**11 + 2.96793022843935e+21*l1*l2**10*l3**6 + 1.1480023157196e+19*l1*l2**10*l3**5 + 2.33906581451469e+19*l1*l2**10*l3**4 + 6.45270355743277e+16*l1*l2**10*l3**3 - 1.29039725169923e+17*l1*l2**10*l3**2 - 437075506246793.0*l1*l2**10*l3 + 170964780025457.0*l1*l2**10 - 2.98968762329269e+21*l1*l2**9*l3**7 - 2.30563148523979e+19*l1*l2**9*l3**6 - 2.23482864137358e+19*l1*l2**9*l3**5 + 4.88628638950524e+16*l1*l2**9*l3**4 + 1.86353455412083e+17*l1*l2**9*l3**3 + 567869814093549.0*l1*l2**9*l3**2 - 383678483519268.0*l1*l2**9*l3 + 4162292037951.47*l1*l2**9 + 2.68014122311302e+21*l1*l2**8*l3**8 + 2.26196682693e+19*l1*l2**8*l3**7 + 1.73234338551178e+19*l1*l2**8*l3**6 - 1.8221873741533e+17*l1*l2**8*l3**5 - 2.05903339846073e+17*l1*l2**8*l3**4 + 88936349836199.0*l1*l2**8*l3**3 + 750045631533584.0*l1*l2**8*l3**2 - 5655009831952.7*l1*l2**8*l3 + 574967370952.717*l1*l2**8 - 2.18053763151336e+21*l1*l2**7*l3**9 - 9.46869834528839e+18*l1*l2**7*l3**8 - 1.14784909479247e+19*l1*l2**7*l3**7 + 1.70014711121925e+17*l1*l2**7*l3**6 + 1.73797937814058e+17*l1*l2**7*l3**5 - 488264103881597.0*l1*l2**7*l3**4 - 805921111510252.0*l1*l2**7*l3**3 + 1531494574021.93*l1*l2**7*l3**2 - 2810613019956.53*l1*l2**7*l3 - 85435523390.7163*l1*l2**7 + 1.63553252203771e+21*l1*l2**6*l3**10 - 2.76772046766781e+18*l1*l2**6*l3**9 + 8.04035181114279e+18*l1*l2**6*l3**8 - 7.79935473463614e+16*l1*l2**6*l3**7 - 1.28096864349185e+17*l1*l2**6*l3**6 + 713971095298563.0*l1*l2**6*l3**5 + 814697526737314.0*l1*l2**6*l3**4 + 7661990553218.9*l1*l2**6*l3**3 + 4183482328870.46*l1*l2**6*l3**2 + 33237753919.7217*l1*l2**6*l3 - 21852569500.959*l1*l2**6 - 1.09999146791926e+21*l1*l2**5*l3**11 + 7.65334877146404e+18*l1*l2**5*l3**10 - 6.45198061469378e+18*l1*l2**5*l3**9 - 3.57704218944643e+16*l1*l2**5*l3**8 + 8.76535474500108e+16*l1*l2**5*l3**7 - 88156828044803.9*l1*l2**5*l3**6 - 555189021334527.0*l1*l2**5*l3**5 - 14298467040647.3*l1*l2**5*l3**4 - 4885980914160.34*l1*l2**5*l3**3 + 67501760372.6651*l1*l2**5*l3**2 + 45535510646.6314*l1*l2**5*l3 + 690688545.507477*l1*l2**5 + 6.39393825022315e+20*l1*l2**4*l3**12 - 4.98189684180206e+18*l1*l2**4*l3**11 + 5.37487960575866e+18*l1*l2**4*l3**10 + 5.82146081811869e+16*l1*l2**4*l3**9 - 6.33751720181316e+16*l1*l2**4*l3**8 - 227832959938944.0*l1*l2**4*l3**7 + 
415608590832031.0*l1*l2**4*l3**6 + 11428608845522.2*l1*l2**4*l3**5 + 3111103306787.0*l1*l2**4*l3**4 - 154245755743.239*l1*l2**4*l3**3 - 50949479889.1892*l1*l2**4*l3**2 + 384774728.194553*l1*l2**4*l3 + 208668985.470149*l1*l2**4 - 3.00954374390641e+20*l1*l2**3*l3**13 + 1.39589380108464e+18*l1*l2**3*l3**12 - 3.65151541595834e+18*l1*l2**3*l3**11 - 3.54431108444495e+16*l1*l2**3*l3**10 + 4.19972816971451e+16*l1*l2**3*l3**9 + 349367493778850.0*l1*l2**3*l3**8 - 239287238706086.0*l1*l2**3*l3**7 - 7490318595068.97*l1*l2**3*l3**6 - 1890157175040.73*l1*l2**3*l3**5 + 137402386041.736*l1*l2**3*l3**4 + 35986704794.6165*l1*l2**3*l3**3 - 991101926.213895*l1*l2**3*l3**2 - 250171590.800596*l1*l2**3*l3 + 1965526.656554*l1*l2**3 + 1.07152524912614e+20*l1*l2**2*l3**14 + 2.88805614017511e+17*l1*l2**2*l3**13 + 1.81690114559439e+18*l1*l2**2*l3**12 + 5.33049424309664e+15*l1*l2**2*l3**11 - 2.2411939494483e+16*l1*l2**2*l3**10 - 109251160157348.0*l1*l2**2*l3**9 + 141190437770387.0*l1*l2**2*l3**8 + 2365007107671.06*l1*l2**2*l3**7 + 668358330259.531*l1*l2**2*l3**6 - 54532493984.0744*l1*l2**2*l3**5 - 15603822945.7885*l1*l2**2*l3**4 + 792574539.858195*l1*l2**2*l3**3 + 177880577.533824*l1*l2**2*l3**2 + 2379408.52802636*l1*l2**2*l3 + 444804.891240446*l1*l2**2 - 2.54077238493915e+19*l1*l2*l3**15 - 3.09434586447333e+17*l1*l2*l3**14 - 5.68642751749348e+17*l1*l2*l3**13 + 2.24441862867226e+15*l1*l2*l3**12 + 7.7481079135556e+15*l1*l2*l3**11 + 10866061334568.8*l1*l2*l3**10 - 53930608270756.6*l1*l2*l3**9 - 1111350044865.48*l1*l2*l3**8 - 259544323123.411*l1*l2*l3**7 + 22051601307.6038*l1*l2*l3**6 + 5901809098.65114*l1*l2*l3**5 - 348574180.29593*l1*l2*l3**4 - 66280999.0918247*l1*l2*l3**3 + 1474144.9924155*l1*l2*l3**2 + 91418.2036710431*l1*l2*l3 - 187.823633156966*l1*l2 + 2.95872947164765e+18*l1*l3**16 + 5.84487552178296e+16*l1*l3**15 + 7.84631518274452e+16*l1*l3**14 - 935174428613445.0*l1*l3**13 - 1.14021791655302e+15*l1*l3**12 + 15755788935124.6*l1*l3**11 + 8761841675829.04*l1*l3**10 + 18447206029.2716*l1*l3**9 + 57912038877.8131*l1*l3**8 - 2283411886.41854*l1*l3**7 - 1341008445.016*l1*l3**6 + 61144388.3234695*l1*l3**5 + 18251829.6119028*l1*l3**4 + 169839.971161965*l1*l3**3 + 17016.5000979816*l1*l3**2 + 1721.71663727219*l1*l3 + 315.168959435626*l1 - 2.4045095001e+18*l2**17 + 1.65335189894899e+19*l2**16*l3 - 1.84371441091536e+17*l2**16 - 6.45983635659619e+19*l2**15*l3**2 + 9.21427435198725e+17*l2**15*l3 - 1.01461431878159e+17*l2**15 + 1.72301288270044e+20*l2**14*l3**3 - 2.59409328305014e+18*l2**14*l3**2 + 5.17312308408721e+17*l2**14*l3 + 3.83421515731513e+15*l2**14 - 3.45342809895511e+20*l2**13*l3**4 + 4.33208421026266e+18*l2**13*l3**3 - 1.50445329181988e+18*l2**13*l3**2 - 1.53368606292605e+16*l2**13*l3 + 1.88399347862505e+15*l2**13 + 5.36728983193205e+20*l2**12*l3**5 - 3.86879187110957e+18*l2**12*l3**4 + 2.92006859494446e+18*l2**12*l3**3 + 3.48118681051355e+16*l2**12*l3**2 - 8.4012305365472e+15*l2**12*l3 - 42365828268584.8*l2**12 - 6.55841757285021e+20*l2**11*l3**6 - 1.58843087709631e+18*l2**11*l3**5 - 4.19481726879574e+18*l2**11*l3**4 - 3.96513957732101e+16*l2**11*l3**3 + 2.18366763402786e+16*l2**11*l3**2 + 127605355063783.0*l2**11*l3 - 22532656612417.8*l2**11 + 6.17826851452948e+20*l2**10*l3**7 + 1.17688287712136e+19*l2**10*l3**6 + 4.55554747671672e+18*l2**10*l3**5 + 3.81083579659979e+15*l2**10*l3**4 - 3.75402109757658e+16*l2**10*l3**3 - 218537753123397.0*l2**10*l3**2 + 70835727696899.1*l2**10*l3 - 908807252135.931*l2**10 - 4.1741946982729e+20*l2**9*l3**8 - 2.26162301072284e+19*l2**9*l3**7 - 3.77393164963117e+18*l2**9*l3**6 + 
8.45397683466555e+16*l2**9*l3**5 + 4.74253513553496e+16*l2**9*l3**4 + 97912661373450.5*l2**9*l3**3 - 154699492622398.0*l2**9*l3**2 + 2291971230738.83*l2**9*l3 - 120899295189.725*l2**9 + 1.40441206801212e+20*l2**8*l3**9 + 2.81005283817484e+19*l2**8*l3**8 + 2.20254395207253e+18*l2**8*l3**7 - 1.8362149905825e+17*l2**8*l3**6 - 4.38117388854447e+16*l2**8*l3**5 + 279919188727476.0*l2**8*l3**4 + 208274279824742.0*l2**8*l3**3 - 2614044389066.21*l2**8*l3**2 + 673072019109.655*l2**8*l3 + 23991860003.2263*l2**8 + 8.86337611867599e+19*l2**7*l3**10 - 2.56005547854093e+19*l2**7*l3**9 - 5.69475604715898e+17*l2**7*l3**8 + 2.3790837463926e+17*l2**7*l3**7 + 2.85494930164279e+16*l2**7*l3**6 - 809852275639900.0*l2**7*l3**5 - 215128517546711.0*l2**7*l3**4 - 36141464873.677*l2**7*l3**3 - 1405470973525.26*l2**7*l3**2 - 35546822119.4707*l2**7*l3 + 4971240623.84883*l2**7 - 1.94571468834062e+20*l2**6*l3**11 + 1.73524039755521e+19*l2**6*l3**10 - 6.08543385696317e+17*l2**6*l3**9 - 2.13430183970303e+17*l2**6*l3**8 - 9.21169218057948e+15*l2**6*l3**7 + 1.09016122526307e+15*l2**6*l3**6 + 144060700956964.0*l2**6*l3**5 + 4365211304273.46*l2**6*l3**4 + 1909040842958.75*l2**6*l3**3 + 10846206460.4881*l2**6*l3**2 - 14870699157.6799*l2**6*l3 - 288687643.754568*l2**6 + 1.84218486958349e+20*l2**5*l3**12 - 8.51976561351657e+18*l2**5*l3**11 + 1.10246994658655e+18*l2**5*l3**10 + 1.38218780549067e+17*l2**5*l3**9 - 4.87037884041749e+15*l2**5*l3**8 - 986945453520903.0*l2**5*l3**7 - 58885491397371.7*l2**5*l3**6 - 6620664596546.18*l2**5*l3**5 - 1555582879712.99*l2**5*l3**4 + 51889893711.0281*l2**5*l3**3 + 22366670432.1247*l2**5*l3**2 + 156698484.720189*l2**5*l3 - 67498671.7752862*l2**5 - 1.1867668584273e+20*l2**4*l3**13 + 2.65340157878588e+18*l2**4*l3**12 - 1.02667334231992e+18*l2**4*l3**11 - 6.0926614024166e+16*l2**4*l3**10 + 1.01862936996233e+16*l2**4*l3**9 + 592436561458660.0*l2**4*l3**8 - 11913497053223.1*l2**4*l3**7 + 5667998170891.67*l2**4*l3**6 + 813106801723.103*l2**4*l3**5 - 90191562474.3662*l2**4*l3**4 - 19732783627.8033*l2**4*l3**3 + 252828208.203635*l2**4*l3**2 + 123921549.643289*l2**4*l3 - 735423.564448895*l2**4 + 5.52897601520092e+19*l2**3*l3**14 - 2.40671345014593e+17*l2**3*l3**13 + 6.55141785167048e+17*l2**3*l3**12 + 1.55238955149832e+16*l2**3*l3**11 - 8.42889017464424e+15*l2**3*l3**10 - 231966787620576.0*l2**3*l3**9 + 33550458403440.4*l2**3*l3**8 - 2780633953718.3*l2**3*l3**7 - 147524687491.328*l2**3*l3**6 + 71568286035.5565*l2**3*l3**5 + 9817307107.63109*l2**3*l3**4 - 551280192.910835*l2**3*l3**3 - 124812972.145651*l2**3*l3**2 - 616700.477811406*l2**3*l3 - 120717.263483354*l2**3 - 1.82954301212009e+19*l2**2*l3**15 - 2.11446967405678e+17*l2**2*l3**14 - 2.90960181807716e+17*l2**2*l3**13 - 23379360715334.0*l2**2*l3**12 + 4.35952271413881e+15*l2**2*l3**11 + 38420975566687.0*l2**2*l3**10 - 26236327094173.2*l2**2*l3**9 + 857230369972.457*l2**2*l3**8 - 63934333715.1276*l2**2*l3**7 - 33263410233.0522*l2**2*l3**6 - 2189728684.3476*l2**2*l3**5 + 434918243.305013*l2**2*l3**4 + 63546224.6296145*l2**2*l3**3 - 887949.752142893*l2**2*l3**2 - 95592.0621856424*l2**2*l3 - 1940.84420928865*l2**2 + 3.91788853163254e+18*l2*l3**16 + 1.03144862149111e+17*l2*l3**15 + 8.15596300498372e+16*l2*l3**14 - 1.30924420005882e+15*l2*l3**13 - 1.36889010002839e+15*l2*l3**12 + 6070821223878.6*l2*l3**11 + 10594647450028.3*l2*l3**10 - 68518193823.0073*l2*l3**9 + 53857210140.8403*l2*l3**8 + 6516703585.95854*l2*l3**7 - 416251108.985887*l2*l3**6 - 146273238.593907*l2*l3**5 - 14688945.4770083*l2*l3**4 + 308350.238905703*l2*l3**3 + 45709.1018355215*l2*l3**2 - 
1252.15755437978*l2*l3 - 397.070899470899*l2 - 4.10742823558176e+17*l3**17 - 1.59014995813213e+16*l3**16 - 1.05331047076858e+16*l3**15 + 303931689299370.0*l3**14 + 190870226665068.0*l3**13 - 4169260490329.08*l3**12 - 1753634435563.09*l3**11 + 9976550199.50381*l3**10 - 12744260532.2842*l3**9 - 439364365.785591*l3**8 + 265920135.741754*l3**7 + 15627209.4285576*l3**6 - 987398.786787903*l3**5 - 206116.469856753*l3**4 - 20038.9801206262*l3**3 + 391.29923574368*l3**2 + 89.0920634920634*l3 + 1.07851851851852) + l3_dot*(6.99276617070024e+17*l1**17 - 8.99556904243034e+18*l1**16*l2 - 2.64716440015592e+18*l1**16*l3 + 3.67453571406208e+16*l1**16 + 4.37908711824201e+19*l1**15*l2**2 + 5.3413464012915e+19*l1**15*l2*l3 - 4.4008474516954e+17*l1**15*l2 - 5.47211410401616e+18*l1**15*l3**2 + 1.71908103581852e+16*l1**15*l3 + 2.21668577184032e+16*l1**15 - 1.36611696666421e+20*l1**14*l2**3 - 2.37796512574693e+20*l1**14*l2**2*l3 + 1.38471977435181e+18*l1**14*l2**2 - 1.56598584982865e+20*l1**14*l2*l3**2 + 1.86176476179145e+18*l1**14*l2*l3 - 3.31115366849687e+17*l1**14*l2 + 7.66433913569303e+19*l1**14*l3**3 - 1.31251837084744e+18*l1**14*l3**2 - 4.41568279044198e+15*l1**14*l3 - 454227579612246.0*l1**14 + 2.97938268752797e+20*l1**13*l2**4 + 7.06129726272814e+20*l1**13*l2**3*l3 - 2.20214280688352e+18*l1**13*l2**3 + 5.91181389257768e+20*l1**13*l2**2*l3**2 - 4.25988280675828e+18*l1**13*l2**2*l3 + 1.48316714353541e+18*l1**13*l2**2 + 3.28890763592719e+20*l1**13*l2*l3**3 - 3.50176806996232e+18*l1**13*l2*l3**2 + 1.71198714773148e+18*l1**13*l2*l3 + 6.4059448360021e+15*l1**13*l2 - 3.39283343104257e+20*l1**13*l3**4 + 6.71473052590713e+18*l1**13*l3**3 - 8.34435538618781e+17*l1**13*l3**2 - 2.80552328584034e+15*l1**13*l3 - 272852624385010.0*l1**13 - 4.79091281592275e+20*l1**12*l2**5 - 1.48084975736477e+21*l1**12*l2**4*l3 - 4.66300730965773e+17*l1**12*l2**4 - 1.60399634776063e+21*l1**12*l2**3*l3**2 + 7.24420748493923e+18*l1**12*l2**3*l3 - 4.15218754567425e+18*l1**12*l2**3 - 9.6597249808686e+20*l1**12*l2**2*l3**3 - 3.68227157872326e+18*l1**12*l2**2*l3**2 - 6.91142558839386e+18*l1**12*l2**2*l3 - 1.30223039184422e+16*l1**12*l2**2 - 5.71809403004135e+20*l1**12*l2*l3**4 + 8.45959777726292e+18*l1**12*l2*l3**3 - 4.29769518474538e+18*l1**12*l2*l3**2 - 2.43612938653803e+16*l1**12*l2*l3 + 4.42751362366629e+15*l1**12*l2 + 9.70141049203715e+20*l1**12*l3**5 - 1.97681426011361e+19*l1**12*l3**4 + 5.11936978976637e+18*l1**12*l3**3 + 3.19128273764338e+16*l1**12*l3**2 - 898815197397549.0*l1**12*l3 - 2757853610458.42*l1**12 + 5.50547015331116e+20*l1**11*l2**6 + 2.51440462044976e+21*l1**11*l2**5*l3 + 1.02886999993738e+19*l1**11*l2**5 + 2.58873812522856e+21*l1**11*l2**4*l3**2 - 3.10466035068824e+18*l1**11*l2**4*l3 + 7.6229630499278e+18*l1**11*l2**4 + 3.00155839210002e+21*l1**11*l2**3*l3**3 + 1.67507256130156e+19*l1**11*l2**3*l3**2 + 1.9352478387333e+19*l1**11*l2**3*l3 + 2.712005842979e+15*l1**11*l2**3 + 6.90069542288186e+20*l1**11*l2**2*l3**4 + 2.5992505261576e+19*l1**11*l2**2*l3**3 + 1.2533353392225e+19*l1**11*l2**2*l3**2 + 2.80552328584033e+16*l1**11*l2**2*l3 - 1.61442729988135e+16*l1**11*l2**2 + 1.06315023895164e+21*l1**11*l2*l3**5 - 2.48733835072581e+19*l1**11*l2*l3**4 + 8.92623545521655e+18*l1**11*l2*l3**3 + 4.09606399732689e+16*l1**11*l2*l3**2 - 2.1008072947682e+16*l1**11*l2*l3 - 24968319197041.6*l1**11*l2 - 2.07335072498003e+21*l1**11*l3**6 + 3.97107719274077e+19*l1**11*l3**5 - 1.77922893593028e+19*l1**11*l3**4 - 1.21572675719748e+17*l1**11*l3**3 + 1.61353510909877e+16*l1**11*l3**2 + 71527029828421.8*l1**11*l3 + 11320436737210.0*l1**11 - 
3.85200805685987e+20*l1**10*l2**7 - 3.54757584929676e+21*l1**10*l2**6*l3 - 2.81946480684595e+19*l1**10*l2**6 - 3.1815641020106e+21*l1**10*l2**5*l3**2 + 1.48012877183974e+18*l1**10*l2**5*l3 - 9.24778561633711e+18*l1**10*l2**5 - 4.37928253514608e+21*l1**10*l2**4*l3**3 - 8.54323106965549e+19*l1**10*l2**4*l3**2 - 3.71290892510937e+19*l1**10*l2**4*l3 + 7.268643246398e+16*l1**10*l2**4 - 3.89798715657788e+21*l1**10*l2**3*l3**4 - 1.37904680693361e+19*l1**10*l2**3*l3**3 - 3.23384971065798e+19*l1**10*l2**3*l3**2 - 4.74133435307016e+16*l1**10*l2**3*l3 + 3.62713762107784e+16*l1**10*l2**3 + 7.02686274721066e+20*l1**10*l2**2*l3**5 - 8.84647696437388e+19*l1**10*l2**2*l3**4 - 1.3244932575657e+19*l1**10*l2**2*l3**3 + 1.67489740164668e+17*l1**10*l2**2*l3**2 + 6.85321281043927e+16*l1**10*l2**2*l3 - 36111937533091.1*l1**10*l2**2 - 2.10407643336022e+21*l1**10*l2*l3**6 + 7.13349866623252e+19*l1**10*l2*l3**5 - 1.8168138290266e+19*l1**10*l2*l3**4 - 1.4607424574942e+17*l1**10*l2*l3**3 + 4.73871128541138e+16*l1**10*l2*l3**2 + 112451923876553.0*l1**10*l2*l3 - 110852513012189.0*l1**10*l2 + 3.50321297961242e+21*l1**10*l3**7 - 5.82665326280328e+19*l1**10*l3**6 + 4.31601792816938e+19*l1**10*l3**5 + 2.87636274880781e+17*l1**10*l3**4 - 7.58619193183226e+16*l1**10*l3**3 - 406467464998386.0*l1**10*l3**2 - 12082945081061.5*l1**10*l3 + 238401902408.898*l1**10 - 3.81536444874637e+19*l1**9*l2**8 + 4.47047669856619e+21*l1**9*l2**7*l3 + 4.69859228709917e+19*l1**9*l2**7 + 2.07139552742929e+21*l1**9*l2**6*l3**2 - 5.94458222186043e+18*l1**9*l2**6*l3 + 5.24532092188073e+18*l1**9*l2**6 + 6.81789419509663e+21*l1**9*l2**5*l3**3 + 1.59962209463949e+20*l1**9*l2**5*l3**2 + 5.95420709264971e+19*l1**9*l2**5*l3 - 2.19578955838437e+17*l1**9*l2**5 + 2.58969566805851e+21*l1**9*l2**4*l3**4 + 9.83142444384609e+19*l1**9*l2**4*l3**3 + 3.67450688418428e+19*l1**9*l2**4*l3**2 - 1.35600292148948e+16*l1**9*l2**4*l3 - 4.58361749788361e+16*l1**9*l2**4 + 5.75317856389237e+21*l1**9*l2**3*l3**5 + 2.1720588887567e+19*l1**9*l2**3*l3**4 + 5.80627774342043e+19*l1**9*l2**3*l3**3 - 3.3245450937208e+17*l1**9*l2**3*l3**2 - 1.77334825275203e+17*l1**9*l2**3*l3 + 305135537585416.0*l1**9*l2**3 - 3.8619347947967e+21*l1**9*l2**2*l3**6 + 1.67218450516139e+20*l1**9*l2**2*l3**5 - 1.12093877882121e+19*l1**9*l2**2*l3**4 - 4.64781691020882e+17*l1**9*l2**2*l3**3 - 7.62052563556775e+16*l1**9*l2**2*l3**2 + 135943876044441.0*l1**9*l2**2*l3 + 327739348358761.0*l1**9*l2**2 + 3.97166109257294e+21*l1**9*l2*l3**7 - 1.44450941277758e+20*l1**9*l2*l3**6 + 4.13901763724616e+19*l1**9*l2*l3**5 + 4.27608507483498e+17*l1**9*l2*l3**4 - 1.0763424371462e+17*l1**9*l2*l3**3 - 216317297111550.0*l1**9*l2*l3**2 + 451599519897448.0*l1**9*l2*l3 - 217037026038.262*l1**9*l2 - 4.82396446298391e+21*l1**9*l3**8 + 6.18112777238906e+19*l1**9*l3**7 - 7.93435352773515e+19*l1**9*l3**6 - 4.60386371206399e+17*l1**9*l3**5 + 2.18434211523805e+17*l1**9*l3**4 + 1.12251137960588e+15*l1**9*l3**3 - 170567820999259.0*l1**9*l3**2 - 1554835936752.86*l1**9*l3 - 46284985021.42*l1**9 + 5.60178497426796e+20*l1**8*l2**9 - 5.07499896477692e+21*l1**8*l2**8*l3 - 5.65162932484401e+19*l1**8*l2**8 + 2.85840860415737e+20*l1**8*l2**7*l3**2 + 3.08970434567662e+19*l1**8*l2**7*l3 + 5.12260583759615e+18*l1**8*l2**7 - 7.48487265889369e+21*l1**8*l2**6*l3**3 - 2.71675831036097e+20*l1**8*l2**6*l3**2 - 8.04326779691242e+19*l1**8*l2**6*l3 + 3.95017678646319e+17*l1**8*l2**6 - 4.12053409799483e+21*l1**8*l2**5*l3**4 - 4.54868842077572e+18*l1**8*l2**5*l3**3 - 2.69078099740193e+19*l1**8*l2**5*l3**2 - 7.995741364645e+16*l1**8*l2**5*l3 + 
2.1350646059562e+16*l1**8*l2**5 - 9.55354160625617e+20*l1**8*l2**4*l3**5 - 3.10827042086346e+20*l1**8*l2**4*l3**4 - 6.27072438947805e+19*l1**8*l2**4*l3**3 + 1.20672570332207e+18*l1**8*l2**4*l3**2 + 2.98936174619923e+17*l1**8*l2**4*l3 - 1.02542548376876e+15*l1**8*l2**4 - 7.84641655864677e+21*l1**8*l2**3*l3**6 + 1.31803662097242e+20*l1**8*l2**3*l3**5 - 6.80719435603997e+19*l1**8*l2**3*l3**4 - 8.27629369322899e+16*l1**8*l2**3*l3**3 + 2.00506999536726e+17*l1**8*l2**3*l3**2 + 111790511447493.0*l1**8*l2**3*l3 - 585159406413594.0*l1**8*l2**3 + 8.05873188237054e+21*l1**8*l2**2*l3**7 - 2.82830947877524e+20*l1**8*l2**2*l3**6 + 6.2685142225168e+19*l1**8*l2**2*l3**5 + 1.24880855260968e+18*l1**8*l2**2*l3**4 + 2.36200232148685e+16*l1**8*l2**2*l3**3 - 2.01993584194705e+15*l1**8*l2**2*l3**2 - 1.19559898616747e+15*l1**8*l2**2*l3 - 213460526910.136*l1**8*l2**2 - 6.30109913195376e+21*l1**8*l2*l3**8 + 2.18043081340113e+20*l1**8*l2*l3**7 - 8.41132757873469e+19*l1**8*l2*l3**6 - 1.01966743823867e+18*l1**8*l2*l3**5 + 2.32661901660577e+17*l1**8*l2*l3**4 + 1.37691894606959e+15*l1**8*l2*l3**3 - 833724477492816.0*l1**8*l2*l3**2 + 862312763470.318*l1**8*l2*l3 + 482291495667.511*l1**8*l2 + 5.49192974541403e+21*l1**8*l3**9 - 4.34117385123955e+19*l1**8*l3**8 + 1.14522937542462e+20*l1**8*l3**7 + 5.18320427059002e+17*l1**8*l3**6 - 4.42420791440895e+17*l1**8*l3**5 - 2.0301227744482e+15*l1**8*l3**4 + 799766997317959.0*l1**8*l3**3 + 4569918820159.51*l1**8*l3**2 - 93360617957.8389*l1**8*l3 - 4145098122.46625*l1**8 - 9.44718485424014e+20*l1**7*l2**10 + 5.29839905525665e+21*l1**7*l2**9*l3 + 4.98963270646324e+19*l1**7*l2**9 - 3.72361283289464e+21*l1**7*l2**8*l3**2 - 5.42438830042175e+19*l1**7*l2**8*l3 - 1.7781259624237e+19*l1**7*l2**8 + 9.86973967460443e+21*l1**7*l2**7*l3**3 + 3.16056486597306e+20*l1**7*l2**7*l3**2 + 9.80714681326568e+19*l1**7*l2**7*l3 - 4.79664324070531e+17*l1**7*l2**7 - 2.3441759268029e+21*l1**7*l2**6*l3**4 - 2.51260884195234e+19*l1**7*l2**6*l3**3 - 2.07843794869119e+19*l1**7*l2**6*l3**2 + 2.2051413026705e+17*l1**7*l2**6*l3 + 4.89232193344956e+16*l1**7*l2**6 + 9.67387727876347e+21*l1**7*l2**5*l3**5 + 2.01008707356187e+20*l1**7*l2**5*l3**4 + 1.09619628167233e+20*l1**7*l2**5*l3**3 - 1.66648083178916e+18*l1**7*l2**5*l3**2 - 4.52143314917097e+17*l1**7*l2**5*l3 + 1.83017543495591e+15*l1**7*l2**5 - 6.46044582224889e+21*l1**7*l2**4*l3**6 + 2.94581726297861e+20*l1**7*l2**4*l3**5 - 1.28678570259647e+19*l1**7*l2**4*l3**4 - 7.5468576389105e+17*l1**7*l2**4*l3**3 - 6.50934999683672e+16*l1**7*l2**4*l3**2 + 87873365575204.0*l1**7*l2**4*l3 + 604146922080804.0*l1**7*l2**4 + 1.24044550170974e+22*l1**7*l2**3*l3**7 - 2.67289595773206e+20*l1**7*l2**3*l3**6 + 1.19445059273422e+20*l1**7*l2**3*l3**5 + 1.76747967007939e+17*l1**7*l2**3*l3**4 - 4.42824744975479e+17*l1**7*l2**3*l3**3 + 2.28116422408545e+15*l1**7*l2**3*l3**2 + 2.29394709058952e+15*l1**7*l2**3*l3 + 4388929140596.8*l1**7*l2**3 - 1.24153050631734e+22*l1**7*l2**2*l3**8 + 3.54116940730328e+20*l1**7*l2**2*l3**7 - 1.45395639641505e+20*l1**7*l2**2*l3**6 - 1.8836283341132e+18*l1**7*l2**2*l3**5 + 2.90727287924567e+17*l1**7*l2**2*l3**4 + 3.50926537361638e+15*l1**7*l2**2*l3**3 + 1.31529533516771e+15*l1**7*l2**2*l3**2 - 7853992085360.32*l1**7*l2**2*l3 - 1113708192816.43*l1**7*l2**2 + 8.18425506556592e+21*l1**7*l2*l3**9 - 2.37166138782558e+20*l1**7*l2*l3**8 + 1.39176967391588e+20*l1**7*l2*l3**7 + 1.58119292389961e+18*l1**7*l2*l3**6 - 4.93180789705074e+17*l1**7*l2*l3**5 - 3.4731239087427e+15*l1**7*l2*l3**4 + 1.36167148544483e+15*l1**7*l2*l3**3 + 6871396009107.31*l1**7*l2*l3**2 - 
1672778130958.23*l1**7*l2*l3 - 6279382687.651*l1**7*l2 - 5.20545054759999e+21*l1**7*l3**10 + 9.77813293173572e+18*l1**7*l3**9 - 1.32227664055251e+20*l1**7*l3**8 - 3.69126706608422e+17*l1**7*l3**7 + 6.74749119961964e+17*l1**7*l3**6 + 2.39213978093284e+15*l1**7*l3**5 - 1.95658070457494e+15*l1**7*l3**4 - 9977303146688.7*l1**7*l3**3 + 1294561512084.59*l1**7*l3**2 + 25418992432.2379*l1**7*l3 - 640475989.970519*l1**7 + 1.02770161255304e+21*l1**6*l2**11 - 4.9030780048198e+21*l1**6*l2**10*l3 - 3.17084497056725e+19*l1**6*l2**10 + 6.18450892561951e+21*l1**6*l2**9*l3**2 + 6.40546784756337e+19*l1**6*l2**9*l3 + 2.57401748398746e+19*l1**6*l2**9 - 1.06004681281298e+22*l1**6*l2**8*l3**3 - 3.30917082611439e+20*l1**6*l2**8*l3**2 - 1.04376478397443e+20*l1**6*l2**8*l3 + 4.22441668765409e+17*l1**6*l2**8 + 4.33803311222661e+21*l1**6*l2**7*l3**4 + 2.45484771914884e+20*l1**6*l2**7*l3**3 + 7.26379233639729e+19*l1**6*l2**7*l3**2 - 4.85355528450378e+17*l1**6*l2**7*l3 - 1.31430958793295e+17*l1**6*l2**7 - 3.51177752949087e+21*l1**6*l2**6*l3**5 - 5.40283102423258e+20*l1**6*l2**6*l3**4 - 1.15505869181563e+20*l1**6*l2**6*l3**3 + 2.46857993921091e+18*l1**6*l2**6*l3**2 + 5.620500783276e+17*l1**6*l2**6*l3 - 2.32561468259919e+15*l1**6*l2**6 - 7.40040935891383e+21*l1**6*l2**5*l3**6 + 3.36602943137409e+20*l1**6*l2**5*l3**5 - 2.03102940853911e+19*l1**6*l2**5*l3**4 - 1.04084913904677e+18*l1**6*l2**5*l3**3 - 9.94152388988862e+16*l1**6*l2**5*l3**2 + 1.27001816222266e+15*l1**6*l2**5*l3 - 285704683726115.0*l1**6*l2**5 + 1.11779867902629e+22*l1**6*l2**4*l3**7 - 5.9688900277069e+20*l1**6*l2**4*l3**6 + 4.25410602459312e+19*l1**6*l2**4*l3**5 + 3.16182474314206e+18*l1**6*l2**4*l3**4 + 3.01076429626371e+17*l1**6*l2**4*l3**3 - 7.42521080908333e+15*l1**6*l2**4*l3**2 - 2.87307973271654e+15*l1**6*l2**4*l3 - 10886204517222.7*l1**6*l2**4 - 1.60362772976895e+22*l1**6*l2**3*l3**8 + 4.87937084882584e+20*l1**6*l2**3*l3**7 - 1.70122285311055e+20*l1**6*l2**3*l3**6 - 2.16810839529742e+18*l1**6*l2**3*l3**5 + 4.78963883612167e+17*l1**6*l2**3*l3**4 + 3.05820571886942e+15*l1**6*l2**3*l3**3 - 2.24145031693834e+15*l1**6*l2**3*l3**2 + 12361510407573.9*l1**6*l2**3*l3 + 1821096665717.15*l1**6*l2**3 + 1.47244004232354e+22*l1**6*l2**2*l3**9 - 3.74454528924579e+20*l1**6*l2**2*l3**8 + 2.20895623656359e+20*l1**6*l2**2*l3**7 + 2.71995482562221e+18*l1**6*l2**2*l3**6 - 7.05066178726704e+17*l1**6*l2**2*l3**5 - 8.00223409875762e+15*l1**6*l2**2*l3**4 - 834194302277244.0*l1**6*l2**2*l3**3 - 2979976720912.12*l1**6*l2**2*l3**2 + 2608365819561.4*l1**6*l2**2*l3 + 41354770049.673*l1**6*l2**2 - 8.54893415772432e+21*l1**6*l2*l3**10 + 1.87386709228362e+20*l1**6*l2*l3**9 - 1.78496758357011e+20*l1**6*l2*l3**8 - 1.79160717033764e+18*l1**6*l2*l3**7 + 8.17556011937895e+17*l1**6*l2*l3**6 + 6.44072793576705e+15*l1**6*l2*l3**5 - 1.98557963541688e+15*l1**6*l2*l3**4 - 11851012216233.3*l1**6*l2*l3**3 + 3196669911975.57*l1**6*l2*l3**2 - 14906318045.0469*l1**6*l2*l3 + 561033033.874537*l1**6*l2 + 4.10249814625904e+21*l1**6*l3**11 + 2.10587426887768e+19*l1**6*l3**10 + 1.22738277510303e+20*l1**6*l3**9 + 8.311362734302e+16*l1**6*l3**8 - 7.93242581531948e+17*l1**6*l3**7 - 1.78473285779856e+15*l1**6*l3**6 + 3.15232866469315e+15*l1**6*l3**5 + 11999813403642.8*l1**6*l3**4 - 4184784549495.2*l1**6*l3**3 - 44362972787.6794*l1**6*l3**2 + 4200980577.47475*l1**6*l3 + 41802252.2333483*l1**6 - 8.35753358528633e+20*l1**5*l2**12 + 3.94369619895027e+21*l1**5*l2**11*l3 + 1.21298357887355e+19*l1**5*l2**11 - 7.128626614261e+21*l1**5*l2**10*l3**2 - 4.42594603481835e+19*l1**5*l2**10*l3 - 
2.51396938689589e+19*l1**5*l2**10 + 1.19393145923658e+22*l1**5*l2**9*l3**3 + 2.45809678230654e+20*l1**5*l2**9*l3**2 + 9.52813039735035e+19*l1**5*l2**9*l3 - 2.51187851525571e+17*l1**5*l2**9 - 1.13970470827072e+22*l1**5*l2**8*l3**4 - 2.60574865247299e+20*l1**5*l2**8*l3**3 - 1.14000812612791e+20*l1**5*l2**8*l3**2 + 4.90686022693475e+17*l1**5*l2**8*l3 + 1.7779024259238e+17*l1**5*l2**8 + 1.35258036425416e+22*l1**5*l2**7*l3**5 + 3.72126033661563e+20*l1**5*l2**7*l3**4 + 1.53893338792424e+20*l1**5*l2**7*l3**3 - 2.17484165118343e+18*l1**5*l2**7*l3**2 - 6.2052801798965e+17*l1**5*l2**7*l3 + 1.98122549844264e+15*l1**5*l2**7 - 1.22615879224806e+22*l1**5*l2**6*l3**6 + 6.06491789436772e+18*l1**5*l2**6*l3**5 - 9.44768998533265e+19*l1**5*l2**6*l3**4 + 9.46583556642529e+17*l1**5*l2**6*l3**3 + 4.79600786478659e+17*l1**5*l2**6*l3**2 - 1.82911245069492e+15*l1**5*l2**6*l3 - 214548090158460.0*l1**5*l2**6 + 1.66711929900785e+22*l1**5*l2**5*l3**7 - 1.91044913672583e+20*l1**5*l2**5*l3**6 + 1.36299227129596e+20*l1**5*l2**5*l3**5 - 1.08405419764871e+18*l1**5*l2**5*l3**4 - 7.44732706930579e+17*l1**5*l2**5*l3**3 + 7.03727470303487e+15*l1**5*l2**5*l3**2 + 3.11282106091059e+15*l1**5*l2**5*l3 + 16695662640471.5*l1**5*l2**5 - 1.83726653466101e+22*l1**5*l2**4*l3**8 + 5.16384437863309e+20*l1**5*l2**4*l3**7 - 1.58493481522701e+20*l1**5*l2**4*l3**6 - 2.13275880189582e+18*l1**5*l2**4*l3**5 + 4.82757119165511e+17*l1**5*l2**4*l3**4 + 2.07352796510532e+15*l1**5*l2**4*l3**3 + 742066728008720.0*l1**5*l2**4*l3**2 - 28535945359320.4*l1**5*l2**4*l3 - 2273495194858.9*l1**5*l2**4 + 1.85098336141281e+22*l1**5*l2**3*l3**9 - 4.67973396813624e+20*l1**5*l2**3*l3**8 + 2.38859406611701e+20*l1**5*l2**3*l3**7 + 2.59623124871665e+18*l1**5*l2**3*l3**6 - 9.65407283409021e+17*l1**5*l2**3*l3**5 - 3.33369580650946e+15*l1**5*l2**3*l3**4 + 3.58707719441602e+15*l1**5*l2**3*l3**3 + 42119489047935.4*l1**5*l2**3*l3**2 - 2761785362655.22*l1**5*l2**3*l3 - 139377922168.188*l1**5*l2**3 - 1.42044875788926e+22*l1**5*l2**2*l3**10 + 2.74654138930653e+20*l1**5*l2**2*l3**9 - 2.57357957017538e+20*l1**5*l2**2*l3**8 - 2.53675415505683e+18*l1**5*l2**2*l3**7 + 1.197866989817e+18*l1**5*l2**2*l3**6 + 9.0874524472071e+15*l1**5*l2**2*l3**5 - 1.49641360629865e+15*l1**5*l2**2*l3**4 - 34438298341502.3*l1**5*l2**2*l3**3 - 3169742133573.76*l1**5*l2**2*l3**2 + 153803184338.287*l1**5*l2**2*l3 + 922239081.412024*l1**5*l2**2 + 7.18932824721577e+21*l1**5*l2*l3**11 - 9.29954077136385e+19*l1**5*l2*l3**10 + 1.77170740894536e+20*l1**5*l2*l3**9 + 1.32603058105244e+18*l1**5*l2*l3**8 - 1.04968510979016e+18*l1**5*l2*l3**7 - 7.00885759045773e+15*l1**5*l2*l3**6 + 3.0223832155213e+15*l1**5*l2*l3**5 + 30841657876183.1*l1**5*l2*l3**4 - 4645567063917.82*l1**5*l2*l3**3 - 164424898057.133*l1**5*l2*l3**2 - 3529632077.61045*l1**5*l2*l3 + 252156643.26912*l1**5*l2 - 2.66993436312536e+21*l1**5*l3**12 - 3.52342849101363e+19*l1**5*l3**11 - 9.1240774683506e+19*l1**5*l3**10 + 1.78898868193752e+17*l1**5*l3**9 + 7.26473399196623e+17*l1**5*l3**8 + 396989188671224.0*l1**5*l3**7 - 3.66675858011034e+15*l1**5*l3**6 - 9629441547279.59*l1**5*l3**5 + 7584702674900.81*l1**5*l3**4 + 87680450807.1391*l1**5*l3**3 - 11948659580.568*l1**5*l3**2 - 333160166.084679*l1**5*l3 + 10545629.6756696*l1**5 + 5.22979861226731e+20*l1**4*l2**13 - 2.62075358517563e+21*l1**4*l2**12*l3 - 1.17327280694614e+17*l1**4*l2**12 + 5.92219978677407e+21*l1**4*l2**11*l3**2 + 1.70756319287853e+19*l1**4*l2**11*l3 + 1.78809897467211e+19*l1**4*l2**11 - 1.01676999093927e+22*l1**4*l2**10*l3**3 - 1.50305271745238e+20*l1**4*l2**10*l3**2 - 
7.04232083669219e+19*l1**4*l2**10*l3 + 8.53814253324075e+16*l1**4*l2**10 + 1.0973440831241e+22*l1**4*l2**9*l3**4 + 2.87000578929901e+20*l1**4*l2**9*l3**3 + 1.12739137992941e+20*l1**4*l2**9*l3**2 - 3.52093172372962e+17*l1**4*l2**9*l3 - 1.60605043378956e+17*l1**4*l2**9 - 9.06055412576436e+21*l1**4*l2**8*l3**5 - 5.29055784178328e+20*l1**4*l2**8*l3**4 - 1.44127009435058e+20*l1**4*l2**8*l3**3 + 1.84007258510053e+18*l1**4*l2**8*l3**2 + 5.4861628298114e+17*l1**4*l2**8*l3 - 1.18168417013435e+15*l1**4*l2**8 + 1.46434320003084e+21*l1**4*l2**7*l3**6 + 5.89596661016748e+20*l1**4*l2**7*l3**5 + 9.17959572544022e+19*l1**4*l2**7*l3**4 - 2.4548328751103e+18*l1**4*l2**7*l3**3 - 6.34952431032256e+17*l1**4*l2**7*l3**2 + 2.45779677545335e+15*l1**4*l2**7*l3 + 519676767053673.0*l1**4*l2**7 + 6.60224444188338e+21*l1**4*l2**6*l3**7 - 7.26273917850535e+20*l1**4*l2**6*l3**6 - 2.84993373365878e+19*l1**4*l2**6*l3**5 + 4.15357722468662e+18*l1**4*l2**6*l3**4 + 6.61264293572072e+17*l1**4*l2**6*l3**3 - 9.43053061744174e+15*l1**4*l2**6*l3**2 - 2.66464947850072e+15*l1**4*l2**6*l3 - 14947883987596.2*l1**4*l2**6 - 1.48051199467373e+22*l1**4*l2**5*l3**8 + 6.79270804169185e+20*l1**4*l2**5*l3**7 - 8.94431351242717e+19*l1**4*l2**5*l3**6 - 3.92380486757629e+18*l1**4*l2**5*l3**5 - 4.59788960382502e+16*l1**4*l2**5*l3**4 + 9.00099639397988e+15*l1**4*l2**5*l3**3 + 323578556789217.0*l1**4*l2**5*l3**2 + 33504832069061.9*l1**4*l2**5*l3 + 2698661225102.5*l1**4*l2**5 + 1.79764318608543e+22*l1**4*l2**4*l3**9 - 6.15155957857298e+20*l1**4*l2**4*l3**8 + 1.81464847900891e+20*l1**4*l2**4*l3**7 + 4.58562781070603e+18*l1**4*l2**4*l3**6 - 4.56603470141543e+17*l1**4*l2**4*l3**5 - 1.48689352607122e+16*l1**4*l2**4*l3**4 - 1.89768585685925e+15*l1**4*l2**4*l3**3 - 54724671988212.2*l1**4*l2**4*l3**2 - 740635104371.253*l1**4*l2**4*l3 + 207779257027.005*l1**4*l2**4 - 1.61868881911876e+22*l1**4*l2**3*l3**10 + 3.87902040327269e+20*l1**4*l2**3*l3**9 - 2.42937297300033e+20*l1**4*l2**3*l3**8 - 3.50129306072874e+18*l1**4*l2**3*l3**7 + 1.12304430196595e+18*l1**4*l2**3*l3**6 + 1.27223271276638e+16*l1**4*l2**3*l3**5 - 2.54902151626714e+15*l1**4*l2**3*l3**4 + 22283471936174.8*l1**4*l2**3*l3**3 + 7517459831491.58*l1**4*l2**3*l3**2 - 260482135166.695*l1**4*l2**3*l3 - 11240827871.9856*l1**4*l2**3 + 1.0783575824265e+22*l1**4*l2**2*l3**11 - 1.4456526016664e+20*l1**4*l2**2*l3**10 + 2.25412276060098e+20*l1**4*l2**2*l3**9 + 1.96632113296335e+18*l1**4*l2**2*l3**8 - 1.34700452899449e+18*l1**4*l2**2*l3**7 - 1.05845771967904e+16*l1**4*l2**2*l3**6 + 3.03855508821913e+15*l1**4*l2**2*l3**5 + 9546429120147.85*l1**4*l2**2*l3**4 - 1709884667925.65*l1**4*l2**2*l3**3 + 236102223424.344*l1**4*l2**2*l3**2 + 24380806129.6941*l1**4*l2**2*l3 - 709572311.880389*l1**4*l2**2 - 4.78453296825982e+21*l1**4*l2*l3**12 + 1.4909589823654e+19*l1**4*l2*l3**11 - 1.34070729975615e+20*l1**4*l2*l3**10 - 6.04356474491439e+17*l1**4*l2*l3**9 + 9.97234017501742e+17*l1**4*l2*l3**8 + 5.3156299611261e+15*l1**4*l2*l3**7 - 3.55793846896688e+15*l1**4*l2*l3**6 - 23790683963484.7*l1**4*l2*l3**5 + 6754595285897.23*l1**4*l2*l3**4 + 55616473222.2901*l1**4*l2*l3**3 - 14677722054.7137*l1**4*l2*l3**2 + 263701164.28626*l1**4*l2*l3 + 11302354.8247515*l1**4*l2 + 1.41100364180946e+21*l1**4*l3**13 + 3.13263839454619e+19*l1**4*l3**12 + 5.34935912950446e+19*l1**4*l3**11 - 2.76764872148149e+17*l1**4*l3**10 - 5.13836730951103e+17*l1**4*l3**9 + 760565238738647.0*l1**4*l3**8 + 3.12840210288003e+15*l1**4*l3**7 + 1062502596247.15*l1**4*l3**6 - 8986863803879.69*l1**4*l3**5 - 38431553849.5739*l1**4*l3**4 + 25304462159.566*l1**4*l3**3 + 
223854978.171698*l1**4*l3**2 - 67218437.936884*l1**4*l3 - 478190.210067668*l1**4 - 2.51096175661803e+20*l1**3*l2**14 + 1.39992144632855e+21*l1**3*l2**13*l3 - 3.52583520446378e+18*l1**3*l2**13 - 3.84610911109696e+21*l1**3*l2**12*l3**2 + 3.56193590621597e+18*l1**3*l2**12*l3 - 9.31860977429731e+18*l1**3*l2**12 + 7.5955094818989e+21*l1**3*l2**11*l3**3 + 4.83027389444287e+19*l1**3*l2**11*l3**2 + 4.03560922200192e+19*l1**3*l2**11*l3 + 8.51008730038236e+15*l1**3*l2**11 - 1.09444173069462e+22*l1**3*l2**10*l3**4 - 1.34679684670166e+20*l1**3*l2**10*l3**3 - 8.08253036514868e+19*l1**3*l2**10*l3**2 + 8.60360474324369e+16*l1**3*l2**10*l3 + 1.02792416203912e+17*l1**3*l2**10 + 1.34420479574565e+22*l1**3*l2**9*l3**5 + 2.48974506417596e+20*l1**3*l2**9*l3**4 + 1.17269003549048e+20*l1**3*l2**9*l3**3 - 8.23421084394139e+17*l1**3*l2**9*l3**2 - 3.83064636493261e+17*l1**3*l2**9*l3 + 365902804505369.0*l1**3*l2**9 - 1.43631331926781e+22*l1**3*l2**8*l3**6 - 2.1638760630262e+20*l1**3*l2**8*l3**5 - 1.17507209282343e+20*l1**3*l2**8*l3**4 + 1.33262356077416e+18*l1**3*l2**8*l3**3 + 6.22329983085897e+17*l1**3*l2**8*l3**2 - 1.26849455144857e+15*l1**3*l2**8*l3 - 502010213119869.0*l1**3*l2**8 + 1.56486553794131e+22*l1**3*l2**7*l3**7 + 7.39342371884828e+19*l1**3*l2**7*l3**6 + 1.12349053908127e+20*l1**3*l2**7*l3**5 - 1.669286355075e+18*l1**3*l2**7*l3**4 - 8.01508031725041e+17*l1**3*l2**7*l3**3 + 5.14590680745462e+15*l1**3*l2**7*l3**2 + 1.98922257989428e+15*l1**3*l2**7*l3 + 7777191472503.75*l1**3*l2**7 - 1.68423134019139e+22*l1**3*l2**6*l3**8 + 1.81658731217014e+20*l1**3*l2**6*l3**7 - 1.11825581603746e+20*l1**3*l2**6*l3**6 + 2.23880758210059e+17*l1**3*l2**6*l3**5 + 7.34925990763408e+17*l1**3*l2**6*l3**4 - 3.86926271000512e+15*l1**3*l2**6*l3**3 - 1.73377711226686e+15*l1**3*l2**6*l3**2 - 30239111891492.3*l1**3*l2**6*l3 - 2663590179491.1*l1**3*l2**6 + 1.75738483542261e+22*l1**3*l2**5*l3**9 - 3.16458751559688e+20*l1**3*l2**5*l3**8 + 1.4810177424856e+20*l1**3*l2**5*l3**7 + 1.19795844305382e+18*l1**3*l2**5*l3**6 - 8.43010135000147e+17*l1**3*l2**5*l3**5 + 1.58845281400668e+15*l1**3*l2**5*l3**4 + 3.20475372980969e+15*l1**3*l2**5*l3**3 + 75886911447720.0*l1**3*l2**5*l3**2 + 3986095191622.63*l1**3*l2**5*l3 - 180120147737.094*l1**3*l2**5 - 1.57762154532409e+22*l1**3*l2**4*l3**10 + 3.01922202320806e+20*l1**3*l2**4*l3**9 - 1.85262237091955e+20*l1**3*l2**4*l3**8 - 2.35944508339172e+18*l1**3*l2**4*l3**7 + 1.01395716381789e+18*l1**3*l2**4*l3**6 + 6.34176410106884e+15*l1**3*l2**4*l3**5 - 2.24837977650683e+15*l1**3*l2**4*l3**4 - 84074082662384.6*l1**3*l2**4*l3**3 - 7176799986383.87*l1**3*l2**4*l3**2 + 392150335179.055*l1**3*l2**4*l3 + 25482001539.717*l1**3*l2**4 + 1.15266928279285e+22*l1**3*l2**3*l3**11 - 1.58819020575129e+20*l1**3*l2**3*l3**10 + 1.91707431320946e+20*l1**3*l2**3*l3**9 + 1.88531164808471e+18*l1**3*l2**3*l3**8 - 1.22913902365288e+18*l1**3*l2**3*l3**7 - 8.39509536521533e+15*l1**3*l2**3*l3**6 + 3.93503616212348e+15*l1**3*l2**3*l3**5 + 72836345928226.5*l1**3*l2**3*l3**4 - 1912346934429.6*l1**3*l2**3*l3**3 - 656160213428.7*l1**3*l2**3*l3**2 - 48203366150.6477*l1**3*l2**3*l3 + 1314199278.04171*l1**3*l2**3 - 6.46272088641629e+21*l1**3*l2**2*l3**12 + 2.42596715774709e+19*l1**3*l2**2*l3**11 - 1.48170288469012e+20*l1**3*l2**2*l3**10 - 7.39722973033235e+17*l1**3*l2**2*l3**9 + 1.13933211799749e+18*l1**3*l2**2*l3**8 + 5.88326355649497e+15*l1**3*l2**2*l3**7 - 4.04270516219737e+15*l1**3*l2**2*l3**6 - 44535320090585.0*l1**3*l2**2*l3**5 + 3856920013592.62*l1**3*l2**2*l3**4 + 427665086906.862*l1**3*l2**2*l3**3 + 17260573912.5644*l1**3*l2**2*l3**2 
- 1884858916.2057*l1**3*l2**2*l3 - 60617448.9257619*l1**3*l2**2 + 2.47709850495246e+21*l1**3*l2*l3**13 + 2.08060377765115e+19*l1**3*l2*l3**12 + 7.56987695683646e+19*l1**3*l2*l3**11 + 9.44526172899571e+15*l1**3*l2*l3**10 - 6.97755929479791e+17*l1**3*l2*l3**9 - 1.8304588974255e+15*l1**3*l2*l3**8 + 3.20834852579416e+15*l1**3*l2*l3**7 + 21191322044523.4*l1**3*l2*l3**6 - 7281883426742.83*l1**3*l2*l3**5 - 251579394440.996*l1**3*l2*l3**4 + 12346196583.449*l1**3*l2*l3**3 + 1927327403.49313*l1**3*l2*l3**2 + 80125822.192868*l1**3*l2*l3 + 615051.546052552*l1**3*l2 - 5.89144792485333e+20*l1**3*l3**14 - 1.90371033906543e+19*l1**3*l3**13 - 2.40147379718968e+19*l1**3*l3**12 + 2.24441862867227e+17*l1**3*l3**11 + 2.74554542888644e+17*l1**3*l3**10 - 1.13361365966511e+15*l1**3*l3**9 - 1.96193729665475e+15*l1**3*l3**8 + 3968784611440.35*l1**3*l3**7 + 7215366101653.57*l1**3*l3**6 + 9428695148.97546*l1**3*l3**5 - 29215366145.6796*l1**3*l3**4 - 494612904.1508*l1**3*l3**3 + 99623064.209686*l1**3*l3**2 + 1574729.82970559*l1**3*l3 + 21026.7041174036*l1**3 + 8.8598204126019e+19*l1**2*l2**15 - 5.58923494805647e+20*l1**2*l2**14*l3 + 2.51415601488458e+18*l1**2*l2**14 + 1.78779314561019e+21*l1**2*l2**13*l3**2 - 7.4367445609509e+18*l1**2*l2**13*l3 + 3.41956464544204e+18*l1**2*l2**13 - 3.91118294015283e+21*l1**2*l2**12*l3**3 - 4.58478912252798e+18*l1**2*l2**12*l3**2 - 1.66679335018383e+19*l1**2*l2**12*l3 - 2.54133650975704e+16*l1**2*l2**12 + 6.13113954060751e+21*l1**2*l2**11*l3**4 + 5.65336989439277e+19*l1**2*l2**11*l3**3 + 3.95753966937202e+19*l1**2*l2**11*l3**2 + 3.05802038156597e+16*l1**2*l2**11*l3 - 4.53741107897653e+16*l1**2*l2**11 - 7.15364717755233e+21*l1**2*l2**10*l3**5 - 1.73842929287665e+20*l1**2*l2**10*l3**4 - 6.36407724937916e+19*l1**2*l2**10*l3**3 + 2.89670279263015e+17*l1**2*l2**10*l3**2 + 1.9075186761065e+17*l1**2*l2**10*l3 + 2084630245164.77*l1**2*l2**10 + 5.55450539709285e+21*l1**2*l2**9*l3**6 + 3.17505671910501e+20*l1**2*l2**9*l3**5 + 6.9481671529642e+19*l1**2*l2**9*l3**4 - 1.00110422583069e+18*l1**2*l2**9*l3**3 - 3.77580702102793e+17*l1**2*l2**9*l3**2 + 474504363169889.0*l1**2*l2**9*l3 + 287604003199917.0*l1**2*l2**9 - 1.41552835125374e+21*l1**2*l2**8*l3**7 - 4.51132419446228e+20*l1**2*l2**8*l3**6 - 5.16309113502464e+19*l1**2*l2**8*l3**5 + 2.19882887527736e+18*l1**2*l2**8*l3**4 + 5.0297182394236e+17*l1**2*l2**8*l3**3 - 3.34813467605457e+15*l1**2*l2**8*l3**2 - 1.08743695900253e+15*l1**2*l2**8*l3 - 754735434432.273*l1**2*l2**8 - 4.03410399299399e+21*l1**2*l2**7*l3**8 + 5.023361076386e+20*l1**2*l2**7*l3**7 + 9.161654421155e+18*l1**2*l2**7*l3**6 - 3.15453038259887e+18*l1**2*l2**7*l3**5 - 3.92973932157565e+17*l1**2*l2**7*l3**4 + 7.10321516002498e+15*l1**2*l2**7*l3**3 + 1.40621169950284e+15*l1**2*l2**7*l3**2 + 12089320000244.0*l1**2*l2**7*l3 + 2003222158381.04*l1**2*l2**7 + 8.4080221137283e+21*l1**2*l2**6*l3**9 - 4.64020369971759e+20*l1**2*l2**6*l3**8 + 4.22948134800374e+19*l1**2*l2**6*l3**7 + 3.70777957456659e+18*l1**2*l2**6*l3**6 + 9.31907456085807e+16*l1**2*l2**6*l3**5 - 1.20128737154745e+16*l1**2*l2**6*l3**4 - 1.65051469581783e+15*l1**2*l2**6*l3**3 - 46409029160129.5*l1**2*l2**6*l3**2 - 5563963076872.62*l1**2*l2**6*l3 + 70173223998.2081*l1**2*l2**6 - 1.00952372643429e+22*l1**2*l2**5*l3**10 + 3.32595765242916e+20*l1**2*l2**5*l3**9 - 9.00086882733615e+19*l1**2*l2**5*l3**8 - 3.29929538414824e+18*l1**2*l2**5*l3**7 + 3.43394407164714e+17*l1**2*l2**5*l3**6 + 1.35720767458996e+16*l1**2*l2**5*l3**5 + 178310181293143.0*l1**2*l2**5*l3**4 + 63662060954199.6*l1**2*l2**5*l3**3 + 9756471504571.16*l1**2*l2**5*l3**2 - 
286882481583.826*l1**2*l2**5*l3 - 31043256890.4924*l1**2*l2**5 + 8.78736746493228e+21*l1**2*l2**4*l3**11 - 1.71568585077278e+20*l1**2*l2**4*l3**10 + 1.13450012940929e+20*l1**2*l2**4*l3**9 + 2.28299457385257e+18*l1**2*l2**4*l3**8 - 6.91825167711178e+17*l1**2*l2**4*l3**7 - 1.25898970051488e+16*l1**2*l2**4*l3**6 + 1.10136560178071e+15*l1**2*l2**4*l3**5 - 52014062122686.5*l1**2*l2**4*l3**4 - 7616230929449.78*l1**2*l2**4*l3**3 + 660489716303.229*l1**2*l2**4*l3**2 + 72156857594.8582*l1**2*l2**4*l3 - 992013335.76788*l1**2*l2**4 - 5.7870494559181e+21*l1**2*l2**3*l3**12 + 4.27432308745916e+19*l1**2*l2**3*l3**11 - 1.03558702215447e+20*l1**2*l2**3*l3**10 - 9.99701464187773e+17*l1**2*l2**3*l3**9 + 8.13391834600801e+17*l1**2*l2**3*l3**8 + 7.86254025045913e+15*l1**2*l2**3*l3**7 - 2.67989999406956e+15*l1**2*l2**3*l3**6 + 19675639361383.6*l1**2*l2**3*l3**5 + 8149911771847.62*l1**2*l2**3*l3**4 - 540347615054.617*l1**2*l2**3*l3**3 - 64674750488.4053*l1**2*l2**3*l3**2 + 2200174642.60183*l1**2*l2**3*l3 + 158733420.257165*l1**2*l2**3 + 2.83553424742077e+21*l1**2*l2**2*l3**13 + 1.68951284200244e+19*l1**2*l2**2*l3**12 + 6.87139774019071e+19*l1**2*l2**2*l3**11 + 1.00016905140208e+17*l1**2*l2**2*l3**10 - 6.52867221671585e+17*l1**2*l2**2*l3**9 - 2.64497058740941e+15*l1**2*l2**2*l3**8 + 2.87934975875562e+15*l1**2*l2**2*l3**7 - 1855073626719.0*l1**2*l2**2*l3**6 - 6861869507748.0*l1**2*l2**2*l3**5 + 256672172637.109*l1**2*l2**2*l3**4 + 41715894807.676*l1**2*l2**2*l3**3 - 2564226795.67421*l1**2*l2**2*l3**2 - 305189640.308676*l1**2*l2**2*l3 - 1626258.94716978*l1**2*l2**2 - 9.45148990332984e+20*l1**2*l2*l3**14 - 1.94582782444298e+19*l1**2*l2*l3**13 - 3.02883362507622e+19*l1**2*l2*l3**12 + 1.59634274964315e+17*l1**2*l2*l3**11 + 3.39816326161211e+17*l1**2*l2*l3**10 - 125845525565032.0*l1**2*l2*l3**9 - 1.92329566888713e+15*l1**2*l2*l3**8 - 393038430501.222*l1**2*l2*l3**7 + 5893445864463.41*l1**2*l2*l3**6 - 31307116341.5979*l1**2*l2*l3**5 - 29106836316.1752*l1**2*l2*l3**4 + 606455115.149726*l1**2*l2*l3**3 + 181381501.808689*l1**2*l2*l3**2 - 1085821.56320537*l1**2*l2*l3 - 180200.990267163*l1**2*l2 + 1.84395096822029e+20*l1**2*l3**15 + 8.00146268121729e+18*l1**2*l3**14 + 7.81244696729771e+18*l1**2*l3**13 - 1.15610938737337e+17*l1**2*l3**12 - 1.05295307214746e+17*l1**2*l3**11 + 784736319739944.0*l1**2*l3**10 + 863745096065206.0*l1**2*l3**9 - 5310254139681.18*l1**2*l3**8 - 3886997707488.74*l1**2*l3**7 + 49584032550.4455*l1**2*l3**6 + 22921979285.7701*l1**2*l3**5 - 321455748.654558*l1**2*l3**4 - 162279392.271456*l1**2*l3**3 - 988122.356493276*l1**2*l3**2 + 106842.751322751*l1**2*l3 - 1541.7189888301*l1**2 - 2.11083682973108e+19*l1*l2**16 + 1.54314411155272e+20*l1*l2**15*l3 - 9.33461002449455e+17*l1*l2**15 - 5.83856781675153e+20*l1*l2**14*l3**2 + 4.37334215512231e+18*l1*l2**14*l3 - 7.96489324133938e+17*l1*l2**14 + 1.51467230941949e+21*l1*l2**13*l3**3 - 8.15875859599468e+18*l1*l2**13*l3**2 + 4.40305661618564e+18*l1*l2**13*l3 + 1.3700305379187e+16*l1*l2**13 - 2.95867003846141e+21*l1*l2**12*l3**4 + 5.05409824530644e+18*l1*l2**12*l3**3 - 1.21155900283756e+19*l1*l2**12*l3**2 - 4.90966575022059e+16*l1*l2**12*l3 + 1.26854004454816e+16*l1*l2**12 + 4.66621008986446e+21*l1*l2**11*l3**5 + 1.33933603500621e+19*l1*l2**11*l3**4 + 2.21760204582649e+19*l1*l2**11*l3**3 + 4.37661632591093e+16*l1*l2**11*l3**2 - 6.19324071652763e+16*l1*l2**11*l3 - 68373509854150.5*l1*l2**11 - 6.21148468646157e+21*l1*l2**10*l3**6 - 3.71837228047545e+19*l1*l2**10*l3**5 - 2.90592422021375e+19*l1*l2**10*l3**4 + 6.26566867171008e+16*l1*l2**10*l3**3 + 
1.50338021925939e+17*l1*l2**10*l3**2 + 138495038270819.0*l1*l2**10*l3 - 100994918246892.0*l1*l2**10 + 7.32955941757761e+21*l1*l2**9*l3**7 + 4.51619778919883e+19*l1*l2**9*l3**6 + 2.99695143351165e+19*l1*l2**9*l3**5 - 2.67226092976292e+17*l1*l2**9*l3**4 - 2.48768087430132e+17*l1*l2**9*l3**3 + 287182914510911.0*l1*l2**9*l3**2 + 425177491909286.0*l1*l2**9*l3 - 1434552623970.16*l1*l2**9 - 7.90845279355185e+21*l1*l2**8*l3**8 - 1.98966439085635e+19*l1*l2**8*l3**7 - 2.73848607386717e+19*l1*l2**8*l3**6 + 3.23196282528807e+17*l1*l2**8*l3**5 + 3.06717492878908e+17*l1*l2**8*l3**4 - 914875120625754.0*l1*l2**8*l3**3 - 826290934484632.0*l1*l2**8*l3**2 - 128753968612.463*l1*l2**8*l3 - 946639430988.526*l1*l2**8 + 7.91170578708712e+21*l1*l2**7*l3**9 - 1.95717375927938e+19*l1*l2**7*l3**8 + 2.85564595132485e+19*l1*l2**7*l3**7 - 1.29054071148655e+17*l1*l2**7*l3**6 - 3.31973624319453e+17*l1*l2**7*l3**5 + 1.18859356783079e+15*l1*l2**7*l3**4 + 1.30427797838989e+15*l1*l2**7*l3**3 + 17239478744742.6*l1*l2**7*l3**2 + 3624355075617.64*l1*l2**7*l3 + 8652591670.72646*l1*l2**7 - 7.19202500049196e+21*l1*l2**6*l3**10 + 4.16602098220259e+19*l1*l2**6*l3**9 - 3.57443672193243e+19*l1*l2**6*l3**8 - 2.2051413026705e+17*l1*l2**6*l3**7 + 3.56305829735582e+17*l1*l2**6*l3**6 + 60164909172058.6*l1*l2**6*l3**5 - 1.52980945815876e+15*l1*l2**6*l3**4 - 43548206331222.8*l1*l2**6*l3**3 - 6865101399183.62*l1*l2**6*l3**2 + 76853486581.6489*l1*l2**6*l3 + 20693579155.3742*l1*l2**6 + 5.74080764535461e+21*l1*l2**5*l3**11 - 2.9061064910512e+19*l1*l2**5*l3**10 + 4.40980335857847e+19*l1*l2**5*l3**9 + 3.59808361409023e+17*l1*l2**5*l3**8 - 4.04984164265809e+17*l1*l2**5*l3**7 - 1.53980356766202e+15*l1*l2**5*l3**6 + 1.84647259064561e+15*l1*l2**5*l3**5 + 61518985029268.6*l1*l2**5*l3**4 + 6401935959375.99*l1*l2**5*l3**3 - 370778626174.71*l1*l2**5*l3**2 - 60928610467.8042*l1*l2**5*l3 + 171952602.51846*l1*l2**5 - 3.85628364648395e+21*l1*l2**4*l3**12 + 5.05409824530646e+17*l1*l2**4*l3**11 - 4.4878672095751e+19*l1*l2**4*l3**10 - 2.09479072009412e+17*l1*l2**4*l3**9 + 4.27723154026142e+17*l1*l2**4*l3**8 + 1.98211131866013e+15*l1*l2**4*l3**7 - 1.96645374504163e+15*l1*l2**4*l3**6 - 54544247019038.2*l1*l2**4*l3**5 - 3302416202211.46*l1*l2**4*l3**4 + 535132969370.184*l1*l2**4*l3**3 + 75968560307.6282*l1*l2**4*l3**2 - 1258832480.10725*l1*l2**4*l3 - 172168827.824041*l1*l2**4 + 2.08600962632379e+21*l1*l2**3*l3**13 + 1.78337466655813e+19*l1*l2**3*l3**12 + 3.51216770948467e+19*l1*l2**3*l3**11 - 4.40467155876933e+16*l1*l2**3*l3**10 - 3.71495041822788e+17*l1*l2**3*l3**9 - 866863664837686.0*l1*l2**3*l3**8 + 1.91963021170532e+15*l1*l2**3*l3**7 + 31769477044736.9*l1*l2**3*l3**6 - 1237400807082.58*l1*l2**3*l3**5 - 464526795084.523*l1*l2**3*l3**4 - 51710550168.8815*l1*l2**3*l3**3 + 2908963462.05862*l1*l2**3*l3**2 + 379381675.866554*l1*l2**3*l3 + 1225980.76270797*l1*l2**3 - 8.60501880919289e+20*l1*l2**2*l3**14 - 1.76893438585725e+19*l1*l2**2*l3**13 - 2.01439072699779e+19*l1*l2**2*l3**12 + 1.59914827292899e+17*l1*l2**2*l3**11 + 2.41770226460236e+17*l1*l2**2*l3**10 - 377595631376263.0*l1*l2**2*l3**9 - 1.4479040110255e+15*l1*l2**2*l3**8 - 9846290336521.62*l1*l2**2*l3**7 + 3158127658713.22*l1*l2**2*l3**6 + 226923677330.342*l1*l2**2*l3**5 + 13930326436.1535*l1*l2**2*l3**4 - 2396783272.00178*l1*l2**2*l3**3 - 256990338.739145*l1*l2**2*l3**2 + 2067760.42560295*l1*l2**2*l3 + 180123.399307597*l1*l2**2 + 2.44574393615916e+20*l1*l2*l3**15 + 8.5971242601284e+18*l1*l2*l3**14 + 7.70188094951057e+18*l1*l2*l3**13 - 1.11005204676416e+17*l1*l2*l3**12 - 1.05369490498696e+17*l1*l2*l3**11 + 
616306463586011.0*l1*l2*l3**10 + 748940894477342.0*l1*l2*l3**9 - 289696429378.046*l1*l2*l3**8 - 2639150259438.36*l1*l2*l3**7 - 55635715457.2878*l1*l2*l3**6 + 7512826958.04286*l1*l2*l3**5 + 1081667254.52565*l1*l2*l3**4 + 48223804.926087*l1*l2*l3**3 - 981938.862397572*l1*l2*l3**2 + 32815.6247958719*l1*l2*l3 + 4899.06643151088*l1*l2 - 3.92506973553217e+19*l1*l3**16 - 2.19698556377606e+18*l1*l3**15 - 1.64816269917003e+18*l1*l3**14 + 3.78745643588445e+16*l1*l3**13 + 2.61499313362464e+16*l1*l3**12 - 324139333984678.0*l1*l3**11 - 246050374382384.0*l1*l3**10 + 2389101417586.85*l1*l3**9 + 1283635684383.1*l1*l3**8 - 21609029902.6519*l1*l3**7 - 8791387729.89844*l1*l3**6 + 91268872.5288609*l1*l3**5 + 69803778.438906*l1*l3**4 + 432020.120819756*l1*l3**3 - 137650.375596055*l1*l3**2 - 4319.94356261023*l1*l3 - 188.33721340388*l1 + 2.60279153923138e+18*l2**17 - 2.20804204650644e+19*l2**16*l3 + 1.58800110683735e+17*l2**16 + 9.6225634444943e+19*l2**15*l3**2 - 9.78157109380736e+17*l2**15*l3 + 8.86766463209881e+16*l2**15 - 2.80440888943225e+20*l2**14*l3**3 + 2.73076022539771e+18*l2**14*l3**2 - 5.52742382236163e+17*l2**14*l3 - 2.86230173329187e+15*l2**14 + 5.96637634921453e+20*l2**13*l3**4 - 3.74243941497691e+18*l2**13*l3**3 + 1.71458295139577e+18*l2**13*l3**2 + 1.40743751506324e+16*l2**13*l3 - 1.70974792906508e+15*l2**13 - 9.61092218035175e+20*l2**12*l3**5 - 1.17628119875882e+18*l2**12*l3**4 - 3.44931026116381e+18*l2**12*l3**3 - 2.60913665583151e+16*l2**12*l3**2 + 9.67340826923939e+15*l2**12*l3 + 19812845531238.1*l2**12 + 1.16421780471747e+21*l2**11*l3**6 + 1.77615452620769e+19*l2**11*l3**5 + 4.80361394411566e+18*l2**11*l3**4 - 187034885722670.0*l2**11*l3**3 - 2.72698745066079e+16*l2**11*l3**2 - 58688542142904.4*l2**11*l3 + 17106069172579.7*l2**11 - 9.8573752209868e+20*l2**10*l3**7 - 4.57877233890262e+19*l2**10*l3**6 - 4.58518352355736e+18*l2**10*l3**5 + 1.28189034802188e+17*l2**10*l3**4 + 4.97645438786711e+16*l2**10*l3**3 - 50544901610094.1*l2**10*l3**2 - 81955173572322.3*l2**10*l3 + 782500361874.289*l2**10 + 3.7763324934331e+20*l2**9*l3**8 + 7.39875287005931e+19*l2**9*l3**7 + 2.23884984531975e+18*l2**9*l3**6 - 3.67383274280792e+17*l2**9*l3**5 - 6.12165893529438e+16*l2**9*l3**4 + 601649091720577.0*l2**9*l3**3 + 188294188197688.0*l2**9*l3**2 - 2667880112784.29*l2**9*l3 + 228037018677.789*l2**9 + 4.37146806293326e+20*l2**8*l3**9 - 8.63148439233182e+19*l2**8*l3**8 + 1.62750429331359e+18*l2**8*l3**7 + 6.23597688360161e+17*l2**8*l3**6 + 4.65214975713687e+16*l2**8*l3**5 - 1.74364851611128e+15*l2**8*l3**4 - 283690324765982.0*l2**8*l3**3 + 2592867749491.8*l2**8*l3**2 - 1212819535397.16*l2**8*l3 - 16068869742.8372*l2**8 - 1.09136932147961e+21*l2**7*l3**10 + 7.42780533956465e+19*l2**7*l3**9 - 5.80672866175214e+18*l2**7*l3**8 - 7.31600315138993e+17*l2**7*l3**7 - 3.41097084777413e+15*l2**7*l3**6 + 2.95987967472782e+15*l2**7*l3**5 + 246322018449790.0*l2**7*l3**4 + 3006518109178.76*l2**7*l3**3 + 3134820032763.87*l2**7*l3**2 + 28715828695.213*l2**7*l3 - 6507275518.44981*l2**7 + 1.32089896436205e+21*l2**6*l3**11 - 4.52582464299941e+19*l2**6*l3**10 + 8.94103288306203e+18*l2**6*l3**9 + 6.11674214395339e+17*l2**6*l3**8 - 5.07521944141064e+16*l2**6*l3**7 - 3.42544906463728e+15*l2**6*l3**6 - 56524463161937.0*l2**6*l3**5 - 13226646722987.4*l2**6*l3**4 - 4934384721925.57*l2**6*l3**3 + 41322699658.0097*l2**6*l3**2 + 25851407707.0016*l2**6*l3 + 149108734.984082*l2**6 - 1.13012578303713e+21*l2**5*l3**12 + 1.57038052622021e+19*l2**5*l3**11 - 1.00013275772783e+19*l2**5*l3**10 - 3.40356733293863e+17*l2**5*l3**9 + 
9.09278190566114e+16*l2**5*l3**8 + 2.68878325536657e+15*l2**5*l3**7 - 228348315503998.0*l2**5*l3**6 + 20585387797500.7*l2**5*l3**5 + 5505767546348.67*l2**5*l3**4 - 177195328017.412*l2**5*l3**3 - 46967010844.3257*l2**5*l3**2 + 36872112.8331382*l2**5*l3 + 79658467.270978*l2**5 + 7.32283707607754e+20*l2**4*l3**13 + 1.91634558467869e+18*l2**4*l3**12 + 8.67905801251099e+18*l2**4*l3**11 + 8.76492233217951e+16*l2**4*l3**10 - 9.90008727368801e+16*l2**4*l3**9 - 1.29684079840831e+15*l2**4*l3**8 + 425037768491585.0*l2**4*l3**7 - 19939641468078.1*l2**4*l3**6 - 4483930098828.43*l2**4*l3**5 + 271015655308.467*l2**4*l3**4 + 50666811781.0133*l2**4*l3**3 - 1029908785.64409*l2**4*l3**2 - 228882721.007331*l2**4*l3 - 413881.871472361*l2**4 - 3.61324680188507e+20*l2**3*l3**14 - 6.51015988264472e+18*l2**3*l3**13 - 5.79784916300181e+18*l2**3*l3**12 + 3.95578783303487e+16*l2**3*l3**11 + 7.6856427291642e+16*l2**3*l3**10 + 159861021916726.0*l2**3*l3**9 - 441338454212718.0*l2**3*l3**8 + 13361047795486.4*l2**3*l3**7 + 2939256390749.45*l2**3*l3**6 - 230637428684.938*l2**3*l3**5 - 35023742288.4978*l2**3*l3**4 + 1629259170.17708*l2**3*l3**3 + 264196866.22848*l2**3*l3**2 - 1163321.35587151*l2**3*l3 - 112813.687808914*l2**3 + 1.31594918641892e+20*l2**2*l3**15 + 4.41717872153568e+18*l2**2*l3**14 + 2.85984892719894e+18*l2**2*l3**13 - 5.39128058095651e+16*l2**2*l3**12 - 4.27768749063883e+16*l2**2*l3**11 + 269082654733491.0*l2**2*l3**10 + 303084335696793.0*l2**2*l3**9 - 6624052858878.1*l2**2*l3**8 - 1594286383827.02*l2**2*l3**7 + 122383821625.868*l2**2*l3**6 + 16571248176.4845*l2**2*l3**5 - 1242698932.03759*l2**2*l3**4 - 174413838.804562*l2**2*l3**3 + 1853811.52989164*l2**2*l3**2 + 149656.907701352*l2**2*l3 - 1299.11346266902*l2**2 - 3.24085711702602e+19*l2*l3**16 - 1.61593617366941e+18*l2*l3**15 - 9.46304272128959e+17*l2*l3**14 + 2.59978491154538e+16*l2*l3**13 + 1.57977118036534e+16*l2*l3**12 - 202604800144774.0*l2*l3**11 - 133875680792756.0*l2*l3**10 + 2452913691504.42*l2*l3**9 + 694123333980.559*l2*l3**8 - 41210453287.1886*l2*l3**7 - 6229858734.44413*l2*l3**6 + 491425635.652069*l2*l3**5 + 72367021.7178938*l2*l3**4 - 1670367.87171913*l2*l3**3 - 150777.963289568*l2*l3**2 + 4586.02704291593*l2*l3 + 507.586596119929*l2 + 4.33348004529175e+18*l3**17 + 3.01483836656672e+17*l3**16 + 1.70279178179676e+17*l3**15 - 6.04189479057758e+15*l3**14 - 3.19469780145799e+15*l3**13 + 62485758141886.8*l3**12 + 34116454409135.5*l3**11 - 696758501197.423*l3**10 - 211195334220.04*l3**9 + 11550151557.4949*l3**8 + 1991544090.79572*l3**7 - 162065674.315877*l3**6 - 27582761.5998717*l3**5 + 638548.823616222*l3**4 + 90977.6297167243*l3**3 - 2324.31746031746*l3**2 - 319.762962962963*l3 - 0.0770370370370377)
| 20,548.666667
| 123,153
| 0.680263
|
4a177136674952c3e75060f31ab8d2a9fbacfbad
| 12,249
|
py
|
Python
|
2021.12.21/Distilling Knowledge via Knowledge Review/code/Detection/model/rcnn.py
|
ToniChopp/MIRACLE-Paper-Sharing-Album
|
72a3843101483fc8b53df2746c488da066eda2a1
|
[
"MIT"
] | 7
|
2021-11-01T08:44:06.000Z
|
2022-01-10T09:42:34.000Z
|
2021.12.21/Distilling Knowledge via Knowledge Review/code/Detection/model/rcnn.py
|
ToniChopp/MIRACLE-Paper-Sharing-Album
|
72a3843101483fc8b53df2746c488da066eda2a1
|
[
"MIT"
] | null | null | null |
2021.12.21/Distilling Knowledge via Knowledge Review/code/Detection/model/rcnn.py
|
ToniChopp/MIRACLE-Paper-Sharing-Album
|
72a3843101483fc8b53df2746c488da066eda2a1
|
[
"MIT"
] | 1
|
2021-11-16T16:31:05.000Z
|
2021-11-16T16:31:05.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
import numpy as np
from typing import Dict, List, Optional, Tuple
import torch
from torch import nn
from detectron2.config import configurable
from detectron2.data.detection_utils import convert_image_to_rgb
from detectron2.structures import ImageList, Instances
from detectron2.utils.events import get_event_storage
from detectron2.utils.logger import log_first_n
from detectron2.modeling.backbone import Backbone, build_backbone
from detectron2.modeling.postprocessing import detector_postprocess
from detectron2.modeling.proposal_generator import build_proposal_generator
from detectron2.modeling.roi_heads import build_roi_heads
from detectron2.modeling.meta_arch.build import META_ARCH_REGISTRY
from .kd_trans import build_kd_trans, hcl
from .teacher import build_teacher
__all__ = ["ReviewKD", "ProposalNetwork"]
@META_ARCH_REGISTRY.register()
class ReviewKD(nn.Module):
"""
    Generalized R-CNN. Any model that contains the following three components:
1. Per-image feature extraction (aka backbone)
2. Region proposal generation
3. Per-region feature extraction and prediction
"""
@configurable
def __init__(
self,
*,
backbone: Backbone,
proposal_generator: nn.Module,
roi_heads: nn.Module,
pixel_mean: Tuple[float],
pixel_std: Tuple[float],
teacher_pixel_mean: Tuple[float],
teacher_pixel_std: Tuple[float],
teacher: nn.Module,
kd_args,
input_format: Optional[str] = None,
teacher_input_format: Optional[str] = None,
vis_period: int = 0,
):
"""
Args:
backbone: a backbone module, must follow detectron2's backbone interface
proposal_generator: a module that generates proposals using backbone features
roi_heads: a ROI head that performs per-region computation
            pixel_mean, pixel_std: list or tuple with #channels elements, representing
                the per-channel mean and std to be used to normalize the input image
            input_format: describes the meaning of the channels of the input. Needed by visualization
vis_period: the period to run visualization. Set to 0 to disable.
"""
super().__init__()
self.backbone = backbone
self.proposal_generator = proposal_generator
self.roi_heads = roi_heads
self.teacher = teacher
self.kd_args = kd_args
if self.kd_args.USE_REVIEWKD:
self.kd_trans = build_kd_trans(self.kd_args)
self.input_format = input_format
self.teacher_input_format = teacher_input_format
self.vis_period = vis_period
if vis_period > 0:
assert input_format is not None, "input_format is required for visualization!"
self.register_buffer("pixel_mean", torch.tensor(pixel_mean).view(-1, 1, 1), False)
self.register_buffer("pixel_std", torch.tensor(pixel_std).view(-1, 1, 1), False)
self.register_buffer("teacher_pixel_mean", torch.tensor(teacher_pixel_mean).view(-1, 1, 1), False)
self.register_buffer("teacher_pixel_std", torch.tensor(teacher_pixel_std).view(-1, 1, 1), False)
assert (
self.pixel_mean.shape == self.pixel_std.shape
), f"{self.pixel_mean} and {self.pixel_std} have different shapes!"
@classmethod
def from_config(cls, cfg):
backbone = build_backbone(cfg)
return {
"backbone": backbone,
"proposal_generator": build_proposal_generator(cfg, backbone.output_shape()),
"roi_heads": build_roi_heads(cfg, backbone.output_shape()),
"input_format": cfg.INPUT.FORMAT,
"vis_period": cfg.VIS_PERIOD,
"pixel_mean": cfg.MODEL.PIXEL_MEAN,
"pixel_std": cfg.MODEL.PIXEL_STD,
"kd_args": cfg.KD,
"teacher": build_teacher(cfg),
"teacher_input_format": cfg.TEACHER.INPUT.FORMAT,
"teacher_pixel_mean": cfg.TEACHER.MODEL.PIXEL_MEAN,
"teacher_pixel_std": cfg.TEACHER.MODEL.PIXEL_STD,
}
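    # Note (editorial): besides the stock detectron2 keys, from_config above
    # assumes two distillation-specific config nodes that are not part of
    # detectron2 itself: cfg.KD (with USE_REVIEWKD and REVIEWKD_LOSS_WEIGHT,
    # as used in forward()) and cfg.TEACHER (mirroring MODEL/INPUT). Both
    # must be added to the config before this model is instantiated.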
@property
def device(self):
return self.pixel_mean.device
def visualize_training(self, batched_inputs, proposals):
"""
A function used to visualize images and proposals. It shows ground truth
bounding boxes on the original image and up to 20 top-scoring predicted
object proposals on the original image. Users can implement different
visualization functions for different models.
Args:
batched_inputs (list): a list that contains input to the model.
proposals (list): a list that contains predicted proposals. Both
batched_inputs and proposals should have the same length.
"""
from detectron2.utils.visualizer import Visualizer
storage = get_event_storage()
max_vis_prop = 20
for input, prop in zip(batched_inputs, proposals):
img = input["image"]
img = convert_image_to_rgb(img.permute(1, 2, 0), self.input_format)
v_gt = Visualizer(img, None)
v_gt = v_gt.overlay_instances(boxes=input["instances"].gt_boxes)
anno_img = v_gt.get_image()
box_size = min(len(prop.proposal_boxes), max_vis_prop)
v_pred = Visualizer(img, None)
v_pred = v_pred.overlay_instances(
boxes=prop.proposal_boxes[0:box_size].tensor.cpu().numpy()
)
prop_img = v_pred.get_image()
vis_img = np.concatenate((anno_img, prop_img), axis=1)
vis_img = vis_img.transpose(2, 0, 1)
vis_name = "Left: GT bounding boxes; Right: Predicted proposals"
storage.put_image(vis_name, vis_img)
break # only visualize one image in a batch
def forward(self, batched_inputs: Tuple[Dict[str, torch.Tensor]]):
"""
Args:
batched_inputs: a list, batched outputs of :class:`DatasetMapper` .
Each item in the list contains the inputs for one image.
For now, each item in the list is a dict that contains:
* image: Tensor, image in (C, H, W) format.
* instances (optional): groundtruth :class:`Instances`
* proposals (optional): :class:`Instances`, precomputed proposals.
Other information that's included in the original dicts, such as:
* "height", "width" (int): the output resolution of the model, used in inference.
See :meth:`postprocess` for details.
Returns:
list[dict]:
Each dict is the output for one input image.
The dict contains one key "instances" whose value is a :class:`Instances`.
The :class:`Instances` object has the following keys:
"pred_boxes", "pred_classes", "scores", "pred_masks", "pred_keypoints"
"""
if not self.training:
return self.inference(batched_inputs)
images = self.preprocess_image(batched_inputs)
if "instances" in batched_inputs[0]:
gt_instances = [x["instances"].to(self.device) for x in batched_inputs]
else:
gt_instances = None
features = self.backbone(images.tensor)
losses = {}
if self.kd_args.USE_REVIEWKD:
teacher_images = self.teacher_preprocess_image(batched_inputs)
t_features = self.teacher.backbone(teacher_images.tensor)
t_features = [t_features[f] for f in t_features]
s_features = [features[f] for f in features]
s_features = self.kd_trans(s_features)
losses['loss_ReviewKD'] = hcl(s_features, t_features) * self.kd_args.REVIEWKD_LOSS_WEIGHT
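        # The block above is the ReviewKD pipeline: the student feature
        # pyramid is first fused by kd_trans (the paper's attention-based
        # fusion stage) and then matched against the frozen teacher pyramid
        # with the hierarchical context loss `hcl`; a sketch of `hcl` is
        # given after this class.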
if self.proposal_generator is not None:
proposals, proposal_losses = self.proposal_generator(images, features, gt_instances)
else:
assert "proposals" in batched_inputs[0]
proposals = [x["proposals"].to(self.device) for x in batched_inputs]
proposal_losses = {}
_, detector_losses = self.roi_heads(images, features, proposals, gt_instances)
if self.vis_period > 0:
storage = get_event_storage()
if storage.iter % self.vis_period == 0:
self.visualize_training(batched_inputs, proposals)
losses.update(detector_losses)
losses.update(proposal_losses)
return losses
def inference(
self,
batched_inputs: Tuple[Dict[str, torch.Tensor]],
detected_instances: Optional[List[Instances]] = None,
do_postprocess: bool = True,
):
"""
Run inference on the given inputs.
Args:
batched_inputs (list[dict]): same as in :meth:`forward`
detected_instances (None or list[Instances]): if not None, it
contains an `Instances` object per image. The `Instances`
object contains "pred_boxes" and "pred_classes" which are
known boxes in the image.
The inference will then skip the detection of bounding boxes,
and only predict other per-ROI outputs.
do_postprocess (bool): whether to apply post-processing on the outputs.
Returns:
When do_postprocess=True, same as in :meth:`forward`.
Otherwise, a list[Instances] containing raw network outputs.
"""
assert not self.training
images = self.preprocess_image(batched_inputs)
features = self.backbone(images.tensor)
if detected_instances is None:
if self.proposal_generator is not None:
proposals, _ = self.proposal_generator(images, features, None)
else:
assert "proposals" in batched_inputs[0]
proposals = [x["proposals"].to(self.device) for x in batched_inputs]
results, _ = self.roi_heads(images, features, proposals, None)
else:
detected_instances = [x.to(self.device) for x in detected_instances]
results = self.roi_heads.forward_with_given_boxes(features, detected_instances)
if do_postprocess:
assert not torch.jit.is_scripting(), "Scripting is not supported for postprocess."
return ReviewKD._postprocess(results, batched_inputs, images.image_sizes)
else:
return results
def preprocess_image(self, batched_inputs: Tuple[Dict[str, torch.Tensor]]):
"""
Normalize, pad and batch the input images.
"""
images = [x["image"].to(self.device) for x in batched_inputs]
images = [(x - self.pixel_mean) / self.pixel_std for x in images]
images = ImageList.from_tensors(images, self.backbone.size_divisibility)
return images
def teacher_preprocess_image(self, batched_inputs: Tuple[Dict[str, torch.Tensor]]):
"""
Normalize, pad and batch the input images.
"""
images = [x["image"].to(self.device) for x in batched_inputs]
images = [(x - self.teacher_pixel_mean) / self.teacher_pixel_std for x in images]
if self.input_format != self.teacher_input_format:
images = [x.index_select(0,torch.LongTensor([2,1,0]).to(self.device)) for x in images]
images = ImageList.from_tensors(images, self.backbone.size_divisibility)
return images
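    # Note on the channel flip above: when the student and teacher input
    # formats differ (e.g. "BGR" vs "RGB"), index_select with [2, 1, 0]
    # reverses the channel axis. Tiny illustration (editorial, assumed shapes):
    #   x = torch.arange(3).view(3, 1, 1)               # channels [0, 1, 2]
    #   x.index_select(0, torch.LongTensor([2, 1, 0]))  # channels [2, 1, 0]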
@staticmethod
def _postprocess(instances, batched_inputs: Tuple[Dict[str, torch.Tensor]], image_sizes):
"""
Rescale the output instances to the target size.
"""
# note: private function; subject to changes
processed_results = []
for results_per_image, input_per_image, image_size in zip(
instances, batched_inputs, image_sizes
):
height = input_per_image.get("height", image_size[0])
width = input_per_image.get("width", image_size[1])
r = detector_postprocess(results_per_image, height, width)
processed_results.append({"instances": r})
return processed_results
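# --- Editorial sketch (not part of the original file) ------------------------
# `hcl` imported from .kd_trans above is the hierarchical context loss (HCL)
# of the ReviewKD paper; its real implementation lives in kd_trans.py. The
# function below is only a plausible minimal reconstruction for readers of
# this file: the name `_hcl_sketch` and the pooling levels (4, 2, 1) are
# assumptions, not the actual kd_trans API.
def _hcl_sketch(s_features, t_features, levels=(4, 2, 1)):
    """MSE between student/teacher feature pairs, averaged over pooled scales."""
    import torch.nn.functional as F  # local import: illustration only
    loss = 0.0
    for s, t in zip(s_features, t_features):
        # distance at full resolution plus a few spatially pooled context levels
        term = F.mse_loss(s, t)
        for level in levels:
            term = term + F.mse_loss(F.adaptive_avg_pool2d(s, (level, level)),
                                     F.adaptive_avg_pool2d(t, (level, level)))
        loss = loss + term / (len(levels) + 1)
    return loss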
| 42.679443
| 106
| 0.645359
|
4a17729041d683120c27a0d1d3218d0fc361b23d
| 280
|
py
|
Python
|
app/secret_key.py
|
chick0/dashboard
|
b29034ef0a770bf9e1aefaf0479a6bc93a82cc3a
|
[
"MIT"
] | 1
|
2021-07-27T07:43:20.000Z
|
2021-07-27T07:43:20.000Z
|
app/secret_key.py
|
chick0/upload
|
5f63590706d9a5083cbb2a42a1e5e386e9590424
|
[
"MIT"
] | null | null | null |
app/secret_key.py
|
chick0/upload
|
5f63590706d9a5083cbb2a42a1e5e386e9590424
|
[
"MIT"
] | null | null | null |
from secrets import token_bytes
SECRET_KEY = token_bytes(32)
try:
with open(".SECRET_KEY", mode="rb") as key_reader:
SECRET_KEY = key_reader.read()
except FileNotFoundError:
with open(".SECRET_KEY", mode="wb") as key_writer:
key_writer.write(SECRET_KEY)
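# Note (editorial): on the first run no .SECRET_KEY file exists, so the
# freshly generated 32-byte key is persisted; later runs read the stored key
# back, keeping signed sessions valid across application restarts.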
| 25.454545
| 54
| 0.707143
|
4a17740fedd63df644d1055ddc218fe74606d434
| 7,511
|
py
|
Python
|
lib/klepto/tests/test_cache.py
|
JustinDeOcampo/Alfred-JapaneseTranslator
|
b8614c718384b2303813fdabb580d4ffd8e3f5c6
|
[
"MIT"
] | 2
|
2020-07-13T00:04:23.000Z
|
2021-02-05T17:07:39.000Z
|
lib/klepto/tests/test_cache.py
|
JustinDeOcampo/Alfred-JapaneseTranslator
|
b8614c718384b2303813fdabb580d4ffd8e3f5c6
|
[
"MIT"
] | null | null | null |
lib/klepto/tests/test_cache.py
|
JustinDeOcampo/Alfred-JapaneseTranslator
|
b8614c718384b2303813fdabb580d4ffd8e3f5c6
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#
# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
# Copyright (c) 2013-2016 California Institute of Technology.
# Copyright (c) 2016-2020 The Uncertainty Quantification Foundation.
# License: 3-clause BSD. The full license text is available at:
# - https://github.com/uqfoundation/klepto/blob/master/LICENSE
"""
The decorator should produce the behavior as displayed in the following:
>>> s = Spam()
>>> s.eggs()
new: (), {}
42
>>> s.eggs()
42
>>> s.eggs(1)
new: (1,), {}
64
>>> s.eggs(1)
64
>>> s.eggs(1, bar='spam')
new: (1,), {'bar': 'spam'}
78
>>> s2 = Spam()
>>> s2.eggs(1, bar='spam')
78
"""
from klepto.safe import inf_cache as memoized
#from klepto import inf_cache as memoized
from klepto.keymaps import picklemap
dumps = picklemap(flat=False, serializer='dill')
class Spam(object):
"""A simple class with a memoized method"""
@memoized(keymap=dumps, ignore='self')
def eggs(self, *args, **kwds):
#print ('new:', args, kwds)
from random import random
return int(100 * random())
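# Note (editorial): ignore='self' in the decorator above drops the instance
# from the cache key, so different Spam() objects share cached results; this
# is why s2.eggs(1, bar='spam') in the module docstring is a cache hit rather
# than a recomputation.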
def test_classmethod():
s = Spam()
assert s.eggs() == s.eggs()
assert s.eggs(1) == s.eggs(1)
s2 = Spam()
assert s.eggs(1, bar='spam') == s2.eggs(1, bar='spam')
assert s.eggs.info().hit == 3
assert s.eggs.info().miss == 3
assert s.eggs.info().load == 0
#print ('=' * 30)
# here caching saves time in a recursive function...
@memoized(keymap=dumps)
def fibonacci(n):
"Return the nth fibonacci number."
#print ('calculating %s' % n)
if n in (0, 1):
return n
return fibonacci(n-1) + fibonacci(n-2)
def test_recursive():
fibonacci(7)
fibonacci(9)
fibonacci(3)
assert fibonacci.info().hit == 9
assert fibonacci.info().miss == 10
assert fibonacci.info().load == 0
#print ('=' * 30)
def test_basic():
try:
from numpy import sum, asarray
@memoized(keymap=dumps, tol=3)
def add(*args):
#print ('new:', args)
return sum(args)
assert add(1,2,3.0001) == 6.0000999999999998
assert add(1,2,3.00012) == 6.0000999999999998
assert add(1,2,3.0234) == 6.0234000000000005
assert add(1,2,3.023) == 6.0234000000000005
assert add.info().hit == 2
assert add.info().miss == 2
assert add.info().load == 0
def cost(x,y):
#print ('new: %s or %s' % (str(x), str(y)))
x = asarray(x)
y = asarray(y)
return sum(x**2 - y**2)
cost1 = memoized(keymap=dumps, tol=1)(cost)
cost0 = memoized(keymap=dumps, tol=0)(cost)
costD = memoized(keymap=dumps, tol=0, deep=True)(cost)
#print ("rounding to one decimals...")
cost1([1,2,3.1234], 3.9876)# == -32.94723372
cost1([1,2,3.1234], 3.9876)# == -32.94723372
cost1([1,2,3.1234], 3.6789)# == -25.84728807
cost1([1,2,3.4321], 3.6789)# == -23.82360522
assert cost1.info().hit == 1
assert cost1.info().miss == 3
assert cost1.info().load == 0
#print ("\nrerun the above with rounding to zero decimals...")
cost0([1,2,3.1234], 3.9876)# == -32.94723372
cost0([1,2,3.1234], 3.9876)# == -32.94723372
cost0([1,2,3.1234], 3.6789)# == -32.94723372
cost0([1,2,3.4321], 3.6789)# == -23.82360522
assert cost0.info().hit == 2
assert cost0.info().miss == 2
assert cost0.info().load == 0
#print ("\nrerun again with deep rounding to zero decimals...")
costD([1,2,3.1234], 3.9876)# == -32.94723372
costD([1,2,3.1234], 3.9876)# == -32.94723372
costD([1,2,3.1234], 3.6789)# == -32.94723372
costD([1,2,3.4321], 3.6789)# == -32.94723372
assert costD.info().hit == 3
assert costD.info().miss == 1
assert costD.info().load == 0
#print ("")
except ImportError:
pass
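# Note (editorial): the tol keyword controls key rounding before hashing:
# tol=3 rounds scalar arguments to three decimals, tol=0 rounds to integers,
# and deep=True applies the rounding inside containers as well, which is why
# costD above collapses all four calls onto a single cache entry.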
import sys
import dill
from klepto.archives import cache, sql_archive, dict_archive
def test_memoized():
@memoized(cache=sql_archive())
def add(x,y):
return x+y
add(1,2)
add(1,2)
add(1,3)
#print ("sql_cache = %s" % add.__cache__())
_key4 = '((), '+str({'y':3, 'x':1})+')'
_key3 = '((), '+str({'y':2, 'x':1})+')'
key4_ = '((), '+str({'x':1, 'y':3})+')'
key3_ = '((), '+str({'x':1, 'y':2})+')'
    assert add.__cache__() in ({_key4: 4, _key3: 3}, {key4_: 4, key3_: 3})
    @memoized(cache=dict_archive(cached=False)) # use archive backend 'directly'
def add(x,y):
return x+y
add(1,2)
add(1,2)
add(1,3)
#print ("dict_cache = %s" % add.__cache__())
    assert add.__cache__() in ({_key4: 4, _key3: 3}, {key4_: 4, key3_: 3})
@memoized(cache=dict())
def add(x,y):
return x+y
add(1,2)
add(1,2)
add(1,3)
#print ("dict_cache = %s" % add.__cache__())
    assert add.__cache__() in ({_key4: 4, _key3: 3}, {key4_: 4, key3_: 3})
@memoized(cache=add.__cache__())
def add(x,y):
return x+y
add(1,2)
add(2,2)
#print ("re_dict_cache = %s" % add.__cache__())
_key2 = '((), '+str({'y':2, 'x':2})+')'
key2_ = '((), '+str({'x':2, 'y':2})+')'
    assert add.__cache__() in ({_key4: 4, _key3: 3, _key2: 4},
                               {key4_: 4, key3_: 3, key2_: 4})
@memoized(keymap=dumps)
def add(x,y):
return x+y
add(1,2)
add(1,2)
add(1,3)
#print ("pickle_dict_cache = %s" % add.__cache__())
_pkey4 = dill.dumps(eval(_key4))
_pkey3 = dill.dumps(eval(_key3))
pkey4_ = dill.dumps(eval(key4_))
pkey3_ = dill.dumps(eval(key3_))
    assert add.__cache__() in ({_pkey4: 4, _pkey3: 3}, {pkey4_: 4, pkey3_: 3})
from klepto import lru_cache
def test_lru():
@lru_cache(maxsize=3, cache=dict_archive('test'), purge=True)
def identity(x):
return x
identity(1)
identity(2)
identity(3)
ic = identity.__cache__()
assert len(ic.keys()) == 3
assert len(ic.archive.keys()) == 0
identity(4)
assert len(ic.keys()) == 0
assert len(ic.archive.keys()) == 4
identity(5)
assert len(ic.keys()) == 1
assert len(ic.archive.keys()) == 4
@lru_cache(maxsize=3, cache=dict_archive('test'), purge=False)
def inverse(x):
return -x
inverse(1)
inverse(2)
inverse(3)
ic = inverse.__cache__()
assert len(ic.keys()) == 3
assert len(ic.archive.keys()) == 0
inverse(4)
assert len(ic.keys()) == 3
assert len(ic.archive.keys()) == 1
inverse(5)
assert len(ic.keys()) == 3
assert len(ic.archive.keys()) == 2
@lru_cache(maxsize=3, cache=dict_archive('test', cached=False))
def foo(x):
return x
foo(1)
foo(2)
foo(3)
ic = foo.__cache__()
assert len(ic.keys()) == 3
assert len(ic.archive.keys()) == 3
foo(4)
assert len(ic.keys()) == 3
assert len(ic.archive.keys()) == 3
foo(5)
assert len(ic.keys()) == 3
assert len(ic.archive.keys()) == 3
#XXX: should it be 'correct' expected behavior to ignore purge?
@lru_cache(maxsize=3, cache=None, purge=True)
def bar(x):
return -x
bar(1)
bar(2)
bar(3)
ic = bar.__cache__()
assert len(ic.keys()) == 3
assert len(ic.archive.keys()) == 0
bar(4)
assert len(ic.keys()) == 3
assert len(ic.archive.keys()) == 0
bar(5)
assert len(ic.keys()) == 3
assert len(ic.archive.keys()) == 0
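# Summary of the eviction semantics exercised above (as the asserts show):
# purge=True flushes the whole in-memory cache to the archive once maxsize is
# exceeded; purge=False evicts only the least-recently-used entry into the
# archive; cached=False writes through to the archive directly; and with
# cache=None there is no archive, so purge has no effect.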
if __name__ == '__main__':
test_classmethod()
test_recursive()
test_basic()
test_memoized()
test_lru()
| 27.715867
| 94
| 0.565704
|
4a17748670725abe834a8ca74cb5aba8bdf51d7d
| 16,960
|
py
|
Python
|
tests/i18n_tests.py
|
euku/spbot
|
e6d505c8965b4e6730b3dc4505f92e35a3edb2e2
|
[
"MIT"
] | 1
|
2022-02-10T00:20:42.000Z
|
2022-02-10T00:20:42.000Z
|
tests/i18n_tests.py
|
euku/spbot
|
e6d505c8965b4e6730b3dc4505f92e35a3edb2e2
|
[
"MIT"
] | null | null | null |
tests/i18n_tests.py
|
euku/spbot
|
e6d505c8965b4e6730b3dc4505f92e35a3edb2e2
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
"""Test i18n module."""
#
# (C) Pywikibot team, 2007-2022
#
# Distributed under the terms of the MIT license.
#
from contextlib import suppress
import pywikibot
from pywikibot import bot, config, i18n
from pywikibot.exceptions import TranslationError
from tests.aspects import DefaultSiteTestCase, PwbTestCase, TestCase, unittest
class Site:
"""An object holding code and family, duck typing a pywikibot Site."""
class Family:
"""Nested class to hold the family name attribute."""
def __init__(self, code, family='wikipedia'):
"""Initializer."""
self.code = code
self.family = self.Family()
self.family.name = family
def __repr__(self):
return "'{site.family.name}:{site.code}'".format(site=self)
class TestTranslate(TestCase):
"""Test translate method with fallback True."""
net = False
xdict = {
'en': 'test-localized EN',
'commons': 'test-localized COMMONS',
'wikipedia': {
'nl': 'test-localized WP-NL',
'fy': 'test-localized WP-FY',
'wikipedia': { # test a deeply nested xdict
'de': 'test-localized WP-DE',
},
},
'wikisource': {
'en': 'test-localized WS-EN',
'fy': 'test-localized WS-FY',
'ja': 'test-localized WS-JA',
},
}
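    # Lookup order exercised by the tests below: a top-level language key
    # ('en') wins over any family sub-dict; otherwise the family sub-dict
    # matching the site is tried, with the 'wikipedia' sub-dict serving as
    # the family-level fallback.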
def test_translate_commons(self):
"""Test localization with xdict for commons.
        Test whether the localization is found either with the Site object
or with the site code.
"""
site = Site('commons')
for code in (site, 'commons'):
with self.subTest(code=code):
self.assertEqual(i18n.translate(code, self.xdict),
'test-localized COMMONS')
def test_translate_de(self):
"""Test localization fallbacks for 'de' with xdict.
'de' key is defined in a nested 'wikipedia' sub dict. This should
always fall back to this nested 'wikipedia' entry.
"""
site1 = Site('de', 'wikipedia')
site2 = Site('de', 'wikibooks')
site3 = Site('de', 'wikisource')
for code in (site1, site2, site3, 'de'):
with self.subTest(code=code):
self.assertEqual(i18n.translate(code, self.xdict),
'test-localized WP-DE')
def test_translate_en(self):
"""Test localization fallbacks for 'en' with xdict.
        'en' key is defined directly in xdict. This topmost key takes
        precedence over the site-specific key. Therefore 'test-localized
        WS-EN' is not given back.
"""
site1 = Site('en', 'wikipedia')
site2 = Site('en', 'wikibooks')
site3 = Site('en', 'wikisource')
for code in (site1, site2, site3, 'en'):
with self.subTest(code=code):
self.assertEqual(i18n.translate(code, self.xdict),
'test-localized EN')
def test_translate_fy(self):
"""Test localization fallbacks for 'fy' with xdict.
'fy' key is defined in 'wikipedia' and 'wikisource' sub dicts.
        They should have different localizations for these two families, while
        'wikibooks' falls back to the 'wikipedia' entry.
Note: If the translate code is given as string, the result depends
on the current config.family entry. Therefore there is no test with
the code given as string.
"""
site1 = Site('fy', 'wikipedia')
site2 = Site('fy', 'wikibooks')
site3 = Site('fy', 'wikisource')
for code in (site1, site2):
with self.subTest(code=code):
self.assertEqual(i18n.translate(code, self.xdict),
'test-localized WP-FY')
self.assertEqual(i18n.translate(site3, self.xdict),
'test-localized WS-FY')
def test_translate_nl(self):
"""Test localization fallbacks for 'nl' with xdict.
'nl' key is defined in 'wikipedia' sub dict. Therefore all
localizations have a fallback to the 'wikipedia' entry.
"""
site1 = Site('nl', 'wikipedia')
site2 = Site('nl', 'wikibooks')
site3 = Site('nl', 'wikisource')
for code in (site1, site2, site3, 'nl'):
with self.subTest(code=code):
self.assertEqual(i18n.translate(code, self.xdict),
'test-localized WP-NL')
def test_translate_ja(self):
"""Test localization fallbacks for 'ja' with xdict.
        'ja' key is defined in the 'wikisource' sub dict only. Therefore there
is no fallback to the 'wikipedia' entry and the localization result
is None.
"""
site1 = Site('ja', 'wikipedia')
site2 = Site('ja', 'wikibooks')
site3 = Site('ja', 'wikisource')
for code in (site1, site2):
with self.subTest(code=code):
self.assertIsNone(i18n.translate(code, self.xdict))
self.assertEqual(i18n.translate(site3, self.xdict),
'test-localized WS-JA')
class TestFallbackTranslate(TestCase):
"""Test translate method with fallback True."""
net = False
msg_localized = {'en': 'test-localized EN',
'nl': 'test-localized NL',
'fy': 'test-localized FY'}
msg_semi_localized = {'en': 'test-semi-localized EN',
'nl': 'test-semi-localized NL'}
msg_non_localized = {'en': 'test-non-localized EN'}
msg_no_english = {'ja': 'test-no-english JA'}
def test_localized(self):
"""Test fully localized translations."""
for code, msg in self.msg_localized.items():
with self.subTest(code=code):
self.assertEqual(i18n.translate(code, self.msg_localized,
fallback=True),
msg)
def test_semi_localized(self):
"""Test translate by fallback to an alternative language."""
self.assertEqual(i18n.translate('en', self.msg_semi_localized,
fallback=True),
'test-semi-localized EN')
for code in ('nl', 'fy'):
with self.subTest(code=code):
self.assertEqual(i18n.translate(code, self.msg_semi_localized,
fallback=True),
'test-semi-localized NL')
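        # 'fy' resolves to the Dutch text because pywikibot's language
        # fallback chain for West Frisian tries 'nl' before 'en'.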
def test_non_localized(self):
"""Test translate with missing localisation."""
for code in ('en', 'fy', 'nl', 'ru'):
with self.subTest(code=code):
self.assertEqual(i18n.translate(code, self.msg_non_localized,
fallback=True),
'test-non-localized EN')
def testNoEnglish(self):
"""Test translate with missing English text."""
for code in ('en', 'fy', 'nl'):
with self.subTest(code=code):
with self.assertRaises(KeyError):
i18n.translate(code, self.msg_no_english, fallback=True)
class UserInterfaceLangTestCase(TestCase):
"""Base class for tests using config.userinterface_lang."""
def setUp(self):
"""Change the userinterface language to the site's code."""
super().setUp()
self.orig_userinterface_lang = pywikibot.config.userinterface_lang
pywikibot.config.userinterface_lang = self.get_site().code
def tearDown(self):
"""Reset the userinterface language."""
pywikibot.config.userinterface_lang = self.orig_userinterface_lang
super().tearDown()
class TWNSetMessagePackageBase(TestCase):
"""Partial base class for TranslateWiki tests."""
message_package = None
def setUp(self):
"""Load the test translations."""
self.orig_messages_package_name = i18n._messages_package_name
i18n.set_messages_package(self.message_package)
super().setUp()
def tearDown(self):
"""Load the original translations back."""
super().tearDown()
i18n.set_messages_package(self.orig_messages_package_name)
class TWNTestCaseBase(TWNSetMessagePackageBase):
"""Base class for TranslateWiki tests."""
@classmethod
def setUpClass(cls):
"""Verify that the test translations are not empty."""
if not isinstance(cls.message_package, str):
raise TypeError('{}.message_package must be a package name'
.format(cls.__name__))
# The call to set_messages_package below exists only to confirm
# that the package exists and messages are available, so
# that tests can be skipped if the i18n data doesn't exist.
cls.orig_messages_package_name = i18n._messages_package_name
i18n.set_messages_package(cls.message_package)
has_messages = i18n.messages_available()
i18n._messages_package_name = cls.orig_messages_package_name
if not has_messages:
raise unittest.SkipTest("i18n messages package '{}' not available."
.format(cls.message_package))
super().setUpClass()
class TestTWTranslate(TWNTestCaseBase):
"""Test twtranslate method."""
net = False
message_package = 'tests.i18n'
def testLocalized(self):
"""Test fully localized entry."""
self.assertEqual(i18n.twtranslate('en', 'test-localized'),
'test-localized EN')
self.assertEqual(i18n.twtranslate('nl', 'test-localized'),
'test-localized NL')
self.assertEqual(i18n.twtranslate('fy', 'test-localized'),
'test-localized FY')
def testSemiLocalized(self):
"""Test translating with fallback to alternative language."""
self.assertEqual(i18n.twtranslate('en', 'test-semi-localized'),
'test-semi-localized EN')
for code in ('nl', 'fy'):
with self.subTest(code=code):
self.assertEqual(i18n.twtranslate(code, 'test-semi-localized'),
'test-semi-localized NL')
def testNonLocalized(self):
"""Test translating non localized entries."""
for code in ('en', 'fy', 'nl', 'ru'):
with self.subTest(code=code):
self.assertEqual(i18n.twtranslate(code, 'test-non-localized'),
'test-non-localized EN')
def testNoEnglish(self):
"""Test translating into English with missing entry."""
with self.assertRaises(TranslationError):
i18n.twtranslate('en', 'test-no-english')
class InputTestCase(TWNTestCaseBase, UserInterfaceLangTestCase, PwbTestCase):
"""Test i18n.input."""
family = 'wikipedia'
code = 'nn'
alt_code = 'nb'
message_package = 'scripts.i18n'
message = 'pywikibot-enter-category-name'
@classmethod
def setUpClass(cls):
"""Verify that a translation does not yet exist."""
super().setUpClass()
if cls.code in i18n.twget_keys(cls.message):
raise unittest.SkipTest(
'{} has a translation for {}'
.format(cls.code, cls.message))
def test_pagegen_i18n_input(self):
"""Test i18n.input fallback via pwb."""
expect = i18n.twtranslate(self.alt_code, self.message, fallback=False)
result = self._execute(args=['listpages', '-cat'],
                               data_in='non-existent-category\r\n')
self.assertIn(expect, result['stderr'])
class MissingPackageTestCase(TWNSetMessagePackageBase,
UserInterfaceLangTestCase,
DefaultSiteTestCase):
"""Test missing messages package."""
message_package = 'scripts.foobar.i18n'
def _capture_output(self, text, *args, **kwargs):
self.output_text = text
def setUp(self):
"""Patch the output and input methods."""
super().setUp()
bot.set_interface('terminal')
self.output_text = ''
self.orig_raw_input = bot.ui._raw_input
self.orig_output = bot.ui.stream_output
bot.ui._raw_input = lambda *args, **kwargs: 'dummy input'
bot.ui.stream_output = self._capture_output
self.old_cc_setting = config.cosmetic_changes_mylang_only
def tearDown(self):
"""Restore the output and input methods."""
config.cosmetic_changes_mylang_only = self.old_cc_setting
bot.ui._raw_input = self.orig_raw_input
        bot.ui.stream_output = self.orig_output
bot.set_interface('buffer')
super().tearDown()
def test_i18n_input(self):
"""Test i18n.input falls back with missing message package."""
rv = i18n.input('pywikibot-enter-category-name',
fallback_prompt='dummy output')
self.assertEqual(rv, 'dummy input')
self.assertIn('dummy output: ', self.output_text)
def test_i18n_twtranslate(self):
"""Test i18n.twtranslate falls back with missing message package."""
rv = i18n.twtranslate(self.site, 'pywikibot-enter-category-name',
fallback_prompt='dummy message')
self.assertEqual(rv, 'dummy message')
def test_cosmetic_changes_hook(self):
"""Test summary result of Page._cosmetic_changes_hook."""
page = pywikibot.Page(self.site, 'Test')
page.text = 'Some content with spaces.'
# check cc settings
config.cosmetic_changes_mylang_only = False
self.assertFalse(page.isTalkPage())
self.assertNotIn(pywikibot.calledModuleName(),
config.cosmetic_changes_deny_script)
self.assertFalse(config.cosmetic_changes_mylang_only)
if page.content_model != 'wikitext':
self.skipTest('Wrong content model {!r} for cosmetic_changes'
.format(page.content_model))
summary = 'Working on Test page at site {}'.format(self.site)
msg = page._cosmetic_changes_hook(summary)
self.assertEqual(msg, summary + '; cosmetic changes')
class TestExtractPlural(TestCase):
"""Test extracting plurals from a dummy string."""
net = False
def test_standard(self):
"""Test default usage using a dict and no specific plurals."""
self.assertEqual(
i18n._extract_plural('en', '{{PLURAL:foo|one|other}}',
{'foo': 42}),
'other')
self.assertEqual(
i18n._extract_plural('en', '{{PLURAL:foo|one|other}}',
{'foo': 1}),
'one')
self.assertEqual(
i18n._extract_plural('en', '{{PLURAL:foo|one|other}}',
{'foo': 0}),
'other')
def test_empty_fields(self):
"""Test default usage using a dict and no specific plurals."""
self.assertEqual(
i18n._extract_plural('en', '{{PLURAL:foo||other}}', {'foo': 42}),
'other')
self.assertEqual(
i18n._extract_plural('en', '{{PLURAL:foo||other}}', {'foo': 1}),
'')
self.assertEqual(
i18n._extract_plural('en', '{{PLURAL:foo|one|}}', {'foo': 1}),
'one')
# two variants expected but only one given
self.assertEqual(
i18n._extract_plural('en', '{{PLURAL:foo|one}}', {'foo': 0}),
'one')
def test_specific(self):
"""Test using a specific plural."""
self.assertEqual(
i18n._extract_plural('en', '{{PLURAL:foo|one|other|12=dozen}}',
{'foo': 42}),
'other')
self.assertEqual(
i18n._extract_plural('en', '{{PLURAL:foo|one|other|12=dozen}}',
{'foo': 12}),
'dozen')
def test_more(self):
"""Test the number of plurals are more than expected."""
test = [(0, 2), (1, 0), (2, 1), (3, 2), (4, 2), (7, 2), (8, 3)]
for num, result in test:
self.assertEqual(
i18n._extract_plural(
'cy',
'{{PLURAL:num|0|1|2|3|4|5}}',
{'num': num}),
str(result))
def test_less(self):
"""Test the number of plurals are less than expected."""
test = [(0, 2), (1, 0), (2, 1), (3, 2), (4, 2), (7, 2), (8, 3)]
for num, result in test:
self.assertEqual(
i18n._extract_plural(
'cy',
'{{PLURAL:num|0|1}}',
{'num': num}),
str(min(result, 1)))
if __name__ == '__main__': # pragma: no cover
with suppress(SystemExit):
unittest.main()
| 36.789588
| 79
| 0.573349
|
4a1775ee2af3a89617af0407b435708d60637350
| 992
|
py
|
Python
|
Python/Swap Variables/swap_vars.py
|
dipakpawar152000/programming
|
f343857d413c7dcce876c7720c0ffc4e44b63a48
|
[
"Apache-2.0"
] | 33
|
2019-10-20T15:28:26.000Z
|
2021-12-17T22:34:22.000Z
|
Python/Swap Variables/swap_vars.py
|
dipakpawar152000/programming
|
f343857d413c7dcce876c7720c0ffc4e44b63a48
|
[
"Apache-2.0"
] | 111
|
2019-05-10T18:52:55.000Z
|
2022-02-04T08:53:42.000Z
|
Python/Swap Variables/swap_vars.py
|
dipakpawar152000/programming
|
f343857d413c7dcce876c7720c0ffc4e44b63a48
|
[
"Apache-2.0"
] | 141
|
2019-10-20T15:00:02.000Z
|
2021-03-23T05:51:12.000Z
|
#!/usr/bin/python
# swap two variables using temporary variable
def swap_temp(a, b):
print("-" * 32)
print("Swap with temporary varaiable")
print("-" * 32)
print("Before a: {} b: {}".format(a, b))
tmp = b
b = a
a = tmp
print("After a: {} b: {}".format(a, b))
# swap two variables using xor and no temporary variable
def swap_xor(a, b):
print("-" * 32)
print("Swap using xor")
print("-" * 32)
print("Before a: {} b: {}".format(a, b))
a ^= b
b ^= a
a ^= b
print("After a: {} b: {}".format(a, b))
# swap two variables using idiomatic python
def swap_python(a, b):
print("-" * 32)
print("Swap using python a, b = b, a")
print("-" * 32)
print("Before a: {} b: {}".format(a, b))
a, b = b, a
print("After a: {} b: {}".format(a, b))
def main():
a = 1
b = 2
swap_temp(a, b)
print("")
swap_xor(a, b)
print("")
swap_python(a, b)
if __name__ == "__main__":
exit(main())
| 21.106383
| 56
| 0.517137
|
4a17762ca734ab47084db0c6376aeceab450c119
| 1,299
|
py
|
Python
|
board/board.py
|
dwaffe/connect4deepLearning
|
167c26a3f4f7040ca714c4bdb1c14bfb51c64d9e
|
[
"MIT"
] | null | null | null |
board/board.py
|
dwaffe/connect4deepLearning
|
167c26a3f4f7040ca714c4bdb1c14bfb51c64d9e
|
[
"MIT"
] | null | null | null |
board/board.py
|
dwaffe/connect4deepLearning
|
167c26a3f4f7040ca714c4bdb1c14bfb51c64d9e
|
[
"MIT"
] | null | null | null |
from board.piece import Piece
from board.field import Field
import numpy as np
class Board:
def __init__(self, rows: int, columns: int) -> None:
self._rows = rows
self._columns = columns
self._board = [[Field() for i in range(columns)] for j in range(rows)]
self._move_counter = 0
def print(self):
print(self.__str__())
def __str__(self) -> str:
string_board = ''
for column in self._board:
string_board += '\n'
for field in column:
string_board += '[' + str(field) + ']'
return string_board
def put_piece(self, column_index: int, row_index: int, piece: Piece):
self._board[row_index][column_index].put_piece(piece)
self._move_counter += 1
def is_empty(self, column_index: int, row_index: int) -> bool:
return self._board[row_index][column_index].get_piece() is None
def get_array(self, sign: str) -> np.array:
return np.array([[int(field.is_sign(sign)) for field in row] for row in self._board], dtype=np.float32)
def get_array_by_signs(self, signs: list) -> np.array:
# np.stack needs a concrete sequence; a bare generator is rejected by newer NumPy
return np.stack([self.get_array(sign) for sign in signs])
def get_move_counter(self) -> int:
return self._move_counter
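# A minimal usage sketch (illustrative only; assumes Piece can be built from a sign):
#   board = Board(6, 7)
#   board.put_piece(column_index=3, row_index=5, piece=Piece('x'))
#   print(board)
#   print(board.get_array('x'))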
| 27.0625
| 111
| 0.622787
|
4a1776c2cda3b51aaa3036353fafab6e04c95268
| 908
|
py
|
Python
|
tests/data/test_data_transforms_tensor.py
|
wenliangzhao2018/d2go
|
a9dce74e5caf4c2260371a1abb603e3d5f14d763
|
[
"Apache-2.0"
] | 687
|
2021-03-03T07:50:15.000Z
|
2022-03-25T19:31:57.000Z
|
tests/data/test_data_transforms_tensor.py
|
wenliangzhao2018/d2go
|
a9dce74e5caf4c2260371a1abb603e3d5f14d763
|
[
"Apache-2.0"
] | 193
|
2021-03-03T17:15:57.000Z
|
2022-03-31T03:13:47.000Z
|
tests/data/test_data_transforms_tensor.py
|
wenliangzhao2018/d2go
|
a9dce74e5caf4c2260371a1abb603e3d5f14d763
|
[
"Apache-2.0"
] | 90
|
2021-03-03T16:08:36.000Z
|
2022-03-30T23:42:19.000Z
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import unittest
import numpy as np
import torch
from d2go.data.transforms import tensor as tensor_aug
from detectron2.data.transforms.augmentation import AugmentationList
class TestDataTransformsTensor(unittest.TestCase):
def test_tensor_aug(self):
"""Data augmentation that that allows torch.Tensor as input"""
img = torch.ones(3, 8, 6)
augs = [tensor_aug.Tensor2Array(), tensor_aug.Array2Tensor()]
inputs = tensor_aug.AugInput(image=img)
transforms = AugmentationList(augs)(inputs)
self.assertArrayEqual(img, inputs.image)
# inverse is the same as itself
out_img = transforms.inverse().apply_image(img)
self.assertArrayEqual(img, out_img)
def assertArrayEqual(self, a1, a2):
self.assertTrue(np.array_equal(a1, a2))
| 30.266667
| 70
| 0.713656
|
4a177824e9266a09f6c8ad85eb48c70ae2e1b15d
| 5,054
|
py
|
Python
|
pyxley/charts/mg/axes.py
|
snowind/pyxley
|
cff9e50b8d80b9794c6907355e541f166959cd6c
|
[
"MIT"
] | 2,536
|
2015-06-26T20:12:30.000Z
|
2022-03-01T07:26:44.000Z
|
pyxley/charts/mg/axes.py
|
zhiaozhou/pyxley
|
2dab00022d977d986169cd8a629b3a2f91be893f
|
[
"MIT"
] | 51
|
2015-07-17T14:16:43.000Z
|
2021-07-09T21:34:36.000Z
|
pyxley/charts/mg/axes.py
|
zhiaozhou/pyxley
|
2dab00022d977d986169cd8a629b3a2f91be893f
|
[
"MIT"
] | 335
|
2015-07-16T20:22:00.000Z
|
2022-02-25T07:18:15.000Z
|
from .mg import OptionHelper
class Axes(OptionHelper):
"""Axes object for metricgraphics.
This class is used to specify axes options for the metricsgraphics api.
https://github.com/mozilla/metrics-graphics/wiki/Axes
"""
_allowed_axes = [
"inflator",
"max_x", "max_y",
"min_x", "min_y",
"min_y_from_data",
"missing_text",
"show_missing_background",
"show_year_markers",
"show_secondary_x_label",
"small_text",
"xax_count",
"xax_format",
"x_axis",
"x_extended_ticks",
"x_label",
"xax_start_at_min",
"xax_units",
"xax_tick_length",
"y_axis",
"y_extended_ticks",
"y_label",
"y_scale_type",
"yax_count",
"yax_format",
"yax_tick_length",
"yax_units"
]
def set_inflator(self, value):
""" Set inflator value.
Args:
value (float): inflator value.
"""
self.set_float("inflator", value)
def set_xlim(self, xlim):
""" Set x-axis limits.
Accepts a two-element list to set the x-axis limits.
Args:
xlim (list): lower and upper bounds
Raises:
ValueError: xlim must contain two elements
ValueError: Min must be less than max
"""
if len(xlim) != 2:
raise ValueError("xlim must contain two elements")
if xlim[1] < xlim[0]:
raise ValueError("Min must be less than Max")
self.options["min_x"] = xlim[0]
self.options["max_x"] = xlim[1]
def set_ylim(self, ylim):
""" Set y-axis limits.
Accepts a two-element list to set the y-axis limits.
Args:
ylim (list): lower and upper bounds
Raises:
ValueError: ylim must contain two elements
ValueError: Min must be less than max
"""
if len(ylim) != 2:
raise ValueError("ylim must contain two elements")
if ylim[1] < ylim[0]:
raise ValueError("Min must be less than Max")
self.options["min_y"] = ylim[0]
self.options["max_y"] = ylim[1]
def set_min_y_from_data(self, value):
""" Set flag to find the minimum y-value from the data."""
self.set_boolean("min_y_from_data", value)
def show_year_markers(self, value):
""" Set flag to show year markers."""
self.set_boolean("show_year_markers", value)
def show_secondary_x_label(self, value):
""" Set flag to show secondary x label."""
self.set_boolean("show_secondary_x_label", value)
def set_small_text(self, value):
""" Set flag to show small text."""
self.set_boolean("small_text", value)
def show_x_extended_ticks(self, value):
""" Set flag to show extended x-axis tick marks."""
self.set_boolean("x_extended_ticks", value)
def show_y_extended_ticks(self, value):
""" Set flag to show extended y-axis tick marks."""
self.set_boolean("y_extended_ticks", value)
def show_xaxis(self, value):
""" Set flag to show x-axis."""
self.set_boolean("x_axis", value)
def show_yaxis(self, value):
""" Set flag to show y-axis."""
self.set_boolean("y_axis", value)
def set_xlabel(self, label):
""" Set x-axis label."""
self.set_string("x_label", label)
def set_ylabel(self, label):
""" Set y-axis label."""
self.set_string("x_label", label)
def set_xticks_count(self, value):
""" Set xticks counts."""
self.options["xax_count"] = value
def set_yticks_count(self, value):
""" Set yticks counts."""
self.options["yax_count"] = value
def xaxis_start_at_min(self, value):
""" Set flag to start x-axis at the min value."""
self.set_boolean("xax_start_at_min", value)
def set_xticks_length(self, value):
""" Set the length of the x-axis ticks."""
self.set_integer("xax_tick_length", value)
def set_yticks_length(self, value):
""" Set the length of the y-axis ticks."""
self.set_integer("yax_tick_length", value)
def set_xunits(self, value):
""" Set the units on the x-axis."""
self.set_string("xax_units", value)
def set_yunits(self, value):
""" Set the units on the y-axis."""
self.set_string("yax_units", value)
def logscale(self):
""" Set flag to log scale the y-axis."""
self.set_boolean("y_scale_type", True)
def set_xformat(self, value):
""" Set the x-axis format."""
self.set_string("xax_format", value)
def set_yformat(self, value):
""" Set the y-axis format."""
self.set_string("yax_format", value)
def get(self):
""" Retrieve options set by user."""
return {k:v for k,v in list(self.options.items()) if k in self._allowed_axes}
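# A minimal usage sketch (illustrative only; assumes the OptionHelper base wiring):
#   ax = Axes()
#   ax.set_xlim([0, 10])
#   ax.set_ylabel("count")
#   ax.get()  # -> {'min_x': 0, 'max_x': 10, 'y_label': 'count'}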
| 29.383721
| 85
| 0.576177
|
4a17782784db5d8ecbbf8985ee6dc4475756beb8
| 1,025
|
py
|
Python
|
interface/uni_lab/common/resourcePlanId_for_others.py
|
aoruilin/SeleniumBase
|
304fd7a23661ebf561da47d4cd8f7365dba519ca
|
[
"MIT"
] | null | null | null |
interface/uni_lab/common/resourcePlanId_for_others.py
|
aoruilin/SeleniumBase
|
304fd7a23661ebf561da47d4cd8f7365dba519ca
|
[
"MIT"
] | null | null | null |
interface/uni_lab/common/resourcePlanId_for_others.py
|
aoruilin/SeleniumBase
|
304fd7a23661ebf561da47d4cd8f7365dba519ca
|
[
"MIT"
] | null | null | null |
import requests
from interface.uni_lab.common.login_for_others import login_interface
from interface.uni_lab.common.pointId_for_others import get_point_id
from base.data import Ips
ip = Ips.ip_for_uniLab
pointId = get_point_id()
URL = ip + '/pc/course/getResourcePlanList?pointId=%s&pageNum=1&pageSize=4' % pointId[1]
HEADERS = {
'Content-Type': 'application/json',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/68.0.3440.84 Safari/537.36'
}
def get_resourcePlan_id(url=URL, headers=HEADERS):
'''Provide a shared resourcePlanId for other interfaces.'''
token = login_interface()
headers['token'] = token
response = requests.get(url=url, headers=headers)
data_ret = response.json()
data = data_ret['data']
data_list = data['list']
resourceP_id_list = []
for r in data_list:
resourcePlan_id = r['id']
resourceP_id_list.append(resourcePlan_id)
return resourceP_id_list
# print(get_resourcePlan_id())
| 32.03125
| 104
| 0.69561
|
4a1778456ba0fd77158ac9bf61b8c0fc9d52345b
| 266
|
py
|
Python
|
tests/artificial/transf_Quantization/trend_Lag1Trend/cycle_5/ar_12/test_artificial_32_Quantization_Lag1Trend_5_12_0.py
|
shaido987/pyaf
|
b9afd089557bed6b90b246d3712c481ae26a1957
|
[
"BSD-3-Clause"
] | 377
|
2016-10-13T20:52:44.000Z
|
2022-03-29T18:04:14.000Z
|
tests/artificial/transf_Quantization/trend_Lag1Trend/cycle_5/ar_12/test_artificial_32_Quantization_Lag1Trend_5_12_0.py
|
ysdede/pyaf
|
b5541b8249d5a1cfdc01f27fdfd99b6580ed680b
|
[
"BSD-3-Clause"
] | 160
|
2016-10-13T16:11:53.000Z
|
2022-03-28T04:21:34.000Z
|
tests/artificial/transf_Quantization/trend_Lag1Trend/cycle_5/ar_12/test_artificial_32_Quantization_Lag1Trend_5_12_0.py
|
ysdede/pyaf
|
b5541b8249d5a1cfdc01f27fdfd99b6580ed680b
|
[
"BSD-3-Clause"
] | 63
|
2017-03-09T14:51:18.000Z
|
2022-03-27T20:52:57.000Z
|
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "Lag1Trend", cycle_length = 5, transform = "Quantization", sigma = 0.0, exog_count = 0, ar_order = 12);
| 38
| 166
| 0.733083
|
4a177916750e9896c2ece29365ff8467c9be57a8
| 440
|
py
|
Python
|
AI/AI_dum.py
|
SaicharanKandukuri/snippets-python-from-scrach
|
b0823fde3cf1a88bf43d97bdc542de7e32c76dac
|
[
"MIT"
] | 1
|
2021-05-29T03:09:24.000Z
|
2021-05-29T03:09:24.000Z
|
AI/AI_dum.py
|
SaicharanKandukuri/snippets-python-from-scrach
|
b0823fde3cf1a88bf43d97bdc542de7e32c76dac
|
[
"MIT"
] | null | null | null |
AI/AI_dum.py
|
SaicharanKandukuri/snippets-python-from-scrach
|
b0823fde3cf1a88bf43d97bdc542de7e32c76dac
|
[
"MIT"
] | null | null | null |
# notes
# John McCarthy, 1956
# ^ AI pioneer; coined the term "artificial intelligence"
# Shakey robot
# ^ early mobile robot, named for the way it shook while moving
# Q: why is AI famous?
# A: because humans are lazy.
# McCarthy also created "Lisp", an early language widely used for AI work
# ----------------------------------------------
# Humans learn by experience
# AI v1 -> runs from instructions (rule-based, brittle)
# AI v2 -> runs on data (still limited, as of 2021)
# ----------------------------------------------
| 22
| 58
| 0.506818
|
4a177a7fef16a6b92da7fd787ca8da05807a9b09
| 206
|
py
|
Python
|
Chapter12/04_http_urequests.py
|
PacktPublishing/MicroPython-Cookbook
|
ffd6aa15c303459570a89ba31b5bc734f05cb387
|
[
"MIT"
] | 16
|
2019-07-01T16:24:22.000Z
|
2022-03-03T06:54:57.000Z
|
Chapter12/04_http_urequests.py
|
ccwu0918/MicroPython-Cookbook
|
ffd6aa15c303459570a89ba31b5bc734f05cb387
|
[
"MIT"
] | null | null | null |
Chapter12/04_http_urequests.py
|
ccwu0918/MicroPython-Cookbook
|
ffd6aa15c303459570a89ba31b5bc734f05cb387
|
[
"MIT"
] | 19
|
2019-04-17T08:30:12.000Z
|
2022-01-14T03:05:37.000Z
|
from netcheck import wait_for_networking
import urequests
def main():
wait_for_networking()
url = 'http://micropython.org/ks/test.html'
html = urequests.get(url).text
print(html)
main()
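# Note (illustrative, not part of the original script): urequests.get() returns
# a Response object; accessing .text reads the whole body into RAM, which is a
# real constraint on small MicroPython boards.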
| 15.846154
| 47
| 0.703883
|
4a177b75b187d516e23b1aff0bb0150c7ee61fc6
| 5,732
|
py
|
Python
|
custom_components/ultrasync/sensor.py
|
chatziko/ha-ultrasync
|
c96ec26c1c2da59fa4b260d7ea3fd3c1e2f30653
|
[
"MIT"
] | 9
|
2020-12-22T02:53:37.000Z
|
2022-03-28T16:41:36.000Z
|
custom_components/ultrasync/sensor.py
|
chatziko/ha-ultrasync
|
c96ec26c1c2da59fa4b260d7ea3fd3c1e2f30653
|
[
"MIT"
] | 15
|
2020-12-09T03:05:48.000Z
|
2022-01-24T00:44:52.000Z
|
custom_components/ultrasync/sensor.py
|
chatziko/ha-ultrasync
|
c96ec26c1c2da59fa4b260d7ea3fd3c1e2f30653
|
[
"MIT"
] | 4
|
2020-12-22T02:38:48.000Z
|
2021-08-09T09:06:22.000Z
|
"""Monitor the Interlogix/Hills ComNav UltraSync Hub."""
import logging
from typing import Callable, List
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_NAME
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.typing import HomeAssistantType
from . import UltraSyncEntity
from .const import (
DATA_COORDINATOR,
DATA_UNDO_UPDATE_LISTENER,
DOMAIN,
SENSOR_UPDATE_LISTENER,
SENSORS,
)
from .coordinator import UltraSyncDataUpdateCoordinator
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
hass: HomeAssistantType,
entry: ConfigEntry,
async_add_entities: Callable[[List[Entity], bool], None],
) -> None:
"""Set up UltraSync sensor based on a config entry."""
coordinator: UltraSyncDataUpdateCoordinator = hass.data[DOMAIN][entry.entry_id][
DATA_COORDINATOR
]
# At least one sensor must be pre-created or Home Assistant will not
# call any updates
hass.data[DOMAIN][entry.entry_id][SENSORS]["area01_state"] = UltraSyncSensor(
coordinator, entry.entry_id, entry.data[CONF_NAME], "area01_state", "Area1State"
)
async_add_entities([hass.data[DOMAIN][entry.entry_id][SENSORS]["area01_state"]])
@callback
def _auto_manage_sensors(areas: dict, zones: dict) -> None:
"""Dynamically create/delete sensors based on what was detected by the hub."""
_LOGGER.debug(
"Entering _auto_manage_sensors with %d area(s), and %d zone(s)",
len(areas),
len(zones),
)
# our list of sensors to add
new_sensors = []
# A pointer to our sensors
sensors = hass.data[DOMAIN][entry.entry_id][SENSORS]
# Track our detected sensors (for automatic updates if required)
detected_sensors = set()
for meta in areas:
bank_no = meta["bank"]
sensor_id = "area{:0>2}_state".format(bank_no + 1)
detected_sensors.add(sensor_id)
if sensor_id not in sensors:
# hash our entry
sensors[sensor_id] = UltraSyncSensor(
coordinator,
entry.entry_id,
entry.data[CONF_NAME],
sensor_id,
# Friendly Name
"Area{}State".format(bank_no + 1),
)
# Add our new area sensor
new_sensors.append(sensors[sensor_id])
_LOGGER.debug(
"Detected %s.Area%dState", entry.data[CONF_NAME], bank_no + 1
)
# Update our meta information
for key, value in meta.items():
sensors[sensor_id][key] = value
for meta in zones:
bank_no = meta["bank"]
sensor_id = "zone{:0>2}_state".format(bank_no + 1)
detected_sensors.add(sensor_id)
if sensor_id not in sensors:
# hash our entry
sensors[sensor_id] = UltraSyncSensor(
coordinator,
entry.entry_id,
entry.data[CONF_NAME],
sensor_id,
# Friendly Name
"Zone{}State".format(bank_no + 1),
)
# Add our new zone sensor
new_sensors.append(sensors[sensor_id])
_LOGGER.debug(
"Detected %s.Zone%dState", entry.data[CONF_NAME], bank_no + 1
)
# Update our meta information
for key, value in meta.items():
sensors[sensor_id][key] = value
if new_sensors:
# Add our newly detected sensors
async_add_entities(new_sensors)
for sensor_id in set(sensors.keys()).difference(detected_sensors):
# Tidy up sensors leaving our listing
hass.async_create_task(sensors[sensor_id].async_remove())
del sensors[sensor_id]
# register our callback which will be called the second we make a
# connection to our panel
hass.data[DOMAIN][entry.entry_id][DATA_UNDO_UPDATE_LISTENER].append(
async_dispatcher_connect(hass, SENSOR_UPDATE_LISTENER, _auto_manage_sensors)
)
class UltraSyncSensor(UltraSyncEntity):
"""Representation of a UltraSync sensor."""
def __init__(
self,
coordinator: UltraSyncDataUpdateCoordinator,
entry_id: str,
entry_name: str,
sensor_type: str,
sensor_name: str,
):
"""Initialize a new UltraSync sensor."""
self._sensor_type = sensor_type
self._unique_id = f"{entry_id}_{sensor_type}"
# Initialize our Attributes
self.__attributes = {}
super().__init__(
coordinator=coordinator,
entry_id=entry_id,
name=f"{entry_name} {sensor_name}",
)
def __setitem__(self, key, value):
"""Set our sensor attributes."""
self.__attributes[key] = value
@property
def unique_id(self) -> str:
"""Return the unique ID of the sensor."""
return self._unique_id
@property
def device_state_attributes(self):
"""Return the state attributes of the sensor."""
return self.__attributes
@property
def state(self):
"""Return the state of the sensor."""
value = self.coordinator.data.get(self._sensor_type)
if value is None:
_LOGGER.warning("Unable to locate value for %s", self._sensor_type)
return None
return value
| 32.384181
| 88
| 0.605024
|
4a177beaf25343b7a4d10ec405776f17757d2961
| 1,785
|
py
|
Python
|
telemetry/third_party/web-page-replay/setup.py
|
ravitejavalluri/catapult
|
246a39a82c2213d913a96fff020a263838dc76e6
|
[
"BSD-3-Clause"
] | 226
|
2015-01-01T23:21:53.000Z
|
2021-08-25T06:20:54.000Z
|
telemetry/third_party/web-page-replay/setup.py
|
ravitejavalluri/catapult
|
246a39a82c2213d913a96fff020a263838dc76e6
|
[
"BSD-3-Clause"
] | 55
|
2015-01-15T17:21:11.000Z
|
2021-08-18T10:12:31.000Z
|
telemetry/third_party/web-page-replay/setup.py
|
ravitejavalluri/catapult
|
246a39a82c2213d913a96fff020a263838dc76e6
|
[
"BSD-3-Clause"
] | 64
|
2015-01-07T14:04:43.000Z
|
2022-01-04T12:08:16.000Z
|
#!/usr/bin/env python
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Creates a distributable python package.
Creating new packages:
1. Generate the package, dist/webpagereplay-X.X.tar.gz:
python setup.py sdist
2. Upload the package file to the following:
http://code.google.com/p/web-page-replay/downloads/entry
Installing packages:
$ easy_install http://web-page-replay.googlecode.com/files/webpagereplay-X.X.tar.gz
- The replay and httparchive commands are now on your PATH.
"""
import setuptools
setuptools.setup(
name='webpagereplay',
version='1.1.2',
description='Record and replay web content',
author='Web Page Replay Project Authors',
author_email='web-page-replay-dev@googlegroups.com',
url='http://code.google.com/p/web-page-replay/',
license='Apache License 2.0',
install_requires=['dnspython>=1.8'],
packages=[
'',
'third_party',
'third_party.ipaddr'
],
package_dir={'': '.'},
package_data={
'': ['*.js', '*.txt', 'COPYING', 'LICENSE'],
},
entry_points={
'console_scripts': [
'httparchive = httparchive:main',
'replay = replay:main',
]
},
)
| 31.875
| 85
| 0.670028
|
4a177c3bf99f0c2fa306fac5ca17df185780db1b
| 506
|
py
|
Python
|
pygears/svgen/modules/flatten.py
|
Risto97/pygears
|
19393e85101a16762cb3bbbf3010946ef69217f2
|
[
"MIT"
] | null | null | null |
pygears/svgen/modules/flatten.py
|
Risto97/pygears
|
19393e85101a16762cb3bbbf3010946ef69217f2
|
[
"MIT"
] | null | null | null |
pygears/svgen/modules/flatten.py
|
Risto97/pygears
|
19393e85101a16762cb3bbbf3010946ef69217f2
|
[
"MIT"
] | null | null | null |
from pygears.rtl.gear import RTLGearHierVisitor
from pygears.svgen.inst import SVGenInstPlugin, svgen_inst
from pygears.svgen.util import svgen_visitor
@svgen_visitor
class RemoveTupleFlattenVisitor(RTLGearHierVisitor):
def flatten_tuple(self, node):
node.bypass()
class SVGenFlattenPlugin(SVGenInstPlugin):
@classmethod
def bind(cls):
cls.registry['svgen']['flow'].insert(
cls.registry['svgen']['flow'].index(svgen_inst),
RemoveTupleFlattenVisitor)
| 28.111111
| 60
| 0.735178
|
4a177c83817bf05182a104fdd6be09d11a59e411
| 2,803
|
py
|
Python
|
docs/source/conf.py
|
Tiernan8r/quantum_computing_project
|
dd636aa302ab4abee84bad8a7df71fa0e019a36a
|
[
"Apache-2.0"
] | 4
|
2022-02-18T15:02:00.000Z
|
2022-02-22T15:23:41.000Z
|
docs/source/conf.py
|
Tiernan8r/quantum_computing_project
|
dd636aa302ab4abee84bad8a7df71fa0e019a36a
|
[
"Apache-2.0"
] | 7
|
2022-02-24T22:32:12.000Z
|
2022-03-22T09:07:50.000Z
|
docs/source/conf.py
|
Tiernan8r/quantum_computing_project
|
dd636aa302ab4abee84bad8a7df71fa0e019a36a
|
[
"Apache-2.0"
] | 4
|
2022-02-11T09:55:44.000Z
|
2022-02-23T12:08:49.000Z
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../../'))
# -- Project information -----------------------------------------------------
project = 'Quantum Computing Project'
copyright = '2022, Tiernan8r'
author = 'Tiernan8r, hyoong, JabethM, nys1998, s1960329, RiddhiYaddav'
# The full version, including alpha/beta/rc tags
release = '1.1.0'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.napoleon",
"sphinx.ext.viewcode",
"sphinx.ext.autosummary",
"sphinx.ext.githubpages",
"sphinx.ext.autodoc",
"m2r2",
]
m2r_parse_relative_links = True
m2r_anonymous_references = True
autodoc_type_aliases = {
"SCALARS": "qcp.matrices.types.SCALARS",
"SCALARS_T": "qcp.matrices.types.SCALARS_T",
"VECTOR": "qcp.matrices.types.VECTOR",
"MATRIX": "qcp.matrices.types.MATRIX",
"SPARSE": "qcp.matrices.types.SPARSE",
}
special_members = ["__init__", "__add__", "__sub__", "__mul__",
"__rmul__", "__len__", "__setitem__", "__getitem__",
"__str__"]
autodoc_default_options = {
"members": True,
"undoc-members": True,
"private-members": True,
"special-members": ",".join(special_members),
"exlude-members": "_abc_impl"
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
| 33.369048
| 79
| 0.660007
|
4a177c92bcaf7ad0f2ce47c2e53388d3017349f5
| 26,798
|
py
|
Python
|
vendor/plugit-development-client/plugIt/views.py
|
ebu/test-engine-ondemand
|
31d5ef4e2ddb6d80f22f166aa25cd6e883181c66
|
[
"BSD-3-Clause"
] | 4
|
2017-03-22T02:31:11.000Z
|
2020-09-16T20:47:16.000Z
|
vendor/plugit-development-client/plugIt/views.py
|
ebu/test-engine-ondemand
|
31d5ef4e2ddb6d80f22f166aa25cd6e883181c66
|
[
"BSD-3-Clause"
] | 1
|
2015-01-08T03:44:52.000Z
|
2015-01-08T03:44:52.000Z
|
vendor/plugit-development-client/plugIt/views.py
|
ebu/test-engine-ondemand
|
31d5ef4e2ddb6d80f22f166aa25cd6e883181c66
|
[
"BSD-3-Clause"
] | 2
|
2015-04-24T01:52:22.000Z
|
2020-09-16T20:47:24.000Z
|
# -*- coding: utf-8 -*-
from django.shortcuts import get_object_or_404, render_to_response, redirect
from django.template import RequestContext
from django.core.context_processors import csrf
from django.views.decorators.csrf import csrf_exempt
from django.http import Http404, HttpResponse, HttpResponseForbidden, HttpResponseNotFound
from django.utils.encoding import smart_str
from django.conf import settings
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.auth.decorators import login_required
from django.http import HttpResponseRedirect
from django.db import connections
from django.core.paginator import InvalidPage, EmptyPage, Paginator
from django.core.cache import cache
from django import forms
from django.core.urlresolvers import reverse
from django.db.models import Q
from plugIt import PlugIt
from django.views.decorators.cache import cache_control
from django.template import Context, Template
from django.core.context_processors import csrf
from django.core.cache import cache
from django.contrib.auth.models import User as DUser, AnonymousUser
import json
import hashlib
import base64
from django.core.mail import send_mail
# Standalone mode: Load the main plugit interface
if settings.PIAPI_STANDALONE:
plugIt = PlugIt(settings.PIAPI_STANDALONE_URI)
baseURI = settings.PIAPI_BASEURI
def getPlugItObject(hproPk):
"""Return the plugit object and the baseURI to use if not in standalone mode"""
from hprojects.models import HostedProject
hproject = get_object_or_404(HostedProject, pk=hproPk)
if hproject.plugItURI == '' and not hproject.runURI:
raise Http404
plugIt = PlugIt(hproject.plugItURI)
baseURI = reverse('plugIt.views.main', args=(hproject.pk, ''))
return (plugIt, baseURI, hproject)
def generate_user(mode=None, pk=None):
"""Return a false user for standalone mode"""
user = None
if mode == 'log' or pk == "-1":
user = DUser(pk=-1, username='Logged', first_name='Logged', last_name='Hector', email='logeedin@plugit-standalone.ebuio')
user.ebuio_member = False
user.ebuio_admin = False
elif mode == 'mem' or pk == "-2":
user = DUser(pk=-2, username='Member', first_name='Member', last_name='Luc', email='memeber@plugit-standalone.ebuio')
user.ebuio_member = True
user.ebuio_admin = False
elif mode == 'adm' or pk == "-3":
user = DUser(pk=-3, username='Admin', first_name='Admin', last_name='Charles', email='admin@plugit-standalone.ebuio')
user.ebuio_member = True
user.ebuio_admin = True
elif mode == 'ano':
user = AnonymousUser()
user.email = 'nobody@plugit-standalone.ebuio'
user.first_name = 'Ano'
user.last_name = 'Nymous'
user.ebuio_member = False
user.ebuio_admin = False
user.ebuio_orga_member = user.ebuio_member
user.ebuio_orga_admin = user.ebuio_admin
return user
class SimpleOrga():
"""Simple orga class"""
pass
class SimpleUser():
"""Simple user class"""
pass
def gen404(request, baseURI, reason):
"""Return a 404 error"""
return HttpResponseNotFound(render_to_response('plugIt/404.html', {'reason': reason, 'ebuio_baseUrl': baseURI, 'ebuio_userMode': request.session.get('plugit-standalone-usermode', 'ano')}, context_instance=RequestContext(request)))
def gen403(request, baseURI, reason, project=None):
"""Return a 403 error"""
orgas = None
if not settings.PIAPI_STANDALONE:
from organizations.models import Organization
orgas = Organization.objects.order_by('name').all()
return HttpResponseNotFound(render_to_response('plugIt/403.html', {'reason': reason, 'orgas': orgas, 'ebuio_baseUrl': baseURI, 'ebuio_userMode': request.session.get('plugit-standalone-usermode', 'ano'), 'ebuio_project': project}, context_instance=RequestContext(request)))
def get_cache_key(request, meta, orgaMode, currentOrga):
"""Return the cache key to use"""
# Caching
cacheKey = None
if 'cache_time' in meta:
if meta['cache_time'] > 0:
# by default, no cache by user
useUser = False
# If a logged user in needed, cache the result by user
if ('only_logged_user' in meta and meta['only_logged_user']) or \
('only_member_user' in meta and meta['only_member_user']) or \
('only_admin_user' in meta and meta['only_admin_user']) or \
('only_orga_member_user' in meta and meta['only_orga_member_user']) or \
('only_orga_admin_user' in meta and meta['only_orga_admin_user']):
useUser = True
# If a value if present in meta, use it
if 'cache_by_user' in meta:
useUser = meta['cache_by_user']
cacheKey = '-'
# Add user info if needed
if useUser:
cacheKey += str(request.user.pk) + 'usr-'
# Add orga
if orgaMode:
cacheKey += str(currentOrga.pk) + 'org-'
# Add current query
cacheKey += request.get_full_path()
# Add current template (if the template changed, cache must be invalided)
cacheKey += meta['template_tag']
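# Illustrative (not in the original): for user 42, orga 7 and a request to
# /page?x=1, the key ends up as '-42usr-7org-/page?x=1' + the template tag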
return cacheKey
def check_rights(request, meta):
"""Check if the user can access the page"""
# User must be logged ?
if ('only_logged_user' in meta and meta['only_logged_user']):
if not request.user.is_authenticated():
return gen403(request, baseURI, 'only_logged_user')
# User must be member of the project ?
if ('only_member_user' in meta and meta['only_member_user']):
if not request.user.ebuio_member:
return gen403(request, baseURI, 'only_member_user')
# User must be administrator of the project ?
if ('only_admin_user' in meta and meta['only_admin_user']):
if not request.user.ebuio_admin:
return gen403(request, baseURI, 'only_admin_user')
# User must be member of the orga ?
if ('only_orga_member_user' in meta and meta['only_orga_member_user']):
if not request.user.ebuio_orga_member:
return gen403(request, baseURI, 'only_orga_member_user')
# User must be administrator of the orga ?
if ('only_orga_admin_user' in meta and meta['only_orga_admin_user']):
if not request.user.ebuio_orga_admin:
return gen403(request, baseURI, 'only_orga_admin_user')
def find_in_cache(cacheKey):
"""Check if the content exists in cache and return it"""
# If we have to use cache, we try to find the result in cache
if cacheKey:
result = cache.get('plugit-result-' + cacheKey, None)
context = cache.get('plugit-context-' + cacheKey, None)
#We found a result, we can return it
if result and context:
return (result, context)
return (None, None)
def build_base_parameters(request):
"""Build the list of parameters to forward from the post and get parameters"""
getParameters = {}
postParameters = {}
files = {}
# Copy GET parameters, excluding ebuio_*
for v in request.GET:
if v[:6] != 'ebuio_':
val = request.GET.getlist(v)
if len(val) == 1:
getParameters[v] = val[0]
else:
getParameters[v] = val
# If using post, copy post parameters and files. Excluding ebuio_*
if request.method == 'POST':
for v in request.POST:
if v[:6] != 'ebuio_':
val = request.POST.getlist(v)
if len(val) == 1:
postParameters[v] = val[0]
else:
postParameters[v] = val
for v in request.FILES:
if v[:6] != 'ebuio_':
files[v] = request.FILES[v] # .chunks()
return (getParameters, postParameters, files)
def build_user_requested_parameters(request, meta):
"""Build the list of parameters requested by the plugit server"""
postParameters = {}
getParameters = {}
files = {}
# Add parameters requested by the server
if 'user_info' in meta:
for prop in meta['user_info']:
# Test if the value exist, otherwise return None
value = None
if hasattr(request.user, prop) and prop in settings.PIAPI_USERDATA:
value = getattr(request.user, prop)
else:
raise Exception('requested user attribute "%s", '
'does not exist or requesting is not allowed' % prop)
# Add the information to GET or POST parameters, depending on the current method
if request.method == 'POST':
postParameters['ebuio_u_' + prop] = value
else:
getParameters['ebuio_u_' + prop] = value
return (getParameters, postParameters, files)
def build_orga_parameters(request, orgaMode, currentOrga):
postParameters = {}
getParameters = {}
files = {}
# If orga mode, add the current orga pk
if orgaMode:
if request.method == 'POST':
postParameters['ebuio_orgapk'] = currentOrga.pk
else:
getParameters['ebuio_orgapk'] = currentOrga.pk
return (getParameters, postParameters, files)
def build_parameters(request, meta, orgaMode, currentOrga):
"""Return the list of get, post and file parameters to send"""
postParameters = {}
getParameters = {}
files = {}
def update_parameters(data):
tmp_getParameters, tmp_postParameters, tmp_files = data
getParameters.update(tmp_getParameters)
postParameters.update(tmp_postParameters)
files.update(tmp_files)
update_parameters(build_base_parameters(request))
update_parameters(build_user_requested_parameters(request, meta))
update_parameters(build_orga_parameters(request, orgaMode, currentOrga))
return (getParameters, postParameters, files)
def build_extra_headers(request, proxyMode, orgaMode, currentOrga):
"""Build the list of extra headers"""
things_to_add = {}
# If in proxymode, add needed infos to headers
if proxyMode:
things_to_add = {}
# User
for prop in settings.PIAPI_USERDATA:
if hasattr(request.user, prop):
things_to_add['user_' + prop] = getattr(request.user, prop)
# Orga
if orgaMode:
things_to_add['orga_pk'] = currentOrga.pk
things_to_add['orga_name'] = currentOrga.name
# General
things_to_add['base_url'] = baseURI
return things_to_add
def handle_special_cases(request, data, baseURI, meta):
"""Handle sepcial cases for returned values by the doAction function"""
if data is None:
return gen404(request, baseURI, 'data')
if data.__class__.__name__ == 'PlugItRedirect':
url = data.url
if not data.no_prefix:
url = baseURI + url
return HttpResponseRedirect(url)
if data.__class__.__name__ == 'PlugItFile':
response = HttpResponse(data.content, content_type=data.content_type)
response['Content-Disposition'] = data.content_disposition
return response
if data.__class__.__name__ == 'PlugItNoTemplate':
response = HttpResponse(data.content)
return response
if meta.get('json_only', None): # Just send the json back
result = json.dumps(data)
return HttpResponse(result)
def build_final_response(request, meta, result, hproject, proxyMode, context):
"""Build the final response to send back to the browser"""
if 'no_template' in meta and meta['no_template']: # Just send the json back
return HttpResponse(result)
#render the template into the whole page
if not settings.PIAPI_STANDALONE:
return render_to_response('plugIt/' + hproject.get_plugItTemplate_display(), {"project": hproject, "plugit_content": result, 'context': context}, context_instance=RequestContext(request))
if proxyMode: # Force inclusion inside template
return render_to_response('plugIt/base.html', {'plugit_content': result}, context_instance=RequestContext(request))
return HttpResponse(result)
def render_data(context, templateContent, proxyMode, rendered_data):
"""Render the template"""
if proxyMode:
# Update csrf_tokens
rendered_data = rendered_data.replace('{~__PLUGIT_CSRF_TOKEN__~}', unicode(context['csrf_token']))
result = rendered_data # Render in proxy mode
else:
# Render it
template = Template(templateContent)
result = template.render(context)
return result
def cache_if_needed(cacheKey, result, context, meta):
"""Cache the result, if needed"""
if cacheKey:
# This will be a method in django 1.7
flat_context = {}
for d in context.dicts:
flat_context.update(d)
del flat_context['csrf_token']
cache.set('plugit-result-' + cacheKey, result, meta['cache_time'])
cache.set('plugit-context-' + cacheKey, flat_context, meta['cache_time'])
def build_context(request, data, hproject, orgaMode, currentOrga, availableOrga):
# Return only wanted properties about the user
data['ebuio_u'] = SimpleUser()
for prop in settings.PIAPI_USERDATA:
if hasattr(request.user, prop):
setattr(data['ebuio_u'], prop, getattr(request.user, prop))
data['ebuio_u'].id = str(data['ebuio_u'].pk)
# Add current path
data['ebuio_baseUrl'] = baseURI
# Add userMode
if settings.PIAPI_STANDALONE:
data['ebuio_userMode'] = request.session.get('plugit-standalone-usermode', 'ano')
data['ebuio_realUsers'] = settings.PIAPI_REALUSERS
else:
data['ebuio_hpro_name'] = hproject.name
data['ebuio_hpro_pk'] = hproject.pk
from app.utils import create_secret
data['ebuio_hpro_key'] = create_secret(str(hproject.pk), hproject.name, str(request.user.pk))
# Add orga mode and orga
data['ebuio_orgamode'] = orgaMode
if orgaMode:
data['ebuio_orga'] = currentOrga
# If not standalone mode, list the available orgas
if not settings.PIAPI_STANDALONE:
data['ebuio_orgas'] = []
for (orga, _) in availableOrga:
tmpOrga = SimpleOrga()
tmpOrga.pk = orga.pk
tmpOrga.name = orga.name
data['ebuio_orgas'].append(tmpOrga)
context = Context(data)
# Add csrf information to the contact
context.update(csrf(request))
# Add media urls
context.update({'MEDIA_URL': settings.MEDIA_URL, 'STATIC_URL': settings.STATIC_URL})
return context
def get_template(request, query, meta, proxyMode):
"""Return (if needed) the template to use"""
templateContent = None
if not proxyMode:
templateContent = plugIt.getTemplate(query, meta)
if not templateContent:
return (None, gen404(request, baseURI, 'template'))
return (templateContent, None)
def get_current_orga(request, hproject, availableOrga):
"""Return the current orga to use"""
# Find the current orga
currentOrgaId = request.session.get('plugit-orgapk-' + str(hproject.pk), None)
if currentOrgaId is None:
(tmpOrga, _) = availableOrga[0]
currentOrgaId = tmpOrga.pk
from organizations.models import Organization
realCurrentOrga = get_object_or_404(Organization, pk=currentOrgaId)
return realCurrentOrga
def update_session(request, session_to_set, hproPk):
"""Update the session with users-realted values"""
for key, value in session_to_set.items():
request.session['plugit_' + str(hproPk) + '_' + key] = value
def get_current_session(request, hproPk):
"""Get the current session value"""
retour = {}
base_key = 'plugit_' + str(hproPk) + '_'
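# Illustrative (not in the original): with hproPk=7, the session entry
# 'plugit_7_token' comes back as retour['token']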
for key, value in request.session.iteritems():
if key.startswith(base_key):
retour[key[len(base_key):]] = value
return retour
def main(request, query, hproPk=None):
if not settings.PIAPI_STANDALONE:
(plugIt, baseURI, hproject) = getPlugItObject(hproPk)
else:
global plugIt, baseURI
# Check if settings are ok
if settings.PIAPI_ORGAMODE and settings.PIAPI_REALUSERS:
return gen404(request, baseURI, "Configuration error. PIAPI_ORGAMODE and PIAPI_REALUSERS both set to True !")
hproject = None
orgaMode = None
currentOrga = None
availableOrga = []
## If standalone mode, change the current user and orga mode based on parameters
if settings.PIAPI_STANDALONE:
if not settings.PIAPI_REALUSERS:
currentUserMode = request.session.get('plugit-standalone-usermode', 'ano')
request.user = generate_user(mode=currentUserMode)
orgaMode = settings.PIAPI_ORGAMODE
currentOrga = SimpleOrga()
currentOrga.name = request.session.get('plugit-standalone-organame', 'EBU')
currentOrga.pk = request.session.get('plugit-standalone-orgapk', '-1')
else:
request.user.ebuio_member = request.user.is_staff
request.user.ebuio_admin = request.user.is_superuser
proxyMode = settings.PIAPI_PROXYMODE
else:
request.user.ebuio_member = hproject.isMemberRead(request.user)
request.user.ebuio_admin = hproject.isMemberWrite(request.user)
orgaMode = hproject.plugItOrgaMode
proxyMode = hproject.plugItProxyMode
if orgaMode:
# List available orgas
if request.user.is_authenticated():
availableOrga = request.user.getOrgas()
if not availableOrga:
return gen403(request, baseURI, 'no_orga_in_orgamode', hproject)
# Build the current orga
realCurrentOrga = get_current_orga(request, hproject, availableOrga)
currentOrga = SimpleOrga()
currentOrga.pk = realCurrentOrga.pk
currentOrga.name = realCurrentOrga.name
# Get rights
request.user.ebuio_orga_member = realCurrentOrga.isMember(request.user)
request.user.ebuio_orga_admin = realCurrentOrga.isOwner(request.user)
# Get meta, if not in proxy mode
if not proxyMode:
meta = plugIt.getMeta(query)
if not meta:
return gen404(request, baseURI, 'meta')
else:
meta = {}
cacheKey = get_cache_key(request, meta, orgaMode, currentOrga)
# Check access rights
error = check_rights(request, meta)
if error:
return error
# Check cache
# avoid shadowing the django cache module imported above
(cached_result, context) = find_in_cache(cacheKey)
if cached_result:
return build_final_response(request, meta, cached_result, hproject, proxyMode, context)
# Build parameters
getParameters, postParameters, files = build_parameters(request, meta, orgaMode, currentOrga)
# Bonus headers
things_to_add = build_extra_headers(request, proxyMode, orgaMode, currentOrga)
current_session = get_current_session(request, hproPk)
# Do the action
(data, session_to_set) = plugIt.doAction(query, request.method, getParameters, postParameters, files, things_to_add, proxyMode=proxyMode, session=current_session)
update_session(request, session_to_set, hproPk)
# Handle special case (redirect, etc..)
spe_cases = handle_special_cases(request, data, baseURI, meta)
if spe_cases:
return spe_cases
# Save data for proxyMode
if proxyMode:
rendered_data = data
data = {}
else:
rendered_data = None
# Get template
(templateContent, templateError) = get_template(request, query, meta, proxyMode)
if templateError:
return templateError
# Build the context
context = build_context(request, data, hproject, orgaMode, currentOrga, availableOrga)
# Render the result
result = render_data(context, templateContent, proxyMode, rendered_data)
# Cache the result for future uses if requested
cache_if_needed(cacheKey, result, context, meta)
# Return the final response
return build_final_response(request, meta, result, hproject, proxyMode, context)
@cache_control(public=True, max_age=3600)
def media(request, path, hproPk=None):
"""Ask the server for a media and return it to the client browser. Add cache headers of 1 hour"""
if not settings.PIAPI_STANDALONE:
(plugIt, baseURI, _) = getPlugItObject(hproPk)
else:
global plugIt, baseURI
(media, contentType) = plugIt.getMedia(path)
if not media: # No media returned
raise Http404
response = HttpResponse(media)
response['Content-Type'] = contentType
response['Content-Length'] = len(media)
return response
def setUser(request):
"""In standalone mode, change the current user"""
if not settings.PIAPI_STANDALONE and not settings.PIAPI_REALUSERS:
raise Http404
request.session['plugit-standalone-usermode'] = request.GET.get('mode')
return HttpResponse('')
def setOrga(request, hproPk=None):
"""Change the current orga"""
if settings.PIAPI_STANDALONE:
request.session['plugit-standalone-organame'] = request.GET.get('name')
request.session['plugit-standalone-orgapk'] = request.GET.get('pk')
else:
(_, _, hproject) = getPlugItObject(hproPk)
from organizations.models import Organization
orga = get_object_or_404(Organization, pk=request.GET.get('orga'))
if request.user.is_superuser or orga.isMember(request.user) or orga.isOwner(request.user):
request.session['plugit-orgapk-' + str(hproject.pk)] = orga.pk
return HttpResponse('')
def check_api_key(request, key, hproPk):
"""Check if an API key is valid"""
if settings.PIAPI_STANDALONE:
return True
(_, _, hproject) = getPlugItObject(hproPk)
if not hproject:
return False
if hproject.plugItApiKey is None or hproject.plugItApiKey == '':
return False
return hproject.plugItApiKey == key
def home(request, hproPk):
""" Route the request to runURI if defined otherwise go to plugIt """
(plugIt, baseURI, hproject) = getPlugItObject(hproPk)
if hproject.runURI:
return HttpResponseRedirect(hproject.runURI)
else:
return main(request, '', hproPk)
def api_home(request, key=None, hproPk=None):
"""Show the home page for the API with all methods"""
if not check_api_key(request, key, hproPk):
raise Http404
return render_to_response('plugIt/api.html', {}, context_instance=RequestContext(request))
def api_user(request, userPk, key=None, hproPk=None):
"""Return information about an user"""
if not check_api_key(request, key, hproPk):
raise Http404
if settings.PIAPI_STANDALONE:
if not settings.PIAPI_REALUSERS:
user = generate_user(pk=userPk)
if user is None:
raise Http404
else:
user = get_object_or_404(DUser, pk=userPk)
else:
from users.models import TechUser
user = get_object_or_404(TechUser, pk=userPk)
(_, _, hproject) = getPlugItObject(hproPk)
user.ebuio_member = hproject.isMemberRead(user)
user.ebuio_admin = hproject.isMemberWrite(user)
retour = {}
for prop in settings.PIAPI_USERDATA:
if hasattr(user, prop):
retour[prop] = getattr(user, prop)
retour['id'] = str(retour['pk'])
return HttpResponse(json.dumps(retour), content_type="application/json")
def api_orga(request, orgaPk, key=None, hproPk=None):
"""Return information about an organization"""
if not check_api_key(request, key, hproPk):
raise Http404
retour = {}
if settings.PIAPI_STANDALONE:
retour['pk'] = orgaPk
if orgaPk == "-1":
retour['name'] = 'EBU'
if orgaPk == "-2":
retour['name'] = 'RTS'
if orgaPk == "-3":
retour['name'] = 'BBC'
if orgaPk == "-4":
retour['name'] = 'CNN'
else:
from organizations.models import Organization
orga = get_object_or_404(Organization, pk=orgaPk)
retour['pk'] = orga.pk
retour['name'] = orga.name
return HttpResponse(json.dumps(retour), content_type="application/json")
def api_get_project_members(request, key=None, hproPk=True):
"""Return the list of project members"""
if not check_api_key(request, key, hproPk):
raise Http404
if settings.PIAPI_STANDALONE:
if not settings.PIAPI_REALUSERS:
users = [generate_user(pk="-1"), generate_user(pk="-2"), generate_user(pk="-3")]
else:
users = DUser.objects.all()
else:
(_, _, hproject) = getPlugItObject(hproPk)
users = []
for u in hproject.getMembers():
u.ebuio_member = True
u.ebuio_admin = hproject.isMemberWrite(u)
users.append(u)
liste = []
for u in users:
retour = {}
for prop in settings.PIAPI_USERDATA:
if hasattr(u, prop):
retour[prop] = getattr(u, prop)
retour['id'] = str(retour['pk'])
liste.append(retour)
return HttpResponse(json.dumps({'members': liste}), content_type="application/json")
@csrf_exempt
def api_send_mail(request, key=None, hproPk=None):
"""Send a email. Posts parameters are used"""
if not check_api_key(request, key, hproPk):
raise Http404
sender = request.POST['sender'] or settings.MAIL_SENDER
dests = request.POST.getlist('dests')
subject = request.POST['subject']
message = request.POST['message']
if 'response_id' in request.POST:
from Crypto.Cipher import AES
key = hproPk + ':' + request.POST['response_id']
hash_key = hashlib.sha512(key + settings.EBUIO_MAIL_SECRET_HASH).hexdigest()[30:42]
encrypter = AES.new(((settings.EBUIO_MAIL_SECRET_KEY) * 32)[:32], AES.MODE_CFB, '87447JEUPEBU4hR!')
encrypted_key = encrypter.encrypt(hash_key + ':' + key)
base64_key = base64.urlsafe_b64encode(encrypted_key)
#subject = subject + ' ----------------------- [' + base64_key + ']'
sender = settings.MAIL_SENDER.replace('@', '+' + base64_key + '@')
# if not settings.PIAPI_STANDALONE:
# (_, _, hproject) = getPlugItObject(hproPk)
# subject = '[EBUIo:' + smart_str(hproject.name) + '] ' + subject
send_mail(subject, message, sender, dests, fail_silently=False)
return HttpResponse(json.dumps({}), content_type="application/json")
| 31.124274
| 276
| 0.655273
|
4a177f18e88e93530dc32ed9f6a98dc2fba37c1e
| 248
|
py
|
Python
|
processors/properties/propertyLowPrice.py
|
Zvezdin/blockchain-predictor
|
df6f939037471dd50b7b9c96673d89b04b646ef2
|
[
"MIT"
] | 35
|
2017-10-25T17:10:35.000Z
|
2022-03-20T18:12:06.000Z
|
processors/properties/propertyLowPrice.py
|
Zvezdin/blockchain-predictor
|
df6f939037471dd50b7b9c96673d89b04b646ef2
|
[
"MIT"
] | 2
|
2017-09-20T17:39:15.000Z
|
2018-04-01T17:20:29.000Z
|
processors/properties/propertyLowPrice.py
|
Zvezdin/blockchain-predictor
|
df6f939037471dd50b7b9c96673d89b04b646ef2
|
[
"MIT"
] | 10
|
2017-12-01T13:47:04.000Z
|
2021-12-16T06:53:17.000Z
|
from .property import Property
class PropertyLowPrice(Property):
def __init__(self):
super().__init__()
self.name = "lowPrice"
self.requires = ['tick']
def processTick(self, data):
return self.minOfColumn(data[self.requires[0]], 'low')
| 22.545455
| 56
| 0.717742
|
4a177f4d714c33b4f39b2974402fcaa91bc99a41
| 139
|
py
|
Python
|
backend/app/schemas/__init__.py
|
mkbeh/fastapi-admin-panel
|
ba12ad16fe1fdd0f9ec2282b8aa9965bce858cda
|
[
"MIT"
] | 7
|
2020-12-04T17:45:31.000Z
|
2022-02-08T02:12:58.000Z
|
backend/app/schemas/__init__.py
|
mkbeh/fastapi-admin-panel
|
ba12ad16fe1fdd0f9ec2282b8aa9965bce858cda
|
[
"MIT"
] | null | null | null |
backend/app/schemas/__init__.py
|
mkbeh/fastapi-admin-panel
|
ba12ad16fe1fdd0f9ec2282b8aa9965bce858cda
|
[
"MIT"
] | 1
|
2020-12-04T17:38:44.000Z
|
2020-12-04T17:38:44.000Z
|
from .common import *
from .general.token import *
from .general.auth import *
from .general.account import *
from .general.role import *
| 19.857143
| 30
| 0.748201
|
4a17803ef03cd4a3f7c5439385d1ff98c14d6b49
| 3,268
|
py
|
Python
|
container_sdk/model/container/pod_detail_pb2.py
|
easyopsapis/easyops-api-python
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
[
"Apache-2.0"
] | 5
|
2019-07-31T04:11:05.000Z
|
2021-01-07T03:23:20.000Z
|
container_sdk/model/container/pod_detail_pb2.py
|
easyopsapis/easyops-api-python
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
[
"Apache-2.0"
] | null | null | null |
container_sdk/model/container/pod_detail_pb2.py
|
easyopsapis/easyops-api-python
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: pod_detail.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from container_sdk.model.container import metadata_pb2 as container__sdk_dot_model_dot_container_dot_metadata__pb2
from container_sdk.model.container import pod_status_pb2 as container__sdk_dot_model_dot_container_dot_pod__status__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='pod_detail.proto',
package='container',
syntax='proto3',
serialized_options=_b('ZCgo.easyops.local/contracts/protorepo-models/easyops/model/container'),
serialized_pb=_b('\n\x10pod_detail.proto\x12\tcontainer\x1a,container_sdk/model/container/metadata.proto\x1a.container_sdk/model/container/pod_status.proto\"X\n\tPodDetail\x12%\n\x08metadata\x18\x01 \x01(\x0b\x32\x13.container.Metadata\x12$\n\x06status\x18\x02 \x01(\x0b\x32\x14.container.PodStatusBEZCgo.easyops.local/contracts/protorepo-models/easyops/model/containerb\x06proto3')
,
dependencies=[container__sdk_dot_model_dot_container_dot_metadata__pb2.DESCRIPTOR,container__sdk_dot_model_dot_container_dot_pod__status__pb2.DESCRIPTOR,])
_PODDETAIL = _descriptor.Descriptor(
name='PodDetail',
full_name='container.PodDetail',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='metadata', full_name='container.PodDetail.metadata', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='status', full_name='container.PodDetail.status', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=125,
serialized_end=213,
)
_PODDETAIL.fields_by_name['metadata'].message_type = container__sdk_dot_model_dot_container_dot_metadata__pb2._METADATA
_PODDETAIL.fields_by_name['status'].message_type = container__sdk_dot_model_dot_container_dot_pod__status__pb2._PODSTATUS
DESCRIPTOR.message_types_by_name['PodDetail'] = _PODDETAIL
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
PodDetail = _reflection.GeneratedProtocolMessageType('PodDetail', (_message.Message,), {
'DESCRIPTOR' : _PODDETAIL,
'__module__' : 'pod_detail_pb2'
# @@protoc_insertion_point(class_scope:container.PodDetail)
})
_sym_db.RegisterMessage(PodDetail)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 38.904762
| 384
| 0.793452
|
4a1783083bcfba08d0e06f6a513f95b055201082
| 7,560
|
py
|
Python
|
astropy/cosmology/io/tests/test_ecsv.py
|
zabop/astropy
|
11b3214f18b74aea5e3f8349e50ae1b09c39d30e
|
[
"BSD-3-Clause"
] | 1
|
2019-03-11T12:26:49.000Z
|
2019-03-11T12:26:49.000Z
|
astropy/cosmology/io/tests/test_ecsv.py
|
nabobalis/astropy
|
9f77b9a0ffe18e4c767e36f00e2e8728135c0e11
|
[
"BSD-3-Clause"
] | 1
|
2019-10-09T18:54:27.000Z
|
2019-10-09T18:54:27.000Z
|
astropy/cosmology/io/tests/test_ecsv.py
|
nabobalis/astropy
|
9f77b9a0ffe18e4c767e36f00e2e8728135c0e11
|
[
"BSD-3-Clause"
] | null | null | null |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# THIRD PARTY
import pytest
# LOCAL
from astropy.cosmology.core import _COSMOLOGY_CLASSES
from astropy.cosmology.io.ecsv import read_ecsv, write_ecsv
from astropy.table import QTable, Table, vstack
from .base import ReadWriteDirectTestBase, ReadWriteTestMixinBase
###############################################################################
class ReadWriteECSVTestMixin(ReadWriteTestMixinBase):
"""
Tests for a Cosmology[Read/Write] with ``format="ascii.ecsv"``.
This class will not be directly called by :mod:`pytest` since its name does
not begin with ``Test``. To activate the contained tests this class must
be inherited in a subclass. Subclasses must define a :func:`pytest.fixture`
``cosmo`` that returns/yields an instance of a |Cosmology|.
See ``TestCosmology`` for an example.
"""
def test_to_ecsv_bad_index(self, read, write, tmp_path):
"""Test if argument ``index`` is incorrect"""
fp = tmp_path / "test_to_ecsv_bad_index.ecsv"
write(fp, format="ascii.ecsv")
# single-row table and has a non-0/None index
with pytest.raises(IndexError, match="index 2 out of range"):
read(fp, index=2, format="ascii.ecsv")
# string index where doesn't match
with pytest.raises(KeyError, match="No matches found for key"):
read(fp, index="row 0", format="ascii.ecsv")
# -----------------------
def test_to_ecsv_failed_cls(self, write, tmp_path):
"""Test failed table type."""
fp = tmp_path / "test_to_ecsv_failed_cls.ecsv"
with pytest.raises(TypeError, match="'cls' must be"):
write(fp, format='ascii.ecsv', cls=list)
@pytest.mark.parametrize("tbl_cls", [QTable, Table])
def test_to_ecsv_cls(self, write, tbl_cls, tmp_path):
fp = tmp_path / "test_to_ecsv_cls.ecsv"
write(fp, format='ascii.ecsv', cls=tbl_cls)
# -----------------------
@pytest.mark.parametrize("in_meta", [True, False])
def test_to_ecsv_in_meta(self, cosmo_cls, write, in_meta, tmp_path, add_cu):
"""Test where the cosmology class is placed."""
fp = tmp_path / "test_to_ecsv_in_meta.ecsv"
write(fp, format='ascii.ecsv', cosmology_in_meta=in_meta)
# if it's in metadata, it's not a column. And vice versa.
tbl = QTable.read(fp)
if in_meta:
assert tbl.meta["cosmology"] == cosmo_cls.__qualname__
assert "cosmology" not in tbl.colnames # not also a column
else:
assert tbl["cosmology"][0] == cosmo_cls.__qualname__
assert "cosmology" not in tbl.meta
# -----------------------
def test_readwrite_ecsv_instance(self, cosmo_cls, cosmo, read, write, tmp_path, add_cu):
"""Test cosmology -> ascii.ecsv -> cosmology."""
fp = tmp_path / "test_readwrite_ecsv_instance.ecsv"
# ------------
# To Table
write(fp, format="ascii.ecsv")
# some checks on the saved file
tbl = QTable.read(fp)
assert tbl.meta["cosmology"] == cosmo_cls.__qualname__
assert tbl["name"] == cosmo.name
# ------------
# From Table
tbl["mismatching"] = "will error"
tbl.write(fp, format="ascii.ecsv", overwrite=True)
# tests are different if the last argument is a **kwarg
if tuple(cosmo._init_signature.parameters.values())[-1].kind == 4:
got = read(fp, format="ascii.ecsv")
assert got.__class__ is cosmo_cls
assert got.name == cosmo.name
assert "mismatching" not in got.meta
return # don't continue testing
# read with mismatching parameters errors
with pytest.raises(TypeError, match="there are unused parameters"):
read(fp, format="ascii.ecsv")
# unless mismatched are moved to meta
got = read(fp, format="ascii.ecsv", move_to_meta=True)
assert got == cosmo
assert got.meta["mismatching"] == "will error"
# it won't error if everything matches up
tbl.remove_column("mismatching")
tbl.write(fp, format="ascii.ecsv", overwrite=True)
got = read(fp, format="ascii.ecsv")
assert got == cosmo
# and it will also work if the cosmology is a class
# Note this is not the default output of ``write``.
tbl.meta["cosmology"] = _COSMOLOGY_CLASSES[tbl.meta["cosmology"]]
got = read(fp, format="ascii.ecsv")
assert got == cosmo
# also it auto-identifies 'format'
got = read(fp)
assert got == cosmo
def test_readwrite_ecsv_subclass_partial_info(self, cosmo_cls, cosmo, read,
write, tmp_path, add_cu):
"""
Test writing from an instance and reading from that class.
This works with missing information.
"""
fp = tmp_path / "test_read_ecsv_subclass_partial_info.ecsv"
# test write
write(fp, format="ascii.ecsv")
# partial information
tbl = QTable.read(fp)
tbl.meta.pop("cosmology", None)
del tbl["Tcmb0"]
tbl.write(fp, overwrite=True)
# read with the same class that wrote fills in the missing info with
# the default value
got = cosmo_cls.read(fp, format="ascii.ecsv")
got2 = read(fp, format="ascii.ecsv", cosmology=cosmo_cls)
got3 = read(fp, format="ascii.ecsv", cosmology=cosmo_cls.__qualname__)
assert (got == got2) and (got2 == got3) # internal consistency
# not equal, because Tcmb0 is changed, which also changes m_nu
assert got != cosmo
assert got.Tcmb0 == cosmo_cls._init_signature.parameters["Tcmb0"].default
assert got.clone(name=cosmo.name, Tcmb0=cosmo.Tcmb0, m_nu=cosmo.m_nu) == cosmo
# but the metadata is the same
assert got.meta == cosmo.meta
    def test_readwrite_ecsv_multirow(self, cosmo, read, write, tmp_path, add_cu):
        """Test if table has multiple rows."""
        fp = tmp_path / "test_readwrite_ecsv_multirow.ecsv"
# Make
cosmo1 = cosmo.clone(name="row 0")
cosmo2 = cosmo.clone(name="row 2")
tbl = vstack([c.to_format("astropy.table") for c in (cosmo1, cosmo, cosmo2)],
metadata_conflicts='silent')
tbl.write(fp, format="ascii.ecsv")
# ------------
# From Table
# it will error on a multi-row table
with pytest.raises(ValueError, match="need to select a specific row"):
read(fp, format="ascii.ecsv")
# unless the index argument is provided
got = read(fp, index=1, format="ascii.ecsv")
assert got == cosmo
# the index can be a string
got = read(fp, index=cosmo.name, format="ascii.ecsv")
assert got == cosmo
# it's better if the table already has an index
# this will be identical to the previous ``got``
tbl.add_index("name")
got2 = read(fp, index=cosmo.name, format="ascii.ecsv")
assert got2 == cosmo
class TestReadWriteECSV(ReadWriteDirectTestBase, ReadWriteECSVTestMixin):
"""
Directly test ``read/write_ecsv``.
These are not public API and are discouraged from use, in favor of
``Cosmology.read/write(..., format="ascii.ecsv")``, but should be
tested regardless b/c they are used internally.
"""
def setup_class(self):
self.functions = {"read": read_ecsv, "write": write_ecsv}
| 37.241379
| 92
| 0.612698
|
4a1783b14d9f817d1d6bcb745aaadf370a29b464
| 34,151
|
py
|
Python
|
django/contrib/auth/tests/test_views.py
|
AlexHill/django
|
fe1389e911b0cdc487e5547c09c920c12f4e1ce0
|
[
"BSD-3-Clause"
] | 1
|
2016-08-14T13:08:47.000Z
|
2016-08-14T13:08:47.000Z
|
django/contrib/auth/tests/test_views.py
|
AlexHill/django
|
fe1389e911b0cdc487e5547c09c920c12f4e1ce0
|
[
"BSD-3-Clause"
] | null | null | null |
django/contrib/auth/tests/test_views.py
|
AlexHill/django
|
fe1389e911b0cdc487e5547c09c920c12f4e1ce0
|
[
"BSD-3-Clause"
] | null | null | null |
from importlib import import_module
import itertools
import os
import re
from django.conf import global_settings, settings
from django.contrib.sites.models import Site, RequestSite
from django.contrib.admin.models import LogEntry
from django.contrib.auth.models import User
from django.core import mail
from django.core.urlresolvers import reverse, NoReverseMatch
from django.http import QueryDict, HttpRequest
from django.utils.encoding import force_text
from django.utils.http import urlquote
from django.utils.six.moves.urllib.parse import urlparse, ParseResult
from django.utils._os import upath
from django.test import TestCase
from django.test.utils import override_settings, patch_logger
from django.middleware.csrf import CsrfViewMiddleware
from django.contrib.sessions.middleware import SessionMiddleware
from django.contrib.auth import SESSION_KEY, REDIRECT_FIELD_NAME
from django.contrib.auth.forms import (AuthenticationForm, PasswordChangeForm,
SetPasswordForm)
from django.contrib.auth.tests.utils import skipIfCustomUser
from django.contrib.auth.views import login as login_view
@override_settings(
LANGUAGES=(
('en', 'English'),
),
LANGUAGE_CODE='en',
TEMPLATE_LOADERS=global_settings.TEMPLATE_LOADERS,
TEMPLATE_DIRS=(
os.path.join(os.path.dirname(upath(__file__)), 'templates'),
),
USE_TZ=False,
PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
)
class AuthViewsTestCase(TestCase):
"""
    Helper base class for the following test cases.
"""
fixtures = ['authtestdata.json']
urls = 'django.contrib.auth.tests.urls'
def login(self, password='password'):
response = self.client.post('/login/', {
'username': 'testclient',
'password': password,
})
self.assertTrue(SESSION_KEY in self.client.session)
return response
def logout(self):
response = self.client.get('/admin/logout/')
self.assertEqual(response.status_code, 200)
self.assertTrue(SESSION_KEY not in self.client.session)
def assertFormError(self, response, error):
"""Assert that error is found in response.context['form'] errors"""
form_errors = list(itertools.chain(*response.context['form'].errors.values()))
self.assertIn(force_text(error), form_errors)
def assertURLEqual(self, url, expected, parse_qs=False):
"""
Given two URLs, make sure all their components (the ones given by
urlparse) are equal, only comparing components that are present in both
URLs.
        If `parse_qs` is True, then the query strings are parsed with QueryDict.
This is useful if you don't want the order of parameters to matter.
Otherwise, the query strings are compared as-is.
"""
fields = ParseResult._fields
for attr, x, y in zip(fields, urlparse(url), urlparse(expected)):
if parse_qs and attr == 'query':
x, y = QueryDict(x), QueryDict(y)
if x and y and x != y:
self.fail("%r != %r (%s doesn't match)" % (url, expected, attr))
@skipIfCustomUser
class AuthViewNamedURLTests(AuthViewsTestCase):
urls = 'django.contrib.auth.urls'
def test_named_urls(self):
"Named URLs should be reversible"
expected_named_urls = [
('login', [], {}),
('logout', [], {}),
('password_change', [], {}),
('password_change_done', [], {}),
('password_reset', [], {}),
('password_reset_done', [], {}),
('password_reset_confirm', [], {
'uidb64': 'aaaaaaa',
'token': '1111-aaaaa',
}),
('password_reset_complete', [], {}),
]
for name, args, kwargs in expected_named_urls:
try:
reverse(name, args=args, kwargs=kwargs)
except NoReverseMatch:
self.fail("Reversal of url named '%s' failed with NoReverseMatch" % name)
@skipIfCustomUser
class PasswordResetTest(AuthViewsTestCase):
def test_email_not_found(self):
"""If the provided email is not registered, don't raise any error but
also don't send any email."""
response = self.client.get('/password_reset/')
self.assertEqual(response.status_code, 200)
response = self.client.post('/password_reset/', {'email': 'not_a_real_email@email.com'})
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 0)
def test_email_found(self):
"Email is sent if a valid email address is provided for password reset"
response = self.client.post('/password_reset/', {'email': 'staffmember@example.com'})
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 1)
self.assertTrue("http://" in mail.outbox[0].body)
self.assertEqual(settings.DEFAULT_FROM_EMAIL, mail.outbox[0].from_email)
        # an optional multipart text/html email was added; make sure the
        # original, default functionality is exactly the same
self.assertFalse(mail.outbox[0].message().is_multipart())
def test_html_mail_template(self):
"""
A multipart email with text/plain and text/html is sent
if the html_email_template parameter is passed to the view
"""
response = self.client.post('/password_reset/html_email_template/', {'email': 'staffmember@example.com'})
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 1)
message = mail.outbox[0].message()
self.assertEqual(len(message.get_payload()), 2)
self.assertTrue(message.is_multipart())
self.assertEqual(message.get_payload(0).get_content_type(), 'text/plain')
self.assertEqual(message.get_payload(1).get_content_type(), 'text/html')
self.assertTrue('<html>' not in message.get_payload(0).get_payload())
self.assertTrue('<html>' in message.get_payload(1).get_payload())
def test_email_found_custom_from(self):
"Email is sent if a valid email address is provided for password reset when a custom from_email is provided."
response = self.client.post('/password_reset_from_email/', {'email': 'staffmember@example.com'})
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual("staffmember@example.com", mail.outbox[0].from_email)
@override_settings(ALLOWED_HOSTS=['adminsite.com'])
def test_admin_reset(self):
"If the reset view is marked as being for admin, the HTTP_HOST header is used for a domain override."
response = self.client.post('/admin_password_reset/',
{'email': 'staffmember@example.com'},
HTTP_HOST='adminsite.com'
)
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 1)
self.assertTrue("http://adminsite.com" in mail.outbox[0].body)
self.assertEqual(settings.DEFAULT_FROM_EMAIL, mail.outbox[0].from_email)
# Skip any 500 handler action (like sending more mail...)
@override_settings(DEBUG_PROPAGATE_EXCEPTIONS=True)
def test_poisoned_http_host(self):
"Poisoned HTTP_HOST headers can't be used for reset emails"
# This attack is based on the way browsers handle URLs. The colon
# should be used to separate the port, but if the URL contains an @,
# the colon is interpreted as part of a username for login purposes,
# making 'evil.com' the request domain. Since HTTP_HOST is used to
# produce a meaningful reset URL, we need to be certain that the
# HTTP_HOST header isn't poisoned. This is done as a check when get_host()
# is invoked, but we check here as a practical consequence.
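        # For illustration: urlparse('http://www.example:x@evil.tld/').hostname
        # is 'evil.tld', because everything before the '@' is parsed as
        # userinfo rather than as the host.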
with patch_logger('django.security.DisallowedHost', 'error') as logger_calls:
response = self.client.post(
'/password_reset/',
{'email': 'staffmember@example.com'},
HTTP_HOST='www.example:dr.frankenstein@evil.tld'
)
self.assertEqual(response.status_code, 400)
self.assertEqual(len(mail.outbox), 0)
self.assertEqual(len(logger_calls), 1)
# Skip any 500 handler action (like sending more mail...)
@override_settings(DEBUG_PROPAGATE_EXCEPTIONS=True)
def test_poisoned_http_host_admin_site(self):
"Poisoned HTTP_HOST headers can't be used for reset emails on admin views"
with patch_logger('django.security.DisallowedHost', 'error') as logger_calls:
response = self.client.post(
'/admin_password_reset/',
{'email': 'staffmember@example.com'},
HTTP_HOST='www.example:dr.frankenstein@evil.tld'
)
self.assertEqual(response.status_code, 400)
self.assertEqual(len(mail.outbox), 0)
self.assertEqual(len(logger_calls), 1)
def _test_confirm_start(self):
# Start by creating the email
self.client.post('/password_reset/', {'email': 'staffmember@example.com'})
self.assertEqual(len(mail.outbox), 1)
return self._read_signup_email(mail.outbox[0])
def _read_signup_email(self, email):
urlmatch = re.search(r"https?://[^/]*(/.*reset/\S*)", email.body)
self.assertTrue(urlmatch is not None, "No URL found in sent email")
return urlmatch.group(), urlmatch.groups()[0]
def test_confirm_valid(self):
url, path = self._test_confirm_start()
response = self.client.get(path)
# redirect to a 'complete' page:
self.assertContains(response, "Please enter your new password")
def test_confirm_invalid(self):
url, path = self._test_confirm_start()
        # Let's munge the token in the path, but keep the same length,
        # in case the URLconf rejects a different length.
path = path[:-5] + ("0" * 4) + path[-1]
response = self.client.get(path)
self.assertContains(response, "The password reset link was invalid")
def test_confirm_invalid_user(self):
        # Ensure that we get a 200 response for a non-existent user, not a 404
response = self.client.get('/reset/123456/1-1/')
self.assertContains(response, "The password reset link was invalid")
def test_confirm_overflow_user(self):
# Ensure that we get a 200 response for a base36 user id that overflows int
response = self.client.get('/reset/zzzzzzzzzzzzz/1-1/')
self.assertContains(response, "The password reset link was invalid")
def test_confirm_invalid_post(self):
# Same as test_confirm_invalid, but trying
# to do a POST instead.
url, path = self._test_confirm_start()
path = path[:-5] + ("0" * 4) + path[-1]
self.client.post(path, {
'new_password1': 'anewpassword',
'new_password2': ' anewpassword',
})
# Check the password has not been changed
u = User.objects.get(email='staffmember@example.com')
self.assertTrue(not u.check_password("anewpassword"))
def test_confirm_complete(self):
url, path = self._test_confirm_start()
response = self.client.post(path, {'new_password1': 'anewpassword',
'new_password2': 'anewpassword'})
# Check the password has been changed
u = User.objects.get(email='staffmember@example.com')
self.assertTrue(u.check_password("anewpassword"))
# Check we can't use the link again
response = self.client.get(path)
self.assertContains(response, "The password reset link was invalid")
def test_confirm_different_passwords(self):
url, path = self._test_confirm_start()
response = self.client.post(path, {'new_password1': 'anewpassword',
'new_password2': 'x'})
self.assertFormError(response, SetPasswordForm.error_messages['password_mismatch'])
def test_reset_redirect_default(self):
response = self.client.post('/password_reset/',
{'email': 'staffmember@example.com'})
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/password_reset/done/')
def test_reset_custom_redirect(self):
response = self.client.post('/password_reset/custom_redirect/',
{'email': 'staffmember@example.com'})
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/custom/')
def test_reset_custom_redirect_named(self):
response = self.client.post('/password_reset/custom_redirect/named/',
{'email': 'staffmember@example.com'})
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/password_reset/')
def test_confirm_redirect_default(self):
url, path = self._test_confirm_start()
response = self.client.post(path, {'new_password1': 'anewpassword',
'new_password2': 'anewpassword'})
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/reset/done/')
def test_confirm_redirect_custom(self):
url, path = self._test_confirm_start()
path = path.replace('/reset/', '/reset/custom/')
response = self.client.post(path, {'new_password1': 'anewpassword',
'new_password2': 'anewpassword'})
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/custom/')
def test_confirm_redirect_custom_named(self):
url, path = self._test_confirm_start()
path = path.replace('/reset/', '/reset/custom/named/')
response = self.client.post(path, {'new_password1': 'anewpassword',
'new_password2': 'anewpassword'})
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/password_reset/')
def test_confirm_display_user_from_form(self):
url, path = self._test_confirm_start()
response = self.client.get(path)
# #16919 -- The ``password_reset_confirm`` view should pass the user
# object to the ``SetPasswordForm``, even on GET requests.
# For this test, we render ``{{ form.user }}`` in the template
# ``registration/password_reset_confirm.html`` so that we can test this.
username = User.objects.get(email='staffmember@example.com').username
self.assertContains(response, "Hello, %s." % username)
# However, the view should NOT pass any user object on a form if the
# password reset link was invalid.
response = self.client.get('/reset/zzzzzzzzzzzzz/1-1/')
self.assertContains(response, "Hello, .")
@override_settings(AUTH_USER_MODEL='auth.CustomUser')
class CustomUserPasswordResetTest(AuthViewsTestCase):
fixtures = ['custom_user.json']
def _test_confirm_start(self):
# Start by creating the email
response = self.client.post('/password_reset/', {'email': 'staffmember@example.com'})
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 1)
return self._read_signup_email(mail.outbox[0])
def _read_signup_email(self, email):
urlmatch = re.search(r"https?://[^/]*(/.*reset/\S*)", email.body)
self.assertTrue(urlmatch is not None, "No URL found in sent email")
return urlmatch.group(), urlmatch.groups()[0]
def test_confirm_valid_custom_user(self):
url, path = self._test_confirm_start()
response = self.client.get(path)
# redirect to a 'complete' page:
self.assertContains(response, "Please enter your new password")
@skipIfCustomUser
class ChangePasswordTest(AuthViewsTestCase):
def fail_login(self, password='password'):
response = self.client.post('/login/', {
'username': 'testclient',
'password': password,
})
self.assertFormError(response, AuthenticationForm.error_messages['invalid_login'] % {
'username': User._meta.get_field('username').verbose_name
})
def logout(self):
self.client.get('/logout/')
def test_password_change_fails_with_invalid_old_password(self):
self.login()
response = self.client.post('/password_change/', {
'old_password': 'donuts',
'new_password1': 'password1',
'new_password2': 'password1',
})
self.assertFormError(response, PasswordChangeForm.error_messages['password_incorrect'])
def test_password_change_fails_with_mismatched_passwords(self):
self.login()
response = self.client.post('/password_change/', {
'old_password': 'password',
'new_password1': 'password1',
'new_password2': 'donuts',
})
self.assertFormError(response, SetPasswordForm.error_messages['password_mismatch'])
def test_password_change_succeeds(self):
self.login()
self.client.post('/password_change/', {
'old_password': 'password',
'new_password1': 'password1',
'new_password2': 'password1',
})
self.fail_login()
self.login(password='password1')
def test_password_change_done_succeeds(self):
self.login()
response = self.client.post('/password_change/', {
'old_password': 'password',
'new_password1': 'password1',
'new_password2': 'password1',
})
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/password_change/done/')
@override_settings(LOGIN_URL='/login/')
def test_password_change_done_fails(self):
response = self.client.get('/password_change/done/')
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/login/?next=/password_change/done/')
def test_password_change_redirect_default(self):
self.login()
response = self.client.post('/password_change/', {
'old_password': 'password',
'new_password1': 'password1',
'new_password2': 'password1',
})
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/password_change/done/')
def test_password_change_redirect_custom(self):
self.login()
response = self.client.post('/password_change/custom/', {
'old_password': 'password',
'new_password1': 'password1',
'new_password2': 'password1',
})
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/custom/')
def test_password_change_redirect_custom_named(self):
self.login()
response = self.client.post('/password_change/custom/named/', {
'old_password': 'password',
'new_password1': 'password1',
'new_password2': 'password1',
})
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/password_reset/')
@skipIfCustomUser
class LoginTest(AuthViewsTestCase):
def test_current_site_in_context_after_login(self):
response = self.client.get(reverse('login'))
self.assertEqual(response.status_code, 200)
if Site._meta.installed:
site = Site.objects.get_current()
self.assertEqual(response.context['site'], site)
self.assertEqual(response.context['site_name'], site.name)
else:
self.assertIsInstance(response.context['site'], RequestSite)
self.assertTrue(isinstance(response.context['form'], AuthenticationForm),
'Login form is not an AuthenticationForm')
def test_security_check(self, password='password'):
login_url = reverse('login')
# Those URLs should not pass the security check
for bad_url in ('http://example.com',
'https://example.com',
                        'ftp://example.com',
'//example.com',
'javascript:alert("XSS")'):
nasty_url = '%(url)s?%(next)s=%(bad_url)s' % {
'url': login_url,
'next': REDIRECT_FIELD_NAME,
'bad_url': urlquote(bad_url),
}
response = self.client.post(nasty_url, {
'username': 'testclient',
'password': password,
})
self.assertEqual(response.status_code, 302)
self.assertFalse(bad_url in response.url,
"%s should be blocked" % bad_url)
# These URLs *should* still pass the security check
for good_url in ('/view/?param=http://example.com',
'/view/?param=https://example.com',
                         '/view?param=ftp://example.com',
'view/?param=//example.com',
'https:///',
'HTTPS:///',
'//testserver/',
'/url%20with%20spaces/'): # see ticket #12534
safe_url = '%(url)s?%(next)s=%(good_url)s' % {
'url': login_url,
'next': REDIRECT_FIELD_NAME,
'good_url': urlquote(good_url),
}
response = self.client.post(safe_url, {
'username': 'testclient',
'password': password,
})
self.assertEqual(response.status_code, 302)
self.assertTrue(good_url in response.url,
"%s should be allowed" % good_url)
def test_login_form_contains_request(self):
        # Ticket #15198
self.client.post('/custom_requestauth_login/', {
'username': 'testclient',
'password': 'password',
}, follow=True)
# the custom authentication form used by this login asserts
# that a request is passed to the form successfully.
def test_login_csrf_rotate(self, password='password'):
"""
Makes sure that a login rotates the currently-used CSRF token.
"""
# Do a GET to establish a CSRF token
# TestClient isn't used here as we're testing middleware, essentially.
req = HttpRequest()
CsrfViewMiddleware().process_view(req, login_view, (), {})
req.META["CSRF_COOKIE_USED"] = True
resp = login_view(req)
resp2 = CsrfViewMiddleware().process_response(req, resp)
csrf_cookie = resp2.cookies.get(settings.CSRF_COOKIE_NAME, None)
token1 = csrf_cookie.coded_value
# Prepare the POST request
req = HttpRequest()
req.COOKIES[settings.CSRF_COOKIE_NAME] = token1
req.method = "POST"
req.POST = {'username': 'testclient', 'password': password, 'csrfmiddlewaretoken': token1}
# Use POST request to log in
SessionMiddleware().process_request(req)
CsrfViewMiddleware().process_view(req, login_view, (), {})
req.META["SERVER_NAME"] = "testserver" # Required to have redirect work in login view
req.META["SERVER_PORT"] = 80
resp = login_view(req)
resp2 = CsrfViewMiddleware().process_response(req, resp)
csrf_cookie = resp2.cookies.get(settings.CSRF_COOKIE_NAME, None)
token2 = csrf_cookie.coded_value
# Check the CSRF token switched
self.assertNotEqual(token1, token2)
@skipIfCustomUser
class LoginURLSettings(AuthViewsTestCase):
"""Tests for settings.LOGIN_URL."""
def assertLoginURLEquals(self, url, parse_qs=False):
response = self.client.get('/login_required/')
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, url, parse_qs=parse_qs)
@override_settings(LOGIN_URL='/login/')
def test_standard_login_url(self):
self.assertLoginURLEquals('/login/?next=/login_required/')
@override_settings(LOGIN_URL='login')
def test_named_login_url(self):
self.assertLoginURLEquals('/login/?next=/login_required/')
@override_settings(LOGIN_URL='http://remote.example.com/login')
def test_remote_login_url(self):
quoted_next = urlquote('http://testserver/login_required/')
expected = 'http://remote.example.com/login?next=%s' % quoted_next
self.assertLoginURLEquals(expected)
@override_settings(LOGIN_URL='https:///login/')
def test_https_login_url(self):
quoted_next = urlquote('http://testserver/login_required/')
expected = 'https:///login/?next=%s' % quoted_next
self.assertLoginURLEquals(expected)
@override_settings(LOGIN_URL='/login/?pretty=1')
def test_login_url_with_querystring(self):
self.assertLoginURLEquals('/login/?pretty=1&next=/login_required/', parse_qs=True)
@override_settings(LOGIN_URL='http://remote.example.com/login/?next=/default/')
def test_remote_login_url_with_next_querystring(self):
quoted_next = urlquote('http://testserver/login_required/')
expected = 'http://remote.example.com/login/?next=%s' % quoted_next
self.assertLoginURLEquals(expected)
@skipIfCustomUser
class LoginRedirectUrlTest(AuthViewsTestCase):
"""Tests for settings.LOGIN_REDIRECT_URL."""
def assertLoginRedirectURLEqual(self, url):
response = self.login()
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, url)
def test_default(self):
self.assertLoginRedirectURLEqual('/accounts/profile/')
@override_settings(LOGIN_REDIRECT_URL='/custom/')
def test_custom(self):
self.assertLoginRedirectURLEqual('/custom/')
@override_settings(LOGIN_REDIRECT_URL='password_reset')
def test_named(self):
self.assertLoginRedirectURLEqual('/password_reset/')
@override_settings(LOGIN_REDIRECT_URL='http://remote.example.com/welcome/')
def test_remote(self):
self.assertLoginRedirectURLEqual('http://remote.example.com/welcome/')
@skipIfCustomUser
class LogoutTest(AuthViewsTestCase):
def confirm_logged_out(self):
self.assertTrue(SESSION_KEY not in self.client.session)
def test_logout_default(self):
"Logout without next_page option renders the default template"
self.login()
response = self.client.get('/logout/')
self.assertContains(response, 'Logged out')
self.confirm_logged_out()
def test_14377(self):
# Bug 14377
self.login()
response = self.client.get('/logout/')
self.assertTrue('site' in response.context)
def test_logout_with_overridden_redirect_url(self):
# Bug 11223
self.login()
response = self.client.get('/logout/next_page/')
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/somewhere/')
response = self.client.get('/logout/next_page/?next=/login/')
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/login/')
self.confirm_logged_out()
def test_logout_with_next_page_specified(self):
"Logout with next_page option given redirects to specified resource"
self.login()
response = self.client.get('/logout/next_page/')
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/somewhere/')
self.confirm_logged_out()
def test_logout_with_redirect_argument(self):
"Logout with query string redirects to specified resource"
self.login()
response = self.client.get('/logout/?next=/login/')
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/login/')
self.confirm_logged_out()
def test_logout_with_custom_redirect_argument(self):
"Logout with custom query string redirects to specified resource"
self.login()
response = self.client.get('/logout/custom_query/?follow=/somewhere/')
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/somewhere/')
self.confirm_logged_out()
def test_logout_with_named_redirect(self):
"Logout resolves names or URLs passed as next_page."
self.login()
response = self.client.get('/logout/next_page/named/')
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/password_reset/')
self.confirm_logged_out()
def test_security_check(self, password='password'):
logout_url = reverse('logout')
# Those URLs should not pass the security check
for bad_url in ('http://example.com',
'https://example.com',
                        'ftp://example.com',
'//example.com',
'javascript:alert("XSS")'):
nasty_url = '%(url)s?%(next)s=%(bad_url)s' % {
'url': logout_url,
'next': REDIRECT_FIELD_NAME,
'bad_url': urlquote(bad_url),
}
self.login()
response = self.client.get(nasty_url)
self.assertEqual(response.status_code, 302)
self.assertFalse(bad_url in response.url,
"%s should be blocked" % bad_url)
self.confirm_logged_out()
# These URLs *should* still pass the security check
for good_url in ('/view/?param=http://example.com',
'/view/?param=https://example.com',
                         '/view?param=ftp://example.com',
'view/?param=//example.com',
'https:///',
'HTTPS:///',
'//testserver/',
'/url%20with%20spaces/'): # see ticket #12534
safe_url = '%(url)s?%(next)s=%(good_url)s' % {
'url': logout_url,
'next': REDIRECT_FIELD_NAME,
'good_url': urlquote(good_url),
}
self.login()
response = self.client.get(safe_url)
self.assertEqual(response.status_code, 302)
self.assertTrue(good_url in response.url,
"%s should be allowed" % good_url)
self.confirm_logged_out()
def test_logout_preserve_language(self):
"""Check that language stored in session is preserved after logout"""
# Create a new session with language
engine = import_module(settings.SESSION_ENGINE)
session = engine.SessionStore()
session['_language'] = 'pl'
session.save()
self.client.cookies[settings.SESSION_COOKIE_NAME] = session.session_key
self.client.get('/logout/')
self.assertEqual(self.client.session['_language'], 'pl')
@skipIfCustomUser
@override_settings(
PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
)
class ChangelistTests(AuthViewsTestCase):
urls = 'django.contrib.auth.tests.urls_admin'
def setUp(self):
# Make me a superuser before logging in.
User.objects.filter(username='testclient').update(is_staff=True, is_superuser=True)
self.login()
self.admin = User.objects.get(pk=1)
def get_user_data(self, user):
return {
'username': user.username,
'password': user.password,
'email': user.email,
'is_active': user.is_active,
'is_staff': user.is_staff,
'is_superuser': user.is_superuser,
'last_login_0': user.last_login.strftime('%Y-%m-%d'),
'last_login_1': user.last_login.strftime('%H:%M:%S'),
'initial-last_login_0': user.last_login.strftime('%Y-%m-%d'),
'initial-last_login_1': user.last_login.strftime('%H:%M:%S'),
'date_joined_0': user.date_joined.strftime('%Y-%m-%d'),
'date_joined_1': user.date_joined.strftime('%H:%M:%S'),
'initial-date_joined_0': user.date_joined.strftime('%Y-%m-%d'),
'initial-date_joined_1': user.date_joined.strftime('%H:%M:%S'),
'first_name': user.first_name,
'last_name': user.last_name,
}
# #20078 - users shouldn't be allowed to guess password hashes via
# repeated password__startswith queries.
def test_changelist_disallows_password_lookups(self):
# A lookup that tries to filter on password isn't OK
with patch_logger('django.security.DisallowedModelAdminLookup', 'error') as logger_calls:
response = self.client.get('/admin/auth/user/?password__startswith=sha1$')
self.assertEqual(response.status_code, 400)
self.assertEqual(len(logger_calls), 1)
def test_user_change_email(self):
data = self.get_user_data(self.admin)
data['email'] = 'new_' + data['email']
response = self.client.post('/admin/auth/user/%s/' % self.admin.pk, data)
self.assertRedirects(response, '/admin/auth/user/')
row = LogEntry.objects.latest('id')
self.assertEqual(row.change_message, 'Changed email.')
def test_user_not_change(self):
response = self.client.post('/admin/auth/user/%s/' % self.admin.pk,
self.get_user_data(self.admin)
)
self.assertRedirects(response, '/admin/auth/user/')
row = LogEntry.objects.latest('id')
self.assertEqual(row.change_message, 'No fields changed.')
def test_user_change_password(self):
response = self.client.post('/admin/auth/user/%s/password/' % self.admin.pk, {
'password1': 'password1',
'password2': 'password1',
})
self.assertRedirects(response, '/admin/auth/user/%s/' % self.admin.pk)
row = LogEntry.objects.latest('id')
self.assertEqual(row.change_message, 'Changed password.')
self.logout()
self.login(password='password1')
| 42.957233
| 117
| 0.63597
|
4a1783e03093eea5f0ed6e436518aee0d6b80d5b
| 4,952
|
py
|
Python
|
src/virtual-wan/azext_vwan/vendored_sdks/v2018_08_01/v2018_08_01/models/express_route_cross_connection.py
|
Mannan2812/azure-cli-extensions
|
e2b34efe23795f6db9c59100534a40f0813c3d95
|
[
"MIT"
] | 207
|
2017-11-29T06:59:41.000Z
|
2022-03-31T10:00:53.000Z
|
src/virtual-wan/azext_vwan/vendored_sdks/v2018_08_01/v2018_08_01/models/express_route_cross_connection.py
|
Mannan2812/azure-cli-extensions
|
e2b34efe23795f6db9c59100534a40f0813c3d95
|
[
"MIT"
] | 4,061
|
2017-10-27T23:19:56.000Z
|
2022-03-31T23:18:30.000Z
|
src/virtual-wan/azext_vwan/vendored_sdks/v2018_08_01/v2018_08_01/models/express_route_cross_connection.py
|
Mannan2812/azure-cli-extensions
|
e2b34efe23795f6db9c59100534a40f0813c3d95
|
[
"MIT"
] | 802
|
2017-10-11T17:36:26.000Z
|
2022-03-31T22:24:32.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class ExpressRouteCrossConnection(Resource):
"""ExpressRouteCrossConnection resource.
Variables are only populated by the server, and will be ignored when
sending a request.
:param id: Resource ID.
:type id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Resource location.
:type location: str
:param tags: Resource tags.
:type tags: dict[str, str]
:ivar primary_azure_port: The name of the primary port.
:vartype primary_azure_port: str
:ivar secondary_azure_port: The name of the secondary port.
:vartype secondary_azure_port: str
:ivar s_tag: The identifier of the circuit traffic.
:vartype s_tag: int
:param peering_location: The peering location of the ExpressRoute circuit.
:type peering_location: str
:param bandwidth_in_mbps: The circuit bandwidth In Mbps.
:type bandwidth_in_mbps: int
:param express_route_circuit: The ExpressRouteCircuit
:type express_route_circuit:
~azure.mgmt.network.v2018_08_01.models.ExpressRouteCircuitReference
:param service_provider_provisioning_state: The provisioning state of the
circuit in the connectivity provider system. Possible values are
'NotProvisioned', 'Provisioning', 'Provisioned'. Possible values include:
'NotProvisioned', 'Provisioning', 'Provisioned', 'Deprovisioning'
:type service_provider_provisioning_state: str or
~azure.mgmt.network.v2018_08_01.models.ServiceProviderProvisioningState
:param service_provider_notes: Additional read only notes set by the
connectivity provider.
:type service_provider_notes: str
:ivar provisioning_state: Gets the provisioning state of the public IP
resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:vartype provisioning_state: str
:param peerings: The list of peerings.
:type peerings:
list[~azure.mgmt.network.v2018_08_01.models.ExpressRouteCrossConnectionPeering]
:ivar etag: Gets a unique read-only string that changes whenever the
resource is updated.
:vartype etag: str
"""
_validation = {
'name': {'readonly': True},
'type': {'readonly': True},
'primary_azure_port': {'readonly': True},
'secondary_azure_port': {'readonly': True},
's_tag': {'readonly': True},
'provisioning_state': {'readonly': True},
'etag': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'primary_azure_port': {'key': 'properties.primaryAzurePort', 'type': 'str'},
'secondary_azure_port': {'key': 'properties.secondaryAzurePort', 'type': 'str'},
's_tag': {'key': 'properties.sTag', 'type': 'int'},
'peering_location': {'key': 'properties.peeringLocation', 'type': 'str'},
'bandwidth_in_mbps': {'key': 'properties.bandwidthInMbps', 'type': 'int'},
'express_route_circuit': {'key': 'properties.expressRouteCircuit', 'type': 'ExpressRouteCircuitReference'},
'service_provider_provisioning_state': {'key': 'properties.serviceProviderProvisioningState', 'type': 'str'},
'service_provider_notes': {'key': 'properties.serviceProviderNotes', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'peerings': {'key': 'properties.peerings', 'type': '[ExpressRouteCrossConnectionPeering]'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, **kwargs):
super(ExpressRouteCrossConnection, self).__init__(**kwargs)
self.primary_azure_port = None
self.secondary_azure_port = None
self.s_tag = None
self.peering_location = kwargs.get('peering_location', None)
self.bandwidth_in_mbps = kwargs.get('bandwidth_in_mbps', None)
self.express_route_circuit = kwargs.get('express_route_circuit', None)
self.service_provider_provisioning_state = kwargs.get('service_provider_provisioning_state', None)
self.service_provider_notes = kwargs.get('service_provider_notes', None)
self.provisioning_state = None
self.peerings = kwargs.get('peerings', None)
self.etag = None
| 46.716981
| 117
| 0.663368
|
4a1783f464ea7bc1eae9d0b99eb0f6596fad22c4
| 1,808
|
py
|
Python
|
applications/CoSimulationApplication/python_scripts/base_classes/co_simulation_convergence_criteria.py
|
HubertBalcerzak/Kratos
|
c15689d53f06dabb36dc44c13eeac73d3e183916
|
[
"BSD-4-Clause"
] | null | null | null |
applications/CoSimulationApplication/python_scripts/base_classes/co_simulation_convergence_criteria.py
|
HubertBalcerzak/Kratos
|
c15689d53f06dabb36dc44c13eeac73d3e183916
|
[
"BSD-4-Clause"
] | 1
|
2019-10-15T13:11:37.000Z
|
2019-10-15T13:11:37.000Z
|
applications/CoSimulationApplication/python_scripts/base_classes/co_simulation_convergence_criteria.py
|
Gaoliu19910601/Kratos
|
0bac5e132d02061680fc90f1e52d4930b5ed7fa3
|
[
"BSD-4-Clause"
] | null | null | null |
from __future__ import print_function, absolute_import, division # makes these scripts backward compatible with python 2.6 and 2.7
# Importing the Kratos Library
import KratosMultiphysics as KM
# CoSimulation imports
import KratosMultiphysics.CoSimulationApplication.co_simulation_tools as cs_tools
import KratosMultiphysics.CoSimulationApplication.colors as colors
class CoSimulationConvergenceCriteria(object):
"""Baseclass for the convergence criteria used for CoSimulation
Checks if convergence was achieved in a (strongly) coupled simulation
"""
def __init__(self, settings):
self.settings = settings
self.settings.RecursivelyValidateAndAssignDefaults(self._GetDefaultSettings())
self.echo_level = self.settings["echo_level"].GetInt()
def Initialize(self):
pass
def Finalize(self):
pass
def InitializeSolutionStep(self):
pass
def FinalizeSolutionStep(self):
pass
def InitializeNonLinearIteration(self):
pass
def FinalizeNonLinearIteration(self):
pass
def IsConverged(self, residual, current_data):
raise NotImplementedError('"IsConverged" has to be implemented in the derived class!')
def PrintInfo(self):
cs_tools.cs_print_info("Convergence Criteria", colors.bold(self._ClassName()))
def Check(self):
cs_tools.cs_print_warning("Convergence Criteria", colors.bold(self._ClassName()), 'does not implement "Check"')
@classmethod
def _ClassName(cls):
return cls.__name__
@classmethod
def _GetDefaultSettings(cls):
return KM.Parameters("""{
"type" : "UNSPECIFIED",
"solver" : "UNSPECIFIED",
"data_name" : "UNSPECIFIED",
"echo_level" : 0
}""")
| 30.133333
| 131
| 0.69469
|
4a1786075e3cd1f186a6269408e906d0f981e472
| 3,163
|
py
|
Python
|
sdk/python/pulumi_azure_native/subscription/v20191001preview/get_subscription_alias.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/subscription/v20191001preview/get_subscription_alias.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/subscription/v20191001preview/get_subscription_alias.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetSubscriptionAliasResult',
'AwaitableGetSubscriptionAliasResult',
'get_subscription_alias',
]
@pulumi.output_type
class GetSubscriptionAliasResult:
"""
Subscription Information with the alias.
"""
def __init__(__self__, id=None, name=None, properties=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if properties and not isinstance(properties, dict):
raise TypeError("Expected argument 'properties' to be a dict")
pulumi.set(__self__, "properties", properties)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified ID for the alias resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
Alias ID.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> 'outputs.PutAliasResponsePropertiesResponse':
"""
Put Alias response properties.
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type, Microsoft.Subscription/aliases.
"""
return pulumi.get(self, "type")
class AwaitableGetSubscriptionAliasResult(GetSubscriptionAliasResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetSubscriptionAliasResult(
id=self.id,
name=self.name,
properties=self.properties,
type=self.type)
def get_subscription_alias(alias_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSubscriptionAliasResult:
"""
Subscription Information with the alias.
:param str alias_name: Alias Name
"""
__args__ = dict()
__args__['aliasName'] = alias_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:subscription/v20191001preview:getSubscriptionAlias', __args__, opts=opts, typ=GetSubscriptionAliasResult).value
return AwaitableGetSubscriptionAliasResult(
id=__ret__.id,
name=__ret__.name,
properties=__ret__.properties,
type=__ret__.type)
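# Example usage (hypothetical alias name; requires Azure credentials and a
# running Pulumi program):
#
#     alias = get_subscription_alias(alias_name="my-alias")
#     pulumi.export("aliasProperties", alias.properties)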
| 30.708738
| 161
| 0.643377
|
4a1786312ec2a1f9cc548ea01bf8aa510552f16f
| 83
|
py
|
Python
|
estructura_repeticion.py
|
BrayanTorres2/Aprende_a_programar_Eanx
|
1a6818a4da34fea8eeb0e93f5f6b67bdf63fd109
|
[
"MIT"
] | null | null | null |
estructura_repeticion.py
|
BrayanTorres2/Aprende_a_programar_Eanx
|
1a6818a4da34fea8eeb0e93f5f6b67bdf63fd109
|
[
"MIT"
] | null | null | null |
estructura_repeticion.py
|
BrayanTorres2/Aprende_a_programar_Eanx
|
1a6818a4da34fea8eeb0e93f5f6b67bdf63fd109
|
[
"MIT"
] | null | null | null |
suma = 0
# Sum every even number in [97, 1003] (range's upper bound is exclusive).
for i in range(97, 1004):
    if i % 2 == 0:
        suma = suma + i
print(suma)
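# Sanity check via the arithmetic series formula: the even numbers 98..1002
# comprise (1002 - 98)/2 + 1 = 453 terms, so the sum is 453*(98+1002)/2 = 249150.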
| 16.6
| 25
| 0.542169
|
4a17864f6ac8bf6811e5e43267a2ab8f9134aeb9
| 320
|
py
|
Python
|
manga_py/providers/manhwa_club.py
|
sonvt1710/manga-py
|
848a78e93b890af0c92056a1a9fc7f6ce5707cf6
|
[
"MIT"
] | 337
|
2019-08-27T16:14:50.000Z
|
2022-03-29T09:58:22.000Z
|
manga_py/providers/manhwa_club.py
|
sonvt1710/manga-py
|
848a78e93b890af0c92056a1a9fc7f6ce5707cf6
|
[
"MIT"
] | 225
|
2019-08-25T15:02:01.000Z
|
2022-03-31T06:36:09.000Z
|
manga_py/providers/manhwa_club.py
|
sonvt1710/manga-py
|
848a78e93b890af0c92056a1a9fc7f6ce5707cf6
|
[
"MIT"
] | 41
|
2019-10-04T13:28:02.000Z
|
2022-03-19T08:18:34.000Z
|
from .rawdevart_com_old import RawDevArtComOld
class ManhwaClub(RawDevArtComOld):
    # matches chapter URLs such as /chapter-12 or /chapter-12.5
    _chapter_selector = r'/chapter-(\d+(?:\.\d+)?)'
def get_content(self):
return self._get_content('{}/manhwa/{}')
def get_manga_name(self) -> str:
return self._get_name('/manhwa/([^/]+)')
main = ManhwaClub
| 21.333333
| 51
| 0.65
|
4a1786f80bc0bfb48c2025ccbe9774d62da7195a
| 9,521
|
py
|
Python
|
pyrustic/manager/handler/init_handler.py
|
tutlane/pyrustic
|
b20653aef35735480e02eff04f8bb60570364ea8
|
[
"MIT"
] | null | null | null |
pyrustic/manager/handler/init_handler.py
|
tutlane/pyrustic
|
b20653aef35735480e02eff04f8bb60570364ea8
|
[
"MIT"
] | null | null | null |
pyrustic/manager/handler/init_handler.py
|
tutlane/pyrustic
|
b20653aef35735480e02eff04f8bb60570364ea8
|
[
"MIT"
] | null | null | null |
import os
import os.path
import pkgutil
from pyrustic.manager.misc import funcs
from pyrustic.jasonix import Jasonix
class InitHandler:
"""
Description
-----------
Use this command to init your project.
Pyrustic Manager will install a basic
project structure in your project.
The PROJECT_DIR is the project's root
directory.
The APP_DIR is the directory of your
source code.
The APP_PKG is simply the name of the
root package of your source code.
Usage
-----
- Description: Init your project
- Command: init
Example
-------
Assume the linked target is:
/home/alex/demo
This target is also your project root
directory. And 'demo' is your project's
name. So let's assume that your target
is an empty directory.
When you issue the command 'init', this
is what the project root will look like:
demo # target or PROJECT_ROOT
demo # APP_PKG or APP_DIR, source here
__main__.py # entry point
__init__.py
version.py # __version__ = "0.0.1"
view # the demo.view package
main_view.py # module
pyrustic_data # folder
hubstore.json
gui.json # configure your GUI
tests
__init__.py
setup.py
setup.cfg # edit your project config
pyproject.toml
MANIFEST.in # don't worry, I take care
So when you want to add a file "my_file.txt"
and the module "mod.py" in the package
    demo.view, you issue this command in the manager:
- add demo.view my_file.txt mod.py
"""
def __init__(self, target, app_pkg, args):
self._target = target
self._app_pkg = app_pkg
self._process(args)
def _process(self, args):
if not self._target:
print("You should link a Target first")
return
if args:
print("Wrong usage of this command")
return
# ask for app_pkg
self._set_app_pkg()
# create package
self._make_packages()
# create folders
self._make_folders()
# add files
self._add_files()
# add json data files
self._add_json_data_files()
print("Successfully initialized !")
def _make_packages(self):
hooking_pkg = "{}.hooking".format(self._app_pkg)
packages = (self._app_pkg, "tests", hooking_pkg)
for package in packages:
funcs.build_package(self._target, package)
app_dir = os.path.join(self._target, self._app_pkg)
packages = ("view", )
for package in packages:
funcs.build_package(app_dir, package)
def _make_folders(self):
folders = ("pyrustic_data",)
for folder in folders:
path = os.path.join(self._target, self._app_pkg, folder)
if os.path.exists(path):
continue
os.mkdir(path)
def _add_files(self):
resource_prefix = "manager/template/"
# add version.py
resource = resource_prefix + "version_template.txt"
app_dir = os.path.join(self._target, self._app_pkg)
dest_path = os.path.join(app_dir, "version.py")
data = pkgutil.get_data("pyrustic", resource).decode("utf-8")
self._add_file(dest_path, data)
# add __main__.py
resource = resource_prefix + "main_template.txt"
dest_path = os.path.join(app_dir, "__main__.py")
data = pkgutil.get_data("pyrustic", resource).decode("utf-8")
data = data.format(app_pkg=self._app_pkg)
self._add_file(dest_path, data)
# add main_view.py
resource = resource_prefix + "main_view_template.txt"
dest_path = os.path.join(app_dir, "view", "main_view.py")
data = pkgutil.get_data("pyrustic", resource).decode("utf-8")
self._add_file(dest_path, data)
# add .gitignore
resource = resource_prefix + "gitignore_template.txt"
dest_path = os.path.join(self._target, ".gitignore")
data = pkgutil.get_data("pyrustic", resource).decode("utf-8")
self._add_file(dest_path, data)
# add LICENSE
resource = resource_prefix + "license_template.txt"
dest_path = os.path.join(self._target, "LICENSE")
data = pkgutil.get_data("pyrustic", resource).decode("utf-8")
self._add_file(dest_path, data)
# add README.md
resource = resource_prefix + "readme_template.txt"
dest_path = os.path.join(self._target, "README.md")
data = pkgutil.get_data("pyrustic", resource).decode("utf-8")
self._add_file(dest_path, data)
# add MANIFEST.in
resource = resource_prefix + "manifest_template.txt"
dest_path = os.path.join(self._target, "MANIFEST.in")
data = pkgutil.get_data("pyrustic", resource).decode("utf-8")
data = data.format(app_pkg=self._app_pkg)
self._add_file(dest_path, data)
# add setup.py
resource = resource_prefix + "setup_py_template.txt"
dest_path = os.path.join(self._target, "setup.py")
data = pkgutil.get_data("pyrustic", resource).decode("utf-8")
self._add_file(dest_path, data)
# add setup.cfg
resource = resource_prefix + "setup_cfg_template.txt"
dest_path = os.path.join(self._target, "setup.cfg")
data = pkgutil.get_data("pyrustic", resource).decode("utf-8")
data = data.format(project_name=os.path.basename(self._target),
app_pkg=self._app_pkg)
self._add_file(dest_path, data)
# add pyproject.toml
resource = resource_prefix + "pyproject_template.txt"
dest_path = os.path.join(self._target, "pyproject.toml")
data = pkgutil.get_data("pyrustic", resource).decode("utf-8")
self._add_file(dest_path, data)
# add pre_building_hook.py
resource = resource_prefix + "pre_building_hook_template.txt"
dest_path = os.path.join(self._target, self._app_pkg,
"hooking",
"pre_building_hook.py")
data = pkgutil.get_data("pyrustic", resource).decode("utf-8")
self._add_file(dest_path, data)
# add post_building_hook.py
resource = resource_prefix + "post_building_hook_template.txt"
dest_path = os.path.join(self._target, self._app_pkg,
"hooking",
"post_building_hook.py")
data = pkgutil.get_data("pyrustic", resource).decode("utf-8")
self._add_file(dest_path, data)
# add pre_publishing_hook.py
resource = resource_prefix + "pre_publishing_hook_template.txt"
dest_path = os.path.join(self._target, self._app_pkg,
"hooking",
"pre_publishing_hook.py")
data = pkgutil.get_data("pyrustic", resource).decode("utf-8")
self._add_file(dest_path, data)
# add post_publishing_hook.py
resource = resource_prefix + "post_publishing_hook_template.txt"
dest_path = os.path.join(self._target, self._app_pkg,
"hooking",
"post_publishing_hook.py")
data = pkgutil.get_data("pyrustic", resource).decode("utf-8")
self._add_file(dest_path, data)
def _add_json_data_files(self):
local_pyrustic_data_folder = os.path.join(self._target,
self._app_pkg,
"pyrustic_data")
resource_prefix = "manager/default_json/pyrustic_data/"
# add dev.json
path = os.path.join(local_pyrustic_data_folder, "dev.json")
default_resource = resource_prefix + "dev_default.json"
data = pkgutil.get_data("pyrustic", default_resource)
if not os.path.exists(path):
with open(path, "wb") as file:
file.write(data)
jasonix = Jasonix(path)
jasonix.data["hooking_pkg"] = "{}.hooking".format(self._app_pkg)
jasonix.save()
# add gui.json
path = os.path.join(local_pyrustic_data_folder, "gui.json")
default_resource = resource_prefix + "gui_default.json"
data = pkgutil.get_data("pyrustic", default_resource)
if not os.path.exists(path):
with open(path, "wb") as file:
file.write(data)
# add publishing.json
path = os.path.join(local_pyrustic_data_folder,
"publishing.json")
default_resource = resource_prefix + "publishing_default.json"
data = pkgutil.get_data("pyrustic", default_resource)
if not os.path.exists(path):
with open(path, "wb") as file:
file.write(data)
# add hubstore.json
path = os.path.join(local_pyrustic_data_folder, "hubstore.json")
default_resource = resource_prefix + "hubstore_default.json"
data = pkgutil.get_data("pyrustic", default_resource)
if not os.path.exists(path):
with open(path, "wb") as file:
file.write(data)
def _add_file(self, path, data):
if os.path.exists(path):
return
with open(path, "w") as file:
file.write(data)
def _set_app_pkg(self):
if self._app_pkg is not None:
return
self._app_pkg = os.path.basename(self._target)
| 40.34322
| 72
| 0.603088
|
4a178764ca89d88643700d816ba5d2fe97aac0d1
| 3,411
|
py
|
Python
|
build/service/log_pb2.py
|
evanshih1999/IoTHW
|
068f2fbe9fde82e20b0bc4a6e44a876546335fc4
|
[
"MIT"
] | null | null | null |
build/service/log_pb2.py
|
evanshih1999/IoTHW
|
068f2fbe9fde82e20b0bc4a6e44a876546335fc4
|
[
"MIT"
] | null | null | null |
build/service/log_pb2.py
|
evanshih1999/IoTHW
|
068f2fbe9fde82e20b0bc4a6e44a876546335fc4
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: log.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='log.proto',
package='',
syntax='proto3',
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\tlog.proto\"\x0c\n\nLogRequest\"\x1b\n\x0bLogResponse\x12\x0c\n\x04\x64\x61ta\x18\x01 \x03(\x05\x32\'\n\x03Log\x12 \n\x03Get\x12\x0b.LogRequest\x1a\x0c.LogResponseb\x06proto3'
)
_LOGREQUEST = _descriptor.Descriptor(
name='LogRequest',
full_name='LogRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=13,
serialized_end=40,
)
_LOGRESPONSE = _descriptor.Descriptor(
name='LogResponse',
full_name='LogResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
      name='data', full_name='LogResponse.data', index=0,  # matches the field name in serialized_pb
number=1, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=42,
serialized_end=70,
)
DESCRIPTOR.message_types_by_name['LogRequest'] = _LOGREQUEST
DESCRIPTOR.message_types_by_name['LogResponse'] = _LOGRESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
LogRequest = _reflection.GeneratedProtocolMessageType('LogRequest', (_message.Message,), {
'DESCRIPTOR' : _LOGREQUEST,
'__module__' : 'log_pb2'
# @@protoc_insertion_point(class_scope:LogRequest)
})
_sym_db.RegisterMessage(LogRequest)
LogResponse = _reflection.GeneratedProtocolMessageType('LogResponse', (_message.Message,), {
'DESCRIPTOR' : _LOGRESPONSE,
'__module__' : 'log_pb2'
# @@protoc_insertion_point(class_scope:LogResponse)
})
_sym_db.RegisterMessage(LogResponse)
_LOGCALCULATOR = _descriptor.ServiceDescriptor(
  name='Log',
  full_name='Log',
file=DESCRIPTOR,
index=0,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=72,
serialized_end=125,
methods=[
_descriptor.MethodDescriptor(
    name='Get',
    full_name='Log.Get',
index=0,
containing_service=None,
input_type=_LOGREQUEST,
output_type=_LOGRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_LOGCALCULATOR)
DESCRIPTOR.services_by_name['Log'] = _LOGCALCULATOR
# @@protoc_insertion_point(module_scope)
| 26.038168
| 196
| 0.75667
|
4a17878f3508cf6793010c8e0f98832c9c9ca4fd
| 342
|
py
|
Python
|
particle_filter_student/scripts/common/Particle.py
|
EdMlt/Particle_Filter_plane_simulator
|
12bb30c62baf6d5302b04bc456eadc02258efa9e
|
[
"Apache-2.0"
] | null | null | null |
particle_filter_student/scripts/common/Particle.py
|
EdMlt/Particle_Filter_plane_simulator
|
12bb30c62baf6d5302b04bc456eadc02258efa9e
|
[
"Apache-2.0"
] | null | null | null |
particle_filter_student/scripts/common/Particle.py
|
EdMlt/Particle_Filter_plane_simulator
|
12bb30c62baf6d5302b04bc456eadc02258efa9e
|
[
"Apache-2.0"
] | null | null | null |
# from collections import namedtuple
# MyStruct = namedtuple("MyStruct", "field1 field2 field3")
class Particle:
x = 0
y = 0
w = 0
    proba = 0
    def __init__(self, x_0, y_0, w_0, proba_0):
        self.x = x_0
        self.y = y_0
        self.w = w_0
        self.proba = proba_0
    def id(self):
        return str(self.x) + '_' + str(self.y)
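# Example (hypothetical values):
#
#     p = Particle(1.0, 2.0, 0.5, 0.1)
#     p.id()  # -> '1.0_2.0'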
| 20.117647
| 59
| 0.593567
|
4a1787a9b324c7199b3378b0d97584bc8057ed23
| 295
|
py
|
Python
|
Desafios/Desafio52.py
|
Felix-xilef/Curso-de-Python
|
cdff7c7f3850e6326e274c8c1987b9e1a18ce910
|
[
"MIT"
] | null | null | null |
Desafios/Desafio52.py
|
Felix-xilef/Curso-de-Python
|
cdff7c7f3850e6326e274c8c1987b9e1a18ce910
|
[
"MIT"
] | null | null | null |
Desafios/Desafio52.py
|
Felix-xilef/Curso-de-Python
|
cdff7c7f3850e6326e274c8c1987b9e1a18ce910
|
[
"MIT"
] | null | null | null |
numero = int(input('\n\tEnter an integer: '))
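# trial division: any divisor in [2, numero) proves numero is not prime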
for i in range(2, numero):
    if numero % i == 0:
        print('\n\tThe number is not prime!')
        input('\n\nPress <enter> to continue')
        exit()
print('\n\tThe number is prime!')
input('\n\nPress <enter> to continue')
| 29.5
| 53
| 0.610169
|
4a1787f695c0c7ed035b0083b8dfdb493bfe3e48
| 2,039
|
py
|
Python
|
gslab_scons/builders/build_lyx.py
|
lboxell/gslab_python
|
0e6d687962146d8745cd80d5c888c69647863d2d
|
[
"MIT"
] | null | null | null |
gslab_scons/builders/build_lyx.py
|
lboxell/gslab_python
|
0e6d687962146d8745cd80d5c888c69647863d2d
|
[
"MIT"
] | null | null | null |
gslab_scons/builders/build_lyx.py
|
lboxell/gslab_python
|
0e6d687962146d8745cd80d5c888c69647863d2d
|
[
"MIT"
] | null | null | null |
import os
import subprocess
import shutil
import gslab_scons.misc as misc
from gslab_scons import log_timestamp
from gslab_scons._exception_classes import ExecCallError
def build_lyx(target, source, env):
'''Compile a pdf from a LyX file
This function is a SCons builder that compiles a .lyx file
as a pdf and places it at the path specified by target.
Parameters
----------
target: string or list
The target of the SCons command. This should be the path
of the pdf that the builder is instructed to compile.
source: string or list
The source of the SCons command. This should
be the .lyx file that the function will compile as a PDF.
env: SCons construction environment, see SCons user guide 7.2
'''
# Prelims
source = misc.make_list_if_string(source)
target = misc.make_list_if_string(target)
source_file = str(source[0])
    misc.check_code_extension(source_file, '.lyx')
    # Set up target file and log file
    newpdf = source_file.replace('.lyx', '.pdf')
    target_file = str(target[0])
    target_dir = misc.get_directory(target_file)
    start_time = misc.current_time()
try:
log_ext = '_%s' % env['log_ext']
except KeyError:
log_ext = ''
log_file = os.path.join(target_dir, ('sconscript%s.log' % log_ext))
# System call
try:
command = 'lyx -e pdf2 %s > %s' % (source_file, log_file)
subprocess.check_output(command,
stderr = subprocess.STDOUT,
shell = True)
# Move rendered pdf to the target
shutil.move(newpdf, target_file)
except subprocess.CalledProcessError:
message = misc.command_error_msg("lyx", command)
raise ExecCallError(message)
# Close log
end_time = misc.current_time()
log_timestamp(start_time, end_time, log_file)
return None
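# A minimal SCons usage sketch (hypothetical SConstruct snippet; names are
# illustrative, not part of this module):
#     env.Append(BUILDERS={'Lyx': Builder(action=build_lyx)})
#     env.Lyx(target='output/paper.pdf', source='paper.lyx')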
| 31.369231
| 71
| 0.647376
|
4a178909df3f6b71deadf502474eaa54f2aef715
| 7,576
|
py
|
Python
|
vgg_example.py
|
hvkwak/vgg-face.pytorch
|
eef10b358ff61dbdfe0192cba865fcc988c29ec2
|
[
"MIT"
] | null | null | null |
vgg_example.py
|
hvkwak/vgg-face.pytorch
|
eef10b358ff61dbdfe0192cba865fcc988c29ec2
|
[
"MIT"
] | null | null | null |
vgg_example.py
|
hvkwak/vgg-face.pytorch
|
eef10b358ff61dbdfe0192cba865fcc988c29ec2
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchfile
import cv2 as cv
import numpy as np
import os
# import tqdm
from os import listdir
from os.path import isfile, join, isdir
class VGG(nn.Module):
def __init__(self):
super().__init__()
self.block_size = [2, 2, 3, 3, 3]
        # 3 input image channels, 64 output channels, 3x3 square convolution
self.conv1_1 = nn.Conv2d(3, 64, 3, padding = 1)
# 64 feature maps again to 64 feature maps
self.conv1_2 = nn.Conv2d(64, 64, 3, padding = 1)
self.conv2_1 = nn.Conv2d(64, 128, 3, padding = 1)
self.conv2_2 = nn.Conv2d(128, 128, 3, padding = 1)
self.conv3_1 = nn.Conv2d(128, 256, 3, padding = 1)
self.conv3_2 = nn.Conv2d(256, 256, 3, padding = 1)
self.conv3_3 = nn.Conv2d(256, 256, 3, padding = 1)
self.conv3_4 = nn.Conv2d(256, 256, 3, padding = 1)
self.conv4_1 = nn.Conv2d(256, 512, 3, padding = 1)
self.conv4_2 = nn.Conv2d(512, 512, 3, padding = 1)
self.conv4_3 = nn.Conv2d(512, 512, 3, padding = 1)
self.conv5_1 = nn.Conv2d(512, 512, 3, padding = 1)
self.conv5_2 = nn.Conv2d(512, 512, 3, padding = 1)
self.conv5_3 = nn.Conv2d(512, 512, 3, padding = 1)
self.FC6 = nn.Linear(512 * 7 * 7, 4096) # 7 * 7 from image dimension
self.FC7 = nn.Linear(4096, 4096)
self.FC8 = nn.Linear(4096, 2622) # 2622 classes
def forward(self, x):
# input x.dim = (224, 224, 3)
x = F.relu(self.conv1_1(x))
x = F.relu(self.conv1_2(x))
x = F.max_pool2d(x, (2, 2)) # max pooling, window size of (2, 2)
x = F.relu(self.conv2_1(x))
x = F.relu(self.conv2_2(x))
x = F.max_pool2d(x, (2, 2))
x = F.relu(self.conv3_1(x))
x = F.relu(self.conv3_2(x))
x = F.relu(self.conv3_3(x))
x = F.max_pool2d(x, (2, 2))
x = F.relu(self.conv4_1(x))
x = F.relu(self.conv4_2(x))
x = F.relu(self.conv4_3(x))
x = F.max_pool2d(x, (2, 2))
x = F.relu(self.conv5_1(x))
x = F.relu(self.conv5_2(x))
x = F.relu(self.conv5_3(x))
x = F.max_pool2d(x, (2, 2))
# flatten the feature maps: (1, N), where -1 does the job to calculate N
x = x.view(x.size(0), -1)
# FCs
x = F.relu(self.FC6(x))
x = F.dropout(x, 0.5, self.training)
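        # x7 (the FC7 activation) doubles as the 4096-d face descriptor
        # consumed by compute_descriptors()/classifier() below; FC8 yields
        # the 2622-way class logits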
x7 = F.relu(self.FC7(x))
x8 = F.dropout(x7, 0.5, self.training)
return(x7, self.FC8(x8))
def load_weights(self, path="/home/hyobin/Documents/SoSe20/SHK/vgg_face_torch/VGG_FACE.t7"):
""" Function to load luatorch pretrained
Args:
path: path for the luatorch pretrained
"""
model = torchfile.load(path)
counter = 1
block = 1
for i, layer in enumerate(model.modules):
if layer.weight is not None:
if block <= 5:
self_layer = getattr(self, "conv%d_%d" % (block, counter))
counter += 1
if counter > self.block_size[block - 1]:
counter = 1
block += 1
self_layer.weight.data[...] = torch.tensor(layer.weight).view_as(self_layer.weight)[...]
self_layer.bias.data[...] = torch.tensor(layer.bias).view_as(self_layer.bias)[...]
else:
self_layer = getattr(self, "FC%d" % (block))
block += 1
self_layer.weight.data[...] = torch.tensor(layer.weight).view_as(self_layer.weight)[...]
self_layer.bias.data[...] = torch.tensor(layer.bias).view_as(self_layer.bias)[...]
def compute_descriptors(phase): # computes descriptors of Train/Test dataset
# phase: 'Train' or 'Test'
model = VGG()
model.load_weights()
mypath = "/home/hyobin/Documents/vgg-face.pytorch/images/"+phase
people = np.sort(listdir(mypath)) # additional sort() needed.
# number of images and memory for labels
num_imgs = 0
for k in range(len(people)):
num_imgs = num_imgs + len(listdir(mypath + "/" + people[k]))
labels = torch.zeros(num_imgs, dtype = int)
# compute descriptors per person:
for i in range(len(people)):
print(i)
img_path = mypath + "/" + people[i]
n = len(listdir(img_path))
descriptors = torch.zeros([n, 4096])
labels[n*i:n*(i+1)] = i+1
img_names = listdir(img_path)
for k in range(n): # n images per person
img_name = img_names[k]
img = cv.imread(img_path + "/" + img_name)
img = cv.resize(img, (224, 224))
img = torch.Tensor(img).permute(2, 0, 1).view(1, 3, 224, 224)
model.eval()
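            # subtract the per-channel mean pixel values in BGR order (cv.imread
            # loads BGR); these are presumably the VGG-Face training-set means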
img -= torch.Tensor(np.array([129.1863, 104.7624, 93.5940])).view(1, 3, 1, 1)
descriptor = model(img)[0]
descriptors[k, :] = descriptor
torch.save(descriptors, phase + '_descriptors{}.pt'.format(i))
torch.save(labels, phase + '_labels.pt')
def classifier(test_img):
mypath = "/home/hyobin/Documents/vgg-face.pytorch/descriptors/descriptors/"
n = len(listdir(mypath))
# Startwert: Infinity
best_distance = np.Inf
best_label = n+1
# per class compute the best Nearest Neighbor(NN)
for i in range(n):
descriptor = torch.load(mypath+"Train_descriptors{}.pt".format(i))
# compute Euclidean Distance:
NN = torch.min(torch.sum((descriptor - test_img)**2, 1)) # row sums
if NN < best_distance:
best_distance = NN
best_label = i
return(best_distance, best_label)
def einzel_test(img_name):
model = VGG()
model.load_weights()
img = cv.imread(img_name)
img = cv.resize(img, (224, 224))
img = torch.Tensor(img).permute(2, 0, 1).view(1, 3, 224, 224)
model.eval()
img -= torch.Tensor(np.array([129.1863, 104.7624, 93.5940])).view(1, 3, 1, 1)
test_img = model(img)[0]
__, NN_label = classifier(test_img)
    # for the single-image test the label must be incremented by 1 (labels are 1-based)
print(NN_label+1)
def test():
model = VGG()
model.load_weights()
testpath = "/home/hyobin/Documents/vgg-face.pytorch/images/"+"Test"
people = np.sort(listdir(testpath)) # additional sort() needed.
result_mat = torch.zeros((len(people), len(people)), dtype=int)
# compute descriptors per person:
for i in range(len(people)): # Here comes the label.
print(i)
img_path = testpath + "/" + people[i]
img_names = listdir(img_path)
n = len(img_names)
for k in range(n): # n images per person
img_name = img_names[k]
img = cv.imread(img_path + "/" + img_name)
img = cv.resize(img, (224, 224))
img = torch.Tensor(img).permute(2, 0, 1).view(1, 3, 224, 224)
model.eval()
img -= torch.Tensor(np.array([129.1863, 104.7624, 93.5940])).view(1, 3, 1, 1)
test_img = model(img)[0]
__, NN_label = classifier(test_img)
result_mat[i, NN_label] = result_mat[i, NN_label] + 1
print(result_mat)
return(result_mat)
if __name__ == "__main__":
print("Einzeltest Result:")
einzel_test("/home/hyobin/Documents/vgg-face.pytorch/images/Test/20_Hyovin/00020_0303202011129.png")
print("")
result_mat = test()
print(result_mat)
print(sum(torch.diag(result_mat))/torch.sum(result_mat).double())
# tensor(0.9961, dtype=torch.float64)
| 37.137255
| 108
| 0.572994
|
4a178b19c4c0caeeb41c171fa0096db5acbad6a9
| 4,879
|
py
|
Python
|
pyqtgraph/examples/_paramtreecfg.py
|
StSav012/pyqtgraph
|
65e17c4e3707eb3bd4d91cdc13504d9b150f4360
|
[
"MIT"
] | 1
|
2022-01-30T20:04:51.000Z
|
2022-01-30T20:04:51.000Z
|
pyqtgraph/examples/_paramtreecfg.py
|
StSav012/pyqtgraph
|
65e17c4e3707eb3bd4d91cdc13504d9b150f4360
|
[
"MIT"
] | null | null | null |
pyqtgraph/examples/_paramtreecfg.py
|
StSav012/pyqtgraph
|
65e17c4e3707eb3bd4d91cdc13504d9b150f4360
|
[
"MIT"
] | null | null | null |
import numpy as np
from pyqtgraph.parametertree.parameterTypes import QtEnumParameter as enum
from pyqtgraph.Qt import QtWidgets
dlg = QtWidgets.QFileDialog
cfg = {
'list': {
'limits': {
'type': 'checklist',
'limits': ['a', 'b', 'c']
}
},
'file': {
'acceptMode': {
'type': 'list',
'limits': list(enum(dlg.AcceptMode, dlg).enumMap)
},
'fileMode': {
'type': 'list',
'limits': list(enum(dlg.FileMode, dlg).enumMap)
},
'viewMode': {
'type': 'list',
'limits': list(enum(dlg.ViewMode, dlg).enumMap)
},
'dialogLabel': {
'type': 'list',
'limits': list(enum(dlg.DialogLabel, dlg).enumMap)
},
'relativeTo': {
'type': 'str',
'value': None
},
'directory': {
'type': 'str',
'value': None
},
'windowTitle': {
'type': 'str',
'value': None
},
'nameFilter': {
'type': 'str',
'value': None
}
},
'float': {
'Float Information': {
'type': 'str',
'readonly': True,
'value': 'Note that all options except "finite" also apply to "int" parameters',
},
'step': {
'type': 'float',
'limits': [0, None],
'value': 1,
},
'limits': {
'type': 'list',
'limits': {'[0, None]': [0, None], '[1, 5]': [1, 5]},
},
'suffix': {
'type': 'list',
'limits': ['Hz', 's', 'm'],
},
'siPrefix': {
'type': 'bool',
'value': True
},
'finite': {
'type': 'bool',
'value': True,
},
'dec': {
'type': 'bool',
'value': False,
},
'minStep': {
'type': 'float',
'value': 1.0e-12,
},
},
'checklist': {
'limits': {
'type': 'checklist',
'limits': ['one', 'two', 'three', 'four'],
},
'exclusive': {
'type': 'bool',
'value': False,
},
'delay': {
'type': 'float',
'value': 1.0,
'limits': [0, None]
}
},
'pen': {
'Pen Information': {
'type': 'str',
'value': 'Click the button to see options',
'readonly': True,
},
},
'slider': {
'step': {
'type': 'float',
'limits': [0, None],
            'value': 1,
        },
'format': {
'type': 'str',
'value': '{0:>3}',
},
'precision': {
'type': 'int',
'value': 2,
'limits': [1, None],
},
'span': {
'type': 'list',
'limits': {'linspace(-pi, pi)': np.linspace(-np.pi, np.pi), 'arange(10)**2': np.arange(10) ** 2},
},
'How to Set': {
'type': 'list',
'limits': ['Use span', 'Use step + limits'],
}
},
'calendar': {
'format': {
'type': 'str',
'value': 'MM DD',
}
},
'Applies to All Types': {
'Extra Information': {
'type': 'text',
'value': 'These apply to all parameters. Watch how this text box is altered by any setting you change.',
'default': 'These apply to all parameters. Watch how this text box is altered by any setting you change.',
'readonly': True,
},
'readonly': {
'type': 'bool',
'value': True,
},
'removable': {
'type': 'bool',
'tip': 'Adds a context menu option to remove this parameter',
'value': False,
},
'visible': {
'type': 'bool',
'value': True,
},
'disabled': {
'type': 'bool',
'value': False,
},
'title': {
'type': 'str',
'value': 'Meta Options',
},
'default': {
'tip': 'The default value that gets set when clicking the arrow in the right column',
'type': 'str',
},
'expanded': {
'type': 'bool',
'value': True,
},
},
'No Extra Options': {
'text': 'Unlike the other parameters shown, these don\'t have extra settable options.\n' \
+ 'Note: "int" *does* have the same options as float, mentioned above',
'int': 10,
'str': 'Hi, world!',
'color': '#fff',
'bool': False,
'colormap': None,
'progress': 50,
'action': None,
'font': 'Inter',
}
}
| 25.279793
| 118
| 0.3843
|
4a178b9876c56dbc502b8f9853456f59a804c769
| 2,684
|
py
|
Python
|
make/internal/directive.py
|
tompis/casual
|
d838716c7052a906af8a19e945a496acdc7899a2
|
[
"MIT"
] | null | null | null |
make/internal/directive.py
|
tompis/casual
|
d838716c7052a906af8a19e945a496acdc7899a2
|
[
"MIT"
] | null | null | null |
make/internal/directive.py
|
tompis/casual
|
d838716c7052a906af8a19e945a496acdc7899a2
|
[
"MIT"
] | null | null | null |
'''
Created on 13 maj 2012
@author: hbergk
'''
import os
import sys
import re
from casual.make.output import Output
#
# Defines the return value of the "all" functions, which can be used
# in other functions, such as Install.
#
class Target:
def __init__(self, output, source = '', name = None, operation = None):
#
# Storage of eventual Output instance
#
if isinstance( output, Output):
self.output = output
else:
self.output = None
#
# Stem and filename
#
self.stem = None
filename = extract_name( output)
if operation:
filename = operation( filename)
self.stem = filename
if self.output:
filename += '.' + self.output.version.full_version()
self.file = filename
#
# dirname
#
self.dirname = os.path.dirname( filename)
#
# name
#
if name:
self.name = name
else:
self.name = target_name( filename)
#
# source
#
if isinstance( source, Output):
self.source = source.name
else:
self.source = source
#
# base
#
self.base = os.path.basename( self.source);
def base_extract( output, parameter):
if isinstance( output, Output):
return parameter
else:
        raise SystemError("Unknown output type")
def extract_name( output):
if isinstance( output, basestring):
return output
else:
return base_extract( output, output.name)
def normalize_string( string):
return re.sub( '[^\w]+', '_', string)
def target_name( name):
return 'target_' + normalize_string( os.path.basename( name))
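# e.g. target_name('lib/casual-common.so') -> 'target_casual_common_so'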
def multiline( values):
if isinstance( values, basestring):
values = values.split()
return ' \\\n '.join( values)
def validate_list( value):
if isinstance( value, basestring):
raise SyntaxError( 'not a list - content: ' + value);
def target_files( values):
names = []
for value in values:
if isinstance( value, Target):
names.append( value.file)
else:
names.append( value)
return names;
def target_base( values):
names = []
for value in values:
if isinstance( value, Target):
names.append( value.base)
else:
names.append( value)
return names;
def debug( message):
if os.getenv('PYTHONDEBUG'): sys.stderr.write( message + '\n')
| 21.301587
| 75
| 0.539493
|
4a178c2dceb5c7ef5e5665ff0bac79afdf6fd289
| 6,834
|
py
|
Python
|
networkx/algorithms/bipartite/cluster.py
|
tombeek111/networkx
|
0770b228e0aab5acf8842981947857fdf85205ab
|
[
"BSD-3-Clause"
] | 1
|
2019-12-03T14:58:04.000Z
|
2019-12-03T14:58:04.000Z
|
networkx/algorithms/bipartite/cluster.py
|
tombeek111/networkx
|
0770b228e0aab5acf8842981947857fdf85205ab
|
[
"BSD-3-Clause"
] | 1
|
2019-12-19T16:49:00.000Z
|
2019-12-20T06:22:46.000Z
|
networkx/algorithms/bipartite/cluster.py
|
tombeek111/networkx
|
0770b228e0aab5acf8842981947857fdf85205ab
|
[
"BSD-3-Clause"
] | 2
|
2020-02-13T10:33:34.000Z
|
2020-08-09T07:59:26.000Z
|
"""Functions for computing clustering of pairs
"""
import itertools
import networkx as nx
__all__ = ['clustering',
'average_clustering',
'latapy_clustering',
'robins_alexander_clustering']
def cc_dot(nu, nv):
return float(len(nu & nv)) / len(nu | nv)
def cc_max(nu, nv):
return float(len(nu & nv)) / max(len(nu), len(nv))
def cc_min(nu, nv):
return float(len(nu & nv)) / min(len(nu), len(nv))
modes = {'dot': cc_dot,
'min': cc_min,
'max': cc_max}
def latapy_clustering(G, nodes=None, mode='dot'):
r"""Compute a bipartite clustering coefficient for nodes.
    The bipartite clustering coefficient is a measure of local density
of connections defined as [1]_:
.. math::
c_u = \frac{\sum_{v \in N(N(u))} c_{uv} }{|N(N(u))|}
where `N(N(u))` are the second order neighbors of `u` in `G` excluding `u`,
and `c_{uv}` is the pairwise clustering coefficient between nodes
`u` and `v`.
The mode selects the function for `c_{uv}` which can be:
`dot`:
.. math::
c_{uv}=\frac{|N(u)\cap N(v)|}{|N(u) \cup N(v)|}
`min`:
.. math::
c_{uv}=\frac{|N(u)\cap N(v)|}{min(|N(u)|,|N(v)|)}
`max`:
.. math::
c_{uv}=\frac{|N(u)\cap N(v)|}{max(|N(u)|,|N(v)|)}
Parameters
----------
G : graph
A bipartite graph
nodes : list or iterable (optional)
Compute bipartite clustering for these nodes. The default
is all nodes in G.
mode : string
        The pairwise bipartite clustering method to be used in the computation.
It must be "dot", "max", or "min".
Returns
-------
clustering : dictionary
A dictionary keyed by node with the clustering coefficient value.
Examples
--------
>>> from networkx.algorithms import bipartite
>>> G = nx.path_graph(4) # path graphs are bipartite
>>> c = bipartite.clustering(G)
>>> c[0]
0.5
>>> c = bipartite.clustering(G,mode='min')
>>> c[0]
1.0
See Also
--------
robins_alexander_clustering
square_clustering
average_clustering
References
----------
.. [1] Latapy, Matthieu, Clémence Magnien, and Nathalie Del Vecchio (2008).
Basic notions for the analysis of large two-mode networks.
Social Networks 30(1), 31--48.
"""
if not nx.algorithms.bipartite.is_bipartite(G):
raise nx.NetworkXError("Graph is not bipartite")
try:
cc_func = modes[mode]
except KeyError:
raise nx.NetworkXError(
"Mode for bipartite clustering must be: dot, min or max")
if nodes is None:
nodes = G
ccs = {}
for v in nodes:
cc = 0.0
nbrs2 = set([u for nbr in G[v] for u in G[nbr]]) - set([v])
for u in nbrs2:
cc += cc_func(set(G[u]), set(G[v]))
if cc > 0.0: # len(nbrs2)>0
cc /= len(nbrs2)
ccs[v] = cc
return ccs
clustering = latapy_clustering
def average_clustering(G, nodes=None, mode='dot'):
r"""Compute the average bipartite clustering coefficient.
A clustering coefficient for the whole graph is the average,
.. math::
C = \frac{1}{n}\sum_{v \in G} c_v,
where `n` is the number of nodes in `G`.
Similar measures for the two bipartite sets can be defined [1]_
.. math::
C_X = \frac{1}{|X|}\sum_{v \in X} c_v,
where `X` is a bipartite set of `G`.
Parameters
----------
G : graph
a bipartite graph
nodes : list or iterable, optional
A container of nodes to use in computing the average.
The nodes should be either the entire graph (the default) or one of the
bipartite sets.
mode : string
        The pairwise bipartite clustering method.
It must be "dot", "max", or "min"
Returns
-------
clustering : float
The average bipartite clustering for the given set of nodes or the
entire graph if no nodes are specified.
Examples
--------
>>> from networkx.algorithms import bipartite
>>> G=nx.star_graph(3) # star graphs are bipartite
>>> bipartite.average_clustering(G)
0.75
>>> X,Y=bipartite.sets(G)
>>> bipartite.average_clustering(G,X)
0.0
>>> bipartite.average_clustering(G,Y)
1.0
See Also
--------
clustering
Notes
-----
The container of nodes passed to this function must contain all of the nodes
in one of the bipartite sets ("top" or "bottom") in order to compute
the correct average bipartite clustering coefficients.
See :mod:`bipartite documentation <networkx.algorithms.bipartite>`
for further details on how bipartite graphs are handled in NetworkX.
References
----------
.. [1] Latapy, Matthieu, Clémence Magnien, and Nathalie Del Vecchio (2008).
Basic notions for the analysis of large two-mode networks.
Social Networks 30(1), 31--48.
"""
if nodes is None:
nodes = G
ccs = latapy_clustering(G, nodes=nodes, mode=mode)
return float(sum(ccs[v] for v in nodes)) / len(nodes)
def robins_alexander_clustering(G):
r"""Compute the bipartite clustering of G.
Robins and Alexander [1]_ defined bipartite clustering coefficient as
four times the number of four cycles `C_4` divided by the number of
three paths `L_3` in a bipartite graph:
.. math::
CC_4 = \frac{4 * C_4}{L_3}
Parameters
----------
G : graph
a bipartite graph
Returns
-------
clustering : float
The Robins and Alexander bipartite clustering for the input graph.
Examples
--------
>>> from networkx.algorithms import bipartite
>>> G = nx.davis_southern_women_graph()
>>> print(round(bipartite.robins_alexander_clustering(G), 3))
0.468
See Also
--------
latapy_clustering
square_clustering
References
----------
.. [1] Robins, G. and M. Alexander (2004). Small worlds among interlocking
directors: Network structure and distance in bipartite graphs.
Computational & Mathematical Organization Theory 10(1), 69–94.
"""
if G.order() < 4 or G.size() < 3:
return 0
L_3 = _threepaths(G)
if L_3 == 0:
return 0
C_4 = _four_cycles(G)
return (4. * C_4) / L_3
def _four_cycles(G):
cycles = 0
for v in G:
for u, w in itertools.combinations(G[v], 2):
cycles += len((set(G[u]) & set(G[w])) - set([v]))
return cycles / 4
def _threepaths(G):
paths = 0
for v in G:
for u in G[v]:
for w in set(G[u]) - set([v]):
paths += len(set(G[w]) - set([v, u]))
# Divide by two because we count each three path twice
# one for each possible starting point
return paths / 2
| 24.67148
| 80
| 0.592918
|
4a178c31e6aefb80d22b1ef37e5a2d033438b840
| 248
|
py
|
Python
|
bindings/python/ensmallen_graph/datasets/linqs/__init__.py
|
caufieldjh/ensmallen_graph
|
14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a
|
[
"MIT"
] | null | null | null |
bindings/python/ensmallen_graph/datasets/linqs/__init__.py
|
caufieldjh/ensmallen_graph
|
14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a
|
[
"MIT"
] | null | null | null |
bindings/python/ensmallen_graph/datasets/linqs/__init__.py
|
caufieldjh/ensmallen_graph
|
14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a
|
[
"MIT"
] | null | null | null |
"""This sub-module offers methods to automatically retrieve the graphs from LINQS repository."""
from .citeseer import CiteSeer
from .cora import Cora
from .pubmeddiabetes import PubMedDiabetes
__all__ = [
"CiteSeer", "Cora", "PubMedDiabetes",
]
| 27.555556
| 96
| 0.774194
|
4a178c663e081d75ddb550a36bac78dec566804a
| 6,293
|
py
|
Python
|
base/learners/skill_discovery/base.py
|
lee15253/edl_bk
|
6777f5803138e6a64dabb096fe18a495728aabe3
|
[
"MIT"
] | null | null | null |
base/learners/skill_discovery/base.py
|
lee15253/edl_bk
|
6777f5803138e6a64dabb096fe18a495728aabe3
|
[
"MIT"
] | null | null | null |
base/learners/skill_discovery/base.py
|
lee15253/edl_bk
|
6777f5803138e6a64dabb096fe18a495728aabe3
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2019, salesforce.com, inc.
# All rights reserved.
# SPDX-License-Identifier: MIT
# For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/MIT
import torch
from ..base import BaseLearner
class BaseSkillDiscoveryLearner(BaseLearner):
def __init__(self, env_reward=False, hidden_size=128, num_layers=4, normalize_inputs=False, **kwargs):
self.env_reward = bool(env_reward)
self.hidden_size = int(hidden_size)
self.num_layers = int(num_layers)
self.normalize_inputs = bool(normalize_inputs)
super().__init__(**kwargs)
self.ep_summary_keys = ["cumulative_rew", "cumulative_im_rew", "cumulative_density_rew"]
def fill_summary(self, *values):
self._ep_summary = [float(sum([e['reward'] for e in self.agent.episode])),
float(sum([e.get('im_reward', 0.) for e in self.agent.episode])),
float(sum([e.get('density_model_reward', 0.) for e in self.agent.episode]))]
self._ep_summary += [v.item() for v in values]
def relabel_episode(self):
self._compress_me = []
for e in self.agent.episode:
# Optionally take into account extrinsic reward
r = e['env_reward'] * float(self.env_reward)
e['reward'] = r
self._compress_me.append(self.agent.episode)
# Add discriminator reward
self._add_im_reward()
def relabel_batch(self, batch):
# Compute intrinsic rewards
with torch.no_grad():
new_im_rew = self.im.surprisal(batch)
if self.density is not None:
new_density_rew = self.density.novelty(batch)
else:
new_density_rew = torch.zeros_like(new_im_rew)
# Make sure that weights for intrinsic rewards are not None
im_nu = self.im_nu if self.im_nu is not None else 0.
density_nu = self.density_nu if self.density_nu is not None else 0.
# Detach intrinsic rewards from computation graph
new_im_rew = new_im_rew.detach()
new_density_rew = new_density_rew.detach()
# Optionally take into account extrinsic reward
r = batch['env_reward'] * float(self.env_reward)
# Add intrinsic rewards
r += im_nu * new_im_rew + density_nu * new_density_rew
batch['reward'] = r
batch['im_reward'] = new_im_rew
batch['density_model_reward'] = new_density_rew
return batch
def _compute_surprisal(self, batched_episode):
return self.im.surprisal(batched_episode)
def _add_im_reward(self):
if self.im is not None:
for ep in self._compress_me:
batched_episode = {key: torch.stack([e[key] for e in ep]) for key in ep[0].keys()}
surprisals = self._compute_surprisal(batched_episode)
if self.im_scale:
self.train()
_ = self._im_bn(surprisals.view(-1, 1))
self.eval()
surprisals = surprisals / torch.sqrt(self._im_bn.running_var[0])
for e, s in zip(ep, surprisals):
e['reward'] = e['reward'].to('cuda')
e['reward'] += (self.im_nu * s.detach())
print("여기지나가나1?")
e['im_reward'] = s.detach()
def preprocess_skill(self, z, **kwargs):
return self.agent.preprocess_skill(z, **kwargs)
def get_values(self, batch):
return self.v_module(
batch['state'],
self.preprocess_skill(batch['skill']),
)
def get_terminal_values(self, batch):
return self.v_module(
batch['next_state'][-1:],
self.preprocess_skill(batch['skill'][-1:])
)
def get_policy_lprobs_and_nents(self, batch):
log_prob, n_ent, _ = self.policy(
batch['state'],
self.preprocess_skill(batch['skill']),
action_logit=batch['action_logit']
)
return log_prob.sum(dim=1), n_ent
def get_im_loss(self, batch):
self.im = self.im.to('cuda')
return self.im(batch)
def soft_update(self):
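        # Polyak-average the online value network into its target copy:
        # p_targ <- polyak * p_targ + (1 - polyak) * p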
module_pairs = [
dict(source=self.v_module, target=self.v_target),
]
for pair in module_pairs:
for p, p_targ in zip(pair['source'].parameters(), pair['target'].parameters()):
p_targ.data *= self.polyak
p_targ.data += (1 - self.polyak) * p.data
def _get_q_module(self, q_i):
q_i = q_i if q_i is not None else 1
assert q_i in [1, 2]
return [self.q1, self.q2][q_i - 1]
def get_action_qs(self, batch, q_i=None):
return self.get_curr_qs(batch, new_actions=None, q_i=q_i)
def get_policy_loss_and_actions(self, batch):
policy_actions, logprobs = self.sample_policy_actions_and_lprobs(batch)
p_obj = self.q1.q_no_grad(batch['state'], policy_actions, self.preprocess_skill(batch['skill']))
if hasattr(self, 'alpha'): # for SAC
p_obj -= self.alpha * logprobs
p_losses = -p_obj # flip sign to turn the maximization objective into a loss function to minimize
p_loss = p_losses.mean()
return p_loss, policy_actions
def get_curr_qs(self, batch, new_actions=None, q_i=None):
"""
Compute Q_i(s,a). Use new_actions to override the actions in the batch (e.g. for SAC).
q_i selects the index of the Q-function.
"""
action = new_actions if new_actions is not None else batch['action']
return self._get_q_module(q_i)(
batch['state'],
action,
self.preprocess_skill(batch['skill'])
)
def get_next_vs(self, batch):
return self.v_target(
batch['next_state'],
self.preprocess_skill(batch['skill']),
)
def sample_policy_actions_and_lprobs(self, batch): # For SAC; we need to sample new actions when updating V
""" Sample new actions. Returns (actions, logprobs) tuple. """
action, action_logit, lprobs, n_ent = self.policy(
batch['state'],
self.preprocess_skill(batch['skill'])
)
return action, lprobs.sum(dim=1)
| 37.682635
| 112
| 0.604004
|
4a178c8abb19aa3742d27c18f09762a8a1a4c069
| 4,182
|
py
|
Python
|
EMDMeasurment/EMD.py
|
Javert899/VisualComparison2EventLogs
|
a7bda790922928010a7cd8d1691c92a098ffd11d
|
[
"Apache-2.0"
] | null | null | null |
EMDMeasurment/EMD.py
|
Javert899/VisualComparison2EventLogs
|
a7bda790922928010a7cd8d1691c92a098ffd11d
|
[
"Apache-2.0"
] | null | null | null |
EMDMeasurment/EMD.py
|
Javert899/VisualComparison2EventLogs
|
a7bda790922928010a7cd8d1691c92a098ffd11d
|
[
"Apache-2.0"
] | 1
|
2022-03-04T07:40:55.000Z
|
2022-03-04T07:40:55.000Z
|
import math
import numpy as np
import pandas as pd
from collections import Counter
from Levenshtein import distance as levenshtein_distance
from pyemd import emd, emd_with_flow
class EMD:
    def __init__(self):
        pass
    def log_freq(self, simple_log):
tuple_list = map(tuple, simple_log)
c = Counter(tuple_list)
return self.relative_freq(c, len(c.items()))
def relative_freq(self,counter, n):
only_freq = np.zeros(n)
total_count = sum(counter.values())
relative = {}
for index, key in enumerate(counter):
relative[key] = round(counter[key] / total_count, 4)
only_freq[index] = round(counter[key] / total_count, 4)
return relative,only_freq
def distance_array(self,log1,log2):
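        # Pairwise trace distance matrix: Levenshtein distance normalized by
        # the longer trace length, so every cell lies in [0, 1]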
log2=dict(sorted(log2.items(), key=lambda item: item[1], reverse=True))
log1 = dict(sorted(log1.items(), key=lambda item: item[1], reverse=True))
array = np.zeros(shape=(len(log1), len(log2)))
for index1,trace1 in enumerate(log1):
print("variant: " + str(index1))
for index2,trace2 in enumerate(log2):
str1 = ''.join(trace1)
str2 = ''.join(trace2)
dist_01 = levenshtein_distance(str1,str2)/max(len(str1), len(str2))
array[index1][index2] = round(dist_01,4)
# array[index1][index2] = 10
df = pd.DataFrame(array, [''.join(item[0])+":"+str(item[1]) for item in log1.items()], [''.join(item[0])+":"+str(item[1]) for item in log2.items()])
return df,array
def emd_distance(self, log_freq_1, log_freq_2):
distance_df, array = self.distance_array(log_freq_1, log_freq_2)
cost = 0
for ind, item in enumerate(log_freq_1.items()):
freq = item[1]
row = distance_df.iloc[ind]
min_col = row.idxmin()
while freq > 0:
freq_to_transfer = min(freq, float(min_col.split(':')[1]))
diff = freq - float(min_col.split(':')[1])
if diff < 0:
diff = 0
cell_value = distance_df.at[''.join(item[0]) + ":" + str(item[1]), min_col]
distance_df.at[''.join(item[0]) + ":" + str(item[1]), min_col] = cell_value * freq_to_transfer
cost += cell_value * freq_to_transfer
# distance_df = distance_df.rename(index={''.join(item[0])+":"+str(freq): ''.join(item[0])+":"+str(diff)})
freq = diff
row = row.drop(labels=[min_col])
min_col = row.idxmin()
return self.truncate(cost,3)
def emd_distance_pyemd(self,log_only_freq_1,log_only_freq_2,log_freq_1,log_freq_2):
checked = False
if len(log_only_freq_2) < len(log_only_freq_1):
checked = True
diff = len(log_only_freq_1) - len(log_only_freq_2)
for i in range(diff):
fake_str = 'a' + str(i)
log_freq_2[tuple(fake_str)] = 0
log_only_freq_2 = np.append(log_only_freq_2, 0)
elif len(log_only_freq_1) < len(log_only_freq_2) and not checked:
diff = len(log_only_freq_2) - len(log_only_freq_1)
for i in range(diff):
fake_str = 'a' + str(i)
log_freq_1[tuple(fake_str)] = 0
log_only_freq_1 = np.append(log_only_freq_1, 0)
distance_df, array = self.distance_array(log_freq_1, log_freq_2)
# if len(log_only_freq_1) > array.shape[0]:
# len(log_only_freq_1).pop()
# log_freq_1.pop()
# if len(log_only_freq_2) > array.shape[1]:
# len(log_only_freq_2).pop()
# log_freq_2.pop()
cost_lp = emd(log_only_freq_1, log_only_freq_2, array)
x = emd_with_flow(log_only_freq_1, log_only_freq_2, array)
distamce_df_emd = pd.DataFrame(x[1])
if cost_lp > 1:
cost_lp = 1
return cost_lp, distamce_df_emd
def truncate(self,number, digits):
stepper = 10.0 ** digits
return math.trunc(stepper * number) / stepper
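# A minimal usage sketch (hypothetical event logs; assumes each trace is a
# sequence of single-character activity labels, matching the ''.join() +
# Levenshtein comparison in distance_array):
#     emd_calc = EMD()
#     freq1, only1 = emd_calc.log_freq([['a', 'b', 'c'], ['a', 'b', 'c'], ['a', 'c']])
#     freq2, only2 = emd_calc.log_freq([['a', 'b', 'c'], ['a', 'b']])
#     cost, flow = emd_calc.emd_distance_pyemd(only1, only2, freq1, freq2)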
| 41.82
| 156
| 0.581779
|
4a178c989b6d363705a8c371fe5a84de83975533
| 1,059
|
py
|
Python
|
lib/linkedin/constants.py
|
goztrk/django-htk
|
c56bf112e5d627780d2f4288460eae5cce80fa9e
|
[
"MIT"
] | 206
|
2015-10-15T07:05:08.000Z
|
2021-02-19T11:48:36.000Z
|
lib/linkedin/constants.py
|
goztrk/django-htk
|
c56bf112e5d627780d2f4288460eae5cce80fa9e
|
[
"MIT"
] | 8
|
2017-10-16T10:18:31.000Z
|
2022-03-09T14:24:27.000Z
|
lib/linkedin/constants.py
|
goztrk/django-htk
|
c56bf112e5d627780d2f4288460eae5cce80fa9e
|
[
"MIT"
] | 61
|
2015-10-15T08:12:44.000Z
|
2022-03-10T12:25:06.000Z
|
LINKEDIN_PROFILE_API_BASE_URL = 'https://api.linkedin.com/v1/people/~:(%s)?format=json'
# LinkedIn treasure trove: http://developer.linkedin.com/documents/profile-fields
LINKEDIN_PROFILE_FIELDS = [
'id',
'first-name',
'last-name',
'summary',
'picture-url',
'location:(name)',
'industry',
'headline',
# http://developer.linkedin.com/documents/profile-fields#positions
'positions:(id,title,summary,start-date,end-date,is-current,company)',
    # http://developer.linkedin.com/documents/profile-fields#publications
    'publications:(id,title,date,url,summary)',
# http://developer.linkedin.com/documents/profile-fields#patents
'patents:(id,title,summary,number,date,url)',
# http://developer.linkedin.com/documents/profile-fields#languages
#'languages'
# http://developer.linkedin.com/documents/profile-fields#skills
'skills:(id,skill:(name))',
# http://developer.linkedin.com/documents/profile-fields#educations
'educations:(id,school-name,field-of-study,degree,start-date,end-date)',
]
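# Presumably consumed by joining the fields into the URL's selector, e.g.:
#     LINKEDIN_PROFILE_API_BASE_URL % ','.join(LINKEDIN_PROFILE_FIELDS)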
| 40.730769
| 87
| 0.711992
|
4a178d648503524d3b3eca00b54407da3d024a9b
| 4,063
|
py
|
Python
|
python/surf/devices/cypress/_CypressS25Fl.py
|
qarlosalberto/surf
|
69df91296d77efc9e812da051841545e320ebf69
|
[
"BSD-3-Clause-LBNL"
] | 2
|
2021-05-13T19:56:51.000Z
|
2021-05-21T13:33:02.000Z
|
python/surf/devices/cypress/_CypressS25Fl.py
|
qarlosalberto/surf
|
69df91296d77efc9e812da051841545e320ebf69
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
python/surf/devices/cypress/_CypressS25Fl.py
|
qarlosalberto/surf
|
69df91296d77efc9e812da051841545e320ebf69
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
#-----------------------------------------------------------------------------
# Description: PyRogue Cypress S25FL PROM Series
#
# Note: Used with surf/devices/Micron/n25q firmware
#
#-----------------------------------------------------------------------------
# This file is part of the 'SLAC Firmware Standard Library'. It is subject to
# the license terms in the LICENSE.txt file found in the top-level directory
# of this distribution and at:
# https://confluence.slac.stanford.edu/display/ppareg/LICENSE.html.
# No part of the 'SLAC Firmware Standard Library', including this file, may be
# copied, modified, propagated, or distributed except according to the terms
# contained in the LICENSE.txt file.
#-----------------------------------------------------------------------------
import surf.devices.micron
import click
import time
import datetime
class CypressS25Fl(surf.devices.micron.AxiMicronN25Q):
def __init__(self,
description = "Container for Cypress S25FL PROM device",
addrMode = False, # False = 24-bit Address mode, True = 32-bit Address Mode
**kwargs):
        super().__init__(description = description, **kwargs)
        # Store the addressing mode; resetFlash() below reads self._addrMode,
        # which was otherwise never set from the addrMode argument
        self._addrMode = addrMode
########################################
# Overwrite with Cypress S25FL Constants
########################################
self.FLAG_STATUS_REG = (0x05 << 16)
self.FLAG_STATUS_RDY = (0x01)
self.BRAC_CMD = (0xB9 << 16)
def _LoadMcsFile(self,arg):
click.secho(('LoadMcsFile: %s' % arg), fg='green')
self._progDone = False
# Start time measurement for profiling
start = time.time()
# Reset the SPI interface
self.resetFlash()
# Print the status registers
print("CypressS25Fl Manufacturer ID Code = {}".format(hex(self.getManufacturerId())))
print("CypressS25Fl Manufacturer Type = {}".format(hex(self.getManufacturerType())))
print("CypressS25Fl Manufacturer Capacity = {}".format(hex(self.getManufacturerCapacity())))
print("CypressS25Fl Status Register = {}".format(hex(self.getPromStatusReg())))
# Open the MCS file
self._mcs.open(arg)
# Erase the PROM
self.eraseProm()
# Write to the PROM
self.writeProm()
# Verify the PROM
self.verifyProm()
# End time measurement for profiling
end = time.time()
elapsed = end - start
click.secho('LoadMcsFile() took %s to program the PROM' % datetime.timedelta(seconds=int(elapsed)), fg='green')
# Add a power cycle reminder
self._progDone = True
click.secho(
"\n\n\
***************************************************\n\
***************************************************\n\
The MCS data has been written into the PROM. \n\
To reprogram the FPGA with the new PROM data, \n\
            an IPROG CMD or power cycle is required.\n\
***************************************************\n\
***************************************************\n\n"
, bg='green',
)
def resetFlash(self):
# Send the "Mode Bit Reset" command
self.setCmdReg(self.WRITE_MASK|(0xFF << 16))
time.sleep(0.001)
# Send the "Software Reset" Command
self.setCmdReg(self.WRITE_MASK|(0xF0 << 16))
time.sleep(0.001)
# Set the addressing mode
self.setModeReg()
# Check the address mode
if (self._addrMode):
self.setCmd(self.WRITE_MASK|self.BRAC_CMD|0x80)
else:
self.setCmd(self.WRITE_MASK|self.BRAC_CMD)
def waitForFlashReady(self):
while True:
# Get the status register
self.setCmdReg(self.READ_MASK|self.FLAG_STATUS_REG|0x1)
status = (self.getCmdReg()&0xFF)
# Check if not busy
if ( (status & self.FLAG_STATUS_RDY) == 0 ): # active Low READY
break
| 37.971963
| 119
| 0.527443
|
4a179093ebdfe4d821d0f9db4daeb1257ac1214e
| 2,912
|
py
|
Python
|
tests/unit/trace/test_link.py
|
bshaffer/opencensus-python
|
c624558c6829982d3464a5df29b48952f1fe23bc
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/trace/test_link.py
|
bshaffer/opencensus-python
|
c624558c6829982d3464a5df29b48952f1fe23bc
|
[
"Apache-2.0"
] | 1
|
2018-04-08T18:01:16.000Z
|
2018-04-08T18:01:16.000Z
|
tests/unit/trace/test_link.py
|
bshaffer/opencensus-python
|
c624558c6829982d3464a5df29b48952f1fe23bc
|
[
"Apache-2.0"
] | 1
|
2019-09-01T06:00:13.000Z
|
2019-09-01T06:00:13.000Z
|
# Copyright 2017, OpenCensus Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import mock
from opencensus.trace import link as link_module
class TestLink(unittest.TestCase):
def test_constructor_default(self):
trace_id = 'test trace id'
span_id = 'test span id'
type = link_module.Type.TYPE_UNSPECIFIED
attributes = mock.Mock()
link = link_module.Link(
trace_id=trace_id,
span_id=span_id,
attributes=attributes)
self.assertEqual(link.trace_id, trace_id)
self.assertEqual(link.span_id, span_id)
self.assertEqual(link.type, type)
self.assertEqual(link.attributes, attributes)
def test_constructor_explicit(self):
trace_id = 'test trace id'
span_id = 'test span id'
type = link_module.Type.CHILD_LINKED_SPAN
attributes = mock.Mock()
link = link_module.Link(
trace_id=trace_id,
span_id=span_id,
type=type,
attributes=attributes)
self.assertEqual(link.trace_id, trace_id)
self.assertEqual(link.span_id, span_id)
self.assertEqual(link.type, type)
self.assertEqual(link.attributes, attributes)
def test_format_link_json_with_attributes(self):
trace_id = 'test trace id'
span_id = 'test span id'
type = link_module.Type.CHILD_LINKED_SPAN
attributes = mock.Mock()
link = link_module.Link(
trace_id=trace_id,
span_id=span_id,
type=type,
attributes=attributes)
link_json = link.format_link_json()
expected_link_json = {
'trace_id': trace_id,
'span_id': span_id,
'type': type,
'attributes': attributes
}
self.assertEqual(expected_link_json, link_json)
def test_format_link_json_without_attributes(self):
trace_id = 'test trace id'
span_id = 'test span id'
type = link_module.Type.CHILD_LINKED_SPAN
link = link_module.Link(
trace_id=trace_id,
span_id=span_id,
type=type)
link_json = link.format_link_json()
expected_link_json = {
'trace_id': trace_id,
'span_id': span_id,
'type': type
}
self.assertEqual(expected_link_json, link_json)
| 29.414141
| 74
| 0.638049
|
4a17911a87fa523b6df5154b23a3f7669b99e349
| 35,110
|
py
|
Python
|
synapse/handlers/room.py
|
Lrizika/synapse
|
97174780ce726962ca1beb3788b62f16e9fad270
|
[
"Apache-2.0"
] | 1
|
2019-06-22T04:17:50.000Z
|
2019-06-22T04:17:50.000Z
|
synapse/handlers/room.py
|
fikalefaza/synapse
|
4f68188d0bbdb1966250375d34125572eb82a117
|
[
"Apache-2.0"
] | null | null | null |
synapse/handlers/room.py
|
fikalefaza/synapse
|
4f68188d0bbdb1966250375d34125572eb82a117
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2014 - 2016 OpenMarket Ltd
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains functions for performing events on rooms."""
import itertools
import logging
import math
import string
from collections import OrderedDict
from six import iteritems, string_types
from twisted.internet import defer
from synapse.api.constants import EventTypes, JoinRules, RoomCreationPreset
from synapse.api.errors import AuthError, Codes, NotFoundError, StoreError, SynapseError
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
from synapse.storage.state import StateFilter
from synapse.types import RoomAlias, RoomID, RoomStreamToken, StreamToken, UserID
from synapse.util import stringutils
from synapse.util.async_helpers import Linearizer
from synapse.visibility import filter_events_for_client
from ._base import BaseHandler
logger = logging.getLogger(__name__)
id_server_scheme = "https://"
class RoomCreationHandler(BaseHandler):
PRESETS_DICT = {
RoomCreationPreset.PRIVATE_CHAT: {
"join_rules": JoinRules.INVITE,
"history_visibility": "shared",
"original_invitees_have_ops": False,
"guest_can_join": True,
},
RoomCreationPreset.TRUSTED_PRIVATE_CHAT: {
"join_rules": JoinRules.INVITE,
"history_visibility": "shared",
"original_invitees_have_ops": True,
"guest_can_join": True,
},
RoomCreationPreset.PUBLIC_CHAT: {
"join_rules": JoinRules.PUBLIC,
"history_visibility": "shared",
"original_invitees_have_ops": False,
"guest_can_join": False,
},
}
def __init__(self, hs):
super(RoomCreationHandler, self).__init__(hs)
self.spam_checker = hs.get_spam_checker()
self.event_creation_handler = hs.get_event_creation_handler()
self.room_member_handler = hs.get_room_member_handler()
self.config = hs.config
# linearizer to stop two upgrades happening at once
self._upgrade_linearizer = Linearizer("room_upgrade_linearizer")
@defer.inlineCallbacks
def upgrade_room(self, requester, old_room_id, new_version):
"""Replace a room with a new room with a different version
Args:
requester (synapse.types.Requester): the user requesting the upgrade
old_room_id (unicode): the id of the room to be replaced
new_version (unicode): the new room version to use
Returns:
Deferred[unicode]: the new room id
"""
yield self.ratelimit(requester)
user_id = requester.user.to_string()
with (yield self._upgrade_linearizer.queue(old_room_id)):
# start by allocating a new room id
r = yield self.store.get_room(old_room_id)
if r is None:
raise NotFoundError("Unknown room id %s" % (old_room_id,))
new_room_id = yield self._generate_room_id(
creator_id=user_id, is_public=r["is_public"],
)
logger.info("Creating new room %s to replace %s", new_room_id, old_room_id)
# we create and auth the tombstone event before properly creating the new
# room, to check our user has perms in the old room.
tombstone_event, tombstone_context = (
yield self.event_creation_handler.create_event(
requester, {
"type": EventTypes.Tombstone,
"state_key": "",
"room_id": old_room_id,
"sender": user_id,
"content": {
"body": "This room has been replaced",
"replacement_room": new_room_id,
}
},
token_id=requester.access_token_id,
)
)
old_room_version = yield self.store.get_room_version(old_room_id)
yield self.auth.check_from_context(
old_room_version, tombstone_event, tombstone_context,
)
yield self.clone_existing_room(
requester,
old_room_id=old_room_id,
new_room_id=new_room_id,
new_room_version=new_version,
tombstone_event_id=tombstone_event.event_id,
)
# now send the tombstone
yield self.event_creation_handler.send_nonmember_event(
requester, tombstone_event, tombstone_context,
)
old_room_state = yield tombstone_context.get_current_state_ids(self.store)
# update any aliases
yield self._move_aliases_to_new_room(
requester, old_room_id, new_room_id, old_room_state,
)
# and finally, shut down the PLs in the old room, and update them in the new
# room.
yield self._update_upgraded_room_pls(
requester, old_room_id, new_room_id, old_room_state,
)
defer.returnValue(new_room_id)
@defer.inlineCallbacks
def _update_upgraded_room_pls(
self, requester, old_room_id, new_room_id, old_room_state,
):
"""Send updated power levels in both rooms after an upgrade
Args:
requester (synapse.types.Requester): the user requesting the upgrade
old_room_id (unicode): the id of the room to be replaced
new_room_id (unicode): the id of the replacement room
old_room_state (dict[tuple[str, str], str]): the state map for the old room
Returns:
Deferred
"""
old_room_pl_event_id = old_room_state.get((EventTypes.PowerLevels, ""))
if old_room_pl_event_id is None:
logger.warning(
"Not supported: upgrading a room with no PL event. Not setting PLs "
"in old room.",
)
return
old_room_pl_state = yield self.store.get_event(old_room_pl_event_id)
# we try to stop regular users from speaking by setting the PL required
# to send regular events and invites to 'Moderator' level. That's normally
# 50, but if the default PL in a room is 50 or more, then we set the
# required PL above that.
pl_content = dict(old_room_pl_state.content)
users_default = int(pl_content.get("users_default", 0))
restricted_level = max(users_default + 1, 50)
updated = False
for v in ("invite", "events_default"):
current = int(pl_content.get(v, 0))
if current < restricted_level:
logger.info(
"Setting level for %s in %s to %i (was %i)",
v, old_room_id, restricted_level, current,
)
pl_content[v] = restricted_level
updated = True
else:
logger.info(
"Not setting level for %s (already %i)",
v, current,
)
if updated:
try:
yield self.event_creation_handler.create_and_send_nonmember_event(
requester, {
"type": EventTypes.PowerLevels,
"state_key": '',
"room_id": old_room_id,
"sender": requester.user.to_string(),
"content": pl_content,
}, ratelimit=False,
)
except AuthError as e:
logger.warning("Unable to update PLs in old room: %s", e)
logger.info("Setting correct PLs in new room")
yield self.event_creation_handler.create_and_send_nonmember_event(
requester, {
"type": EventTypes.PowerLevels,
"state_key": '',
"room_id": new_room_id,
"sender": requester.user.to_string(),
"content": old_room_pl_state.content,
}, ratelimit=False,
)
@defer.inlineCallbacks
def clone_existing_room(
self, requester, old_room_id, new_room_id, new_room_version,
tombstone_event_id,
):
"""Populate a new room based on an old room
Args:
requester (synapse.types.Requester): the user requesting the upgrade
old_room_id (unicode): the id of the room to be replaced
new_room_id (unicode): the id to give the new room (should already have been
                created with _generate_room_id())
new_room_version (unicode): the new room version to use
tombstone_event_id (unicode|str): the ID of the tombstone event in the old
room.
Returns:
Deferred[None]
"""
user_id = requester.user.to_string()
if not self.spam_checker.user_may_create_room(user_id):
raise SynapseError(403, "You are not permitted to create rooms")
creation_content = {
"room_version": new_room_version,
"predecessor": {
"room_id": old_room_id,
"event_id": tombstone_event_id,
}
}
# Check if old room was non-federatable
# Get old room's create event
old_room_create_event = yield self.store.get_create_event_for_room(old_room_id)
# Check if the create event specified a non-federatable room
if not old_room_create_event.content.get("m.federate", True):
# If so, mark the new room as non-federatable as well
creation_content["m.federate"] = False
initial_state = dict()
# Replicate relevant room events
types_to_copy = (
(EventTypes.JoinRules, ""),
(EventTypes.Name, ""),
(EventTypes.Topic, ""),
(EventTypes.RoomHistoryVisibility, ""),
(EventTypes.GuestAccess, ""),
(EventTypes.RoomAvatar, ""),
(EventTypes.Encryption, ""),
(EventTypes.ServerACL, ""),
(EventTypes.RelatedGroups, ""),
)
old_room_state_ids = yield self.store.get_filtered_current_state_ids(
old_room_id, StateFilter.from_types(types_to_copy),
)
# map from event_id to BaseEvent
old_room_state_events = yield self.store.get_events(old_room_state_ids.values())
for k, old_event_id in iteritems(old_room_state_ids):
old_event = old_room_state_events.get(old_event_id)
if old_event:
initial_state[k] = old_event.content
yield self._send_events_for_new_room(
requester,
new_room_id,
# we expect to override all the presets with initial_state, so this is
# somewhat arbitrary.
preset_config=RoomCreationPreset.PRIVATE_CHAT,
invite_list=[],
initial_state=initial_state,
creation_content=creation_content,
)
# Transfer membership events
old_room_member_state_ids = yield self.store.get_filtered_current_state_ids(
old_room_id, StateFilter.from_types([(EventTypes.Member, None)]),
)
# map from event_id to BaseEvent
old_room_member_state_events = yield self.store.get_events(
old_room_member_state_ids.values(),
)
for k, old_event in iteritems(old_room_member_state_events):
# Only transfer ban events
if ("membership" in old_event.content and
old_event.content["membership"] == "ban"):
yield self.room_member_handler.update_membership(
requester,
UserID.from_string(old_event['state_key']),
new_room_id,
"ban",
ratelimit=False,
content=old_event.content,
)
# XXX invites/joins
# XXX 3pid invites
@defer.inlineCallbacks
def _move_aliases_to_new_room(
self, requester, old_room_id, new_room_id, old_room_state,
):
directory_handler = self.hs.get_handlers().directory_handler
aliases = yield self.store.get_aliases_for_room(old_room_id)
# check to see if we have a canonical alias.
canonical_alias = None
canonical_alias_event_id = old_room_state.get((EventTypes.CanonicalAlias, ""))
if canonical_alias_event_id:
canonical_alias_event = yield self.store.get_event(canonical_alias_event_id)
if canonical_alias_event:
canonical_alias = canonical_alias_event.content.get("alias", "")
# first we try to remove the aliases from the old room (we suppress sending
# the room_aliases event until the end).
#
        # Note that we'll only be able to remove aliases that (a) aren't owned by an AS,
        # and (b) were created by the user themselves, unless the user is a server admin.
#
# This is probably correct - given we don't allow such aliases to be deleted
# normally, it would be odd to allow it in the case of doing a room upgrade -
# but it makes the upgrade less effective, and you have to wonder why a room
# admin can't remove aliases that point to that room anyway.
# (cf https://github.com/matrix-org/synapse/issues/2360)
#
removed_aliases = []
for alias_str in aliases:
alias = RoomAlias.from_string(alias_str)
try:
yield directory_handler.delete_association(
requester, alias, send_event=False,
)
removed_aliases.append(alias_str)
except SynapseError as e:
logger.warning(
"Unable to remove alias %s from old room: %s",
alias, e,
)
# if we didn't find any aliases, or couldn't remove anyway, we can skip the rest
# of this.
if not removed_aliases:
return
try:
# this can fail if, for some reason, our user doesn't have perms to send
# m.room.aliases events in the old room (note that we've already checked that
# they have perms to send a tombstone event, so that's not terribly likely).
#
# If that happens, it's regrettable, but we should carry on: it's the same
# as when you remove an alias from the directory normally - it just means that
# the aliases event gets out of sync with the directory
# (cf https://github.com/vector-im/riot-web/issues/2369)
yield directory_handler.send_room_alias_update_event(
requester, old_room_id,
)
except AuthError as e:
logger.warning(
"Failed to send updated alias event on old room: %s", e,
)
# we can now add any aliases we successfully removed to the new room.
for alias in removed_aliases:
try:
yield directory_handler.create_association(
requester, RoomAlias.from_string(alias),
new_room_id, servers=(self.hs.hostname, ),
send_event=False, check_membership=False,
)
logger.info("Moved alias %s to new room", alias)
except SynapseError as e:
# I'm not really expecting this to happen, but it could if the spam
# checking module decides it shouldn't, or similar.
logger.error(
"Error adding alias %s to new room: %s",
alias, e,
)
try:
if canonical_alias and (canonical_alias in removed_aliases):
yield self.event_creation_handler.create_and_send_nonmember_event(
requester,
{
"type": EventTypes.CanonicalAlias,
"state_key": "",
"room_id": new_room_id,
"sender": requester.user.to_string(),
"content": {"alias": canonical_alias, },
},
ratelimit=False
)
yield directory_handler.send_room_alias_update_event(
requester, new_room_id,
)
except SynapseError as e:
# again I'm not really expecting this to fail, but if it does, I'd rather
# we returned the new room to the client at this point.
logger.error(
"Unable to send updated alias events in new room: %s", e,
)
@defer.inlineCallbacks
def create_room(self, requester, config, ratelimit=True,
creator_join_profile=None):
""" Creates a new room.
Args:
requester (synapse.types.Requester):
The user who requested the room creation.
config (dict) : A dict of configuration options.
ratelimit (bool): set to False to disable the rate limiter
creator_join_profile (dict|None):
Set to override the displayname and avatar for the creating
user in this room. If unset, displayname and avatar will be
derived from the user's profile. If set, should contain the
values to go in the body of the 'join' event (typically
`avatar_url` and/or `displayname`.
Returns:
Deferred[dict]:
a dict containing the keys `room_id` and, if an alias was
requested, `room_alias`.
Raises:
SynapseError if the room ID couldn't be stored, or something went
horribly wrong.
ResourceLimitError if server is blocked to some resource being
exceeded
"""
user_id = requester.user.to_string()
yield self.auth.check_auth_blocking(user_id)
if not self.spam_checker.user_may_create_room(user_id):
raise SynapseError(403, "You are not permitted to create rooms")
if ratelimit:
yield self.ratelimit(requester)
room_version = config.get(
"room_version",
self.config.default_room_version.identifier,
)
if not isinstance(room_version, string_types):
raise SynapseError(
400,
"room_version must be a string",
Codes.BAD_JSON,
)
if room_version not in KNOWN_ROOM_VERSIONS:
raise SynapseError(
400,
"Your homeserver does not support this room version",
Codes.UNSUPPORTED_ROOM_VERSION,
)
if "room_alias_name" in config:
for wchar in string.whitespace:
if wchar in config["room_alias_name"]:
raise SynapseError(400, "Invalid characters in room alias")
room_alias = RoomAlias(
config["room_alias_name"],
self.hs.hostname,
)
mapping = yield self.store.get_association_from_room_alias(
room_alias
)
if mapping:
raise SynapseError(
400,
"Room alias already taken",
Codes.ROOM_IN_USE
)
else:
room_alias = None
invite_list = config.get("invite", [])
for i in invite_list:
try:
UserID.from_string(i)
except Exception:
raise SynapseError(400, "Invalid user_id: %s" % (i,))
yield self.event_creation_handler.assert_accepted_privacy_policy(
requester,
)
invite_3pid_list = config.get("invite_3pid", [])
visibility = config.get("visibility", None)
is_public = visibility == "public"
room_id = yield self._generate_room_id(creator_id=user_id, is_public=is_public)
if room_alias:
directory_handler = self.hs.get_handlers().directory_handler
yield directory_handler.create_association(
requester=requester,
room_id=room_id,
room_alias=room_alias,
servers=[self.hs.hostname],
send_event=False,
check_membership=False,
)
preset_config = config.get(
"preset",
RoomCreationPreset.PRIVATE_CHAT
if visibility == "private"
else RoomCreationPreset.PUBLIC_CHAT
)
raw_initial_state = config.get("initial_state", [])
initial_state = OrderedDict()
for val in raw_initial_state:
initial_state[(val["type"], val.get("state_key", ""))] = val["content"]
creation_content = config.get("creation_content", {})
# override any attempt to set room versions via the creation_content
creation_content["room_version"] = room_version
yield self._send_events_for_new_room(
requester,
room_id,
preset_config=preset_config,
invite_list=invite_list,
initial_state=initial_state,
creation_content=creation_content,
room_alias=room_alias,
power_level_content_override=config.get("power_level_content_override"),
creator_join_profile=creator_join_profile,
)
if "name" in config:
name = config["name"]
yield self.event_creation_handler.create_and_send_nonmember_event(
requester,
{
"type": EventTypes.Name,
"room_id": room_id,
"sender": user_id,
"state_key": "",
"content": {"name": name},
},
ratelimit=False)
if "topic" in config:
topic = config["topic"]
yield self.event_creation_handler.create_and_send_nonmember_event(
requester,
{
"type": EventTypes.Topic,
"room_id": room_id,
"sender": user_id,
"state_key": "",
"content": {"topic": topic},
},
ratelimit=False)
for invitee in invite_list:
content = {}
is_direct = config.get("is_direct", None)
if is_direct:
content["is_direct"] = is_direct
yield self.room_member_handler.update_membership(
requester,
UserID.from_string(invitee),
room_id,
"invite",
ratelimit=False,
content=content,
)
for invite_3pid in invite_3pid_list:
id_server = invite_3pid["id_server"]
address = invite_3pid["address"]
medium = invite_3pid["medium"]
yield self.hs.get_room_member_handler().do_3pid_invite(
room_id,
requester.user,
medium,
address,
id_server,
requester,
txn_id=None,
)
result = {"room_id": room_id}
if room_alias:
result["room_alias"] = room_alias.to_string()
yield directory_handler.send_room_alias_update_event(
requester, room_id
)
defer.returnValue(result)
@defer.inlineCallbacks
def _send_events_for_new_room(
self,
creator, # A Requester object.
room_id,
preset_config,
invite_list,
initial_state,
creation_content,
room_alias=None,
power_level_content_override=None,
creator_join_profile=None,
):
def create(etype, content, **kwargs):
e = {
"type": etype,
"content": content,
}
e.update(event_keys)
e.update(kwargs)
return e
@defer.inlineCallbacks
def send(etype, content, **kwargs):
event = create(etype, content, **kwargs)
logger.info("Sending %s in new room", etype)
yield self.event_creation_handler.create_and_send_nonmember_event(
creator,
event,
ratelimit=False
)
config = RoomCreationHandler.PRESETS_DICT[preset_config]
creator_id = creator.user.to_string()
event_keys = {
"room_id": room_id,
"sender": creator_id,
"state_key": "",
}
creation_content.update({"creator": creator_id})
yield send(
etype=EventTypes.Create,
content=creation_content,
)
logger.info("Sending %s in new room", EventTypes.Member)
yield self.room_member_handler.update_membership(
creator,
creator.user,
room_id,
"join",
ratelimit=False,
content=creator_join_profile,
)
# We treat the power levels override specially as this needs to be one
# of the first events that get sent into a room.
pl_content = initial_state.pop((EventTypes.PowerLevels, ''), None)
if pl_content is not None:
yield send(
etype=EventTypes.PowerLevels,
content=pl_content,
)
else:
power_level_content = {
"users": {
creator_id: 100,
},
"users_default": 0,
"events": {
EventTypes.Name: 50,
EventTypes.PowerLevels: 100,
EventTypes.RoomHistoryVisibility: 100,
EventTypes.CanonicalAlias: 50,
EventTypes.RoomAvatar: 50,
},
"events_default": 0,
"state_default": 50,
"ban": 50,
"kick": 50,
"redact": 50,
"invite": 0,
}
if config["original_invitees_have_ops"]:
for invitee in invite_list:
power_level_content["users"][invitee] = 100
if power_level_content_override:
power_level_content.update(power_level_content_override)
yield send(
etype=EventTypes.PowerLevels,
content=power_level_content,
)
if room_alias and (EventTypes.CanonicalAlias, '') not in initial_state:
yield send(
etype=EventTypes.CanonicalAlias,
content={"alias": room_alias.to_string()},
)
if (EventTypes.JoinRules, '') not in initial_state:
yield send(
etype=EventTypes.JoinRules,
content={"join_rule": config["join_rules"]},
)
if (EventTypes.RoomHistoryVisibility, '') not in initial_state:
yield send(
etype=EventTypes.RoomHistoryVisibility,
content={"history_visibility": config["history_visibility"]}
)
if config["guest_can_join"]:
if (EventTypes.GuestAccess, '') not in initial_state:
yield send(
etype=EventTypes.GuestAccess,
content={"guest_access": "can_join"}
)
for (etype, state_key), content in initial_state.items():
yield send(
etype=etype,
state_key=state_key,
content=content,
)
@defer.inlineCallbacks
def _generate_room_id(self, creator_id, is_public):
        # Autogenerate room IDs and try to store them. We may clash, so just
        # try a few times till one goes through, giving up eventually.
attempts = 0
while attempts < 5:
try:
random_string = stringutils.random_string(18)
gen_room_id = RoomID(
random_string,
self.hs.hostname,
).to_string()
if isinstance(gen_room_id, bytes):
gen_room_id = gen_room_id.decode('utf-8')
yield self.store.store_room(
room_id=gen_room_id,
room_creator_user_id=creator_id,
is_public=is_public,
)
defer.returnValue(gen_room_id)
except StoreError:
attempts += 1
raise StoreError(500, "Couldn't generate a room ID.")
class RoomContextHandler(object):
def __init__(self, hs):
self.hs = hs
self.store = hs.get_datastore()
@defer.inlineCallbacks
def get_event_context(self, user, room_id, event_id, limit, event_filter):
"""Retrieves events, pagination tokens and state around a given event
in a room.
Args:
user (UserID)
room_id (str)
event_id (str)
limit (int): The maximum number of events to return in total
(excluding state).
event_filter (Filter|None): the filter to apply to the events returned
(excluding the target event_id)
Returns:
dict, or None if the event isn't found
"""
before_limit = math.floor(limit / 2.)
after_limit = limit - before_limit
users = yield self.store.get_users_in_room(room_id)
is_peeking = user.to_string() not in users
def filter_evts(events):
return filter_events_for_client(
self.store,
user.to_string(),
events,
is_peeking=is_peeking
)
event = yield self.store.get_event(event_id, get_prev_content=True,
allow_none=True)
if not event:
defer.returnValue(None)
return
filtered = yield(filter_evts([event]))
if not filtered:
raise AuthError(
403,
"You don't have permission to access that event."
)
results = yield self.store.get_events_around(
room_id, event_id, before_limit, after_limit, event_filter
)
results["events_before"] = yield filter_evts(results["events_before"])
results["events_after"] = yield filter_evts(results["events_after"])
results["event"] = event
if results["events_after"]:
last_event_id = results["events_after"][-1].event_id
else:
last_event_id = event_id
if event_filter and event_filter.lazy_load_members():
state_filter = StateFilter.from_lazy_load_member_list(
ev.sender
for ev in itertools.chain(
results["events_before"],
(results["event"],),
results["events_after"],
)
)
else:
state_filter = StateFilter.all()
# XXX: why do we return the state as of the last event rather than the
# first? Shouldn't we be consistent with /sync?
# https://github.com/matrix-org/matrix-doc/issues/687
state = yield self.store.get_state_for_events(
[last_event_id], state_filter=state_filter,
)
results["state"] = list(state[last_event_id].values())
# We use a dummy token here as we only care about the room portion of
# the token, which we replace.
token = StreamToken.START
results["start"] = token.copy_and_replace(
"room_key", results["start"]
).to_string()
results["end"] = token.copy_and_replace(
"room_key", results["end"]
).to_string()
defer.returnValue(results)
class RoomEventSource(object):
def __init__(self, hs):
self.store = hs.get_datastore()
@defer.inlineCallbacks
def get_new_events(
self,
user,
from_key,
limit,
room_ids,
is_guest,
explicit_room_id=None,
):
# We just ignore the key for now.
to_key = yield self.get_current_key()
from_token = RoomStreamToken.parse(from_key)
if from_token.topological:
logger.warn("Stream has topological part!!!! %r", from_key)
from_key = "s%s" % (from_token.stream,)
app_service = self.store.get_app_service_by_user_id(
user.to_string()
)
if app_service:
# We no longer support AS users using /sync directly.
# See https://github.com/matrix-org/matrix-doc/issues/1144
raise NotImplementedError()
else:
room_events = yield self.store.get_membership_changes_for_user(
user.to_string(), from_key, to_key
)
room_to_events = yield self.store.get_room_events_stream_for_rooms(
room_ids=room_ids,
from_key=from_key,
to_key=to_key,
limit=limit or 10,
order='ASC',
)
events = list(room_events)
events.extend(e for evs, _ in room_to_events.values() for e in evs)
events.sort(key=lambda e: e.internal_metadata.order)
if limit:
events[:] = events[:limit]
if events:
end_key = events[-1].internal_metadata.after
else:
end_key = to_key
defer.returnValue((events, end_key))
def get_current_key(self):
return self.store.get_room_events_max_id()
def get_current_key_for_room(self, room_id):
return self.store.get_room_events_max_id(room_id)
@defer.inlineCallbacks
def get_pagination_rows(self, user, config, key):
events, next_key = yield self.store.paginate_room_events(
room_id=key,
from_key=config.from_key,
to_key=config.to_key,
direction=config.direction,
limit=config.limit,
)
defer.returnValue((events, next_key))
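# Illustrative sketch (not part of the original module) of the shape of the
# `config` dict that RoomCreationHandler.create_room above accepts. The keys
# are the ones create_room inspects; the values here are hypothetical.
if __name__ == "__main__":
    example_room_config = {
        "room_version": "1",
        "room_alias_name": "myroom",
        "name": "My Room",
        "topic": "Created via create_room",
        "visibility": "private",
        "preset": "private_chat",
        "invite": ["@alice:example.com"],
        "is_direct": False,
        "initial_state": [],
        "creation_content": {},
    }
    print(sorted(example_room_config))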
| 36.010256
| 90
| 0.566534
|
4a179215803a821f3c96f1b77f547273d50393e3
| 1,701
|
py
|
Python
|
RLAnIntro/RLAnIntro_Chap7_RandomWalk_n_step_TD.py
|
HuangJingGitHub/PracMakePert_py
|
1e947b0804fdcd50b2a3afc0af9d824cb55978cd
|
[
"Apache-2.0"
] | 2
|
2019-05-28T15:04:20.000Z
|
2019-05-28T15:04:22.000Z
|
RLAnIntro/RLAnIntro_Chap7_RandomWalk_n_step_TD.py
|
HuangJingGitHub/PracMakePert_py
|
1e947b0804fdcd50b2a3afc0af9d824cb55978cd
|
[
"Apache-2.0"
] | null | null | null |
RLAnIntro/RLAnIntro_Chap7_RandomWalk_n_step_TD.py
|
HuangJingGitHub/PracMakePert_py
|
1e947b0804fdcd50b2a3afc0af9d824cb55978cd
|
[
"Apache-2.0"
] | 1
|
2019-08-30T06:06:33.000Z
|
2019-08-30T06:06:33.000Z
|
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
STATES_NUM = 19
STATES = np.arange(1, STATES_NUM + 1)
STATE_START = 10
STATE_TERMINAL = [0, STATES_NUM + 1]
VALUES = np.arange(1, STATES_NUM + 1) / (STATES_NUM + 1)
GAMMA = 0.8
def temporal_difference(value, n, alpha):
state = STATE_START
states = [state]
rewards = [0]
T = float('inf')
time = 0
while True:
if time < T:
if np.random.binomial(1, 0.5) == 1:
next_state = state + 1
else:
next_state = state - 1
if next_state not in STATE_TERMINAL:
reward = 0
else:
reward = 1 if next_state == STATE_TERMINAL[1] else 0
T = time + 1
state = next_state
states.append(next_state)
rewards.append(reward)
time += 1
        update_time = time - n  # tau = t - n + 1; `time` was already incremented above
if update_time >= 0:
returns = 0
for i in range(update_time + 1, min(update_time + n, T) + 1):
returns += pow(GAMMA, i - update_time - 1) * rewards[i]
if update_time + n < T:
returns = returns + pow(GAMMA, n) * value[states[update_time + n]]
value[states[update_time]] += alpha * (returns - value[states[update_time]])
        if not time < T:
            # After the terminal step, keep advancing `time` so that
            # update_time still reaches T - 1 and the loop terminates.
            time += 1
if update_time == T - 1:
break
if __name__ == '__main__':
runs = 1000
values = np.zeros(STATES_NUM + 2)
for ep in tqdm(range(0, runs)):
temporal_difference(values, 4, 0.4)
plt.plot(values[1:-1])
plt.show()
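# Worked example (a sketch, values chosen by hand) of the n-step return that
# temporal_difference accumulates above: with n = 2, GAMMA = 0.8, rewards
# r1 = 0, r2 = 1, and a bootstrap value V = 0.5 at the n-th state,
#   G = r1 + GAMMA * r2 + GAMMA**2 * V = 0 + 0.8 + 0.32 = 1.12
_example_G = 0 + GAMMA * 1 + GAMMA ** 2 * 0.5
assert abs(_example_G - 1.12) < 1e-12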
| 28.830508
| 89
| 0.513228
|
4a17921bd754d1185b2e80468aa345ba84abc076
| 1,206
|
py
|
Python
|
database/restconfiguration.py
|
tomdoel/pyxnatbrowser
|
573701e34538d6bae488d0a2d2a8864e974e5a8a
|
[
"BSD-2-Clause"
] | null | null | null |
database/restconfiguration.py
|
tomdoel/pyxnatbrowser
|
573701e34538d6bae488d0a2d2a8864e974e5a8a
|
[
"BSD-2-Clause"
] | null | null | null |
database/restconfiguration.py
|
tomdoel/pyxnatbrowser
|
573701e34538d6bae488d0a2d2a8864e974e5a8a
|
[
"BSD-2-Clause"
] | null | null | null |
# https://github.com/tomdoel/pyxnatbrowser
# Author: Tom Doel www.tomdoel.com
# Distributed under the Simplified BSD License.
class RestConfiguration(object):
def __init__(self):
self.__applicationDirectory = None
self.__serverName = None
self.__baseUrl = None
self.__userName = None
self.__password = None
@property
def base_url(self):
return self.__baseUrl
@base_url.setter
def base_url(self, value):
self.__baseUrl = value
@property
def server_name(self):
return self.__serverName
@server_name.setter
def server_name(self, value):
self.__serverName = value
@property
def user_name(self):
return self.__userName
@user_name.setter
def user_name(self, value):
self.__userName = value
@property
def application_directory(self):
return self.__applicationDirectory
@application_directory.setter
def application_directory(self, value):
self.__applicationDirectory = value
@property
def password(self):
return self.__password
@password.setter
def password(self, value):
self.__password = value
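# Hypothetical usage sketch (not part of the original module):
if __name__ == "__main__":
    config = RestConfiguration()
    config.server_name = "My XNAT server"
    config.base_url = "https://xnat.example.org"
    config.user_name = "alice"
    print(config.server_name, config.base_url, config.user_name)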
| 22.333333
| 47
| 0.665008
|
4a1793e72b69c07a457fc78f8c97bb1703ab6471
| 1,412
|
py
|
Python
|
cs2130-python/program_04_base_conversion.py
|
clmay/school
|
a4780ca6f517614ab5a5d9a44d0c6d8bc00783b6
|
[
"MIT"
] | null | null | null |
cs2130-python/program_04_base_conversion.py
|
clmay/school
|
a4780ca6f517614ab5a5d9a44d0c6d8bc00783b6
|
[
"MIT"
] | null | null | null |
cs2130-python/program_04_base_conversion.py
|
clmay/school
|
a4780ca6f517614ab5a5d9a44d0c6d8bc00783b6
|
[
"MIT"
] | 1
|
2019-12-26T20:32:31.000Z
|
2019-12-26T20:32:31.000Z
|
def main():
while True:
print()
val = input("Enter a non-negative base-10 integer (or 'q' to quit): ")
if val.lower() == 'q':
break
try:
val = int(val)
        except ValueError:
print_error()
continue
if val < 0:
print_error()
continue
print()
print_results(val)
def print_error():
print("Input must be a non-negative integer.")
def print_results(n):
print(n, "in base 2:", convert_to_base(2, n))
print(n, "in base 8:", convert_to_base(8, n))
print(n, "in base 16:", convert_to_base(16, n))
def convert_to_base(base, value):
digits = []
while True:
digits.append(str(value % base))
if value < base:
break
value //= base
digits.reverse()
if base == 16:
digits = convert_to_hex(digits)
return ''.join(digits)
def convert_to_hex(digits):
    # Map the two-character decimal strings '10'..'15' to the hex letters 'A'..'F'.
    hex_letters = {'10': 'A', '11': 'B', '12': 'C', '13': 'D', '14': 'E', '15': 'F'}
    return [hex_letters.get(d, d) for d in digits]
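# Quick sanity checks for convert_to_base (a sketch, not in the original file;
# expected values worked out by hand, e.g. 10 = 1010_2 = 12_8 = A_16):
def run_demo():
    assert convert_to_base(2, 10) == '1010'
    assert convert_to_base(8, 10) == '12'
    assert convert_to_base(16, 10) == 'A'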
if __name__ == "__main__":
main()
| 23.147541
| 78
| 0.49221
|
4a1794432c086a69a41dc226c2d833d0b38323f5
| 4,213
|
py
|
Python
|
src/helpers/mailer.py
|
lesh1k/VKStalk
|
bcf10a944a0259efe7899cc3d15b056b7d148837
|
[
"MIT"
] | 1
|
2021-09-26T08:10:05.000Z
|
2021-09-26T08:10:05.000Z
|
src/helpers/mailer.py
|
lesh1k/VKStalk
|
bcf10a944a0259efe7899cc3d15b056b7d148837
|
[
"MIT"
] | 1
|
2016-08-27T09:13:08.000Z
|
2016-08-28T11:33:00.000Z
|
src/helpers/mailer.py
|
lesh1k/VKStalk
|
bcf10a944a0259efe7899cc3d15b056b7d148837
|
[
"MIT"
] | null | null | null |
# Mail sending from v4.0.1. In v5.0.0+ this was not implemented.
# Needs review, cleaning and refactoring.
# NOTE: these look like methods extracted from a class (hence `self`);
# helpers such as clear_screen, WriteDebugLog and Summarize are defined
# elsewhere in the project. The imports below are the ones this code uses.
import smtplib
import time
from datetime import datetime
def SendMail(self, mail_type='daily', msg='default_message', filename=''):
# ConsoleLog('Sending ' + mail_type + ' email...')
TEXT = ''
SUBJECT = ''
if mail_type == 'daily':
# Add number of logs and error to message
TEXT += 'Logs written: ' + str(self.logs_counter)
TEXT += '\nErrors occured: ' + str(self.error_counter)
TEXT += '\nLast error: ' + str(self.last_error) + '\n\n\n'
# Writing the message (this message will appear in the email)
SUBJECT = 'VKStalk report. Name: ' + \
self.user_data['name'] + '. ID: ' + self.user_id
if self.filename:
file_handle = open(self.filename, 'r')
TEXT = TEXT + file_handle.read()
file_handle.close()
elif mail_type == 'error':
# Writing the message (this message will appear in the email)
SUBJECT = 'VKStalk ERROR. User ID: ' + self.user_id
TEXT += msg
elif mail_type == 'summary':
# Writing the message (this message will appear in the email)
SUBJECT = 'VKStalk summary. Name: ' + \
self.user_data['name'] + '. ID: ' + self.user_id
        # The summary file arrives via the `filename` argument, so check that.
        if filename:
            file_handle = open(filename, 'r')
TEXT = TEXT + file_handle.read()
file_handle.close()
# Constructing the message
message = 'Subject: %s\n\n%s' % (SUBJECT, TEXT)
# Specifying the from and to addresses
fromaddr = 'vkstalk@gmail.com'
if not self.mail_recipient:
# ConsoleLog('Mail NOT sent!')
clear_screen()
return False
toaddrs = self.mail_recipient
# Gmail Login
mail_username = 'HERE_BE_USERNAME'
mail_password = 'HERE_BE_PASSWORD'
# Sending the mail
server = smtplib.SMTP('smtp.gmail.com:587')
server.starttls()
server.login(mail_username, mail_password)
server.sendmail(fromaddr, toaddrs, message)
server.quit()
# ConsoleLog('Mail sent!')
return True
def send_mail_if_time():
try:
current_step = 'Sending email.'
if self.debug_mode:
WriteDebugLog(current_step, userid=self.user_id)
if (self.email_notifications
and (datetime.now().hour in self.mail_notification_hours)
and (datetime.now().hour != self.last_mail_time)):
current_step = "Trying to send daily email."
if self.debug_mode:
WriteDebugLog(current_step, userid=self.user_id)
if self.SendMail():
self.last_mail_time = datetime.now().hour
except Exception as e:
current_step = "Could not send DAILY email."
if self.debug_mode:
WriteDebugLog(current_step, userid=self.user_id)
self.HandleError(
step=current_step,
exception_msg=e,
dump_vars=True,
console_msg='Could not send email.\n' + str(e)
)
pass
# Send summary email if the time has come =)
try:
current_step = 'Preparing a summary.'
if self.debug_mode:
WriteDebugLog(current_step, userid=self.user_id)
if (self.email_notifications
and (datetime.now().hour in self.summary_notification_hours)
and (time.localtime().tm_wday in self.summary_notification_days)
and (datetime.now().day != self.last_summary_mail_day)):
current_step = "Trying to send summary mail."
if self.debug_mode:
WriteDebugLog(current_step, userid=self.user_id)
if self.SendMail(mail_type='summary', filename=Summarize(user_name=self.user_data['name'], max_files=self.max_files_for_summary)):
self.last_summary_mail_day = datetime.now().day
except Exception as e:
current_step = "Could not send SUMMARY email."
if self.debug_mode:
WriteDebugLog(current_step, userid=self.user_id)
self.HandleError(
step=current_step,
exception_msg=e,
dump_vars=True,
console_msg='Could not send summary email.\n' + str(e)
)
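# Minimal standalone sketch (hypothetical addresses and credentials) of the
# smtplib pattern SendMail uses above; defined but deliberately never called:
def _send_example(subject, text, fromaddr, toaddrs, username, password):
    message = 'Subject: %s\n\n%s' % (subject, text)
    server = smtplib.SMTP('smtp.gmail.com', 587)
    server.starttls()
    server.login(username, password)
    server.sendmail(fromaddr, toaddrs, message)
    server.quit()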
| 38.3
| 142
| 0.606931
|
4a1794796e5c545d9e64380cf6156f5a13a95908
| 961
|
py
|
Python
|
setup.py
|
xinabox/Python-CORE
|
656e5bbf6638e2b6142cfd72b25a67349e353881
|
[
"MIT"
] | null | null | null |
setup.py
|
xinabox/Python-CORE
|
656e5bbf6638e2b6142cfd72b25a67349e353881
|
[
"MIT"
] | null | null | null |
setup.py
|
xinabox/Python-CORE
|
656e5bbf6638e2b6142cfd72b25a67349e353881
|
[
"MIT"
] | null | null | null |
import setuptools
import sys
with open("README.md", "r") as fh:
long_description = fh.read()
install_requires = list()
if sys.platform == "linux2" or sys.platform == "linux":
install_requires = ["smbus2",]
if sys.platform == "esp8266":
sys.path.pop(0)
sys.path.append("..")
setuptools.setup(
name="xinabox-CORE",
version="0.0.11",
author="Luqmaan Baboo",
author_email="luqmaanbaboo@gmail.com",
description="I2C Core for CC03/CS11/CW03, CW02, CW01, Raspberry Pi and Microbit",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/xinabox/Python-CORE",
install_requires=install_requires,
py_modules=["xCore",],
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
)
| 29.121212
| 85
| 0.665973
|
4a1794d7e857cdc7defec5557c6b6b2e67b19ff2
| 1,665
|
py
|
Python
|
scripts/tflite_model_tools/tflite/LogicalAndOptions.py
|
LaudateCorpus1/edgeai-tidl-tools
|
d98789769a711e5a3700dfdc20d877073bd87da7
|
[
"CNRI-Python"
] | 15
|
2021-09-05T03:43:54.000Z
|
2022-03-29T14:17:29.000Z
|
scripts/tflite_model_tools/tflite/LogicalAndOptions.py
|
LaudateCorpus1/edgeai-tidl-tools
|
d98789769a711e5a3700dfdc20d877073bd87da7
|
[
"CNRI-Python"
] | 21
|
2021-09-01T06:58:31.000Z
|
2022-03-31T06:33:15.000Z
|
scripts/tflite_model_tools/tflite/LogicalAndOptions.py
|
LaudateCorpus1/edgeai-tidl-tools
|
d98789769a711e5a3700dfdc20d877073bd87da7
|
[
"CNRI-Python"
] | 6
|
2021-09-22T06:44:19.000Z
|
2022-02-07T06:28:35.000Z
|
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: tflite
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class LogicalAndOptions(object):
__slots__ = ['_tab']
@classmethod
def GetRootAsLogicalAndOptions(cls, buf, offset):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = LogicalAndOptions()
x.Init(buf, n + offset)
return x
@classmethod
def LogicalAndOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
# LogicalAndOptions
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
def LogicalAndOptionsStart(builder): builder.StartObject(0)
def LogicalAndOptionsEnd(builder): return builder.EndObject()
class LogicalAndOptionsT(object):
# LogicalAndOptionsT
def __init__(self):
pass
@classmethod
def InitFromBuf(cls, buf, pos):
logicalAndOptions = LogicalAndOptions()
logicalAndOptions.Init(buf, pos)
return cls.InitFromObj(logicalAndOptions)
@classmethod
def InitFromObj(cls, logicalAndOptions):
x = LogicalAndOptionsT()
x._UnPack(logicalAndOptions)
return x
# LogicalAndOptionsT
def _UnPack(self, logicalAndOptions):
if logicalAndOptions is None:
return
# LogicalAndOptionsT
def Pack(self, builder):
LogicalAndOptionsStart(builder)
logicalAndOptions = LogicalAndOptionsEnd(builder)
return logicalAndOptions
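# Hypothetical round-trip sketch (not part of the generated file) showing how
# the helpers above are typically used with the flatbuffers builder API:
def _demo_logical_and_options():
    builder = flatbuffers.Builder(0)
    LogicalAndOptionsStart(builder)
    offset = LogicalAndOptionsEnd(builder)
    builder.Finish(offset)
    buf = builder.Output()
    # Parse the serialized table back out of the buffer.
    return LogicalAndOptions.GetRootAsLogicalAndOptions(buf, 0)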
| 28.220339
| 114
| 0.705706
|
4a1796190c9377bfd82024d800cb59b5c425b3ff
| 120
|
py
|
Python
|
basic/primenos.py
|
AKASHDKR/pythonrepo
|
3b15c4dbbe913e72c3a88fd35bff65b79ce15b9e
|
[
"Apache-2.0"
] | 4
|
2021-07-19T08:29:01.000Z
|
2022-03-08T15:40:10.000Z
|
basic/primenos.py
|
AKASHDKR/pythonrepo
|
3b15c4dbbe913e72c3a88fd35bff65b79ce15b9e
|
[
"Apache-2.0"
] | null | null | null |
basic/primenos.py
|
AKASHDKR/pythonrepo
|
3b15c4dbbe913e72c3a88fd35bff65b79ce15b9e
|
[
"Apache-2.0"
] | null | null | null |
a = 10
b = 25
# Print the primes in [a, b] using trial division. The `else` belongs to the
# inner `for` loop and runs only when no divisor was found (no `break`).
for i in range(a, b + 1):
    if i > 1:
        for j in range(2, i):
            if i % j == 0:
                break
        else:
            print(i)
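# Equivalent reusable form of the trial-division test above (a sketch, not in
# the original file):
def is_prime(n):
    if n <= 1:
        return False
    for j in range(2, n):
        if n % j == 0:
            return False
    return True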
| 13.333333
| 23
| 0.475
|
4a1796479e2babb601a9c5321ba0cef4d4a6e1a2
| 42,006
|
py
|
Python
|
egret/parsers/parser.py
|
bknueven/Egret
|
37567c1ec3bc0072b61124ce46ceb28add9ad539
|
[
"BSD-3-Clause"
] | null | null | null |
egret/parsers/parser.py
|
bknueven/Egret
|
37567c1ec3bc0072b61124ce46ceb28add9ad539
|
[
"BSD-3-Clause"
] | 1
|
2019-12-11T22:45:12.000Z
|
2019-12-11T22:45:12.000Z
|
egret/parsers/parser.py
|
bknueven/Egret
|
37567c1ec3bc0072b61124ce46ceb28add9ad539
|
[
"BSD-3-Clause"
] | null | null | null |
# ___________________________________________________________________________
#
# EGRET: Electrical Grid Research and Engineering Tools
# Copyright 2019 National Technology & Engineering Solutions of Sandia, LLC
# (NTESS). Under the terms of Contract DE-NA0003525 with NTESS, the U.S.
# Government retains certain rights in this software.
# This software is distributed under the Revised BSD License.
# ___________________________________________________________________________
"""
This module provides supporting functions for interacting with standard-format input data.
It includes methods to parse the data and load them into a TemporalGridNetwork object.
"""
import os.path
import warnings
import egret.data.model_data as md
import pandas as pd
import math
from datetime import datetime, timedelta
from collections import namedtuple
def convert_load_by_area_to_source(data_dir, begin_time, end_time, t0_state=None):
"""
Create a ModelData object from the input data. Assumes data is formatted like the RTS-GMLC repository's 'RTS_Data' directory.
Parameters
----------
data_dir : str
Path to data directory
begin_time : datetime.datetime or str
Beginning of time horizon. If str, date/time in "YYYY-MM-DD HH:MM:SS" or "YYYY-MM-DD" format,
        the latter of which assumes a midnight start.
end_time : datetime.datetime or str
End of time horizon. If str, date/time in "YYYY-MM-DD HH:MM:SS" or "YYYY-MM-DD" format,
        the latter of which assumes a midnight start.
t0_state : dict or Nonetype
Keys of this dict are thermal generator names, each element of which is another dictionary with
keys "initial_status", "initial_p_output", and "initial_q_output", which specify whether the
generator is on at t0, the real power output at t0, and the reactive power output at t0.
If this is None, default values are loaded.
"""
for simulation in ['DAY_AHEAD', 'REAL_TIME']:
simulation = simulation.upper()
base_dir = os.path.join(data_dir, 'SourceData')
begin_time, end_time = _get_datetimes(begin_time, end_time, base_dir, simulation)
TimeSeriesPointer = namedtuple('TimeSeriesPointer',
['Object',
'Simulation',
'Parameter',
'DataFile'])
DateTimeValue = namedtuple('DateTimeValue',
['DateTime', 'Value'])
        areas = _get_eligible_areas(data_dir)
area_names = _get_eligible_area_names(areas)
Load = namedtuple('Load', ['DateTime'] + area_names)
timeseries_pointer_df = pd.read_csv(os.path.join(base_dir, "timeseries_pointers.csv"), header=0, sep=',')
time_delta = end_time - begin_time
hours = 24 * time_delta.days + math.ceil(time_delta.seconds / 3600.)
        model_data = _create_rtsgmlc_skeleton(data_dir)
## create an object for easy iterating
md_obj = md.ModelData(model_data)
system = md_obj.data["system"]
elements = md_obj.data["elements"]
if simulation == "DAY_AHEAD":
system["time_period_length_minutes"] = 60
else:
system["time_period_length_minutes"] = 5
# compute aggregate load per area, and then compute
# load participation factors from each bus from that data.
region_total_load = {}
areas = ["Area" + str(i) for i in range(1, 4)]
for this_region in areas:
this_region_total_load = 0.0
## loads have exactly one bus
for name, load in md_obj.elements("load"):
bus = elements["bus"][load["bus"]]
if bus["area"] == this_region:
this_region_total_load += load["p_load"]
region_total_load[this_region] = this_region_total_load
bus_load_participation_factor_dict = {}
bus_Ql_over_Pl_dict = {}
for name, load in md_obj.elements("load"):
bus = elements["bus"][load["bus"]]
bus_load_participation_factor_dict[name] = load["p_load"] / region_total_load[bus["area"]]
bus_Ql_over_Pl_dict[name] = load["q_load"] / load["p_load"]
timeseries_pointer_dict = {}
for timeseries_pointer_index in timeseries_pointer_df.index.tolist():
this_timeseries_pointer_dict = timeseries_pointer_df.loc[timeseries_pointer_index].to_dict()
new_timeseries_pointer = TimeSeriesPointer(this_timeseries_pointer_dict["Object"],
this_timeseries_pointer_dict["Simulation"],
this_timeseries_pointer_dict["Parameter"],
os.path.join(base_dir,
this_timeseries_pointer_dict["Data File"]))
timeseries_pointer_dict[
(new_timeseries_pointer.Object, new_timeseries_pointer.Simulation)] = new_timeseries_pointer
load_timeseries_spec = timeseries_pointer_dict[("1", simulation)]
load_timeseries_df = _read_rts_gmlc_table(load_timeseries_spec.DataFile, simulation)
load_timeseries_df = load_timeseries_df.rename(columns={"Year_Month_Day_Period": "DateTime"})
start_mask = load_timeseries_df["DateTime"] >= begin_time
end_mask = load_timeseries_df["DateTime"] < end_time
masked_load_timeseries_df = load_timeseries_df[start_mask & end_mask]
load_dict = masked_load_timeseries_df.to_dict(orient='split')
load_timeseries = []
for load_row in load_dict["data"]:
load_timeseries.append(Load(load_row[0],
float(load_row[1]),
float(load_row[2]),
float(load_row[3])))
times = []
for load in load_timeseries:
times.append(str(load.DateTime))
system["time_keys"] = times
## load into grid_network object
## First, load Pl, Ql
for name, load in md_obj.elements("load"):
pl_dict, ql_dict = dict(), dict()
bus = elements["bus"][load["bus"]]
for load_time in load_timeseries:
area_load = getattr(load_time, bus["area"])
pl_dict[str(load_time.DateTime)] = round(bus_load_participation_factor_dict[name] * area_load, 2)
ql_dict[str(load_time.DateTime)] = pl_dict[str(load_time.DateTime)] * bus_Ql_over_Pl_dict[name]
load["p_load"] = _make_time_series_dict(list(pl_dict.values()))
load["q_load"] = _make_time_series_dict(list(ql_dict.values()))
new_load_time_series = []
day_ahead_load_file = '../timeseries_data_files/Load/new_load_time_series_DA.csv'
real_time_load_file = '../timeseries_data_files/Load/new_load_time_series_RT.csv'
for ix, load_time in enumerate(load_timeseries, start=0):
load_time_series_record = {}
load_time_series_record['Year'] = load_time.DateTime.year
load_time_series_record['Month'] = load_time.DateTime.month
load_time_series_record['Day'] = load_time.DateTime.day
if simulation == 'DAY_AHEAD':
load_time_series_record['Period'] = (ix % 24) + 1
else:
load_time_series_record['Period'] = (ix % (24 * 12)) + 1
for name, load in md_obj.elements('load'):
bus = elements['bus'][load['bus']]
area_load = getattr(load_time, bus['area'])
load_time_series_record[name] = round(bus_load_participation_factor_dict[name] * area_load, 2)
new_load_time_series.append(load_time_series_record)
new_load_time_series_df = pd.DataFrame(new_load_time_series)
new_load_time_series_df = new_load_time_series_df[
['Year', 'Month', 'Day', 'Period'] + new_load_time_series_df.columns[4:].tolist()]
new_load_time_series_fname = 'new_load_time_series_{0}.csv'.format('DA' if simulation == "DAY_AHEAD" else 'RT')
new_load_time_series_df.to_csv(
os.path.join(data_dir, 'timeseries_data_files', 'Load', new_load_time_series_fname), index=False)
# Augment time series pointer dataframe.
for name, load in md_obj.elements('load'):
new_load_timeseries_spec = {}
new_load_timeseries_spec['Object'] = name
new_load_timeseries_spec['Parameter'] = 'Requirement'
new_load_timeseries_spec['Simulation'] = 'DAY_AHEAD'
new_load_timeseries_spec['Data File'] = day_ahead_load_file
timeseries_pointer_df = timeseries_pointer_df.append(new_load_timeseries_spec, ignore_index=True)
new_load_timeseries_spec = {}
new_load_timeseries_spec['Object'] = name
new_load_timeseries_spec['Parameter'] = 'Requirement'
new_load_timeseries_spec['Simulation'] = 'REAL_TIME'
new_load_timeseries_spec['Data File'] = real_time_load_file
timeseries_pointer_df = timeseries_pointer_df.append(new_load_timeseries_spec, ignore_index=True)
timeseries_pointer_df.loc[timeseries_pointer_df['Object'] != 'Load'].to_csv(
os.path.join(data_dir, 'SourceData', 'timeseries_pointers.csv'), index=False)
def create_ModelData(data_dir, begin_time, end_time, simulation="DAY_AHEAD", t0_state=None):
"""
Create a ModelData object from the input data.
Parameters
----------
data_dir : str
Path to data directory
begin_time : datetime.datetime or str
Beginning of time horizon. If str, date/time in "YYYY-MM-DD HH:MM:SS" or "YYYY-MM-DD" format,
        the latter of which assumes a midnight start.
end_time : datetime.datetime or str
End of time horizon. If str, date/time in "YYYY-MM-DD HH:MM:SS" or "YYYY-MM-DD" format,
        the latter of which assumes a midnight start.
simulation : str
Either "DAY_AHEAD" or "REAL_TIME", which specifies which time series the data is taken from,
default is "DAY_AHEAD".
t0_state : dict or Nonetype
Keys of this dict are thermal generator names, each element of which is another dictionary with
keys "initial_status", "initial_p_output", and "initial_q_output", which specify whether the
generator is on at t0, the real power output at t0, and the reactive power output at t0.
If this is None, default values are loaded.
Returns
-------
egret.model_data.ModelData
Returns a ModelData object with the timeseries data specified
"""
return md.ModelData(create_model_data_dict(data_dir, begin_time, end_time, simulation, t0_state))
def create_model_data_dict(rts_gmlc_dir, begin_time, end_time, simulation="DAY_AHEAD", t0_state=None):
"""
Create a model_data dictionary from the RTS-GMLC data.
Parameters
----------
rts_gmlc_dir : str
Path to RTS-GMLC directory
begin_time : datetime.datetime or str
Beginning of time horizon. If str, date/time in "YYYY-MM-DD HH:MM:SS" or "YYYY-MM-DD" format,
        the latter of which assumes a midnight start.
end_time : datetime.datetime or str
End of time horizon. If str, date/time in "YYYY-MM-DD HH:MM:SS" or "YYYY-MM-DD" format,
        the latter of which assumes a midnight start.
simulation : str
Either "DAY_AHEAD" or "REAL_TIME", which specifies which time series the data is taken from,
default is "DAY_AHEAD".
t0_state : dict or Nonetype
Keys of this dict are thermal generator names, each element of which is another dictionary with
keys "initial_status", "initial_p_output", and "initial_q_output", which specify whether the
generator is on at t0, the real power output at t0, and the reactive power output at t0.
If this is None, default values are loaded.
Returns
-------
dict : A dictionary in the format required for the ModelData object.
"""
simulation = simulation.upper()
if simulation not in ["DAY_AHEAD", "REAL_TIME"]:
raise ValueError('simulation must be "DAY_AHEAD" or "REAL_TIME"')
base_dir = os.path.join(rts_gmlc_dir, 'SourceData')
begin_time, end_time = _get_datetimes(begin_time, end_time, base_dir, simulation)
TimeSeriesPointer = namedtuple('TimeSeriesPointer',
['Object',
'Simulation',
'Parameter',
'DataFile'])
DateTimeValue = namedtuple('DateTimeValue',
['DateTime', 'Value'])
areas = _get_eligible_areas(rts_gmlc_dir)
area_names = _get_eligible_area_names(areas)
Load = namedtuple('Load', ['DateTime'] + area_names)
timeseries_pointer_df = pd.read_csv(os.path.join(base_dir, "timeseries_pointers.csv"), header=0, sep=',')
time_delta = end_time - begin_time
hours = 24 * time_delta.days + math.ceil(time_delta.seconds / 3600.)
model_data = _create_rtsgmlc_skeleton(rts_gmlc_dir)
## create an object for easy iterating
md_obj = md.ModelData(model_data)
system = md_obj.data["system"]
elements = md_obj.data["elements"]
if simulation == "DAY_AHEAD":
system["time_period_length_minutes"] = 60
else:
system["time_period_length_minutes"] = 5
# compute aggregate load per area, and then compute
# load participation factors from each bus from that data.
region_total_load = {}
for this_region in area_names:
this_region_total_load = 0.0
## loads have exactly one bus
for name, load in md_obj.elements("load"):
bus = elements["bus"][load["bus"]]
if bus["area"] == this_region:
this_region_total_load += load["p_load"]
region_total_load[this_region] = this_region_total_load
bus_load_participation_factor_dict = {}
bus_Ql_over_Pl_dict = {}
for name, load in md_obj.elements("load"):
bus = elements["bus"][load["bus"]]
bus_load_participation_factor_dict[name] = load["p_load"] / region_total_load[bus["area"]]
bus_Ql_over_Pl_dict[name] = load["q_load"] / load["p_load"]
timeseries_pointer_dict = {}
for timeseries_pointer_index in timeseries_pointer_df.index.tolist():
this_timeseries_pointer_dict = timeseries_pointer_df.loc[timeseries_pointer_index].to_dict()
new_timeseries_pointer = TimeSeriesPointer(this_timeseries_pointer_dict["Object"],
this_timeseries_pointer_dict["Simulation"],
this_timeseries_pointer_dict["Parameter"],
os.path.join(base_dir, this_timeseries_pointer_dict["Data File"]))
timeseries_pointer_dict[
(new_timeseries_pointer.Object, new_timeseries_pointer.Simulation)] = new_timeseries_pointer
filtered_timeseries = {}
for name, gen in md_obj.elements("generator", generator_type="renewable"):
if gen["fuel"] in ["Solar", "Wind", "Hydro"]:
if (name, simulation) not in timeseries_pointer_dict:
print("***WARNING - No timeseries pointer entry found for generator=%s" % name)
else:
# print("Time series for generator=%s will be loaded from file=%s" % (name, timeseries_pointer_dict[(name,"DAY_AHEAD")].DataFile))
renewables_timeseries_df = _read_rts_gmlc_table(timeseries_pointer_dict[(name, simulation)].DataFile,
simulation)
this_source_timeseries_df = renewables_timeseries_df.loc[:, ["Year_Month_Day_Period", name]]
this_source_timeseries_df = this_source_timeseries_df.rename(
columns={"Year_Month_Day_Period": "DateTime"})
start_mask = this_source_timeseries_df["DateTime"] >= begin_time
end_mask = this_source_timeseries_df["DateTime"] < end_time
this_source_masked_timeseries_df = this_source_timeseries_df[start_mask & end_mask]
renewables_timeseries_dict = this_source_masked_timeseries_df.to_dict(orient='split')
renewables_timeseries = []
for this_row in renewables_timeseries_dict["data"]:
renewables_timeseries.append(DateTimeValue(this_row[0],
float(this_row[1])))
filtered_timeseries[name] = renewables_timeseries
for name, load in md_obj.elements("load"):
load_timeseries_spec = timeseries_pointer_dict[(name, simulation)]
load_timeseries_df = _read_rts_gmlc_table(load_timeseries_spec.DataFile, simulation)
load_timeseries_df = load_timeseries_df.rename(columns={"Year_Month_Day_Period": "DateTime"})
start_mask = load_timeseries_df["DateTime"] >= begin_time
end_mask = load_timeseries_df["DateTime"] < end_time
masked_load_timeseries_df = load_timeseries_df[start_mask & end_mask]
load_dict = masked_load_timeseries_df.to_dict(orient='records')
reserves_dfs = {}
spin_reserve_categories = ["Spin_Up_R1", "Spin_Up_R2", "Spin_Up_R3"]
other_reserve_categories = ["Reg_Down", "Reg_Up", ]
## flexiramp products only in day-ahead simulation
if simulation == "DAY_AHEAD":
other_reserve_categories += ["Flex_Down", "Flex_Up", ]
for reserve in spin_reserve_categories:
reserves_dfs[reserve] = _read_rts_gmlc_table(timeseries_pointer_dict[(reserve, simulation)].DataFile,
simulation)
reserves_dict = {}
for name, reserve_df in reserves_dfs.items():
reserve_df = reserve_df.rename(columns={"Year_Month_Day_Period": "DateTime"})
start_mask = reserve_df["DateTime"] >= begin_time
end_mask = reserve_df["DateTime"] < end_time
reserve_df = reserve_df[start_mask & end_mask]
reserve_timeseries = []
for this_row in reserve_df.to_dict(orient='split')["data"]:
reserve_timeseries.append(DateTimeValue(this_row[0], float(this_row[1])))
reserves_dict[name] = reserve_timeseries
for reserve in other_reserve_categories:
reserves_dict[reserve] = _read_rts_gmlc_reserve_table(
timeseries_pointer_dict[(reserve, simulation)].DataFile,
begin_time,
end_time,
simulation,
)
times = []
for load in load_dict:
times.append(str(load['DateTime']))
system["time_keys"] = times
## load into grid_network object
## First, load Pl, Ql
for name, load in md_obj.elements("load"):
pl_dict, ql_dict = dict(), dict()
bus = elements["bus"][load["bus"]]
for load_row in load_dict:
pl_dict[str(load_row['DateTime'])] = round(load_row[name], 2)
ql_dict[str(load_row['DateTime'])] = pl_dict[str(load_row['DateTime'])] * bus_Ql_over_Pl_dict[name]
load["p_load"] = _make_time_series_dict(list(pl_dict.values()))
load["q_load"] = _make_time_series_dict(list(ql_dict.values()))
## load in area reserve factors
area_spin_map = _create_rts_gmlc_area_spin_map(rts_gmlc_dir)
for name, area in md_obj.elements("area"):
spin_reserve_dict = dict()
for datetimevalue in reserves_dict[area_spin_map[name]]:
spin_reserve_dict[str(datetimevalue.DateTime)] = round(datetimevalue.Value, 2)
area["spinning_reserve_requirement"] = _make_time_series_dict(list(spin_reserve_dict.values()))
## load in global reserve factors
rts_to_egret_reserve_map = {
"Flex_Down": "flexible_ramp_down_requirement",
"Flex_Up": "flexible_ramp_up_requirement",
"Reg_Down": "regulation_down_requirement",
"Reg_Up": "regulation_up_requirement",
}
for reserve in other_reserve_categories:
system[rts_to_egret_reserve_map[reserve]] = _make_time_series_dict(list(reserves_dict[reserve].values()))
## now load renewable generator stuff
for name, gen in md_obj.elements("generator", generator_type="renewable"):
if gen["fuel"] not in ["Solar", "Wind", "Hydro"]:
continue
renewables_timeseries = filtered_timeseries[name]
## for safety, curtailable renewables can go down to 0
gen["p_min"] = 0.
output_dict = dict()
for datetimevalue in renewables_timeseries:
output_dict[str(datetimevalue.DateTime)] = round(datetimevalue.Value, 2)
gen["p_max"] = _make_time_series_dict(list(output_dict.values()))
# set must-take for Hydro and RTPV
if gen["unit_type"] in ["HYDRO", "RTPV"]:
## copy is for safety when overwriting
gen["p_min"] = _make_time_series_dict(list(output_dict.copy().values()))
## get this from the same place the prescient reader does
if t0_state is None:
unit_on_time_df = pd.read_csv(os.path.join(base_dir,
"../FormattedData/PLEXOS/PLEXOS_Solution/DAY_AHEAD Solution Files/noTX/on_time_7.12.csv"),
header=0,
sep=",")
unit_on_time_df_as_dict = unit_on_time_df.to_dict(orient="split")
unit_on_t0_state_dict = {}
for i in range(0, len(unit_on_time_df_as_dict["columns"])):
gen_id = unit_on_time_df_as_dict["columns"][i]
unit_on_t0_state_dict[gen_id] = int(unit_on_time_df_as_dict["data"][0][i])
for name, gen in md_obj.elements("generator", generator_type="thermal"):
gen["initial_status"] = unit_on_t0_state_dict[name]
if gen["initial_status"] < 0:
gen["initial_p_output"] = 0.
gen["initial_q_output"] = 0.
else:
gen["initial_p_output"] = gen["p_min"]
gen["initial_q_output"] = max(0., gen["q_min"])
else:
for name, gen in md_obj.elements("generator", generator_type="thermal"):
gen["initial_status"] = t0_state[name]["initial_status"]
gen["initial_p_output"] = t0_state[name]["initial_p_output"]
gen["initial_q_output"] = t0_state[name]["initial_q_output"]
return md_obj.data
def _create_rts_gmlc_area_spin_map(rts_gmlc_dir):
base_dir = os.path.join(rts_gmlc_dir, 'SourceData')
reserves = pd.read_csv(os.path.join(base_dir, 'reserves.csv'))
area_spin_map = {}
areas = _get_eligible_areas(rts_gmlc_dir)
area_names = _get_eligible_area_names(areas)
    # Assuming we have areas that correspond to the "Eligible Regions" category, starting at 1, 2, 3...
for area, name in zip(areas, area_names):
spin_name = reserves.loc[reserves['Eligible Regions'] == str(area)]['Reserve Product'].values[0]
area_spin_map[name] = spin_name
return area_spin_map
def _get_rts_gmlc_start_end_dates(base_dir, simulation):
simulation_objects = pd.read_csv(os.path.join(base_dir, 'simulation_objects.csv'))
date_from = simulation_objects.loc[simulation_objects['Simulation_Parameters'] == 'Date_From']
date_to = simulation_objects.loc[simulation_objects['Simulation_Parameters'] == 'Date_To']
from_date_string = ''
to_date_string = ''
if simulation == 'DAY_AHEAD':
from_date_string = date_from.iloc[0]['DAY_AHEAD']
to_date_string = date_to.iloc[0]['DAY_AHEAD']
else:
from_date_string = date_from.iloc[0]['REAL_TIME']
to_date_string = date_to.iloc[0]['REAL_TIME']
start_date = datetime.strptime(from_date_string, '%m/%d/%y %H:%M')
end_date = datetime.strptime(to_date_string, '%m/%d/%y %H:%M')
return start_date, end_date
def _get_eligible_areas(rts_gmlc_dir):
base_dir = os.path.join(rts_gmlc_dir, 'SourceData')
bus = pd.read_csv(os.path.join(base_dir, 'bus.csv'))
return bus['Area'].drop_duplicates().values.tolist()
def _get_eligible_area_names(areas):
area_names = list(map(lambda x: 'Area' + str(x), areas))
return area_names
def _create_rtsgmlc_skeleton(rts_gmlc_dir):
"""
Creates a grid_data dictionary from the RTS-GMLC data,
but does not load hourly data
Parameters
----------
rts_gmlc_dir : str
Path to RTS-GMLC directory
Returns
-------
grid_data : dict
Returns a dict loaded from the RTS-GMLC data
"""
base_dir = os.path.join(rts_gmlc_dir, 'SourceData')
case_name = "RTS-GMLC"
model_data = md.ModelData.empty_model_data_dict()
elements = model_data["elements"]
system = model_data["system"]
system["name"] = case_name
# this is the default used in the MATPOWER writer for RTS-GMLC
system["baseMVA"] = 100.
elements["bus"] = {}
elements["load"] = {}
elements["shunt"] = {}
# add the buses
bus_df = pd.read_csv(os.path.join(base_dir, 'bus.csv'))
for idx, row in bus_df.iterrows():
BUS_I = str(row['Bus ID'])
if row['Bus Type'] == 'PQ':
BUS_TYPE = 1
elif row['Bus Type'] == 'PV':
BUS_TYPE = 2
elif row['Bus Type'] == 'Ref':
BUS_TYPE = 3
else:
BUS_TYPE = 4
PD = float(row['MW Load'])
QD = float(row['MVAR Load'])
GS = float(row['MW Shunt G'])
BS = float(row['MVAR Shunt B'])
BUS_AREA = str(row['Area'])
VM = float(row['V Mag'])
VA = float(row['V Angle'])
BASE_KV = float(row['BaseKV'])
ZONE = str(int(row['Zone']))
VMAX = 1.05 # default used in RTS-GMLC MATPOWER writer
VMIN = 0.95 # default used in RTS-GMLC MATPOWER writer
bus_dict = dict()
if BUS_TYPE < 1 or BUS_TYPE > 3:
raise ValueError(
"Encountered an unsupported bus type: {} when parsing MATPOWER input file".format(BUS_TYPE))
bus_types = {1: "PQ", 2: "PV", 3: "ref", 4: "isolated"}
bus_dict["matpower_bustype"] = bus_types[BUS_TYPE]
if BUS_TYPE == 3:
if VA != 0:
if abs(VA) >= 1e-16:
raise ValueError('EGRET only supports reference buses with an angle of 0 degrees.')
msg = "\nEgret only supports reference buses with an angle of 0 degrees. \nFound a " \
"reference bus with an angle close to 0. \n" \
"Value: {0}".format(VA) + "\nSetting reference bus angle to 0."
warnings.warn(msg)
system["reference_bus"] = BUS_I
system["reference_bus_angle"] = VA
if PD != 0 or QD != 0:
load_dict = {"bus": BUS_I, "in_service": True}
load_dict["p_load"] = PD
load_dict["q_load"] = QD
load_dict["area"] = "Area" + BUS_AREA
load_dict["zone"] = ZONE
elements["load"]['load_' + BUS_I] = load_dict
if GS != 0 or BS != 0:
shunt_dict = {"shunt_type": "fixed", "bus": BUS_I}
shunt_dict["gs"] = GS
shunt_dict["bs"] = BS
elements["shunt"]['shunt_' + BUS_I] = shunt_dict
bus_dict["vm"] = VM
bus_dict["va"] = VA
if BASE_KV > 0:
bus_dict["base_kv"] = BASE_KV
else:
raise ValueError('BASE_KV value found that is <= 0. Not supported at this time.')
bus_dict["area"] = "Area" + BUS_AREA
bus_dict["zone"] = ZONE
bus_dict["v_min"] = VMIN
bus_dict["v_max"] = VMAX
bus_dict["id"] = row['Bus Name']
elements["bus"][BUS_I] = bus_dict
# add the areas
elements["area"] = {}
areas = _get_eligible_areas(rts_gmlc_dir)
area_names = _get_eligible_area_names(areas)
for name in area_names:
## TODO: what else should be in here?
elements["area"][name] = dict()
elements["branch"] = {}
# add the branches
branch_df = pd.read_csv(os.path.join(base_dir, 'branch.csv'))
for idx, row in branch_df.iterrows():
name = str(row['UID'])
F_BUS = str(row['From Bus'])
T_BUS = str(row['To Bus'])
BR_R = float(row['R'])
BR_X = float(row['X'])
BR_B = float(row['B'])
RATE_A = float(row['Cont Rating'])
RATE_B = float(row['Cont Rating'])
RATE_C = float(row['Cont Rating'])
if RATE_A == 0:
RATE_A = None
if RATE_B == 0:
RATE_B = None
if RATE_C == 0:
RATE_C = None
TAP = float(row['Tr Ratio'])
SHIFT = 0.0 # these hard-coded values are the defaults
BR_STATUS = 1 # from the RTS-GMLC MATPOWER writer
ANGMIN = -90.
ANGMAX = 90.
PF = None # these values are not given
QF = None
PT = None
QT = None
branch_dict = {"from_bus": F_BUS, "to_bus": T_BUS}
branch_dict["resistance"] = BR_R
branch_dict["reactance"] = BR_X
branch_dict["charging_susceptance"] = BR_B
if TAP != 0.0:
branch_dict["transformer_tap_ratio"] = TAP
branch_dict["transformer_phase_shift"] = SHIFT
branch_dict["branch_type"] = "transformer"
else:
branch_dict["branch_type"] = "line"
branch_dict["rating_long_term"] = RATE_A
branch_dict["rating_short_term"] = RATE_B
branch_dict["rating_emergency"] = RATE_C
branch_dict["angle_diff_min"] = ANGMIN
branch_dict["angle_diff_max"] = ANGMAX
assert (BR_STATUS == 0 or BR_STATUS == 1)
if BR_STATUS == 1:
branch_dict["in_service"] = True
else:
branch_dict["in_service"] = False
branch_dict["pf"] = PF
branch_dict["qf"] = QF
branch_dict["pt"] = PT
branch_dict["qt"] = QT
elements["branch"][name] = branch_dict
# add the generators
elements["generator"] = {}
RENEWABLE_TYPES = ['WIND', 'HYDRO', 'RTPV', 'PV']
gen_df = pd.read_csv(os.path.join(base_dir, 'gen.csv'))
for idx, row in gen_df.iterrows():
name = str(row['GEN UID'])
GEN_BUS = str(row['Bus ID'])
gen_dict = {"bus": GEN_BUS}
# if this is a renewable, hydro, or storage need to handle differently
# (hydro schedules in RTS-GMLC are fixed)
if row['Fuel'] in ['Storage']:
pass
else:
# NOTE: for now, prescient doesn't handle CSP -- not clear how to model
if row['Unit Type'] == 'CSP':
continue
## (mostly) MATPOWER data
PG = float(row['MW Inj'])
QG = float(row['MVAR Inj'])
QMAX = float(row['QMax MVAR'])
QMIN = float(row['QMin MVAR'])
RAMP_Q = 1. * float(row['Ramp Rate MW/Min'])
VG = float(row['V Setpoint p.u.'])
MBASE = 100. # set in RTS-GMLC MATPOWER writer
GEN_STATUS = 1
PMAX = float(row['PMax MW'])
PMIN = float(row['PMin MW'])
FUEL = str(row['Fuel'])
UNIT_TYPE = str(row['Unit Type'])
if UNIT_TYPE in RENEWABLE_TYPES:
gen_dict["generator_type"] = "renewable"
elif UNIT_TYPE == 'SYNC_COND':
## TODO: should we have a flag for these?
gen_dict["generator_type"] = "thermal"
else:
gen_dict["generator_type"] = "thermal"
gen_dict["bus"] = GEN_BUS
gen_dict["mbase"] = MBASE
gen_dict["in_service"] = True
gen_dict["pg"] = PG
gen_dict["qg"] = QG
gen_dict["vg"] = VG
gen_dict["p_min"] = PMIN
gen_dict["p_max"] = PMAX
gen_dict["q_min"] = QMIN
gen_dict["q_max"] = QMAX
gen_dict["ramp_q"] = RAMP_Q
gen_dict["fuel"] = FUEL
gen_dict["unit_type"] = UNIT_TYPE
gen_dict["area"] = elements["bus"][gen_dict["bus"]]["area"]
gen_dict["zone"] = elements["bus"][gen_dict["bus"]]["zone"]
# after this is only really needed for thermal units
if UNIT_TYPE in RENEWABLE_TYPES:
elements["generator"][name] = gen_dict
continue
PC1 = 0.0
PC2 = 0.0
QC1MIN = 0.0
QC1MAX = 0.0
QC2MIN = 0.0
QC2MAX = 0.0
RAMP_AGC = 1. * float(row['Ramp Rate MW/Min'])
RAMP_10 = 10. * float(row['Ramp Rate MW/Min'])
RAMP_30 = 30. * float(row['Ramp Rate MW/Min'])
RAMP_UP_60 = 60. * float(row['Ramp Rate MW/Min'])
RAMP_DN_60 = 60. * float(row['Ramp Rate MW/Min'])
APF = 0.0 # 0.0 from RTS-GMLC MATPOWER writer
# Gen cost
x = {}
## round as in RTS-GMLC Prescient/topysp.py
x[0] = round(float(row['Output_pct_0']) * float(row['PMax MW']), 1)
x[1] = round(float(row['Output_pct_1']) * float(row['PMax MW']), 1)
x[2] = round(float(row['Output_pct_2']) * float(row['PMax MW']), 1)
x[3] = round(float(row['Output_pct_3']) * float(row['PMax MW']), 1)
y = {}
y[0] = float(row['Fuel Price $/MMBTU']) * ((float(row['HR_avg_0']) * 1000. / 1000000.) * x[
0]) ## /1000. from the RTS-GMLC MATPOWER writer,
y[1] = float(row['Fuel Price $/MMBTU']) * (((x[1] - x[0]) * (float(row['HR_incr_1']) * 1000. / 1000000.))) + \
y[0]
y[2] = float(row['Fuel Price $/MMBTU']) * (((x[2] - x[1]) * (float(row['HR_incr_2']) * 1000. / 1000000.))) + \
y[1]
y[3] = float(row['Fuel Price $/MMBTU']) * (((x[3] - x[2]) * (float(row['HR_incr_3']) * 1000. / 1000000.))) + \
y[2]
            # only include the cost coefficients that matter
P_COEFF = [(x[i], round(y[i], 2)) for i in range(4) if
(((i == 0) or (x[i - 1], y[i - 1]) != (x[i], y[i])) and (x[i], y[i]) != (0., 0.))]
if P_COEFF == []:
P_COEFF = [(PMAX, 0.0)]
# UC Data
MIN_UP_TIME = float(row['Min Up Time Hr'])
MIN_DN_TIME = float(row['Min Down Time Hr'])
# Startup types and costs
COLD_HEAT = float(row['Start Heat Cold MBTU'])
WARM_HEAT = float(row['Start Heat Warm MBTU'])
HOT_HEAT = float(row['Start Heat Hot MBTU'])
COLD_TIME = float(row['Start Time Cold Hr'])
WARM_TIME = float(row['Start Time Warm Hr'])
HOT_TIME = float(row['Start Time Hot Hr'])
FUEL_PRICE = float(row['Fuel Price $/MMBTU'])
FIXED_START_COST = float(row['Non Fuel Start Cost $'])
if (COLD_TIME <= MIN_DN_TIME) or (COLD_TIME == WARM_TIME == HOT_TIME):
STARTUP_COSTS = [(MIN_DN_TIME, round(COLD_HEAT * FUEL_PRICE + FIXED_START_COST, 2))]
elif WARM_TIME <= MIN_DN_TIME:
STARTUP_COSTS = [(MIN_DN_TIME, round(WARM_HEAT * FUEL_PRICE + FIXED_START_COST, 2)), \
(COLD_TIME, round(COLD_HEAT * FUEL_PRICE + FIXED_START_COST, 2))]
else:
STARTUP_COSTS = [(MIN_DN_TIME, round(HOT_HEAT * FUEL_PRICE + FIXED_START_COST, 2)), \
(WARM_TIME, round(WARM_HEAT * FUEL_PRICE + FIXED_START_COST, 2)), \
(COLD_TIME, round(COLD_HEAT * FUEL_PRICE + FIXED_START_COST, 2))]
SHUTDOWN_COST = 0.0
gen_dict["pc1"] = PC1
gen_dict["pc2"] = PC2
gen_dict["qc1_min"] = QC1MIN
gen_dict["qc1_max"] = QC1MAX
gen_dict["qc2_min"] = QC2MIN
gen_dict["qc2_max"] = QC2MAX
gen_dict["agc_capable"] = True
gen_dict["p_min_agc"] = gen_dict["p_min"]
gen_dict["p_max_agc"] = gen_dict["p_max"]
gen_dict["ramp_agc"] = RAMP_AGC
gen_dict["ramp_10"] = RAMP_10
gen_dict["ramp_30"] = RAMP_30
gen_dict["ramp_up_60min"] = RAMP_UP_60
gen_dict["ramp_down_60min"] = RAMP_DN_60
gen_dict["power_factor"] = APF
gen_dict["p_cost"] = {"data_type": "cost_curve", "cost_curve_type": "piecewise", "values": P_COEFF}
gen_dict["startup_cost"] = STARTUP_COSTS
gen_dict["shutdown_cost"] = SHUTDOWN_COST
# these assumptions are the same as prescient-rtsgmlc
gen_dict["startup_capacity"] = PMIN
gen_dict["shutdown_capacity"] = PMIN
gen_dict["min_up_time"] = MIN_UP_TIME
gen_dict["min_down_time"] = MIN_DN_TIME
gen_dict["must_run"] = False
elements["generator"][name] = gen_dict
return model_data
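# Illustrative (hypothetical numbers) shape of the piecewise cost curve that
# _create_rtsgmlc_skeleton builds for each thermal generator above: a list of
# (MW output, $ cost) breakpoints wrapped in EGRET's cost-curve dict.
_example_p_cost = {
    "data_type": "cost_curve",
    "cost_curve_type": "piecewise",
    "values": [(30.0, 450.25), (60.0, 880.10), (100.0, 1500.00)],
}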
def _read_rts_gmlc_table(file_name, simulation):
if simulation == "DAY_AHEAD":
_date_parser = lambda *columns: datetime(*map(int, columns[0:3]), int(columns[3]) - 1)
    else:
        minute_multi = 5
        hour_divisor = 12
        time_periods_in_day = 24 * hour_divisor
        _date_parser = lambda *columns: datetime(*map(int, columns[0:3]),
                                                 (int(columns[3]) - 1) // hour_divisor,
                                                 minute_multi * ((int(columns[3]) - 1) % hour_divisor))
return pd.read_csv(file_name,
header=0,
sep=',',
parse_dates=[[0, 1, 2, 3]],
date_parser=_date_parser)
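# Worked example (a sketch) of the REAL_TIME period arithmetic used in
# _read_rts_gmlc_table above: periods 1..288 map to 5-minute timestamps.
def _demo_period_to_time(period, hour_divisor=12, minute_multi=5):
    return (period - 1) // hour_divisor, minute_multi * ((period - 1) % hour_divisor)
assert _demo_period_to_time(1) == (0, 0)
assert _demo_period_to_time(13) == (1, 0)
assert _demo_period_to_time(288) == (23, 55)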
def _read_rts_gmlc_reserve_table(file_name, begin_time, end_time, simulation):
table_dict = pd.read_csv(file_name, header=0, sep=',').T.to_dict()
    if simulation == "DAY_AHEAD":
        hour_divisor = 1
        minute_multi = 0
        time_periods_in_day = 24
    else:
        minute_multi = 5
        hour_divisor = 12
        time_periods_in_day = 24 * hour_divisor
by_datetime_dict = dict()
for day_num, day_data in table_dict.items():
year = day_data['Year']
month = day_data['Month']
day = day_data['Day']
for i in range(1, time_periods_in_day + 1):
date_time = datetime(year=int(year), month=int(month), day=int(day),
                                 hour=(i - 1) // hour_divisor, minute=minute_multi * ((i - 1) % hour_divisor))
if begin_time <= date_time < end_time:
by_datetime_dict[str(date_time)] = float(day_data[str(i)])
return by_datetime_dict
def _make_time_series_dict(values):
return {"data_type": "time_series", "values": values}
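# Quick sanity check of the time-series wrapper above (a sketch):
assert _make_time_series_dict([1.0, 2.0]) == {
    "data_type": "time_series", "values": [1.0, 2.0],
}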
def _get_datetimes(begin_time, end_time, base_dir, simulation):
datetime_format = "%Y-%m-%d %H:%M:%S"
    datestr = "YYYY-MM-DD"
midnight = " 00:00:00"
if isinstance(begin_time, datetime):
pass
elif isinstance(begin_time, str):
if len(begin_time) == len(datestr):
begin_time += midnight
begin_time = datetime.strptime(begin_time, datetime_format)
else:
raise ValueError("Unable to parse begin_time")
if isinstance(end_time, datetime):
pass
elif isinstance(end_time, str):
if len(end_time) == len(datestr):
end_time += midnight
end_time = datetime.strptime(end_time, datetime_format)
else:
raise ValueError("Unable to parse end_time")
# stay in the times provided
rts_start_date, rts_end_date = _get_rts_gmlc_start_end_dates(base_dir, simulation)
assert begin_time >= rts_start_date
assert end_time <= rts_end_date
# We only take times in whole hours (for now)
assert (begin_time.minute == 0. and begin_time.second == 0. and begin_time.microsecond == 0.)
assert (end_time.minute == 0. and end_time.second == 0. and end_time.microsecond == 0.)
return begin_time, end_time
if __name__ == '__main__':
from egret.viz.generate_graphs import generate_stack_graph
from egret.models.unit_commitment import solve_unit_commitment, create_tight_unit_commitment_model
import matplotlib.pyplot as plt
current_dir = os.path.dirname(os.path.abspath(__file__))
rts_gmlc_dir = os.path.join(current_dir, '..', '..', '..', 'RTS-GMLC',
'RTS_Data') # This is just the root of the RTS-GMLC data set.
# This converts the load data (in RTS-GMLC format) such that individual loads have their own time series explicitly specified (instead of one system-wide time series).
# It should only need to be run once.
convert_load_by_area_to_source(
rts_gmlc_dir, "2020-01-01", "2020-12-31",
t0_state=None,
)
# Test model creation and UC solve for one day using the newly formatted data.
begin_time = "2020-07-05"
end_time = "2020-07-06"
md = create_ModelData(
rts_gmlc_dir, begin_time, end_time,
simulation="DAY_AHEAD",
t0_state=None,
)
solved_md = solve_unit_commitment(md,
'gurobi_persistent',
mipgap=0.001,
timelimit=None,
solver_tee=True,
symbolic_solver_labels=False,
options=None,
uc_model_generator=create_tight_unit_commitment_model,
relaxed=False,
return_model=False
)
fig, ax = generate_stack_graph(
solved_md,
title=begin_time,
show_individual_components=False,
plot_individual_generators=False,
x_tick_frequency=4,
)
plt.show()
| 43.171634
| 171
| 0.604128
|
4a17974d253a13ef8cf4a3148326a3e5e4882089
| 18,945
|
py
|
Python
|
src/contiguityPlot.py
|
ngannguyen/referenceViz
|
6990a00739a712ccd1371e996229882252fa8f91
|
[
"MIT"
] | 1
|
2020-03-15T12:17:10.000Z
|
2020-03-15T12:17:10.000Z
|
src/contiguityPlot.py
|
ngannguyen/referenceViz
|
6990a00739a712ccd1371e996229882252fa8f91
|
[
"MIT"
] | null | null | null |
src/contiguityPlot.py
|
ngannguyen/referenceViz
|
6990a00739a712ccd1371e996229882252fa8f91
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""
Create contiguity plots
nknguyen at soe dot ucsc dot edu
May 11 2011
Input: two contiguityStats.xml files to be compared and contrasted
(e.g: contiguityStats_reference.xml and contiguityStats_hg19.xml)
"""
import os, sys
from optparse import OptionParser
import xml.etree.ElementTree as ET
#from numpy import *
from numpy import linspace
import libPlotting as libplot
import matplotlib.pyplot as pyplot
from matplotlib.ticker import *
from matplotlib.font_manager import FontProperties
class Bucket:
def __init__( self, bucketElement ):
self.start = int( bucketElement.attrib[ 'from' ] )
self.end = int( bucketElement.attrib[ 'to' ] )
self.mid = ( self.start + self.end )/2
self.correct = int( bucketElement.attrib[ 'correct' ] )
self.samples = int( bucketElement.attrib[ 'samples' ] )
self.aligned = int( bucketElement.attrib[ 'aligned' ] )
self.correctPerSample = float( bucketElement.attrib[ 'correctPerSample' ] )
self.correctPerAligned = float( bucketElement.attrib[ 'correctPerAligned' ] )
self.cumulativeCorrect = int( bucketElement.attrib[ 'cumulativeCorrect' ] )
self.cumulativeSamples = int( bucketElement.attrib[ 'cumulativeSamples' ] )
self.cumulativeAligned = int( bucketElement.attrib[ 'cumulativeAligned' ] )
self.cumulativeCorrectPerSample = float( bucketElement.attrib[ 'cumulativeCorrectPerSample' ] )
self.cumulativeCorrectPerAligned = float( bucketElement.attrib[ 'cumulativeCorrectPerAligned' ] )
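    # Sketch of the XML element parsed above (attribute values are hypothetical):
    #   <bucket from="0" to="100" correct="95" samples="100" aligned="98"
    #           correctPerSample="0.95" correctPerAligned="0.969"
    #           cumulativeCorrect="95" cumulativeSamples="100" cumulativeAligned="98"
    #           cumulativeCorrectPerSample="0.95" cumulativeCorrectPerAligned="0.969"/>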
class Sample( list ):
def __init__(self, name, reference):
self.name = name
self.reference = reference
def setSummary(self, sample):
self.totalSamples = sample.attrib['totalSamples']
self.totalCorrect = sample.attrib['totalCorrect']
self.totalAligned = sample.attrib['totalAligned']
self.correctPerSample = sample.attrib['correctPerSample']
self.correctPerAligned = sample.attrib['correctPerAligned']
def setBuckets( self, sampleElement ):
for bucket in sampleElement.findall( 'bucket' ):
self.append( Bucket( bucket ) )
def setBuckets2( self, buckets ):
for bucket in buckets:
self.append( bucket )
class Stats( list ): #each Stats represents one input XML file
def __init__( self, name ):
self.name = name
def setRefName( self, refname ):
self.refname = refname
def getSample( stats, name ):
for sample in stats:
if sample.name == name:
return sample
return None
def setAxisLimits( axes, ycutoff ):
axes.set_xscale('log')
axes.set_ylim( ycutoff, 1.002 )
def drawLegend( axes, lines, sampleNames, options ):
fontP = FontProperties()
fontP.set_size('small')
box= axes.get_position()
axes.set_position([box.x0, box.y0, box.width * 0.8, box.height])
#legend = pyplot.legend( lines, sampleNames, numpoints = 1, prop= fontP, loc="best", bbox_to_anchor=(1, 0.5))
if not options.legendElements:
legend = pyplot.legend( lines, [ libplot.properName(n) for n in sampleNames ], prop= fontP, loc="best", bbox_to_anchor=(1,0.5))
legend._drawFrame=False
elif len(lines) == len(options.legendElements):
legend = pyplot.legend( lines, options.legendElements, prop= fontP, loc="best", bbox_to_anchor=(1,0.5) )
legend._drawFrame=False
else:
sys.stderr.write('Number of items in --legendElements is different '
'from the number of lines plotted\n' )
def drawData( axes, stats, options ):
#halfsize = len(stats)/2 + len(stats)%2
#colors = libplot.getColors2( halfsize )
#colors = libplot.getColors2( len(stats) )
#styles = { 0:'-', 1:'--' }
colors = libplot.getColors1()
if len(stats) < 1:
return
if stats[0].reference == "reference":
colors.pop(0)
elif stats[0].reference == 'hg19':
colors.pop(1)
#===========
#dash = 0
colorindex = -1
lines = []
sampleNames = []
ymin = float('inf')
ref = ''
for sample in stats:
sampleNames.append(sample.name)
if ref == '':
ref = sample.reference
xdata = []
ydata = []
for bucket in sample:
xdata.append( bucket.mid )
if options.includeCov:
ydata.append( bucket.correctPerSample )
else:
ydata.append( bucket.correctPerAligned )
#if not dash:
# colorindex += 1
#if colorindex == 1:
# colorindex += 1
colorindex +=1
ymin = min([ymin, min(ydata)])
l = axes.plot( xdata, ydata, color=colors[colorindex], linewidth=1 )
#l = axes.plot( xdata, ydata, color=colors[colorindex], linestyle=styles[dash], linewidth=0.5 )
lines.append(l)
#dash = not dash
libplot.editSpine( axes )
title = options.title
if ref != '':
title += ', %s' % libplot.properName(ref)
axes.set_title(title)
pyplot.xlabel("Distance")
pyplot.ylabel("Correct proportion")
return lines, sampleNames, ymin
def drawAggData( axes, data, sortedIndex, xmin, xmax, cutoff, nbins=10 ):
data = sorted( data, key=lambda point:point[sortedIndex] )
    #A point counts as 'up' (above the y=x line) when point[sortedIndex] < point[1-sortedIndex]
updata = []
equaldata = []
downdata = []
for p in data:
if p[ 0 ] < cutoff or p[ 1 ] < cutoff:
continue
if p[ sortedIndex ] < p[ 1 - sortedIndex ]:
updata.append( p[ sortedIndex ] )
elif p[ sortedIndex ] == p[ 1 - sortedIndex ]:
equaldata.append( p[ sortedIndex ] )
else:
downdata.append( p[ sortedIndex ] )
if sortedIndex == 0:
orientation = 'vertical'
else:
orientation = 'horizontal'
#bins = linspace( xmin, xmax, nbins )
bins = linspace( cutoff, xmax, nbins )
ymin, ymax = libplot.bihist( updata, downdata, axes, bins, orientation, color='#0198E1' )
n3, bins3, patch3 = axes.hist( equaldata, bins=bins, orientation = orientation, color = '#800000' )
if sortedIndex == 0:
ymax3 = max( [ i.get_height() for i in patch3 ] )
else:
ymax3 = max( [ i.get_width() for i in patch3 ] )
ymax = max( ymax3, ymax )
return ymin, ymax
def intersect(sample1, sample2):
#sample1 and sample2 must be sorted by the 'mid' field
s1 = Sample( sample1.name, sample1.reference )
s2 = Sample( sample2.name, sample2.reference )
buckets1 = []
buckets2 = []
i1 = 0
i2 = 0
while i1 < len(sample1) and i2 < len(sample2):
b1 = sample1[i1]
b2 = sample2[i2]
if b1.mid == b2.mid: #sample bucket
buckets1.append(b1)
buckets2.append(b2)
i1 += 1
i2 += 1
elif b1.mid > b2.mid:
i2 += 1
else:
i1 += 1
s1.setBuckets2( buckets1 )
s2.setBuckets2( buckets2 )
return s1, s2
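# Sketch of the alignment performed by intersect above (hypothetical 'mid' values):
#   sample1 mids: [50, 150, 250]    sample2 mids: [50, 250, 350]
#   -> both returned samples keep only the buckets with mids 50 and 250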
def drawCompareData( axesList, xstats, ystats, options ):
#Only draw the overlapped samples:
#colors = libplot.getColors2( len(xstats) )
colors = libplot.getColors1()
#colorindex = -1
#colorindex = 0
colorindex = 1
lines = []
sampleNames = []
p0axes = axesList[0] #plot 0 axes (see def 'setCompareAxes')
aggData = [] #data points (buckets) of all samples
minval = float('inf')
for xsample in xstats:
ysample = getSample( ystats, xsample.name )
if ysample is None:
continue
xsample, ysample = intersect(xsample, ysample)
#if len(xsample) != len(ysample):
# xsample, ysample = intersect(xsample, ysample)
# sys.stderr.write( "Error: Two xml files do not have the same number of buckets for sample %s\n" % xsample.name )
#sys.exit( 1 )
data = [] #list of (x,y) tuples
colorindex += 1
for i in range( len( xsample ) ): #each bucket
if xsample[i].mid != ysample[i].mid:
sys.stderr.write( "Two xml files have different buckets\n " )
sys.exit( 1 )
if options.includeCov:
data.append( (xsample[i].correctPerSample, ysample[i].correctPerSample) )
else:
data.append( (xsample[i].correctPerAligned, ysample[i].correctPerAligned) )
x2data = [ point[0] for point in data ]
y2data = [ point[1] for point in data ]
l = p0axes.plot( x2data, y2data, color=colors[colorindex], marker='.', markersize=4.0, linestyle='none' )
lines.append( l )
sampleNames.append( xsample.name )
aggData.extend( data )
        minval = min( [minval, min(x2data), min(y2data)] ) #track the minimum across all samples, not just the last one
#Draw the y=x line
x = [0, 1]
y = [0, 1]
p0axes.plot(x, y, color="#919191")
fontP = FontProperties()
fontP.set_size('small')
libplot.editSpine( p0axes )
p0axes.set_title(options.title)
p0axes.set_xlabel( libplot.properName(xstats.refname) )
p0axes.set_ylabel( libplot.properName(ystats.refname) )
libplot.setTicks( p0axes )
for l in p0axes.xaxis.get_ticklabels():
l.set_fontsize('small')
for l in p0axes.yaxis.get_ticklabels():
l.set_fontsize('small')
#legend:
legend = p0axes.legend( lines, [ libplot.properName(n) for n in sampleNames], 'lower right', numpoints = 1, prop=fontP, ncol = 2)
legend._drawFrame = False
#p0axes.set_xlim( -0.005, 1.005 )
#p0axes.set_ylim( -0.005, 1.005 )
ycutoff = minval
if options.ycutoff:
ycutoff = options.ycutoff
p0axes.set_xlim( ycutoff - (1-ycutoff)*0.02, 1 + (1 - ycutoff)*0.01 )
p0axes.set_ylim( ycutoff - (1-ycutoff)*0.02, 1 + (1 - ycutoff)*0.01 )
#box = p0axes.get_position()
#p0axes.set_position([box.x0, box.y0, box.width * 0.8, box.height * 0.8])
#legend = pyplot.legend( lines, sampleNames, numpoints = 1, prop= fontP, loc="best", bbox_to_anchor=(1, 0.6))
#legend._drawFrame=False
#DRAW AGGREGATE DATA (plot 1 and plot 2):
nbins = 20
p1axes = axesList[1]
y1min, y1max = drawAggData( p1axes, aggData, 0, 0, 1, ycutoff, nbins )
y1lim = max( abs(y1min), abs(y1max) )
p1axes.set_ylim( -y1lim*1.1, y1lim*1.1 )
p1axes.set_xlim( ycutoff - (1-ycutoff)*0.02, 1 + (1-ycutoff)*0.01 )
#p1axes.set_ylim( y1min*1.1, y1max*1.1 )
for loc, spine in p1axes.spines.iteritems():
if loc == 'left':
spine.set_position( ( 'outward', 10 ) )
spine.set_color( 'none' )
p1axes.axhline( 0, color = '#000000' )
p1axes.xaxis.set_major_locator( NullLocator() )
p1axes.xaxis.set_major_formatter( NullFormatter() )
p1axes.yaxis.set_ticks([-y1lim, 0, y1lim])
for l in p1axes.yaxis.get_ticklabels():
l.set_fontsize('small')
p2axes = axesList[2]
x2min, x2max = drawAggData( p2axes, aggData, 1, 0, 1, ycutoff, nbins )
x2lim = max( abs(x2min), abs(x2max) )
p2axes.set_xlim( -x2lim*1.1, x2lim*1.1 )
p2axes.set_ylim( ycutoff - (1-ycutoff)*0.02, 1 + (1- ycutoff)*0.01 )
#p2axes.set_xlim( x2min*1.1, x2max*1.1 )
for loc, spine in p2axes.spines.iteritems():
if loc == 'bottom':
spine.set_position( ( 'outward', 10 ) )
spine.set_color( 'none' )
p2axes.axvline( 0, color = '#000000' )
p2axes.yaxis.set_major_locator( NullLocator() )
p2axes.yaxis.set_major_formatter( NullFormatter() )
p2axes.xaxis.set_ticks([-x2lim, 0, x2lim])
for l in p2axes.xaxis.get_ticklabels():
l.set_fontsize('small')
l.set_rotation( 45 )
return
def drawContiguityPlot( options, stats ):
#options.out = os.path.join(options.outdir, "contiguity_" + stats.refname) #name of output file
options.out = os.path.join(options.outdir, options.exp + "_" + stats.refname) #name of output file
if options.includeCov:
options.out = options.out + "_incCov"
options.ycutoff = 0.7 #HACK
else:#HACK
options.ycutoff = 0.95 #HACK
fig, pdf = libplot.initImage( 8.0, 10.0, options )
axes = libplot.setAxes( fig )
lines, sampleNames, ymin = drawData( axes, stats, options )
drawLegend( axes, lines, sampleNames, options )
if options.ycutoff:
setAxisLimits( axes, options.ycutoff )
else:
setAxisLimits( axes, ymin*0.98 )
libplot.setTicks( axes )
libplot.writeImage( fig, pdf, options )
def setCompareAxes( fig ):
"""
Set axes for the CompareContiguityPlot. There are 3 subplots total:
Plot 0: The main plot where each axis represents one reference seq. Eg: xaxis <- cactusref, yaxis <- hg19
    Plot 1: Directly above plot 0, shows frequencies of data points that lie above and below the y=x line
    Plot 2: To the right of plot 0, shows frequencies of data points that lie to the left and right of the y=x line
"""
axesList = []
axleft = 0.12
axright = 0.88
axwidth = axright - axleft
axbottom = 0.1
axtop = 0.95
axheight = axtop - axbottom
margin = 0.07 #space between plots
    plot0wh = 0.7 #plot 0 width = plot 0 height
plot1h = axheight - (plot0wh + margin)
plot2w = plot1h
axesList.append( fig.add_axes( [axleft, axbottom, plot0wh, plot0wh] ) ) #Plot 0
axesList.append( fig.add_axes( [axleft, axbottom + plot0wh + margin, plot0wh, plot1h] ) ) #Plot 1
axesList.append( fig.add_axes( [axleft + plot0wh + margin, axbottom, plot2w, plot0wh] ) ) #Plot 2
return axesList
def drawCompareContiguityPlot( options, xstats, ystats ):
#options.out = os.path.join(options.outdir, "contiguity_" + xstats.refname + "_" + ystats.refname)
options.out = os.path.join(options.outdir, options.exp + "_" + xstats.refname + "_" + ystats.refname)
if options.includeCov:
options.out = options.out + "_incCov"
fig, pdf = libplot.initImage( 8.0, 8.0, options )
#Set axes:
#axes = fig.add_axes( [0.12, 0.1, 0.85, 0.85] )
axesList = setCompareAxes( fig )
drawCompareData( axesList, xstats, ystats, options )
libplot.writeImage( fig, pdf, options )
def readfiles( options ):
statsList = [] #each element represents one input XML file (one contiguity plot)
for f in options.files:
name = os.path.basename( f ).split( '.' )[0]
stats = Stats( name )
xmltree = ET.parse( f )
root = xmltree.getroot()
for sample in root.findall( 'statsForSample' ):
name = sample.attrib[ 'sampleName' ]
if name != '' and name != 'ROOT' and name not in options.filteredSamples:
s = Sample( name, sample.attrib[ 'referenceName' ] )
s.setSummary(sample)
s.setBuckets( sample )
stats.append( s )
if len(stats) > 0:
stats.setRefName( stats[0].reference )
statsList.append( stats )
return statsList
def getAverage(statsList):
sys.stderr.write("Reference\tCorrectPerAligned\tWrongPerAligned\tWrongPerMillion\n")
for stats in statsList:
numsamples = 0
correct = 0
for sample in stats:
if sample.name == stats.refname or sample.name =='panTro3' or sample.name == stats.name:
continue
numsamples +=1
correct += float(sample.correctPerAligned)
correct /= numsamples
wrong = 1 - correct
wrongPerMil = wrong*1000000
sys.stderr.write("%s\t%f\t%f\t%f\n" %(stats.refname, correct, wrong, wrongPerMil))
return
def initOptions( parser ):
    parser.add_option('--title', dest='title', default='Contiguity',
                      help='Base title of the plots, default=%default')
parser.add_option('--legendElements', dest='legendElements',
help='Specify the legend text - comma separated list' )
parser.add_option('--ycutoff', dest='ycutoff', type='float',
help='Only points with y-value from ycutoff to 1 are displayed')
parser.add_option('--outdir', dest='outdir', default='.', help='Output directory')
parser.add_option('--includeCoverage', dest='includeCov', action="store_true", default=False, help='If specified, will include coverage info in the plots')
parser.add_option('--samplesOrder', dest="samplesOrder", default="reference,hg19,apd,cox,dbb,mann,mcf,qbl,ssto,venter,NA12892,NA12878,NA19239,NA19238,NA19240,nigerian,yanhuang,panTro3", help="Samples order")
parser.add_option('--filteredSamples', dest='filteredSamples', help='Hyphen separated list of samples that were filtered out (not to include in the plot)')
def checkOptions( args, options, parser ):
options.files = []
for f in args:
if not os.path.exists( f ):
parser.error('%s does not exist\n' %f)
options.files.append( os.path.abspath( f ) )
if len(options.files) < 1:
parser.error('Please specify at least one valid contiguityStats file.\n')
options.exp = ( os.path.basename( options.files[0] ).split('_') )[0]
if options.includeCov and options.title == 'Contiguity':
options.title = 'Contiguity and Coverage'
#system("mkdir -p %s" % options.outdir)
if options.legendElements:
options.legendElements = options.legendElements.split(',')
options.samplesOrder = options.samplesOrder.split(',')
if options.filteredSamples:
options.filteredSamples = options.filteredSamples.split('-')
else:
options.filteredSamples = []
def main():
    usage = ( 'usage: %prog [options] file1.xml file2.xml\n\n'
              '%prog takes in contiguityStats.xml files and creates image files' )
parser = OptionParser( usage = usage )
initOptions( parser )
libplot.initOptions( parser )
options, args = parser.parse_args()
checkOptions( args, options, parser )
libplot.checkOptions( options, parser )
statsList = readfiles( options )
getAverage(statsList)
#Sort statsList:
sortedStatsList = []
for stats in statsList:
sortedStats = Stats( stats.name )
sortedStats.setRefName( stats.refname )
if len(options.samplesOrder) == 0:
sortedSamples = sorted(stats, key=lambda s:s.name)
sortedStats.extend( sortedSamples )
else:
for name in options.samplesOrder:
for sample in stats:
if sample.name == name:
sortedStats.append( sample )
sortedStatsList.append( sortedStats )
for stats in sortedStatsList:
drawContiguityPlot( options, stats )
if len(sortedStatsList) >= 2:
for i in range( len(sortedStatsList) -1 ):
for j in range( i + 1, len(sortedStatsList) ):
drawCompareContiguityPlot( options, sortedStatsList[i], sortedStatsList[j] )
if __name__ == "__main__":
main()
| 37.514851
| 211
| 0.622486
|
4a1797d097cce00d94b6eae4f7f9029aff50a1cb
| 1,534
|
py
|
Python
|
NLP/tweet_NLP(Review).py
|
Neeraj0001/Machine-Learning-Python-
|
ffebfb302d5759f05cb37a55f47bccdea27e3b14
|
[
"MIT"
] | null | null | null |
NLP/tweet_NLP(Review).py
|
Neeraj0001/Machine-Learning-Python-
|
ffebfb302d5759f05cb37a55f47bccdea27e3b14
|
[
"MIT"
] | null | null | null |
NLP/tweet_NLP(Review).py
|
Neeraj0001/Machine-Learning-Python-
|
ffebfb302d5759f05cb37a55f47bccdea27e3b14
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import nltk
import re
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
ps=PorterStemmer()
df = pd.read_csv('train.csv',engine='python')
clean_review=[]
l=len(df.index)
stop_words = set(stopwords.words('english'))  # build the stopword set once instead of per tweet
for i in range(l):
    word=re.sub('[^a-zA-Z]',' ',df['tweet'][i])
    word=word.lower()
    word=word.split()
    word=[ps.stem(text) for text in word if not text in stop_words]
    word=' '.join(word)
    clean_review.append(word)
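# Sketch of the cleaning above on a hypothetical tweet "Loving this #NLP demo!!!":
# non-letters become spaces, text is lowercased, stopwords dropped, stems joined:
#   "love nlp demo"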
from sklearn.feature_extraction.text import CountVectorizer
cv=CountVectorizer(max_features=1000)
X=cv.fit_transform(clean_review)
X=X.toarray()
y=df['label']
from sklearn.model_selection import train_test_split
X_train,X_test,y_train,y_test=train_test_split(X,y,random_state=0)
from sklearn.linear_model import LogisticRegression
log_reg=LogisticRegression()
from sklearn.neighbors import KNeighborsClassifier
knn=KNeighborsClassifier()
from sklearn.svm import SVC
svm=SVC()
from sklearn.naive_bayes import GaussianNB
nb = GaussianNB()
from sklearn.tree import DecisionTreeClassifier
dtf=DecisionTreeClassifier()
log_reg.fit(X_train,y_train)
knn.fit(X_train,y_train)
svm.fit(X_train,y_train)
nb.fit(X_train,y_train)
dtf.fit(X_train,y_train)
# Print the accuracies so they are visible when run as a script
print('Train accuracy:')
print('LogisticRegression:', log_reg.score(X_train,y_train))
print('KNN:', knn.score(X_train,y_train))
print('SVM:', svm.score(X_train,y_train))
print('NaiveBayes:', nb.score(X_train,y_train))
print('DecisionTree:', dtf.score(X_train,y_train))
print('Test accuracy:')
print('LogisticRegression:', log_reg.score(X_test,y_test))
print('KNN:', knn.score(X_test,y_test))
print('SVM:', svm.score(X_test,y_test))
print('NaiveBayes:', nb.score(X_test,y_test))
print('DecisionTree:', dtf.score(X_test,y_test))
| 24.349206
| 88
| 0.785528
|
4a179a34367ff6e37cb223e6b2b84d6a1ba1577d
| 3,921
|
py
|
Python
|
pyspi/spi_frame.py
|
tsiegert/pyspi
|
bd2ef1cb54ab9ca6e49df092cdb9ed6ae28318ae
|
[
"BSD-3-Clause"
] | null | null | null |
pyspi/spi_frame.py
|
tsiegert/pyspi
|
bd2ef1cb54ab9ca6e49df092cdb9ed6ae28318ae
|
[
"BSD-3-Clause"
] | null | null | null |
pyspi/spi_frame.py
|
tsiegert/pyspi
|
bd2ef1cb54ab9ca6e49df092cdb9ed6ae28318ae
|
[
"BSD-3-Clause"
] | null | null | null |
import astropy.coordinates as coord
import astropy.units as u
import numpy as np
from astropy.coordinates import BaseCoordinateFrame, Attribute, RepresentationMapping
from astropy.coordinates import frame_transform_graph, spherical_to_cartesian
from pyspi.spi_pointing import _construct_sc_matrix
class SPIFrame(BaseCoordinateFrame):
"""
INTEGRAL SPI Frame
Parameters
----------
representation : `BaseRepresentation` or None
A representation object or None to have no data (or use the other keywords)
"""
default_representation = coord.SphericalRepresentation
frame_specific_representation_info = {
'spherical': [
RepresentationMapping(
reprname='lon', framename='lon', defaultunit=u.degree),
RepresentationMapping(
reprname='lat', framename='lat', defaultunit=u.degree),
RepresentationMapping(
reprname='distance', framename='DIST', defaultunit=None)
],
'unitspherical': [
RepresentationMapping(
reprname='lon', framename='lon', defaultunit=u.degree),
RepresentationMapping(
reprname='lat', framename='lat', defaultunit=u.degree)
],
'cartesian': [
RepresentationMapping(
reprname='x', framename='SCX'), RepresentationMapping(
reprname='y', framename='SCY'), RepresentationMapping(
reprname='z', framename='SCZ')
]
}
# Specify frame attributes required to fully specify the frame
scx_ra = Attribute(default=None)
scx_dec = Attribute(default=None)
scy_ra = Attribute(default=None)
scy_dec = Attribute(default=None)
scz_ra = Attribute(default=None)
scz_dec = Attribute(default=None)
@frame_transform_graph.transform(coord.FunctionTransform, SPIFrame, coord.ICRS)
def spi_to_j2000(spi_coord, j2000_frame):
"""
"""
sc_matrix = _construct_sc_matrix(spi_coord.scx_ra,
spi_coord.scx_dec,
spi_coord.scy_ra,
spi_coord.scy_dec,
spi_coord.scz_ra,
spi_coord.scz_dec)
    # Rotate the detector-frame cartesian position into ICRS
    pos = spi_coord.cartesian.xyz.value
X0 = np.dot(sc_matrix[:, 0], pos)
X1 = np.dot(sc_matrix[:, 1], pos)
X2 = np.clip(np.dot(sc_matrix[:, 2], pos), -1., 1.)
    dec = np.pi / 2. - np.arccos(X2)  # equivalent to np.arcsin(X2)
idx = np.logical_and(np.abs(X0) < 1E-6, np.abs(X1) < 1E-6)
ra = np.zeros_like(dec)
ra[~idx] = np.arctan2(X1[~idx], X0[~idx]) % (2 * np.pi)
return coord.ICRS(ra=ra * u.radian, dec=dec * u.radian)
@frame_transform_graph.transform(coord.FunctionTransform, coord.ICRS, SPIFrame)
def j2000_to_spi(j2000_frame, spi_coord):
"""
"""
sc_matrix = _construct_sc_matrix(spi_coord.scx_ra,
spi_coord.scx_dec,
spi_coord.scy_ra,
spi_coord.scy_dec,
spi_coord.scz_ra,
spi_coord.scz_dec)
pos = j2000_frame.cartesian.xyz.value
X0 = np.dot(sc_matrix[0, :], pos)
X1 = np.dot(sc_matrix[1, :], pos)
    X2 = np.clip(np.dot(sc_matrix[2, :], pos), -1., 1.)  # clip guards against rounding outside [-1, 1], mirroring spi_to_j2000
    lat = np.pi / 2. - np.arccos(X2)  # equivalent to np.arcsin(X2)
idx = np.logical_and(np.abs(X0) < 1E-6, np.abs(X1) < 1E-6)
lon = np.zeros_like(lat)
lon[~idx] = np.arctan2(X1[~idx], X0[~idx]) % (2 * np.pi)
return SPIFrame(
lon=lon * u.radian,
lat=lat * u.radian,
scx_ra=spi_coord.scx_ra,
scx_dec=spi_coord.scx_dec,
scy_ra=spi_coord.scy_ra,
scy_dec=spi_coord.scy_dec,
scz_ra=spi_coord.scz_ra,
scz_dec=spi_coord.scz_dec
)
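# Usage sketch (hypothetical pointing angles, assumed to be in degrees): once the
# six spacecraft-axis attributes are supplied, any SkyCoord can round-trip
# through this frame:
#   from astropy.coordinates import SkyCoord
#   frame = SPIFrame(scx_ra=10., scx_dec=0., scy_ra=100., scy_dec=0.,
#                    scz_ra=10., scz_dec=90.)
#   SkyCoord(lon=0. * u.deg, lat=0. * u.deg, frame=frame).icrs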
| 30.161538
| 85
| 0.58327
|
4a179b3e5b945001b345b68f997cb2351d0985ce
| 35,668
|
py
|
Python
|
virtual/lib/python3.6/site-packages/werkzeug/serving.py
|
kenmutuma001/Blog
|
6b19a77b71694bbe9f5e84207de46c68f87ebc5e
|
[
"Unlicense"
] | 13
|
2020-02-02T13:53:50.000Z
|
2022-03-20T19:50:02.000Z
|
virtual/lib/python3.6/site-packages/werkzeug/serving.py
|
kenmutuma001/Blog
|
6b19a77b71694bbe9f5e84207de46c68f87ebc5e
|
[
"Unlicense"
] | 8
|
2019-03-24T19:36:30.000Z
|
2019-04-02T18:09:39.000Z
|
virtual/lib/python3.6/site-packages/werkzeug/serving.py
|
kenmutuma001/Blog
|
6b19a77b71694bbe9f5e84207de46c68f87ebc5e
|
[
"Unlicense"
] | 10
|
2019-12-25T20:42:37.000Z
|
2021-11-17T15:19:00.000Z
|
# -*- coding: utf-8 -*-
"""
werkzeug.serving
~~~~~~~~~~~~~~~~
There are many ways to serve a WSGI application. While you're developing
it you usually don't want a full blown webserver like Apache but a simple
standalone one. From Python 2.5 onwards there is the `wsgiref`_ server in
the standard library. If you're using older versions of Python you can
download the package from the cheeseshop.
However there are some caveats. Sourcecode won't reload itself when
changed and each time you kill the server using ``^C`` you get an
`KeyboardInterrupt` error. While the latter is easy to solve the first
one can be a pain in the ass in some situations.
The easiest way is creating a small ``start-myproject.py`` that runs the
application::
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from myproject import make_app
from werkzeug.serving import run_simple
app = make_app(...)
run_simple('localhost', 8080, app, use_reloader=True)
You can also pass it a `extra_files` keyword argument with a list of
additional files (like configuration files) you want to observe.
For bigger applications you should consider using `click`
(http://click.pocoo.org) instead of a simple start file.
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
import io
import os
import signal
import socket
import sys
import werkzeug
from ._compat import PY2
from ._compat import reraise
from ._compat import WIN
from ._compat import wsgi_encoding_dance
from ._internal import _log
from .exceptions import InternalServerError
from .urls import uri_to_iri
from .urls import url_parse
from .urls import url_unquote
try:
import socketserver
from http.server import BaseHTTPRequestHandler
from http.server import HTTPServer
except ImportError:
import SocketServer as socketserver
from BaseHTTPServer import HTTPServer
from BaseHTTPServer import BaseHTTPRequestHandler
try:
import ssl
except ImportError:
class _SslDummy(object):
def __getattr__(self, name):
raise RuntimeError("SSL support unavailable")
ssl = _SslDummy()
try:
import termcolor
except ImportError:
termcolor = None
def _get_openssl_crypto_module():
try:
from OpenSSL import crypto
except ImportError:
raise TypeError("Using ad-hoc certificates requires the pyOpenSSL library.")
else:
return crypto
ThreadingMixIn = socketserver.ThreadingMixIn
can_fork = hasattr(os, "fork")
if can_fork:
ForkingMixIn = socketserver.ForkingMixIn
else:
class ForkingMixIn(object):
pass
try:
af_unix = socket.AF_UNIX
except AttributeError:
af_unix = None
LISTEN_QUEUE = 128
can_open_by_fd = not WIN and hasattr(socket, "fromfd")
# On Python 3, ConnectionError represents the same errnos as
# socket.error from Python 2, while socket.error is an alias for the
# more generic OSError.
if PY2:
_ConnectionError = socket.error
else:
_ConnectionError = ConnectionError
class DechunkedInput(io.RawIOBase):
"""An input stream that handles Transfer-Encoding 'chunked'"""
def __init__(self, rfile):
self._rfile = rfile
self._done = False
self._len = 0
def readable(self):
return True
def read_chunk_len(self):
try:
line = self._rfile.readline().decode("latin1")
_len = int(line.strip(), 16)
except ValueError:
raise IOError("Invalid chunk header")
if _len < 0:
raise IOError("Negative chunk length not allowed")
return _len
def readinto(self, buf):
read = 0
while not self._done and read < len(buf):
if self._len == 0:
# This is the first chunk or we fully consumed the previous
# one. Read the next length of the next chunk
self._len = self.read_chunk_len()
if self._len == 0:
# Found the final chunk of size 0. The stream is now exhausted,
# but there is still a final newline that should be consumed
self._done = True
if self._len > 0:
# There is data (left) in this chunk, so append it to the
# buffer. If this operation fully consumes the chunk, this will
# reset self._len to 0.
n = min(len(buf), self._len)
buf[read : read + n] = self._rfile.read(n)
self._len -= n
read += n
if self._len == 0:
# Skip the terminating newline of a chunk that has been fully
# consumed. This also applies to the 0-sized final chunk
terminator = self._rfile.readline()
if terminator not in (b"\n", b"\r\n", b"\r"):
raise IOError("Missing chunk terminating newline")
return read
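    # Example of the chunked wire format decoded above (RFC 7230, section 4.1):
    #   b"4\r\nWiki\r\n5\r\npedia\r\n0\r\n\r\n"  ->  b"Wikipedia"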
class WSGIRequestHandler(BaseHTTPRequestHandler, object):
"""A request handler that implements WSGI dispatching."""
@property
def server_version(self):
return "Werkzeug/" + werkzeug.__version__
def make_environ(self):
request_url = url_parse(self.path)
def shutdown_server():
self.server.shutdown_signal = True
url_scheme = "http" if self.server.ssl_context is None else "https"
if not self.client_address:
self.client_address = "<local>"
if isinstance(self.client_address, str):
self.client_address = (self.client_address, 0)
else:
pass
path_info = url_unquote(request_url.path)
environ = {
"wsgi.version": (1, 0),
"wsgi.url_scheme": url_scheme,
"wsgi.input": self.rfile,
"wsgi.errors": sys.stderr,
"wsgi.multithread": self.server.multithread,
"wsgi.multiprocess": self.server.multiprocess,
"wsgi.run_once": False,
"werkzeug.server.shutdown": shutdown_server,
"SERVER_SOFTWARE": self.server_version,
"REQUEST_METHOD": self.command,
"SCRIPT_NAME": "",
"PATH_INFO": wsgi_encoding_dance(path_info),
"QUERY_STRING": wsgi_encoding_dance(request_url.query),
# Non-standard, added by mod_wsgi, uWSGI
"REQUEST_URI": wsgi_encoding_dance(self.path),
# Non-standard, added by gunicorn
"RAW_URI": wsgi_encoding_dance(self.path),
"REMOTE_ADDR": self.address_string(),
"REMOTE_PORT": self.port_integer(),
"SERVER_NAME": self.server.server_address[0],
"SERVER_PORT": str(self.server.server_address[1]),
"SERVER_PROTOCOL": self.request_version,
}
for key, value in self.get_header_items():
key = key.upper().replace("-", "_")
if key not in ("CONTENT_TYPE", "CONTENT_LENGTH"):
key = "HTTP_" + key
if key in environ:
value = "{},{}".format(environ[key], value)
environ[key] = value
if environ.get("HTTP_TRANSFER_ENCODING", "").strip().lower() == "chunked":
environ["wsgi.input_terminated"] = True
environ["wsgi.input"] = DechunkedInput(environ["wsgi.input"])
if request_url.scheme and request_url.netloc:
environ["HTTP_HOST"] = request_url.netloc
return environ
def run_wsgi(self):
if self.headers.get("Expect", "").lower().strip() == "100-continue":
self.wfile.write(b"HTTP/1.1 100 Continue\r\n\r\n")
self.environ = environ = self.make_environ()
headers_set = []
headers_sent = []
def write(data):
assert headers_set, "write() before start_response"
if not headers_sent:
status, response_headers = headers_sent[:] = headers_set
try:
code, msg = status.split(None, 1)
except ValueError:
code, msg = status, ""
code = int(code)
self.send_response(code, msg)
header_keys = set()
for key, value in response_headers:
self.send_header(key, value)
key = key.lower()
header_keys.add(key)
if not (
"content-length" in header_keys
or environ["REQUEST_METHOD"] == "HEAD"
or code < 200
or code in (204, 304)
):
self.close_connection = True
self.send_header("Connection", "close")
if "server" not in header_keys:
self.send_header("Server", self.version_string())
if "date" not in header_keys:
self.send_header("Date", self.date_time_string())
self.end_headers()
assert isinstance(data, bytes), "applications must write bytes"
self.wfile.write(data)
self.wfile.flush()
def start_response(status, response_headers, exc_info=None):
if exc_info:
try:
if headers_sent:
reraise(*exc_info)
finally:
exc_info = None
elif headers_set:
raise AssertionError("Headers already set")
headers_set[:] = [status, response_headers]
return write
def execute(app):
application_iter = app(environ, start_response)
try:
for data in application_iter:
write(data)
if not headers_sent:
write(b"")
finally:
if hasattr(application_iter, "close"):
application_iter.close()
application_iter = None
try:
execute(self.server.app)
except (_ConnectionError, socket.timeout) as e:
self.connection_dropped(e, environ)
except Exception:
if self.server.passthrough_errors:
raise
from .debug.tbtools import get_current_traceback
traceback = get_current_traceback(ignore_system_exceptions=True)
try:
# if we haven't yet sent the headers but they are set
# we roll back to be able to set them again.
if not headers_sent:
del headers_set[:]
execute(InternalServerError())
except Exception:
pass
self.server.log("error", "Error on request:\n%s", traceback.plaintext)
def handle(self):
"""Handles a request ignoring dropped connections."""
rv = None
try:
rv = BaseHTTPRequestHandler.handle(self)
except (_ConnectionError, socket.timeout) as e:
self.connection_dropped(e)
except Exception as e:
if self.server.ssl_context is None or not is_ssl_error(e):
raise
if self.server.shutdown_signal:
self.initiate_shutdown()
return rv
def initiate_shutdown(self):
"""A horrible, horrible way to kill the server for Python 2.6 and
later. It's the best we can do.
"""
# Windows does not provide SIGKILL, go with SIGTERM then.
sig = getattr(signal, "SIGKILL", signal.SIGTERM)
# reloader active
if is_running_from_reloader():
os.kill(os.getpid(), sig)
# python 2.7
self.server._BaseServer__shutdown_request = True
# python 2.6
self.server._BaseServer__serving = False
def connection_dropped(self, error, environ=None):
"""Called if the connection was closed by the client. By default
nothing happens.
"""
def handle_one_request(self):
"""Handle a single HTTP request."""
self.raw_requestline = self.rfile.readline()
if not self.raw_requestline:
self.close_connection = 1
elif self.parse_request():
return self.run_wsgi()
def send_response(self, code, message=None):
"""Send the response header and log the response code."""
self.log_request(code)
if message is None:
message = code in self.responses and self.responses[code][0] or ""
if self.request_version != "HTTP/0.9":
hdr = "%s %d %s\r\n" % (self.protocol_version, code, message)
self.wfile.write(hdr.encode("ascii"))
def version_string(self):
return BaseHTTPRequestHandler.version_string(self).strip()
def address_string(self):
if getattr(self, "environ", None):
return self.environ["REMOTE_ADDR"]
elif not self.client_address:
return "<local>"
elif isinstance(self.client_address, str):
return self.client_address
else:
return self.client_address[0]
def port_integer(self):
return self.client_address[1]
def log_request(self, code="-", size="-"):
try:
path = uri_to_iri(self.path)
msg = "%s %s %s" % (self.command, path, self.request_version)
except AttributeError:
# path isn't set if the requestline was bad
msg = self.requestline
code = str(code)
if termcolor:
color = termcolor.colored
if code[0] == "1": # 1xx - Informational
msg = color(msg, attrs=["bold"])
elif code[0] == "2": # 2xx - Success
msg = color(msg, color="white")
elif code == "304": # 304 - Resource Not Modified
msg = color(msg, color="cyan")
elif code[0] == "3": # 3xx - Redirection
msg = color(msg, color="green")
elif code == "404": # 404 - Resource Not Found
msg = color(msg, color="yellow")
elif code[0] == "4": # 4xx - Client Error
msg = color(msg, color="red", attrs=["bold"])
else: # 5xx, or any other response
msg = color(msg, color="magenta", attrs=["bold"])
self.log("info", '"%s" %s %s', msg, code, size)
def log_error(self, *args):
self.log("error", *args)
def log_message(self, format, *args):
self.log("info", format, *args)
def log(self, type, message, *args):
_log(
type,
"%s - - [%s] %s\n"
% (self.address_string(), self.log_date_time_string(), message % args),
)
def get_header_items(self):
"""
Get an iterable list of key/value pairs representing headers.
This function provides Python 2/3 compatibility as related to the
parsing of request headers. Python 2.7 is not compliant with
RFC 3875 Section 4.1.18 which requires multiple values for headers
to be provided. This function will return a matching list regardless
of Python version. It can be removed once Python 2.7 support
is dropped.
        :return: List of tuples containing header key/value pairs
"""
if PY2:
# For Python 2, process the headers manually according to
# W3C RFC 2616 Section 4.2.
items = []
for header in self.headers.headers:
# Remove "\n\r" from the header and split on ":" to get
# the field name and value.
key, value = header[0:-2].split(":", 1)
# Add the key and the value once stripped of leading
# white space. The specification allows for stripping
# trailing white space but the Python 3 code does not
# strip trailing white space. Therefore, trailing space
# will be left as is to match the Python 3 behavior.
items.append((key, value.lstrip()))
else:
items = self.headers.items()
return items
#: backwards compatible name if someone is subclassing it
BaseRequestHandler = WSGIRequestHandler
def generate_adhoc_ssl_pair(cn=None):
from random import random
crypto = _get_openssl_crypto_module()
# pretty damn sure that this is not actually accepted by anyone
if cn is None:
cn = "*"
cert = crypto.X509()
cert.set_serial_number(int(random() * sys.maxsize))
cert.gmtime_adj_notBefore(0)
cert.gmtime_adj_notAfter(60 * 60 * 24 * 365)
subject = cert.get_subject()
subject.CN = cn
subject.O = "Dummy Certificate" # noqa: E741
issuer = cert.get_issuer()
issuer.CN = subject.CN
issuer.O = subject.O # noqa: E741
pkey = crypto.PKey()
pkey.generate_key(crypto.TYPE_RSA, 2048)
cert.set_pubkey(pkey)
cert.sign(pkey, "sha256")
return cert, pkey
def make_ssl_devcert(base_path, host=None, cn=None):
"""Creates an SSL key for development. This should be used instead of
the ``'adhoc'`` key which generates a new cert on each server start.
It accepts a path for where it should store the key and cert and
either a host or CN. If a host is given it will use the CN
``*.host/CN=host``.
For more information see :func:`run_simple`.
.. versionadded:: 0.9
:param base_path: the path to the certificate and key. The extension
``.crt`` is added for the certificate, ``.key`` is
added for the key.
:param host: the name of the host. This can be used as an alternative
for the `cn`.
:param cn: the `CN` to use.
"""
from OpenSSL import crypto
if host is not None:
cn = "*.%s/CN=%s" % (host, host)
cert, pkey = generate_adhoc_ssl_pair(cn=cn)
cert_file = base_path + ".crt"
pkey_file = base_path + ".key"
with open(cert_file, "wb") as f:
f.write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert))
with open(pkey_file, "wb") as f:
f.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, pkey))
return cert_file, pkey_file
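# Usage sketch (hypothetical path and app): generate the files once, then reuse
# them across restarts instead of the per-start 'adhoc' certificate:
#   cert_file, pkey_file = make_ssl_devcert('/tmp/devcert', host='localhost')
#   run_simple('localhost', 4000, app, ssl_context=(cert_file, pkey_file))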
def generate_adhoc_ssl_context():
"""Generates an adhoc SSL context for the development server."""
crypto = _get_openssl_crypto_module()
import tempfile
import atexit
cert, pkey = generate_adhoc_ssl_pair()
cert_handle, cert_file = tempfile.mkstemp()
pkey_handle, pkey_file = tempfile.mkstemp()
atexit.register(os.remove, pkey_file)
atexit.register(os.remove, cert_file)
os.write(cert_handle, crypto.dump_certificate(crypto.FILETYPE_PEM, cert))
os.write(pkey_handle, crypto.dump_privatekey(crypto.FILETYPE_PEM, pkey))
os.close(cert_handle)
os.close(pkey_handle)
ctx = load_ssl_context(cert_file, pkey_file)
return ctx
def load_ssl_context(cert_file, pkey_file=None, protocol=None):
"""Loads SSL context from cert/private key files and optional protocol.
Many parameters are directly taken from the API of
:py:class:`ssl.SSLContext`.
:param cert_file: Path of the certificate to use.
:param pkey_file: Path of the private key to use. If not given, the key
will be obtained from the certificate file.
:param protocol: One of the ``PROTOCOL_*`` constants in the stdlib ``ssl``
module. Defaults to ``PROTOCOL_SSLv23``.
"""
if protocol is None:
protocol = ssl.PROTOCOL_SSLv23
ctx = _SSLContext(protocol)
ctx.load_cert_chain(cert_file, pkey_file)
return ctx
class _SSLContext(object):
"""A dummy class with a small subset of Python3's ``ssl.SSLContext``, only
intended to be used with and by Werkzeug."""
def __init__(self, protocol):
self._protocol = protocol
self._certfile = None
self._keyfile = None
self._password = None
def load_cert_chain(self, certfile, keyfile=None, password=None):
self._certfile = certfile
self._keyfile = keyfile or certfile
self._password = password
def wrap_socket(self, sock, **kwargs):
return ssl.wrap_socket(
sock,
keyfile=self._keyfile,
certfile=self._certfile,
ssl_version=self._protocol,
**kwargs
)
def is_ssl_error(error=None):
"""Checks if the given error (or the current one) is an SSL error."""
exc_types = (ssl.SSLError,)
try:
from OpenSSL.SSL import Error
exc_types += (Error,)
except ImportError:
pass
if error is None:
error = sys.exc_info()[1]
return isinstance(error, exc_types)
def select_address_family(host, port):
"""Return ``AF_INET4``, ``AF_INET6``, or ``AF_UNIX`` depending on
the host and port."""
# disabled due to problems with current ipv6 implementations
# and various operating systems. Probably this code also is
# not supposed to work, but I can't come up with any other
# ways to implement this.
# try:
# info = socket.getaddrinfo(host, port, socket.AF_UNSPEC,
# socket.SOCK_STREAM, 0,
# socket.AI_PASSIVE)
# if info:
# return info[0][0]
# except socket.gaierror:
# pass
if host.startswith("unix://"):
return socket.AF_UNIX
elif ":" in host and hasattr(socket, "AF_INET6"):
return socket.AF_INET6
return socket.AF_INET
def get_sockaddr(host, port, family):
"""Return a fully qualified socket address that can be passed to
:func:`socket.bind`."""
if family == af_unix:
return host.split("://", 1)[1]
try:
res = socket.getaddrinfo(
host, port, family, socket.SOCK_STREAM, socket.IPPROTO_TCP
)
except socket.gaierror:
return host, port
return res[0][4]
class BaseWSGIServer(HTTPServer, object):
"""Simple single-threaded, single-process WSGI server."""
multithread = False
multiprocess = False
request_queue_size = LISTEN_QUEUE
def __init__(
self,
host,
port,
app,
handler=None,
passthrough_errors=False,
ssl_context=None,
fd=None,
):
if handler is None:
handler = WSGIRequestHandler
self.address_family = select_address_family(host, port)
if fd is not None:
real_sock = socket.fromfd(fd, self.address_family, socket.SOCK_STREAM)
port = 0
server_address = get_sockaddr(host, int(port), self.address_family)
# remove socket file if it already exists
if self.address_family == af_unix and os.path.exists(server_address):
os.unlink(server_address)
HTTPServer.__init__(self, server_address, handler)
self.app = app
self.passthrough_errors = passthrough_errors
self.shutdown_signal = False
self.host = host
self.port = self.socket.getsockname()[1]
# Patch in the original socket.
if fd is not None:
self.socket.close()
self.socket = real_sock
self.server_address = self.socket.getsockname()
if ssl_context is not None:
if isinstance(ssl_context, tuple):
ssl_context = load_ssl_context(*ssl_context)
if ssl_context == "adhoc":
ssl_context = generate_adhoc_ssl_context()
# If we are on Python 2 the return value from socket.fromfd
# is an internal socket object but what we need for ssl wrap
# is the wrapper around it :(
sock = self.socket
if PY2 and not isinstance(sock, socket.socket):
sock = socket.socket(sock.family, sock.type, sock.proto, sock)
self.socket = ssl_context.wrap_socket(sock, server_side=True)
self.ssl_context = ssl_context
else:
self.ssl_context = None
def log(self, type, message, *args):
_log(type, message, *args)
def serve_forever(self):
self.shutdown_signal = False
try:
HTTPServer.serve_forever(self)
except KeyboardInterrupt:
pass
finally:
self.server_close()
def handle_error(self, request, client_address):
if self.passthrough_errors:
raise
# Python 2 still causes a socket.error after the earlier
# handling, so silence it here.
if isinstance(sys.exc_info()[1], _ConnectionError):
return
return HTTPServer.handle_error(self, request, client_address)
def get_request(self):
con, info = self.socket.accept()
return con, info
class ThreadedWSGIServer(ThreadingMixIn, BaseWSGIServer):
"""A WSGI server that does threading."""
multithread = True
daemon_threads = True
class ForkingWSGIServer(ForkingMixIn, BaseWSGIServer):
"""A WSGI server that does forking."""
multiprocess = True
def __init__(
self,
host,
port,
app,
processes=40,
handler=None,
passthrough_errors=False,
ssl_context=None,
fd=None,
):
if not can_fork:
raise ValueError("Your platform does not support forking.")
BaseWSGIServer.__init__(
self, host, port, app, handler, passthrough_errors, ssl_context, fd
)
self.max_children = processes
def make_server(
host=None,
port=None,
app=None,
threaded=False,
processes=1,
request_handler=None,
passthrough_errors=False,
ssl_context=None,
fd=None,
):
"""Create a new server instance that is either threaded, or forks
or just processes one request after another.
"""
if threaded and processes > 1:
raise ValueError("cannot have a multithreaded and multi process server.")
elif threaded:
return ThreadedWSGIServer(
host, port, app, request_handler, passthrough_errors, ssl_context, fd=fd
)
elif processes > 1:
return ForkingWSGIServer(
host,
port,
app,
processes,
request_handler,
passthrough_errors,
ssl_context,
fd=fd,
)
else:
return BaseWSGIServer(
host, port, app, request_handler, passthrough_errors, ssl_context, fd=fd
)
def is_running_from_reloader():
"""Checks if the application is running from within the Werkzeug
reloader subprocess.
.. versionadded:: 0.10
"""
return os.environ.get("WERKZEUG_RUN_MAIN") == "true"
def run_simple(
hostname,
port,
application,
use_reloader=False,
use_debugger=False,
use_evalex=True,
extra_files=None,
reloader_interval=1,
reloader_type="auto",
threaded=False,
processes=1,
request_handler=None,
static_files=None,
passthrough_errors=False,
ssl_context=None,
):
"""Start a WSGI application. Optional features include a reloader,
multithreading and fork support.
This function has a command-line interface too::
python -m werkzeug.serving --help
.. versionadded:: 0.5
`static_files` was added to simplify serving of static files as well
as `passthrough_errors`.
.. versionadded:: 0.6
support for SSL was added.
.. versionadded:: 0.8
Added support for automatically loading a SSL context from certificate
file and private key.
.. versionadded:: 0.9
Added command-line interface.
.. versionadded:: 0.10
Improved the reloader and added support for changing the backend
through the `reloader_type` parameter. See :ref:`reloader`
for more information.
.. versionchanged:: 0.15
Bind to a Unix socket by passing a path that starts with
``unix://`` as the ``hostname``.
:param hostname: The host to bind to, for example ``'localhost'``.
If the value is a path that starts with ``unix://`` it will bind
        to a Unix socket instead of a TCP socket.
:param port: The port for the server. eg: ``8080``
:param application: the WSGI application to execute
:param use_reloader: should the server automatically restart the python
process if modules were changed?
:param use_debugger: should the werkzeug debugging system be used?
:param use_evalex: should the exception evaluation feature be enabled?
:param extra_files: a list of files the reloader should watch
additionally to the modules. For example configuration
files.
:param reloader_interval: the interval for the reloader in seconds.
:param reloader_type: the type of reloader to use. The default is
auto detection. Valid values are ``'stat'`` and
``'watchdog'``. See :ref:`reloader` for more
information.
:param threaded: should the process handle each request in a separate
thread?
:param processes: if greater than 1 then handle each request in a new process
up to this maximum number of concurrent processes.
:param request_handler: optional parameter that can be used to replace
the default one. You can use this to replace it
with a different
:class:`~BaseHTTPServer.BaseHTTPRequestHandler`
subclass.
:param static_files: a list or dict of paths for static files. This works
exactly like :class:`SharedDataMiddleware`, it's actually
just wrapping the application in that middleware before
serving.
:param passthrough_errors: set this to `True` to disable the error catching.
This means that the server will die on errors but
it can be useful to hook debuggers in (pdb etc.)
:param ssl_context: an SSL context for the connection. Either an
:class:`ssl.SSLContext`, a tuple in the form
``(cert_file, pkey_file)``, the string ``'adhoc'`` if
the server should automatically create one, or ``None``
to disable SSL (which is the default).
"""
if not isinstance(port, int):
raise TypeError("port must be an integer")
if use_debugger:
from .debug import DebuggedApplication
application = DebuggedApplication(application, use_evalex)
if static_files:
from .middleware.shared_data import SharedDataMiddleware
application = SharedDataMiddleware(application, static_files)
def log_startup(sock):
display_hostname = hostname if hostname not in ("", "*") else "localhost"
quit_msg = "(Press CTRL+C to quit)"
if sock.family == af_unix:
_log("info", " * Running on %s %s", display_hostname, quit_msg)
else:
if ":" in display_hostname:
display_hostname = "[%s]" % display_hostname
port = sock.getsockname()[1]
_log(
"info",
" * Running on %s://%s:%d/ %s",
"http" if ssl_context is None else "https",
display_hostname,
port,
quit_msg,
)
def inner():
try:
fd = int(os.environ["WERKZEUG_SERVER_FD"])
except (LookupError, ValueError):
fd = None
srv = make_server(
hostname,
port,
application,
threaded,
processes,
request_handler,
passthrough_errors,
ssl_context,
fd=fd,
)
if fd is None:
log_startup(srv.socket)
srv.serve_forever()
if use_reloader:
# If we're not running already in the subprocess that is the
# reloader we want to open up a socket early to make sure the
# port is actually available.
if not is_running_from_reloader():
if port == 0 and not can_open_by_fd:
raise ValueError(
"Cannot bind to a random port with enabled "
"reloader if the Python interpreter does "
"not support socket opening by fd."
)
# Create and destroy a socket so that any exceptions are
# raised before we spawn a separate Python interpreter and
# lose this ability.
address_family = select_address_family(hostname, port)
server_address = get_sockaddr(hostname, port, address_family)
s = socket.socket(address_family, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind(server_address)
if hasattr(s, "set_inheritable"):
s.set_inheritable(True)
# If we can open the socket by file descriptor, then we can just
# reuse this one and our socket will survive the restarts.
if can_open_by_fd:
os.environ["WERKZEUG_SERVER_FD"] = str(s.fileno())
s.listen(LISTEN_QUEUE)
log_startup(s)
else:
s.close()
if address_family == af_unix:
_log("info", "Unlinking %s" % server_address)
os.unlink(server_address)
# Do not use relative imports, otherwise "python -m werkzeug.serving"
# breaks.
from ._reloader import run_with_reloader
run_with_reloader(inner, extra_files, reloader_interval, reloader_type)
else:
inner()
def run_with_reloader(*args, **kwargs):
# People keep using undocumented APIs. Do not use this function
# please, we do not guarantee that it continues working.
from ._reloader import run_with_reloader
return run_with_reloader(*args, **kwargs)
def main():
"""A simple command-line interface for :py:func:`run_simple`."""
# in contrast to argparse, this works at least under Python < 2.7
import optparse
from .utils import import_string
parser = optparse.OptionParser(usage="Usage: %prog [options] app_module:app_object")
parser.add_option(
"-b",
"--bind",
dest="address",
help="The hostname:port the app should listen on.",
)
parser.add_option(
"-d",
"--debug",
dest="use_debugger",
action="store_true",
default=False,
help="Use Werkzeug's debugger.",
)
parser.add_option(
"-r",
"--reload",
dest="use_reloader",
action="store_true",
default=False,
help="Reload Python process if modules change.",
)
options, args = parser.parse_args()
hostname, port = None, None
if options.address:
address = options.address.split(":")
hostname = address[0]
if len(address) > 1:
port = address[1]
if len(args) != 1:
sys.stdout.write("No application supplied, or too much. See --help\n")
sys.exit(1)
app = import_string(args[0])
run_simple(
hostname=(hostname or "127.0.0.1"),
port=int(port or 5000),
application=app,
use_reloader=options.use_reloader,
use_debugger=options.use_debugger,
)
if __name__ == "__main__":
main()
| 33.776515
| 88
| 0.598323
|
4a179c7128878fdcf2352facb1fde9371d2ccb47
| 1,194
|
py
|
Python
|
mailmsg/createmsgpage.py
|
allankellynet/mimas
|
10025d43bba9e84f502a266760786842e7158a05
|
[
"MIT"
] | null | null | null |
mailmsg/createmsgpage.py
|
allankellynet/mimas
|
10025d43bba9e84f502a266760786842e7158a05
|
[
"MIT"
] | 1
|
2020-02-05T13:00:29.000Z
|
2020-02-05T13:00:29.000Z
|
mailmsg/createmsgpage.py
|
allankellynet/mimas
|
10025d43bba9e84f502a266760786842e7158a05
|
[
"MIT"
] | null | null | null |
#-----------------------------------------------------
# Mimas: conference submission and review system
# (c) Allan Kelly 2016-2020 http://www.allankelly.net
# Licensed under MIT License, see LICENSE file
# -----------------------------------------------------
# System imports
# Google imports
from google.appengine.ext import ndb
# Local imports
import basehandler
import custommsg
class CreateMsgPage(basehandler.BaseHandler):
def get(self):
conf_key = self.request.get("conf_key")
template_values = {
"conf_key": conf_key,
"messages": custommsg.retrieve_custom_message(ndb.Key(urlsafe=conf_key))
}
self.write_page('mailmsg/createmsgpage.html', template_values)
    def get_all_checked(self):
        # Keys of the messages ticked in the form
        return [ndb.Key(urlsafe=c) for c in self.request.get_all("selectedmsg")]
def delete_msgs(self):
for k in self.get_all_checked():
k.delete()
def post(self):
if self.request.get("deletemsgs"):
self.delete_msgs()
self.redirect("/custommsgpage?conf_key=" + self.request.get("conf_key"))
| 27.767442
| 84
| 0.592965
|
4a179e148d964b2e4d1e704f14e3f016d64cbc9b
| 20,246
|
py
|
Python
|
test/functional/test_runner.py
|
jcooper036/garrycoin
|
b8bb83486619527dfdecfc00a94603611dc050ee
|
[
"MIT"
] | null | null | null |
test/functional/test_runner.py
|
jcooper036/garrycoin
|
b8bb83486619527dfdecfc00a94603611dc050ee
|
[
"MIT"
] | null | null | null |
test/functional/test_runner.py
|
jcooper036/garrycoin
|
b8bb83486619527dfdecfc00a94603611dc050ee
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Run regression test suite.
This module calls down into individual test cases via subprocess. It will
forward all unrecognized arguments onto the individual test scripts.
Functional tests are disabled on Windows by default. Use --force to run them anyway.
For a description of arguments recognized by test scripts, see
`test/functional/test_framework/test_framework.py:BitcoinTestFramework.main`.
"""
import argparse
import configparser
import datetime
import os
import time
import shutil
import signal
import sys
import subprocess
import tempfile
import re
import logging
# Formatting. Default colors to empty strings.
BOLD, BLUE, RED, GREY = ("", ""), ("", ""), ("", ""), ("", "")
try:
# Make sure python thinks it can write unicode to its stdout
"\u2713".encode("utf_8").decode(sys.stdout.encoding)
TICK = "✓ "
CROSS = "✖ "
CIRCLE = "○ "
except UnicodeDecodeError:
TICK = "P "
CROSS = "x "
CIRCLE = "o "
if os.name == 'posix':
# primitive formatting on supported
# terminal via ANSI escape sequences:
BOLD = ('\033[0m', '\033[1m')
BLUE = ('\033[0m', '\033[0;34m')
RED = ('\033[0m', '\033[0;31m')
GREY = ('\033[0m', '\033[1;30m')
TEST_EXIT_PASSED = 0
TEST_EXIT_SKIPPED = 77
BASE_SCRIPTS= [
# Scripts that are run by the travis build process.
# Longest test should go first, to favor running tests in parallel
'wallet-hd.py',
'walletbackup.py',
# vv Tests less than 5m vv
'p2p-fullblocktest.py',
'fundrawtransaction.py',
'p2p-compactblocks.py',
'segwit.py',
# vv Tests less than 2m vv
'wallet.py',
'wallet-accounts.py',
'p2p-segwit.py',
'wallet-dump.py',
'listtransactions.py',
# vv Tests less than 60s vv
'sendheaders.py',
'zapwallettxes.py',
'importmulti.py',
'mempool_limit.py',
'merkle_blocks.py',
'receivedby.py',
'abandonconflict.py',
'bip68-112-113-p2p.py',
'rawtransactions.py',
'reindex.py',
# vv Tests less than 30s vv
'keypool-topup.py',
'zmq_test.py',
'mempool_resurrect_test.py',
'txn_doublespend.py --mineblock',
'txn_clone.py',
'getchaintips.py',
'rest.py',
'mempool_spendcoinbase.py',
'mempool_reorg.py',
'mempool_persist.py',
'multiwallet.py',
'httpbasics.py',
'multi_rpc.py',
'proxy_test.py',
'signrawtransactions.py',
'disconnect_ban.py',
'decodescript.py',
'blockchain.py',
'disablewallet.py',
'net.py',
'keypool.py',
'p2p-mempool.py',
'prioritise_transaction.py',
'invalidblockrequest.py',
'invalidtxrequest.py',
'p2p-versionbits-warning.py',
'preciousblock.py',
'test_script_address2.py',
'importprunedfunds.py',
'signmessages.py',
'nulldummy.py',
'import-rescan.py',
'mining.py',
'bumpfee.py',
'rpcnamedargs.py',
'listsinceblock.py',
'p2p-leaktests.py',
'wallet-encryption.py',
'bipdersig-p2p.py',
'bip65-cltv-p2p.py',
'uptime.py',
'resendwallettransactions.py',
]
EXTENDED_SCRIPTS = [
# These tests are not run by the travis build process.
# Longest test should go first, to favor running tests in parallel
'pruning.py',
# vv Tests less than 20m vv
'smartfees.py',
# vv Tests less than 5m vv
'maxuploadtarget.py',
'mempool_packages.py',
'dbcrash.py',
# vv Tests less than 2m vv
'bip68-sequence.py',
'getblocktemplate_longpoll.py',
'p2p-timeouts.py',
# vv Tests less than 60s vv
'bip9-softforks.py',
'p2p-feefilter.py',
'rpcbind_test.py',
# vv Tests less than 30s vv
'assumevalid.py',
'example_test.py',
'txn_doublespend.py',
'txn_clone.py --mineblock',
'forknotify.py',
'invalidateblock.py',
'p2p-acceptblock.py',
'replace-by-fee.py',
]
# Place EXTENDED_SCRIPTS first since it has the 3 longest running tests
ALL_SCRIPTS = EXTENDED_SCRIPTS + BASE_SCRIPTS
NON_SCRIPTS = [
# These are python files that live in the functional tests directory, but are not test scripts.
"combine_logs.py",
"create_cache.py",
"test_runner.py",
]
def main():
# Parse arguments and pass through unrecognised args
parser = argparse.ArgumentParser(add_help=False,
usage='%(prog)s [test_runner.py options] [script options] [scripts]',
description=__doc__,
epilog='''
Help text and arguments for individual test script:''',
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('--coverage', action='store_true', help='generate a basic coverage report for the RPC interface')
    parser.add_argument('--exclude', '-x', help='specify a comma-separated list of scripts to exclude.')
parser.add_argument('--extended', action='store_true', help='run the extended test suite in addition to the basic tests')
parser.add_argument('--force', '-f', action='store_true', help='run tests even on platforms where they are disabled by default (e.g. windows).')
parser.add_argument('--help', '-h', '-?', action='store_true', help='print help text and exit')
parser.add_argument('--jobs', '-j', type=int, default=4, help='how many test scripts to run in parallel. Default=4.')
parser.add_argument('--keepcache', '-k', action='store_true', help='the default behavior is to flush the cache directory on startup. --keepcache retains the cache from the previous testrun.')
parser.add_argument('--quiet', '-q', action='store_true', help='only print results summary and failure logs')
parser.add_argument('--tmpdirprefix', '-t', default=tempfile.gettempdir(), help="Root directory for datadirs")
args, unknown_args = parser.parse_known_args()
# args to be passed on always start with two dashes; tests are the remaining unknown args
tests = [arg for arg in unknown_args if arg[:2] != "--"]
passon_args = [arg for arg in unknown_args if arg[:2] == "--"]
# Read config generated by configure.
config = configparser.ConfigParser()
configfile = os.path.abspath(os.path.dirname(__file__)) + "/../config.ini"
config.read_file(open(configfile))
passon_args.append("--configfile=%s" % configfile)
# Set up logging
logging_level = logging.INFO if args.quiet else logging.DEBUG
logging.basicConfig(format='%(message)s', level=logging_level)
# Create base test directory
tmpdir = "%s/garrycoin_test_runner_%s" % (args.tmpdirprefix, datetime.datetime.now().strftime("%Y%m%d_%H%M%S"))
os.makedirs(tmpdir)
logging.debug("Temporary test directory at %s" % tmpdir)
enable_wallet = config["components"].getboolean("ENABLE_WALLET")
enable_utils = config["components"].getboolean("ENABLE_UTILS")
enable_bitcoind = config["components"].getboolean("ENABLE_BITCOIND")
if config["environment"]["EXEEXT"] == ".exe" and not args.force:
# https://github.com/bitcoin/bitcoin/commit/d52802551752140cf41f0d9a225a43e84404d3e9
# https://github.com/bitcoin/bitcoin/pull/5677#issuecomment-136646964
print("Tests currently disabled on Windows by default. Use --force option to enable")
sys.exit(0)
if not (enable_wallet and enable_utils and enable_bitcoind):
print("No functional tests to run. Wallet, utils, and garrycoind must all be enabled")
print("Rerun `configure` with -enable-wallet, -with-utils and -with-daemon and rerun make")
sys.exit(0)
# Build list of tests
if tests:
# Individual tests have been specified. Run specified tests that exist
# in the ALL_SCRIPTS list. Accept the name with or without .py extension.
        tests = [re.sub(r"\.py$", "", t) + ".py" for t in tests]
test_list = []
for t in tests:
if t in ALL_SCRIPTS:
test_list.append(t)
else:
print("{}WARNING!{} Test '{}' not found in full test list.".format(BOLD[1], BOLD[0], t))
else:
# No individual tests have been specified.
# Run all base tests, and optionally run extended tests.
test_list = BASE_SCRIPTS
if args.extended:
# place the EXTENDED_SCRIPTS first since the three longest ones
# are there and the list is shorter
test_list = EXTENDED_SCRIPTS + test_list
# Remove the test cases that the user has explicitly asked to exclude.
if args.exclude:
        tests_excl = [re.sub(r"\.py$", "", t) + ".py" for t in args.exclude.split(',')]
for exclude_test in tests_excl:
if exclude_test in test_list:
test_list.remove(exclude_test)
else:
print("{}WARNING!{} Test '{}' not found in current test list.".format(BOLD[1], BOLD[0], exclude_test))
if not test_list:
print("No valid test scripts specified. Check that your test is in one "
"of the test lists in test_runner.py, or run test_runner.py with no arguments to run all tests")
sys.exit(0)
if args.help:
# Print help for test_runner.py, then print help of the first script (with args removed) and exit.
parser.print_help()
subprocess.check_call([(config["environment"]["SRCDIR"] + '/test/functional/' + test_list[0].split()[0])] + ['-h'])
sys.exit(0)
check_script_list(config["environment"]["SRCDIR"])
if not args.keepcache:
shutil.rmtree("%s/test/cache" % config["environment"]["BUILDDIR"], ignore_errors=True)
run_tests(test_list, config["environment"]["SRCDIR"], config["environment"]["BUILDDIR"], config["environment"]["EXEEXT"], tmpdir, args.jobs, args.coverage, passon_args)
def run_tests(test_list, src_dir, build_dir, exeext, tmpdir, jobs=1, enable_coverage=False, args=[]):
# Warn if bitcoind is already running (unix only)
try:
if subprocess.check_output(["pidof", "garrycoind"]) is not None:
print("%sWARNING!%s There is already a garrycoind process running on this system. Tests may fail unexpectedly due to resource contention!" % (BOLD[1], BOLD[0]))
except (OSError, subprocess.SubprocessError):
pass
# Warn if there is a cache directory
cache_dir = "%s/test/cache" % build_dir
if os.path.isdir(cache_dir):
print("%sWARNING!%s There is a cache directory here: %s. If tests fail unexpectedly, try deleting the cache directory." % (BOLD[1], BOLD[0], cache_dir))
    # Set env vars
if "garrycoinD" not in os.environ:
os.environ["garrycoinD"] = build_dir + '/src/garrycoind' + exeext
tests_dir = src_dir + '/test/functional/'
flags = ["--srcdir={}/src".format(build_dir)] + args
flags.append("--cachedir=%s" % cache_dir)
if enable_coverage:
coverage = RPCCoverage()
flags.append(coverage.flag)
logging.debug("Initializing coverage directory at %s" % coverage.dir)
else:
coverage = None
if len(test_list) > 1 and jobs > 1:
# Populate cache
subprocess.check_output([tests_dir + 'create_cache.py'] + flags + ["--tmpdir=%s/cache" % tmpdir])
    # Run Tests
job_queue = TestHandler(jobs, tests_dir, tmpdir, test_list, flags)
time0 = time.time()
test_results = []
max_len_name = len(max(test_list, key=len))
for _ in range(len(test_list)):
test_result, stdout, stderr = job_queue.get_next()
test_results.append(test_result)
if test_result.status == "Passed":
logging.debug("\n%s%s%s passed, Duration: %s s" % (BOLD[1], test_result.name, BOLD[0], test_result.time))
elif test_result.status == "Skipped":
logging.debug("\n%s%s%s skipped" % (BOLD[1], test_result.name, BOLD[0]))
else:
print("\n%s%s%s failed, Duration: %s s\n" % (BOLD[1], test_result.name, BOLD[0], test_result.time))
print(BOLD[1] + 'stdout:\n' + BOLD[0] + stdout + '\n')
print(BOLD[1] + 'stderr:\n' + BOLD[0] + stderr + '\n')
print_results(test_results, max_len_name, (int(time.time() - time0)))
if coverage:
coverage.report_rpc_coverage()
logging.debug("Cleaning up coverage data")
coverage.cleanup()
# Clear up the temp directory if all subdirectories are gone
if not os.listdir(tmpdir):
os.rmdir(tmpdir)
all_passed = all(map(lambda test_result: test_result.was_successful, test_results))
sys.exit(not all_passed)
def print_results(test_results, max_len_name, runtime):
results = "\n" + BOLD[1] + "%s | %s | %s\n\n" % ("TEST".ljust(max_len_name), "STATUS ", "DURATION") + BOLD[0]
test_results.sort(key=lambda result: result.name.lower())
all_passed = True
time_sum = 0
for test_result in test_results:
all_passed = all_passed and test_result.was_successful
time_sum += test_result.time
test_result.padding = max_len_name
results += str(test_result)
status = TICK + "Passed" if all_passed else CROSS + "Failed"
results += BOLD[1] + "\n%s | %s | %s s (accumulated) \n" % ("ALL".ljust(max_len_name), status.ljust(9), time_sum) + BOLD[0]
results += "Runtime: %s s\n" % (runtime)
print(results)
class TestHandler:
"""
    Trigger the test scripts passed in via the list.
"""
def __init__(self, num_tests_parallel, tests_dir, tmpdir, test_list=None, flags=None):
assert(num_tests_parallel >= 1)
self.num_jobs = num_tests_parallel
self.tests_dir = tests_dir
self.tmpdir = tmpdir
self.test_list = test_list
self.flags = flags
self.num_running = 0
# In case there is a graveyard of zombie bitcoinds, we can apply a
# pseudorandom offset to hopefully jump over them.
# (625 is PORT_RANGE/MAX_NODES)
self.portseed_offset = int(time.time() * 1000) % 625
self.jobs = []
def get_next(self):
while self.num_running < self.num_jobs and self.test_list:
# Add tests
self.num_running += 1
t = self.test_list.pop(0)
portseed = len(self.test_list) + self.portseed_offset
portseed_arg = ["--portseed={}".format(portseed)]
log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16)
log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16)
test_argv = t.split()
tmpdir = ["--tmpdir=%s/%s_%s" % (self.tmpdir, re.sub(".py$", "", test_argv[0]), portseed)]
self.jobs.append((t,
time.time(),
subprocess.Popen([self.tests_dir + test_argv[0]] + test_argv[1:] + self.flags + portseed_arg + tmpdir,
universal_newlines=True,
stdout=log_stdout,
stderr=log_stderr),
log_stdout,
log_stderr))
if not self.jobs:
raise IndexError('pop from empty list')
while True:
# Return first proc that finishes
time.sleep(.5)
for j in self.jobs:
(name, time0, proc, log_out, log_err) = j
if os.getenv('TRAVIS') == 'true' and int(time.time() - time0) > 20 * 60:
                    # In travis, timeout individual tests after 20 minutes (to stop
                    # tests hanging and not providing useful output).
proc.send_signal(signal.SIGINT)
if proc.poll() is not None:
log_out.seek(0), log_err.seek(0)
[stdout, stderr] = [l.read().decode('utf-8') for l in (log_out, log_err)]
log_out.close(), log_err.close()
if proc.returncode == TEST_EXIT_PASSED and stderr == "":
status = "Passed"
elif proc.returncode == TEST_EXIT_SKIPPED:
status = "Skipped"
else:
status = "Failed"
self.num_running -= 1
self.jobs.remove(j)
return TestResult(name, status, int(time.time() - time0)), stdout, stderr
print('.', end='', flush=True)
class TestResult():
def __init__(self, name, status, time):
self.name = name
self.status = status
self.time = time
self.padding = 0
def __repr__(self):
if self.status == "Passed":
color = BLUE
glyph = TICK
elif self.status == "Failed":
color = RED
glyph = CROSS
elif self.status == "Skipped":
color = GREY
glyph = CIRCLE
return color[1] + "%s | %s%s | %s s\n" % (self.name.ljust(self.padding), glyph, self.status.ljust(7), self.time) + color[0]
@property
def was_successful(self):
return self.status != "Failed"
def check_script_list(src_dir):
"""Check scripts directory.
Check that there are no scripts in the functional tests directory which are
    not being run by test_runner.py."""
script_dir = src_dir + '/test/functional/'
python_files = set([t for t in os.listdir(script_dir) if t[-3:] == ".py"])
missed_tests = list(python_files - set(map(lambda x: x.split()[0], ALL_SCRIPTS + NON_SCRIPTS)))
if len(missed_tests) != 0:
print("%sWARNING!%s The following scripts are not being run: %s. Check the test lists in test_runner.py." % (BOLD[1], BOLD[0], str(missed_tests)))
if os.getenv('TRAVIS') == 'true':
# On travis this warning is an error to prevent merging incomplete commits into master
sys.exit(1)
class RPCCoverage(object):
"""
Coverage reporting utilities for test_runner.
Coverage calculation works by having each test script subprocess write
coverage files into a particular directory. These files contain the RPC
commands invoked during testing, as well as a complete listing of RPC
commands per `bitcoin-cli help` (`rpc_interface.txt`).
After all tests complete, the commands run are combined and diff'd against
the complete list to calculate uncovered RPC commands.
See also: test/functional/test_framework/coverage.py
"""
def __init__(self):
self.dir = tempfile.mkdtemp(prefix="coverage")
self.flag = '--coveragedir=%s' % self.dir
def report_rpc_coverage(self):
"""
Print out RPC commands that were unexercised by tests.
"""
uncovered = self._get_uncovered_rpc_commands()
if uncovered:
print("Uncovered RPC commands:")
print("".join((" - %s\n" % i) for i in sorted(uncovered)))
else:
print("All RPC commands covered.")
def cleanup(self):
return shutil.rmtree(self.dir)
def _get_uncovered_rpc_commands(self):
"""
Return a set of currently untested RPC commands.
"""
        # This is shared from `test/functional/test_framework/coverage.py`
reference_filename = 'rpc_interface.txt'
coverage_file_prefix = 'coverage.'
coverage_ref_filename = os.path.join(self.dir, reference_filename)
coverage_filenames = set()
all_cmds = set()
covered_cmds = set()
if not os.path.isfile(coverage_ref_filename):
raise RuntimeError("No coverage reference found")
with open(coverage_ref_filename, 'r') as f:
all_cmds.update([i.strip() for i in f.readlines()])
for root, dirs, files in os.walk(self.dir):
for filename in files:
if filename.startswith(coverage_file_prefix):
coverage_filenames.add(os.path.join(root, filename))
for filename in coverage_filenames:
with open(filename, 'r') as f:
covered_cmds.update([i.strip() for i in f.readlines()])
return all_cmds - covered_cmds
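# Hedged illustration (assumed file layout, not from the original file):
# rpc_interface.txt lists one RPC command per line (e.g. "getblockcount"),
# while each coverage.* file lists the commands a single test actually hit;
# the set difference above is exactly the set of uncovered commands.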
if __name__ == '__main__':
main()
| avg_line_length: 38.417457 | max_line_length: 195 | alphanum_fraction: 0.624617 |

hexsha: 4a179ed99fbfd08e5e8934cc58e0773d4656458b | size: 1,861 | ext: py | lang: Python
repo_path: utils/dictSet.py | repo_name: mouton5000/DiscreteEventApplicationEditor | repo_head_hexsha: 4a4272fd9b0a7f3f228fee1e9e7b351e4a21cd33 | licenses: ["MIT"] (same for stars/issues/forks)
stars_count: null | issues_count: null | forks_count: null | event datetimes: null
class DictContainer:
"""
    A container for dictionaries such that each dict is unique.
"""
def __init__(self):
self._tree = _DictTree(isRoot=True)
def add(self, d):
        return self._tree.add(d, set())
class _DictTree:
def __init__(self, label=None, value=None, isRoot=False):
self._label = label
self._value = value
self._children = []
self._isFinal = False
self._isRoot = isRoot
def add(self, kwargs, readKWArgs):
        newReadKWArgs = set(readKWArgs)  # copy, so sibling branches don't share read keys
if not self._isRoot: # node is not root
try:
if kwargs[self._label] != self._value:
return None # This dict can not be added into that subtree
newReadKWArgs.add(self._label)
except KeyError:
return None # This dict can not be added into that subtree
if len(kwargs) == len(newReadKWArgs):
if self._isFinal:
                return False  # The container already contains the dictionary
else:
self._isFinal = True
                return True  # The dictionary was successfully added
for child in self._children:
added = child.add(kwargs, newReadKWArgs)
if added is not None:
return added
self._appendRemaining(kwargs, newReadKWArgs)
return True
def _appendRemaining(self, kwargs, readKWArgs):
t = self
        for key, value in kwargs.items():
if key not in readKWArgs:
ch = _DictTree(label=key, isRoot=False, value=value)
t._children.append(ch)
t = ch
t._isFinal = True
if __name__ == '__main__':
dc = DictContainer()
dct = {'Y': 'abc', 'X': 1, 'Z': 12.0, 'T': True}
    print(dc.add(dct))
    print(dc.add(dct))
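    # Hedged extra checks (not in the original file): a dict differing in one
    # value should be stored as a new, distinct entry.
    print(dc.add({'Y': 'abc', 'X': 2, 'Z': 12.0, 'T': True}))  # expected: True
    print(dc.add({'Y': 'abc', 'X': 2, 'Z': 12.0, 'T': True}))  # expected: False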
| avg_line_length: 30.016129 | max_line_length: 79 | alphanum_fraction: 0.56475 |

hexsha: 4a179fc9cce88d72eae9c5810c81ae343ea779a9 | size: 7,612 | ext: py | lang: Python
repo_path: template_maker/builder/views.py | repo_name: codeforamerica/template-maker | repo_head_hexsha: 66d4744c123d5b868cf259e947dc924bb5a25c9a | licenses: ["BSD-3-Clause"] (same for stars/issues/forks)
stars_count: 9 (2015-02-23T22:03:30.000Z to 2020-01-31T19:06:50.000Z) | issues_count: 37 (2015-03-01T01:10:22.000Z to 2015-12-31T17:24:42.000Z) | forks_count: 2 (2016-01-21T09:59:17.000Z to 2021-04-16T10:51:04.000Z)
from flask import (
Blueprint, request, make_response,
render_template, redirect, abort, url_for,
flash, current_app
)
from flask_login import current_user
from template_maker.extensions import login_manager
from template_maker.users.models import User
from template_maker.builder.forms import (
TemplateBaseForm, TemplateSectionForm,
TemplateSectionTextForm
)
from template_maker.data import (
templates as tp, sections as sc,
placeholders as ph, documents as dm
)
from template_maker.builder.boilerplate import boilerplate as html_boilerplate
blueprint = Blueprint(
'builder', __name__, url_prefix='/build',
template_folder='../templates',
)
@login_manager.user_loader
def load_user(userid):
return User.get_by_id(int(userid))
# restrict blueprint to only authenticated users
@blueprint.before_request
def restrict_access():
if current_app.config.get('ENV') != 'test':
if not current_user.is_authenticated() or current_user.is_anonymous():
return redirect(url_for('users.login'))
SECTION_FORM_MAP = {
'text': TemplateSectionTextForm,
'fixed_text': TemplateSectionTextForm,
'dummy': TemplateSectionForm
}
@blueprint.route('/')
def list_templates():
'''
Returns a list of all the templates.
Because there is no interacton on this page, it uses
Flask entirely
'''
templates = tp.get_all_templates()
output = []
for template in templates:
output.append({
'id': template.id,
'title': template.title,
'description': template.description,
})
return render_template('builder/list.html', templates=output)
@blueprint.route('/new', methods=['GET', 'POST'])
def new_template():
'''
Returns the page for building a new template.
'''
form = TemplateBaseForm()
if form.validate_on_submit():
template_base_id = tp.create_new_template(request.form)
return redirect(
url_for('builder.edit_template', template_id=template_base_id)
)
return render_template('builder/new.html', form=form)
@blueprint.route('/<int:template_id>/section/new/<section_type>')
def new_section(template_id, section_type=None):
    new_section = {'type': section_type, 'title': request.args.get('section_title', '')}
if request.args.get('boilerplate', False):
new_section['html'] = html_boilerplate.get(
request.args.get('boilerplate'), 'Please insert your text here.'
)
new_section_id = sc.create_new_section(new_section, template_id)
if new_section_id:
return redirect(
url_for('builder.edit_template', template_id=template_id, section_id=new_section_id)
)
return abort(403)
@blueprint.route('/<int:template_id>/edit', methods=['GET', 'PUT', 'DELETE'])
def edit_template_metadata(template_id):
'''
Route for managing individual template objects
methods can be request-level or come from the request args
GET - TODO
PUT - TODO
DELETE - Deletes the template (and cascades to delete
template text and associated placeholders) and returns a 204
or returns a 403
'''
template_base = tp.get_single_template(template_id)
if request.args.get('method') == 'DELETE':
if tp.delete_template(template_base):
return redirect(url_for('builder.list_templates'))
return abort(403)
@blueprint.route('/<int:template_id>/')
def redirect_to_section(template_id):
return redirect(url_for('builder.edit_template', template_id=template_id))
@blueprint.route('/<int:template_id>/section/', methods=['GET', 'POST'])
@blueprint.route('/<int:template_id>/section/<int:section_id>', methods=['GET', 'POST'])
def edit_template(template_id, section_id=None, section_type=None):
'''
Route for interacting with individual sections
GET - Gets the template and renders out the editing for that particular section
POST - Updates a section
'''
template_base = tp.get_single_template(template_id)
current_section = sc.get_single_section(section_id, template_id)
if template_base is None or (current_section and current_section.template_id != template_id):
return render_template('404.html')
# handle re-ordering
old_order = template_base.section_order
if request.method == 'POST':
request_sections = request.form.getlist('id')
new_order = sc.reorder_sections(template_base, request_sections) if len(request_sections) > 0 else None
else:
new_order = None
# initialize the forms
form = SECTION_FORM_MAP[current_section.section_type]()
new_section_form = TemplateSectionForm()
placeholders = ph.get_template_placeholders(template_base.id)
# if the form is valid, go ahead and save everything
if form.validate_on_submit():
sc.update_section(current_section, placeholders, template_id, request.form)
total_documents = str(dm.update_documents(template_id))
flash('Successfully saved! ' + total_documents + ' updated', 'alert-success')
return redirect(url_for(
'builder.edit_template', template_id=template_id
))
elif request.method == 'POST':
if new_order and new_order != old_order:
flash('Successfully saved!', 'alert-success')
if section_id == 0:
return redirect(url_for('builder.edit_template', template_id=template_id))
else:
return redirect(url_for('builder.edit_template',
template_id=template_id, section_id=section_id
))
# otherwise, we are doing a get request, so get the sections and placeholders
sections = sc.get_template_sections(template_base)
response = make_response(render_template(
'builder/edit.html', template=template_base,
sections=sections, placeholders=placeholders,
form=form, new_section_form=new_section_form,
current_section=current_section
))
return response
@blueprint.route('/<int:template_id>/section/<int:section_id>/delete')
def delete_section(template_id, section_id):
template = tp.get_single_template(template_id)
if template.section_order and len(template.section_order) > 0:
sc.reorder_sections(template, template.section_order, to_delete=section_id)
sc.delete_section(section_id, template_id)
flash('Section successfully deleted!', 'alert-success')
return redirect(url_for('builder.edit_template', template_id=template_id))
@blueprint.route('/<int:template_id>/publish', methods=['GET', 'POST'])
def publish_template(template_id):
'''
Route for taking documents from the BUILDER and turning them into TEMPLATES via the GENERATOR
GET - Returns the preview for the template
POST - Data contains sections and placeholders. Publish freezes the current
version of the template into new database tables, allowing the builder documents
to be edited and create new templates later on.
'''
template_base = tp.get_single_template(template_id)
if template_base is None:
return render_template('404.html')
if request.method == 'GET':
sections = sc.get_template_sections(template_base)
return render_template('builder/preview.html', sections=sections, template=template_base, preview=True)
elif request.method == 'POST':
# set the publish flag to be true, set the section order
template = tp.publish_template(template_id)
sc.reorder_sections(template, request.form.getlist('id'))
return redirect(url_for('builder.list_templates'))
| avg_line_length: 37.313725 | max_line_length: 111 | alphanum_fraction: 0.709406 |

hexsha: 4a17a001272c1b7cc40db3bf8872ac8392c053ee | size: 22,078 | ext: py | lang: Python
repo_path: mycroft/skills/intent_service.py | repo_name: mcdonc/mycroft-core | repo_head_hexsha: 3c76177e75c1859a4a23b639438379455033d063 | licenses: ["Apache-2.0"] (same for stars/issues/forks)
stars_count: null | issues_count: null | forks_count: null | event datetimes: null
# Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Mycroft's intent service, providing intent parsing since forever!"""
from copy import copy
import time
from mycroft.configuration import Configuration, set_default_lf_lang
from mycroft.util.log import LOG
from mycroft.util.parse import normalize
from mycroft.metrics import report_timing, Stopwatch
from .intent_services import (
AdaptService, AdaptIntent, FallbackService, PadatiousService, IntentMatch
)
from .intent_service_interface import open_intent_envelope
def _get_message_lang(message):
"""Get the language from the message or the default language.
Args:
message: message to check for language code.
Returns:
        The language code from the message or the default language.
"""
default_lang = Configuration.get().get('lang', 'en-us')
return message.data.get('lang', default_lang).lower()
def _normalize_all_utterances(utterances):
"""Create normalized versions and pair them with the original utterance.
This will create a list of tuples with the original utterance as the
first item and if normalizing changes the utterance the normalized version
will be set as the second item in the tuple, if normalization doesn't
change anything the tuple will only have the "raw" original utterance.
Args:
utterances (list): list of utterances to normalize
Returns:
list of tuples, [(original utterance, normalized) ... ]
"""
# normalize() changes "it's a boy" to "it is a boy", etc.
norm_utterances = [normalize(u.lower(), remove_articles=False)
for u in utterances]
# Create pairs of original and normalized counterparts for each entry
# in the input list.
combined = []
for utt, norm in zip(utterances, norm_utterances):
if utt == norm:
combined.append((utt,))
else:
combined.append((utt, norm))
LOG.debug("Utterances: {}".format(combined))
return combined
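# Illustrative example of the pairing above (assumed output, not from the
# original file):
#   _normalize_all_utterances(["it's a test", "hello"])
#   -> [("it's a test", "it is a test"), ("hello",)]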
class IntentService:
"""Mycroft intent service. parses utterances using a variety of systems.
The intent service also provides the internal API for registering and
querying the intent service.
"""
def __init__(self, bus):
# Dictionary for translating a skill id to a name
self.bus = bus
self.skill_names = {}
config = Configuration.get()
self.adapt_service = AdaptService(config.get('context', {}))
try:
self.padatious_service = PadatiousService(bus, config['padatious'])
except Exception as err:
LOG.exception('Failed to create padatious handlers '
'({})'.format(repr(err)))
self.fallback = FallbackService(bus)
self.bus.on('register_vocab', self.handle_register_vocab)
self.bus.on('register_intent', self.handle_register_intent)
self.bus.on('recognizer_loop:utterance', self.handle_utterance)
self.bus.on('detach_intent', self.handle_detach_intent)
self.bus.on('detach_skill', self.handle_detach_skill)
# Context related handlers
self.bus.on('add_context', self.handle_add_context)
self.bus.on('remove_context', self.handle_remove_context)
self.bus.on('clear_context', self.handle_clear_context)
# Converse method
self.bus.on('mycroft.speech.recognition.unknown', self.reset_converse)
self.bus.on('mycroft.skills.loaded', self.update_skill_name_dict)
def add_active_skill_handler(message):
self.add_active_skill(message.data['skill_id'])
self.bus.on('active_skill_request', add_active_skill_handler)
self.active_skills = [] # [skill_id , timestamp]
self.converse_timeout = 5 # minutes to prune active_skills
# Intents API
self.registered_vocab = []
self.bus.on('intent.service.intent.get', self.handle_get_intent)
self.bus.on('intent.service.skills.get', self.handle_get_skills)
self.bus.on('intent.service.active_skills.get',
self.handle_get_active_skills)
self.bus.on('intent.service.adapt.get', self.handle_get_adapt)
self.bus.on('intent.service.adapt.manifest.get',
self.handle_adapt_manifest)
self.bus.on('intent.service.adapt.vocab.manifest.get',
self.handle_vocab_manifest)
self.bus.on('intent.service.padatious.get',
self.handle_get_padatious)
self.bus.on('intent.service.padatious.manifest.get',
self.handle_padatious_manifest)
self.bus.on('intent.service.padatious.entities.manifest.get',
self.handle_entity_manifest)
@property
def registered_intents(self):
return [parser.__dict__
for parser in self.adapt_service.engine.intent_parsers]
def update_skill_name_dict(self, message):
"""Messagebus handler, updates dict of id to skill name conversions."""
self.skill_names[message.data['id']] = message.data['name']
def get_skill_name(self, skill_id):
"""Get skill name from skill ID.
Args:
skill_id: a skill id as encoded in Intent handlers.
Returns:
(str) Skill name or the skill id if the skill wasn't found
"""
return self.skill_names.get(skill_id, skill_id)
def reset_converse(self, message):
"""Let skills know there was a problem with speech recognition"""
lang = _get_message_lang(message)
set_default_lf_lang(lang)
for skill in copy(self.active_skills):
self.do_converse(None, skill[0], lang, message)
def do_converse(self, utterances, skill_id, lang, message):
"""Call skill and ask if they want to process the utterance.
Args:
utterances (list of tuples): utterances paired with normalized
versions.
skill_id: skill to query.
lang (str): current language
message (Message): message containing interaction info.
"""
converse_msg = (message.reply("skill.converse.request", {
"skill_id": skill_id, "utterances": utterances, "lang": lang}))
result = self.bus.wait_for_response(converse_msg,
'skill.converse.response')
if result and 'error' in result.data:
self.handle_converse_error(result)
ret = False
elif result is not None:
ret = result.data.get('result', False)
else:
ret = False
return ret
def handle_converse_error(self, message):
"""Handle error in converse system.
Args:
message (Message): info about the error.
"""
skill_id = message.data["skill_id"]
error_msg = message.data['error']
LOG.error("{}: {}".format(skill_id, error_msg))
if message.data["error"] == "skill id does not exist":
self.remove_active_skill(skill_id)
def remove_active_skill(self, skill_id):
"""Remove a skill from being targetable by converse.
Args:
skill_id (str): skill to remove
"""
for skill in self.active_skills:
if skill[0] == skill_id:
self.active_skills.remove(skill)
def add_active_skill(self, skill_id):
"""Add a skill or update the position of an active skill.
The skill is added to the front of the list, if it's already in the
list it's removed so there is only a single entry of it.
Args:
skill_id (str): identifier of skill to be added.
"""
# search the list for an existing entry that already contains it
# and remove that reference
if skill_id != '':
self.remove_active_skill(skill_id)
# add skill with timestamp to start of skill_list
self.active_skills.insert(0, [skill_id, time.time()])
else:
LOG.warning('Skill ID was empty, won\'t add to list of '
'active skills.')
def send_metrics(self, intent, context, stopwatch):
"""Send timing metrics to the backend.
NOTE: This only applies to those with Opt In.
Args:
            intent (IntentMatch or None): intent match info
context (dict): context info about the interaction
stopwatch (StopWatch): Timing info about the skill parsing.
"""
ident = context['ident'] if 'ident' in context else None
# Determine what handled the intent
if intent and intent.intent_service == 'Converse':
intent_type = '{}:{}'.format(intent.skill_id, 'converse')
elif intent and intent.intent_service == 'Fallback':
intent_type = 'fallback'
elif intent: # Handled by an other intent parser
# Recreate skill name from skill id
parts = intent.intent_type.split(':')
intent_type = self.get_skill_name(parts[0])
if len(parts) > 1:
intent_type = ':'.join([intent_type] + parts[1:])
else: # No intent was found
intent_type = 'intent_failure'
report_timing(ident, 'intent_service', stopwatch,
{'intent_type': intent_type})
def handle_utterance(self, message):
"""Main entrypoint for handling user utterances with Mycroft skills
Monitor the messagebus for 'recognizer_loop:utterance', typically
generated by a spoken interaction but potentially also from a CLI
or other method of injecting a 'user utterance' into the system.
Utterances then work through this sequence to be handled:
1) Active skills attempt to handle using converse()
2) Padatious high match intents (conf > 0.95)
3) Adapt intent handlers
        4) High Priority Fallbacks
        5) Padatious near match intents (conf > 0.8)
        6) General Fallbacks
        7) Padatious loose match intents (conf > 0.5)
        8) Catch all fallbacks including Unknown intent handler
If all these fail the complete_intent_failure message will be sent
and a generic info of the failure will be spoken.
Args:
message (Message): The messagebus data
"""
try:
lang = _get_message_lang(message)
set_default_lf_lang(lang)
utterances = message.data.get('utterances', [])
combined = _normalize_all_utterances(utterances)
stopwatch = Stopwatch()
# List of functions to use to match the utterance with intent.
# These are listed in priority order.
match_funcs = [
self._converse, self.padatious_service.match_high,
self.adapt_service.match_intent, self.fallback.high_prio,
self.padatious_service.match_medium, self.fallback.medium_prio,
self.padatious_service.match_low, self.fallback.low_prio
]
match = None
with stopwatch:
# Loop through the matching functions until a match is found.
for match_func in match_funcs:
match = match_func(combined, lang, message)
if match:
break
if match:
if match.skill_id:
self.add_active_skill(match.skill_id)
# If the service didn't report back the skill_id it
# takes on the responsibility of making the skill "active"
# Launch skill if not handled by the match function
if match.intent_type:
reply = message.reply(match.intent_type, match.intent_data)
# Add back original list of utterances for intent handlers
# match.intent_data only includes the utterance with the
# highest confidence.
reply.data["utterances"] = utterances
self.bus.emit(reply)
else:
# Nothing was able to handle the intent
# Ask politely for forgiveness for failing in this vital task
self.send_complete_intent_failure(message)
self.send_metrics(match, message.context, stopwatch)
except Exception as err:
LOG.exception(err)
def _converse(self, utterances, lang, message):
"""Give active skills a chance at the utterance
Args:
utterances (list): list of utterances
lang (string): 4 letter ISO language code
message (Message): message to use to generate reply
Returns:
IntentMatch if handled otherwise None.
"""
utterances = [item for tup in utterances for item in tup]
# check for conversation time-out
        self.active_skills = [skill for skill in self.active_skills
                              if time.time() - skill[1] <= self.converse_timeout * 60]
# check if any skill wants to handle utterance
for skill in copy(self.active_skills):
if self.do_converse(utterances, skill[0], lang, message):
# update timestamp, or there will be a timeout where
                # intent stops conversing whether it's being used or not
return IntentMatch('Converse', None, None, skill[0])
return None
def send_complete_intent_failure(self, message):
"""Send a message that no skill could handle the utterance.
Args:
message (Message): original message to forward from
"""
self.bus.emit(message.forward('complete_intent_failure'))
def handle_register_vocab(self, message):
"""Register adapt vocabulary.
Args:
message (Message): message containing vocab info
"""
start_concept = message.data.get('start')
end_concept = message.data.get('end')
regex_str = message.data.get('regex')
alias_of = message.data.get('alias_of')
self.adapt_service.register_vocab(start_concept, end_concept,
alias_of, regex_str)
self.registered_vocab.append(message.data)
def handle_register_intent(self, message):
"""Register adapt intent.
Args:
message (Message): message containing intent info
"""
intent = open_intent_envelope(message)
self.adapt_service.register_intent(intent)
def handle_detach_intent(self, message):
"""Remover adapt intent.
Args:
message (Message): message containing intent info
"""
intent_name = message.data.get('intent_name')
self.adapt_service.detach_intent(intent_name)
def handle_detach_skill(self, message):
"""Remove all intents registered for a specific skill.
Args:
message (Message): message containing intent info
"""
skill_id = message.data.get('skill_id')
self.adapt_service.detach_skill(skill_id)
def handle_add_context(self, message):
"""Add context
Args:
message: data contains the 'context' item to add
optionally can include 'word' to be injected as
an alias for the context item.
"""
entity = {'confidence': 1.0}
context = message.data.get('context')
word = message.data.get('word') or ''
origin = message.data.get('origin') or ''
# if not a string type try creating a string from it
if not isinstance(word, str):
word = str(word)
entity['data'] = [(word, context)]
entity['match'] = word
entity['key'] = word
entity['origin'] = origin
self.adapt_service.context_manager.inject_context(entity)
def handle_remove_context(self, message):
"""Remove specific context
Args:
message: data contains the 'context' item to remove
"""
context = message.data.get('context')
if context:
self.adapt_service.context_manager.remove_context(context)
def handle_clear_context(self, _):
"""Clears all keywords from context """
self.adapt_service.context_manager.clear_context()
def handle_get_intent(self, message):
"""Get intent from either adapt or padatious.
Args:
message (Message): message containing utterance
"""
utterance = message.data["utterance"]
lang = message.data.get("lang", "en-us")
combined = _normalize_all_utterances([utterance])
# List of functions to use to match the utterance with intent.
# These are listed in priority order.
# TODO once we have a mechanism for checking if a fallback will
# trigger without actually triggering it, those should be added here
match_funcs = [
self.padatious_service.match_high,
self.adapt_service.match_intent,
# self.fallback.high_prio,
self.padatious_service.match_medium,
# self.fallback.medium_prio,
self.padatious_service.match_low,
# self.fallback.low_prio
]
# Loop through the matching functions until a match is found.
for match_func in match_funcs:
match = match_func(combined, lang, message)
if match:
if match.intent_type:
intent_data = match.intent_data
intent_data["intent_name"] = match.intent_type
intent_data["intent_service"] = match.intent_service
intent_data["skill_id"] = match.skill_id
intent_data["handler"] = match_func.__name__
self.bus.emit(message.reply("intent.service.intent.reply",
{"intent": intent_data}))
return
# signal intent failure
self.bus.emit(message.reply("intent.service.intent.reply",
{"intent": None}))
def handle_get_skills(self, message):
"""Send registered skills to caller.
Argument:
message: query message to reply to.
"""
self.bus.emit(message.reply("intent.service.skills.reply",
{"skills": self.skill_names}))
def handle_get_active_skills(self, message):
"""Send active skills to caller.
Argument:
message: query message to reply to.
"""
self.bus.emit(message.reply("intent.service.active_skills.reply",
{"skills": self.active_skills}))
def handle_get_adapt(self, message):
"""handler getting the adapt response for an utterance.
Args:
message (Message): message containing utterance
"""
utterance = message.data["utterance"]
lang = message.data.get("lang", "en-us")
combined = _normalize_all_utterances([utterance])
intent = self.adapt_service.match_intent(combined, lang)
intent_data = intent.intent_data if intent else None
self.bus.emit(message.reply("intent.service.adapt.reply",
{"intent": intent_data}))
def handle_adapt_manifest(self, message):
"""Send adapt intent manifest to caller.
Argument:
message: query message to reply to.
"""
self.bus.emit(message.reply("intent.service.adapt.manifest",
{"intents": self.registered_intents}))
def handle_vocab_manifest(self, message):
"""Send adapt vocabulary manifest to caller.
Argument:
message: query message to reply to.
"""
self.bus.emit(message.reply("intent.service.adapt.vocab.manifest",
{"vocab": self.registered_vocab}))
def handle_get_padatious(self, message):
"""messagebus handler for perfoming padatious parsing.
Args:
message (Message): message triggering the method
"""
utterance = message.data["utterance"]
norm = message.data.get('norm_utt', utterance)
intent = self.padatious_service.calc_intent(utterance)
if not intent and norm != utterance:
intent = self.padatious_service.calc_intent(norm)
if intent:
intent = intent.__dict__
self.bus.emit(message.reply("intent.service.padatious.reply",
{"intent": intent}))
def handle_padatious_manifest(self, message):
"""Messagebus handler returning the registered padatious intents.
Args:
message (Message): message triggering the method
"""
self.bus.emit(message.reply(
"intent.service.padatious.manifest",
{"intents": self.padatious_service.registered_intents}))
def handle_entity_manifest(self, message):
"""Messagebus handler returning the registered padatious entities.
Args:
message (Message): message triggering the method
"""
self.bus.emit(message.reply(
"intent.service.padatious.entities.manifest",
{"entities": self.padatious_service.registered_entities}))
| avg_line_length: 39.637343 | max_line_length: 79 | alphanum_fraction: 0.61704 |

hexsha: 4a17a060b02e6ef592d47a2abae0a2f135913672 | size: 1,100 | ext: py | lang: Python
repo_path: storage/legacy/pythos/pynetwork/node_0/test.py | repo_name: opensourceplanet/ICS | repo_head_hexsha: 1946aa22ca7f02c08be8359c0ca5de1b13af2dc5 | licenses: ["MIT"] (same for stars/issues/forks)
stars_count: null | issues_count: null | forks_count: null | event datetimes: null
#!/usr/bin/env python
# for testing the current local build
# initial import
try:
import os, sys
import platform
from classes import User, Idea
import ipfs
import subprocess
import time
#import bloacks, bloacks, chain, client
#import generate, menu, writer, ledger
except:
print('FATALBUILDERROR')
error = sys.exc_info()
print(error)
print(sys.exc_info()[0])
raise
# os definitions
os_name = os.name
os_platform = platform.system() + platform.release()
os_id = os_name + os_platform
print(os_id)
def start():
    # platform.system() distinguishes 'Windows'/'Darwin'/'Linux' (os.name only
    # yields 'nt'/'posix'); the `py` launcher is Windows-specific.
    if platform.system() == 'Windows':
        os.system("py -i test.py")
    elif platform.system() == 'Darwin':
        os.system("python -i test.py")
    elif platform.system() == 'Linux':
        os.system("python -i test.py")
# instantiation of User class
name = input('name>\t')
pin = input('pin>\t')
testuser = User(name, pin)
testuser.u_os_plt = os_name + os_platform
print(testuser.fullname)
print(testuser.u_os_plt)
# NOTE: CREATE_NEW_CONSOLE is Windows-only; this flag is unavailable on POSIX systems
subprocess.Popen([sys.executable, 'ipfsdaemon.py'], creationflags=subprocess.CREATE_NEW_CONSOLE)
time.sleep(3)
ipfs.initialize_ipfsapi()
| avg_line_length: 22.44898 | max_line_length: 98 | alphanum_fraction: 0.68 |

hexsha: 4a17a1f9f76874ee93b5b546a81961ad94a0456d | size: 1,635 | ext: py | lang: Python
max_stars: evaluator.py @ Zenodia/nativePytorch_NMT (bfced09eb6e5476d34619dfc0dd41d4ed610248f, ["MIT"]) | stars_count: 60 (2018-09-28T07:53:11.000Z to 2020-11-06T11:59:07.000Z)
max_issues: evaluator.py @ Pravin74/transformer-pytorch (c31e163ed57321e405771ef7fb556d4d92fd5efb, ["MIT"]) | issues_count: 2 (2021-02-15T14:08:08.000Z to 2021-09-12T12:52:37.000Z)
max_forks: evaluator.py @ Pravin74/transformer-pytorch (c31e163ed57321e405771ef7fb556d4d92fd5efb, ["MIT"]) | forks_count: 18 (2018-09-28T07:56:35.000Z to 2020-11-24T00:11:33.000Z)
from nltk.translate.bleu_score import sentence_bleu, corpus_bleu, SmoothingFunction
from tqdm import tqdm
class Evaluator:
def __init__(self, predictor, save_filepath):
self.predictor = predictor
self.save_filepath = save_filepath
def evaluate_dataset(self, test_dataset):
tokenize = lambda x: x.split()
predictions = []
for source, target in tqdm(test_dataset):
prediction = self.predictor.predict_one(source, num_candidates=1)[0]
predictions.append(prediction)
hypotheses = [tokenize(prediction) for prediction in predictions]
list_of_references = [[tokenize(target)] for source, target in test_dataset]
smoothing_function = SmoothingFunction()
with open(self.save_filepath, 'w') as file:
for (source, target), prediction, hypothesis, references in zip(test_dataset, predictions,
hypotheses, list_of_references):
sentence_bleu_score = sentence_bleu(references, hypothesis,
smoothing_function=smoothing_function.method3)
line = "{bleu_score}\t{source}\t{target}\t|\t{prediction}".format(
bleu_score=sentence_bleu_score,
source=source,
target=target,
prediction=prediction
)
file.write(line + '\n')
bleu_score = corpus_bleu(list_of_references, hypotheses, smoothing_function=smoothing_function.method3)
return bleu_score
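
if __name__ == '__main__':
    # Hedged usage sketch (not part of the original file): EchoPredictor is a
    # hypothetical stand-in exposing the predict_one() interface used above.
    class EchoPredictor:
        def predict_one(self, source, num_candidates=1):
            return [source] * num_candidates

    # Each dataset item is a (source, target) pair of plain strings.
    evaluator = Evaluator(EchoPredictor(), 'predictions.txt')
    print(evaluator.evaluate_dataset([('a perfect match', 'a perfect match')]))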
| avg_line_length: 40.875 | max_line_length: 111 | alphanum_fraction: 0.609786 |

hexsha: 4a17a292f686c0c3b7391a8dfb24a906b7100089 | size: 2,243 | ext: py | lang: Python
repo_path: myutils/wbutils.py | repo_name: sony/dolp-colorconstancy | repo_head_hexsha: ebd57216db0a91bb1a5d27613cff15c2bcd9de0d | licenses: ["MIT"] (same for stars/issues/forks)
stars_count: 1 (2022-03-25T01:38:13.000Z to 2022-03-25T01:38:13.000Z) | issues_count: null | forks_count: 2 (2022-03-25T16:50:56.000Z to 2022-03-28T07:49:17.000Z)
"""
wbutils.py
Copyright (c) 2022 Sony Group Corporation
This software is released under the MIT License.
http://opensource.org/licenses/mit-license.php
"""
import numpy as np
def polarAWB_achromatic(imean, weight):
pixels_r = np.copy(imean[..., 0])
pixels_g = np.copy(imean[..., 1])
pixels_b = np.copy(imean[..., 2])
pixels_g = np.clip(pixels_g, 1e-06, None)
illum_r = np.sum(pixels_r * weight / pixels_g) / np.sum(weight)
illum_b = np.sum(pixels_b * weight / pixels_g) / np.sum(weight)
return np.array([illum_r, 1, illum_b])
def polarAWB_chromatic(dolp, imean, weight):
weight_zero_mask = (weight > 0)
weight_masked = weight[weight_zero_mask]
dop_valid_R = dolp[..., 0][weight_zero_mask]
dop_valid_G = dolp[..., 1][weight_zero_mask]
dop_valid_B = dolp[..., 2][weight_zero_mask]
imean_valid_R = imean[..., 0][weight_zero_mask]
imean_valid_G = imean[..., 1][weight_zero_mask]
imean_valid_B = imean[..., 2][weight_zero_mask]
ys = (dop_valid_R - dop_valid_B) * imean_valid_G * weight_masked / np.sum(weight_masked)
    A = np.zeros((np.count_nonzero(weight_zero_mask), 2), dtype=np.float32)  # one row per valid pixel
A[:, 0] = (dop_valid_G - dop_valid_B) * imean_valid_R * weight_masked / np.sum(weight_masked)
A[:, 1] = (dop_valid_R - dop_valid_G) * imean_valid_B * weight_masked / np.sum(weight_masked)
A_inv = np.linalg.pinv(A)
r_gain, b_gain = A_inv.dot(ys)
return np.array([1 / r_gain, 1, 1 / b_gain])
def polarAWB(dolp, imean, weight_ach, weight_ch, achromatic_ratio_default):
if np.sum(weight_ach) > 0:
illum_achromatic = polarAWB_achromatic(imean, weight_ach)
achromatic_ratio = achromatic_ratio_default
else:
illum_achromatic = np.array([1, 1, 1])
achromatic_ratio = 0
if np.sum(weight_ch) > 0:
illum_chromatic = polarAWB_chromatic(dolp, imean, weight_ch)
chromatic_ratio = 1 - achromatic_ratio
else:
illum_chromatic = np.array([1, 1, 1])
chromatic_ratio = 0
if achromatic_ratio + chromatic_ratio == 0:
print('Your image does not have available pixels.')
return np.array([1, 1, 1])
return achromatic_ratio * illum_achromatic + chromatic_ratio * illum_chromatic
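
if __name__ == '__main__':
    # Hedged demo on synthetic data (not part of the original file); shapes and
    # values are illustrative only.
    rng = np.random.default_rng(0)
    dolp = rng.uniform(0.0, 0.3, size=(4, 4, 3)).astype(np.float32)
    imean = rng.uniform(0.1, 1.0, size=(4, 4, 3)).astype(np.float32)
    weight = np.ones((4, 4), dtype=np.float32)
    print(polarAWB(dolp, imean, weight, weight, achromatic_ratio_default=0.5))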
| avg_line_length: 34.507692 | max_line_length: 97 | alphanum_fraction: 0.674543 |

hexsha: 4a17a369272dd73d61bcf9a1aa9faee2c7c67761 | size: 193 | ext: py | lang: Python
repo_path: src/runner/i_discord_bot_runner.py | repo_name: konrad2508/kokomi-discord-bot | repo_head_hexsha: 5a9d459e92d552fa24ba3ada5188db19d93f0aaa | licenses: ["MIT"] (same for stars/issues/forks)
stars_count: 2 (2022-03-02T15:56:41.000Z to 2022-03-10T16:30:41.000Z) | issues_count: 2 (2022-03-10T16:30:20.000Z to 2022-03-16T14:33:59.000Z) | forks_count: null
from abc import ABC, abstractmethod
class IDiscordBotRunner(ABC):
'''Class responsible for running the bot.'''
@abstractmethod
def run(self) -> None:
'''Runs the bot.'''
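
class EchoBotRunner(IDiscordBotRunner):
    '''Hedged example (not part of the original file): a minimal concrete
    runner satisfying the interface; print() stands in for real bot startup.'''

    def run(self) -> None:
        print('bot is running')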
| avg_line_length: 19.3 | max_line_length: 48 | alphanum_fraction: 0.647668 |

hexsha: 4a17a37af30ca2e4f0cea24a2a2e6d334e799fc8 | size: 188 | ext: py | lang: Python
repo_path: challs/electronic_codebad/build.py | repo_name: gkanwar/ctf2021 | repo_head_hexsha: faf17d833f72ddcfb8a54c6d50a781de5f937381 | licenses: ["MIT"] (same for stars/issues/forks)
stars_count: null | issues_count: null | forks_count: null | event datetimes: null
import struct
with open('flag.bmp', 'rb') as f:
raw_bytes = f.read()
with open('flag.bin', 'wb') as f:
off = struct.unpack('<I', raw_bytes[10:14])[0]
f.write(raw_bytes[off:])
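# Hedged note (not part of the original file): bytes 10-14 of a BMP header hold
# 'bfOffBits', the little-endian uint32 offset of the pixel array, so flag.bin
# ends up containing only the raw pixel data with all headers stripped.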
| avg_line_length: 23.5 | max_line_length: 50 | alphanum_fraction: 0.606383 |

hexsha: 4a17a383ac65a80ef23430148eca21cd0cb3ea48 | size: 125 | ext: py | lang: Python
repo_path: visits_detector/core/__init__.py | repo_name: AlexFridman/visits-detector | repo_head_hexsha: 3df0c719c60c751ad449341fe5f68383352e8f5e | licenses: ["MIT"] (same for stars/issues/forks)
stars_count: null | issues_count: null | forks_count: null | event datetimes: null
from .extract_events_reducer import ExtractEventsReducer
from .filter_and_map_to_index_mapper import FilterAndMapToIndexMapper
| avg_line_length: 41.666667 | max_line_length: 68 | alphanum_fraction: 0.936 |

hexsha: 4a17a5d702ba2181f4473d34de18257f0bc46d60 | size: 17,110 | ext: py | lang: Python
max_stars: examples/pytorch/text-classification/run_xnli.py @ edbeeching/transformers (104c065277562ba276c46c329144e10c44dce286, ["Apache-2.0"]) | stars_count: 3 (2022-01-15T08:06:07.000Z to 2022-03-10T07:13:18.000Z)
max_issues: examples/pytorch/text-classification/run_xnli.py @ arron1227/transformers (b18dfd95e1f60ae65a959a7b255fc06522170d1b, ["Apache-2.0"]) | issues_count: null
max_forks: examples/pytorch/text-classification/run_xnli.py @ arron1227/transformers (b18dfd95e1f60ae65a959a7b255fc06522170d1b, ["Apache-2.0"]) | forks_count: null
#!/usr/bin/env python
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Finetuning multi-lingual models on XNLI (e.g. Bert, DistilBERT, XLM).
Adapted from `examples/text-classification/run_glue.py`"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
from datasets import load_dataset, load_metric
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.18.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
Using `HfArgumentParser` we can turn this class
into argparse arguments to be able to specify them on
the command line.
"""
max_seq_length: Optional[int] = field(
default=128,
metadata={
"help": "The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
)
pad_to_max_length: bool = field(
default=True,
metadata={
"help": "Whether to pad all samples to `max_seq_length`. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch."
},
)
max_train_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
},
)
max_eval_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
},
)
max_predict_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of prediction examples to this "
"value if set."
},
)
server_ip: Optional[str] = field(default=None, metadata={"help": "For distant debugging."})
server_port: Optional[str] = field(default=None, metadata={"help": "For distant debugging."})
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(
default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
)
language: str = field(
default=None, metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."}
)
train_language: Optional[str] = field(
default=None, metadata={"help": "Train language if it is different from the evaluation language."}
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None,
metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
)
do_lower_case: Optional[bool] = field(
default=False,
metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"},
)
use_fast_tokenizer: bool = field(
default=True,
metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
)
model_revision: str = field(
default="main",
metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
)
use_auth_token: bool = field(
default=False,
metadata={
"help": "Will use the token generated when running `transformers-cli login` (necessary to use this script "
"with private models)."
},
)
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Setup distant debugging if needed
if data_args.server_ip and data_args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(data_args.server_ip, data_args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
log_level = training_args.get_process_log_level()
logger.setLevel(log_level)
datasets.utils.logging.set_verbosity(log_level)
transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
logger.info(f"Training/evaluation parameters {training_args}")
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# Set seed before initializing model.
set_seed(training_args.seed)
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
if training_args.do_train:
if model_args.train_language is None:
train_dataset = load_dataset(
"xnli",
model_args.language,
split="train",
cache_dir=model_args.cache_dir,
use_auth_token=True if model_args.use_auth_token else None,
)
else:
train_dataset = load_dataset(
"xnli",
model_args.train_language,
split="train",
cache_dir=model_args.cache_dir,
use_auth_token=True if model_args.use_auth_token else None,
)
label_list = train_dataset.features["label"].names
if training_args.do_eval:
eval_dataset = load_dataset(
"xnli",
model_args.language,
split="validation",
cache_dir=model_args.cache_dir,
use_auth_token=True if model_args.use_auth_token else None,
)
label_list = eval_dataset.features["label"].names
if training_args.do_predict:
predict_dataset = load_dataset(
"xnli",
model_args.language,
split="test",
cache_dir=model_args.cache_dir,
use_auth_token=True if model_args.use_auth_token else None,
)
label_list = predict_dataset.features["label"].names
# Labels
num_labels = len(label_list)
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
num_labels=num_labels,
finetuning_task="xnli",
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
do_lower_case=model_args.do_lower_case,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
model = AutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
# Preprocessing the datasets
# Padding strategy
if data_args.pad_to_max_length:
padding = "max_length"
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
padding = False
def preprocess_function(examples):
# Tokenize the texts
return tokenizer(
examples["premise"],
examples["hypothesis"],
padding=padding,
max_length=data_args.max_seq_length,
truncation=True,
)
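    # Illustrative call (values made up; the exact keys returned depend on the tokenizer):
    #   preprocess_function({"premise": ["A man eats."], "hypothesis": ["He eats."]})
    #   -> {"input_ids": [[...]], "token_type_ids": [[...]], "attention_mask": [[...]]}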
if training_args.do_train:
if data_args.max_train_samples is not None:
max_train_samples = min(len(train_dataset), data_args.max_train_samples)
train_dataset = train_dataset.select(range(max_train_samples))
with training_args.main_process_first(desc="train dataset map pre-processing"):
train_dataset = train_dataset.map(
preprocess_function,
batched=True,
load_from_cache_file=not data_args.overwrite_cache,
desc="Running tokenizer on train dataset",
)
# Log a few random samples from the training set:
for index in random.sample(range(len(train_dataset)), 3):
logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
if training_args.do_eval:
if data_args.max_eval_samples is not None:
max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
eval_dataset = eval_dataset.select(range(max_eval_samples))
with training_args.main_process_first(desc="validation dataset map pre-processing"):
eval_dataset = eval_dataset.map(
preprocess_function,
batched=True,
load_from_cache_file=not data_args.overwrite_cache,
desc="Running tokenizer on validation dataset",
)
if training_args.do_predict:
if data_args.max_predict_samples is not None:
max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
predict_dataset = predict_dataset.select(range(max_predict_samples))
with training_args.main_process_first(desc="prediction dataset map pre-processing"):
predict_dataset = predict_dataset.map(
preprocess_function,
batched=True,
load_from_cache_file=not data_args.overwrite_cache,
desc="Running tokenizer on prediction dataset",
)
# Get the metric function
metric = load_metric("xnli")
    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary mapping metric names (strings) to floats.
def compute_metrics(p: EvalPrediction):
preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
preds = np.argmax(preds, axis=1)
return metric.compute(predictions=preds, references=p.label_ids)
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
data_collator = default_data_collator
elif training_args.fp16:
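        # padding to a multiple of 8 lines up with fp16 Tensor Core tile sizes,
        # which typically speeds up half-precision matmuls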
data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
else:
data_collator = None
# Initialize our Trainer
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset if training_args.do_train else None,
eval_dataset=eval_dataset if training_args.do_eval else None,
compute_metrics=compute_metrics,
tokenizer=tokenizer,
data_collator=data_collator,
)
# Training
if training_args.do_train:
checkpoint = None
if training_args.resume_from_checkpoint is not None:
checkpoint = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
checkpoint = last_checkpoint
train_result = trainer.train(resume_from_checkpoint=checkpoint)
metrics = train_result.metrics
max_train_samples = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
)
metrics["train_samples"] = min(max_train_samples, len(train_dataset))
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics("train", metrics)
trainer.save_metrics("train", metrics)
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***")
metrics = trainer.evaluate(eval_dataset=eval_dataset)
max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", metrics)
# Prediction
if training_args.do_predict:
logger.info("*** Predict ***")
predictions, labels, metrics = trainer.predict(predict_dataset, metric_key_prefix="predict")
max_predict_samples = (
data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset)
)
metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset))
trainer.log_metrics("predict", metrics)
trainer.save_metrics("predict", metrics)
predictions = np.argmax(predictions, axis=1)
output_predict_file = os.path.join(training_args.output_dir, "predictions.txt")
if trainer.is_world_process_zero():
with open(output_predict_file, "w") as writer:
writer.write("index\tprediction\n")
for index, item in enumerate(predictions):
item = label_list[item]
writer.write(f"{index}\t{item}\n")
if __name__ == "__main__":
main()
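# Example invocation (illustrative; the flags mirror the dataclass fields parsed
# above, and exact behavior depends on the installed transformers/datasets versions):
#
#   python run_xnli.py \
#     --model_name_or_path bert-base-multilingual-cased \
#     --language de --train_language en \
#     --do_train --do_eval \
#     --max_seq_length 128 \
#     --output_dir /tmp/xnli_out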
avg_line_length: 40.164319 | max_line_length: 119 | alphanum_fraction: 0.677206

hexsha: 4a17a723c370c9afb22ba9a67968aba4ea399881 | size: 1,097 | ext: py | lang: Python
repo_path: vtcdata/code/make_parameter_files_facetask.py | repo_name: mvdoc/famretino2
repo_head_hexsha: c5c7d1a757aedca81f62f4b2d0738f8fdb019e48 | licenses: ["Apache-2.0"]
stars: null | issues: null | forks: null
import numpy as np
import os
import os.path as op
from scipy.io import loadmat
rois = ['V1', 'V2', 'V3', 'hV4', 'IOG', 'pFus', 'mFus']
def filter_voxels(res, cutoff=50):
# as in Kay et al., select non-noisy voxels with at least 50% variance explained
idx = res['aggregatedtestperformance'][0] >= cutoff
return np.median(res['params'][..., idx], axis=0)
HERE = op.dirname(op.abspath(__file__))
OUTDIR = op.join(op.dirname(HERE), 'output')
params = dict()
for hemi in ['L', 'R']:
for roi in rois:
ok_voxs = []
for s in range(1, 4):
res = loadmat(op.join(OUTDIR, f'sub-{s:02d}_{hemi}{roi}_facetask.mat'))
ok_voxs.append(filter_voxels(res))
ok_voxs = np.hstack(ok_voxs)
params[f'{hemi}{roi}'] = ok_voxs
# save parameters for later use
header = ['row', 'col', 'std', 'gain', 'n']
for roi, param in params.items():
fnout = op.join(OUTDIR, f'{roi}_facetask_median_param.txt')
if not op.exists(fnout):
np.savetxt(fnout, param, header=' '.join(header))
else:
print(f'Skipping {fnout}, file exists')
avg_line_length: 30.472222 | max_line_length: 84 | alphanum_fraction: 0.620784

hexsha: 4a17a8f610810fcb135cfeb49b9d7e6dc26b24a1 | size: 20,455 | ext: py | lang: Python
repo_path: paypal/express/views.py | repo_name: evonove/django-oscar-paypal
repo_head_hexsha: f3561efb4654470e84087c2a7823d95feb8d28f1 | licenses: ["BSD-3-Clause"]
stars: null | issues: 2 (2022-02-02T10:13:09.000Z to 2022-02-02T12:07:25.000Z) | forks: null
import logging
from decimal import Decimal as D
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import get_user_model
from django.contrib.auth.models import AnonymousUser
from django.http import HttpResponse, HttpResponseBadRequest
from django.shortcuts import get_object_or_404, redirect
from django.urls import reverse
from django.utils.http import urlencode
from django.utils.translation import gettext_lazy as _
from django.views.generic import RedirectView, View
from oscar.apps.payment.exceptions import UnableToTakePayment
from oscar.apps.shipping.methods import FixedPrice, NoShippingRequired
from oscar.core.exceptions import ModuleNotFoundError
from oscar.core.loading import get_class, get_model
from paypal.exceptions import PayPalError
from paypal.express.exceptions import (
EmptyBasketException, InvalidBasket, MissingShippingAddressException, MissingShippingMethodException)
from paypal.express.facade import confirm_transaction, fetch_transaction_details, get_paypal_url
from paypal.express.gateway import buyer_pays_on_paypal
# Load views dynamically
PaymentDetailsView = get_class('checkout.views', 'PaymentDetailsView')
CheckoutSessionMixin = get_class('checkout.session', 'CheckoutSessionMixin')
User = get_user_model()
ShippingAddress = get_model('order', 'ShippingAddress')
Country = get_model('address', 'Country')
Basket = get_model('basket', 'Basket')
Repository = get_class('shipping.repository', 'Repository')
Selector = get_class('partner.strategy', 'Selector')
Source = get_model('payment', 'Source')
SourceType = get_model('payment', 'SourceType')
try:
Applicator = get_class('offer.applicator', 'Applicator')
except ModuleNotFoundError:
# fallback for django-oscar<=1.1
Applicator = get_class('offer.utils', 'Applicator')
logger = logging.getLogger('paypal.express')
class RedirectView(CheckoutSessionMixin, RedirectView):
"""
    Initiate the transaction with PayPal and redirect the user
to PayPal's Express Checkout to perform the transaction.
"""
permanent = False
# Setting to distinguish if the site has already collected a shipping
# address. This is False when redirecting to PayPal straight from the
# basket page but True when redirecting from checkout.
as_payment_method = False
# If True redirect directly to credit card payment
ccard = False
def get_redirect_url(self, **kwargs):
try:
basket = self.build_submission()['basket']
url = self._get_redirect_url(basket, **kwargs)
except PayPalError as ppe:
messages.error(self.request, str(ppe))
if self.as_payment_method:
url = reverse('checkout:payment-details')
else:
url = reverse('basket:summary')
return url
except InvalidBasket as e:
messages.warning(self.request, str(e))
return reverse('basket:summary')
except EmptyBasketException:
messages.error(self.request, _("Your basket is empty"))
return reverse('basket:summary')
except MissingShippingAddressException:
messages.error(
self.request, _("A shipping address must be specified"))
return reverse('checkout:shipping-address')
except MissingShippingMethodException:
messages.error(
self.request, _("A shipping method must be specified"))
return reverse('checkout:shipping-method')
else:
# Transaction successfully registered with PayPal. Now freeze the
# basket so it can't be edited while the customer is on the PayPal
# site.
basket.freeze()
logger.info("Basket #%s - redirecting to %s", basket.id, url)
return url
def _get_redirect_url(self, basket, **kwargs):
if basket.is_empty:
raise EmptyBasketException()
        params = {
            'basket': basket,
            # set up a default empty list to support no_shipping
            'shipping_methods': [],
        }
user = self.request.user
if self.as_payment_method:
if basket.is_shipping_required():
# Only check for shipping details if required.
shipping_addr = self.get_shipping_address(basket)
if not shipping_addr:
raise MissingShippingAddressException()
shipping_method = self.get_shipping_method(
basket, shipping_addr)
if not shipping_method:
raise MissingShippingMethodException()
params['shipping_address'] = shipping_addr
params['shipping_method'] = shipping_method
params['shipping_methods'] = []
else:
# Maik doubts that this code ever worked. Assigning
# shipping method instances to Paypal params
# isn't going to work, is it?
shipping_methods = Repository().get_shipping_methods(
user=user, basket=basket, request=self.request)
params['shipping_methods'] = shipping_methods
if settings.DEBUG:
# Determine the localserver's hostname to use when
# in testing mode
params['host'] = self.request.META['HTTP_HOST']
if user.is_authenticated:
params['user'] = user
params['paypal_params'] = self._get_paypal_params()
params['ccard'] = self.ccard
return get_paypal_url(**params)
def _get_paypal_params(self):
"""
Return any additional PayPal parameters
"""
return {}
class CancelResponseView(RedirectView):
permanent = False
def get(self, request, *args, **kwargs):
basket = get_object_or_404(Basket, id=kwargs['basket_id'],
status=Basket.FROZEN)
basket.thaw()
logger.info("Payment cancelled (token %s) - basket #%s thawed",
request.GET.get('token', '<no token>'), basket.id)
return super(CancelResponseView, self).get(request, *args, **kwargs)
def get_redirect_url(self, **kwargs):
messages.error(self.request, _("PayPal transaction cancelled"))
return reverse('basket:summary')
# Upgrading notes: when we drop support for Oscar 0.6, this class can be
# refactored to pass variables around more explicitly (instead of assigning
# things to self so they are accessible in a later method).
class SuccessResponseView(PaymentDetailsView):
template_name_preview = 'paypal/express/preview.html'
preview = True
error_message = _("A problem occurred communicating with PayPal - please try again later")
@property
def pre_conditions(self):
return []
def get(self, request, *args, **kwargs):
"""
Fetch details about the successful transaction from PayPal.
We use these details to show a preview of the order with a 'submit' button to place it.
The preview step can be skipped with `PAYPAL_BUYER_PAYS_ON_PAYPAL=True` inside settings.
"""
try:
self.payer_id = request.GET['PayerID']
self.token = request.GET['token']
except KeyError:
# Manipulation - redirect to basket page with warning message
logger.warning("Missing GET params on success response page")
messages.error(self.request, _("Unable to determine PayPal transaction details"))
return redirect('basket:summary')
try:
self.txn = fetch_transaction_details(self.token)
except PayPalError as e:
logger.warning("Unable to fetch transaction details for token %s: %s", self.token, e)
messages.error(self.request, self.error_message)
return redirect('basket:summary')
# Reload frozen basket which is specified in the URL
kwargs['basket'] = self.load_frozen_basket(kwargs['basket_id'])
if not kwargs['basket']:
logger.warning("Unable to load frozen basket with ID %s", kwargs['basket_id'])
messages.error(self.request, _("No basket was found that corresponds to your PayPal transaction"))
return redirect('basket:summary')
if buyer_pays_on_paypal():
return self.submit(**self.build_submission(basket=kwargs['basket']))
logger.info(
"Basket #%s - showing preview with payer ID %s and token %s",
kwargs['basket'].id, self.payer_id, self.token)
return super(SuccessResponseView, self).get(request, *args, **kwargs)
def load_frozen_basket(self, basket_id):
# Lookup the frozen basket that this txn corresponds to
try:
basket = Basket.objects.get(id=basket_id, status=Basket.FROZEN)
except Basket.DoesNotExist:
return None
# Assign strategy to basket instance
if Selector:
basket.strategy = Selector().strategy(self.request)
# Find the logged user (if any)
try:
if self.request and self.request.user:
user = User.objects.get(id=self.request.user.id)
else:
user = None
except User.DoesNotExist:
user = None
# Re-apply any offers
Applicator().apply(request=self.request, basket=basket, user=user)
return basket
def get_context_data(self, **kwargs):
ctx = super(SuccessResponseView, self).get_context_data(**kwargs)
if not hasattr(self, 'payer_id'):
return ctx
# This context generation only runs when in preview mode
ctx.update({
'payer_id': self.payer_id,
'token': self.token,
'paypal_user_email': self.txn.value('EMAIL'),
'paypal_amount': D(self.txn.value('AMT')),
})
return ctx
def post(self, request, *args, **kwargs):
"""
Place an order.
We fetch the txn details again and then proceed with oscar's standard
payment details view for placing the order.
"""
if buyer_pays_on_paypal():
return HttpResponseBadRequest() # we don't expect any user here if we let users buy on PayPal
try:
self.payer_id = request.POST['payer_id']
self.token = request.POST['token']
except KeyError:
# Probably suspicious manipulation if we get here
messages.error(self.request, self.error_message)
return redirect('basket:summary')
try:
self.txn = fetch_transaction_details(self.token)
except PayPalError:
# Unable to fetch txn details from PayPal - we have to bail out
messages.error(self.request, self.error_message)
return redirect('basket:summary')
# Reload frozen basket which is specified in the URL
basket = self.load_frozen_basket(kwargs['basket_id'])
if not basket:
messages.error(self.request, self.error_message)
return redirect('basket:summary')
submission = self.build_submission(basket=basket)
return self.submit(**submission)
def build_submission(self, **kwargs):
submission = super(
SuccessResponseView, self).build_submission(**kwargs)
# Pass the user email so it can be stored with the order
submission['order_kwargs']['guest_email'] = self.txn.value('EMAIL')
# Pass PP params
submission['payment_kwargs']['payer_id'] = self.payer_id
submission['payment_kwargs']['token'] = self.token
submission['payment_kwargs']['txn'] = self.txn
return submission
def handle_payment(self, order_number, total, **kwargs):
"""
Complete payment with PayPal - this calls the 'DoExpressCheckout'
method to capture the money from the initial transaction.
"""
try:
confirm_txn = confirm_transaction(
kwargs['payer_id'], kwargs['token'], kwargs['txn'].amount,
kwargs['txn'].currency)
except PayPalError:
raise UnableToTakePayment()
if not confirm_txn.is_successful:
raise UnableToTakePayment()
# Record payment source and event
source_type, is_created = SourceType.objects.get_or_create(
name='PayPal')
source = Source(source_type=source_type,
currency=confirm_txn.currency,
amount_allocated=confirm_txn.amount,
amount_debited=confirm_txn.amount,
reference=confirm_txn.token)
self.add_payment_source(source)
self.add_payment_event('Settled', confirm_txn.amount,
reference=confirm_txn.correlation_id)
def get_shipping_address(self, basket):
"""
Return a created shipping address instance, created using
the data returned by PayPal.
"""
# Determine names - PayPal uses a single field
ship_to_name = self.txn.value('PAYMENTREQUEST_0_SHIPTONAME')
if ship_to_name is None:
return None
first_name = last_name = ''
parts = ship_to_name.split()
if len(parts) == 1:
last_name = ship_to_name
elif len(parts) > 1:
first_name = parts[0]
last_name = " ".join(parts[1:])
return ShippingAddress(
first_name=first_name,
last_name=last_name,
line1=self.txn.value('PAYMENTREQUEST_0_SHIPTOSTREET'),
line2=self.txn.value('PAYMENTREQUEST_0_SHIPTOSTREET2', default=""),
line4=self.txn.value('PAYMENTREQUEST_0_SHIPTOCITY', default=""),
state=self.txn.value('PAYMENTREQUEST_0_SHIPTOSTATE', default=""),
postcode=self.txn.value('PAYMENTREQUEST_0_SHIPTOZIP', default=""),
country=Country.objects.get(iso_3166_1_a2=self.txn.value('PAYMENTREQUEST_0_SHIPTOCOUNTRYCODE')),
phone_number=self.txn.value('PAYMENTREQUEST_0_SHIPTOPHONENUM', default=""),
)
def _get_shipping_method_by_name(self, name, basket, shipping_address=None):
methods = Repository().get_shipping_methods(
basket=basket, user=self.request.user,
shipping_addr=shipping_address, request=self.request)
for method in methods:
if method.name == name:
return method
def get_shipping_method(self, basket, shipping_address=None, **kwargs):
"""
Return the shipping method used
"""
if not basket.is_shipping_required():
return NoShippingRequired()
# Instantiate a new FixedPrice shipping method instance
charge_incl_tax = D(self.txn.value('PAYMENTREQUEST_0_SHIPPINGAMT'))
# Assume no tax for now
charge_excl_tax = charge_incl_tax
name = self.txn.value('SHIPPINGOPTIONNAME')
session_method = super(SuccessResponseView, self).get_shipping_method(
basket, shipping_address, **kwargs)
if not session_method or (name and name != session_method.name):
if name:
method = self._get_shipping_method_by_name(name, basket, shipping_address)
else:
method = None
if not method:
method = FixedPrice(charge_excl_tax, charge_incl_tax)
if session_method:
method.name = session_method.name
method.code = session_method.code
else:
method = session_method
return method
class ShippingOptionsView(View):
def get(self, request, *args, **kwargs):
"""
        We use the shipping address given to us by PayPal to
        determine the available shipping methods.
"""
# Basket ID is passed within the URL path. We need to do this as some
        # shipping options depend on the user and basket contents. PayPal does
        # pass back details of the basket contents but it would be a royal pain to
        # reconstitute the basket based on those - easier to just piggy-back
# the basket ID in the callback URL.
basket = get_object_or_404(Basket, id=kwargs['basket_id'])
user = basket.owner
if not user:
user = AnonymousUser()
# Create a shipping address instance using the data passed back
country_code = self.request.GET.get(
'SHIPTOCOUNTRY', None)
try:
country = Country.objects.get(iso_3166_1_a2=country_code)
except Country.DoesNotExist:
country = Country()
shipping_address = ShippingAddress(
line1=self.request.GET.get('SHIPTOSTREET', ''),
line2=self.request.GET.get('SHIPTOSTREET2', ''),
line4=self.request.GET.get('SHIPTOCITY', ''),
state=self.request.GET.get('SHIPTOSTATE', ''),
postcode=self.request.GET.get('SHIPTOZIP', ''),
country=country
)
methods = Repository().get_shipping_methods(
basket=basket, shipping_addr=shipping_address,
request=self.request, user=user)
return self.render_to_response(methods, basket)
def post(self, request, *args, **kwargs):
"""
        We use the shipping address given to us by PayPal to
        determine the available shipping methods.
"""
# Basket ID is passed within the URL path. We need to do this as some
        # shipping options depend on the user and basket contents. PayPal does
        # pass back details of the basket contents but it would be a royal pain to
        # reconstitute the basket based on those - easier to just piggy-back
# the basket ID in the callback URL.
basket = get_object_or_404(Basket, id=kwargs['basket_id'])
user = basket.owner
if not user:
user = AnonymousUser()
# Create a shipping address instance using the data passed back
country_code = self.request.POST.get(
'SHIPTOCOUNTRY', None)
try:
country = Country.objects.get(iso_3166_1_a2=country_code)
except Country.DoesNotExist:
country = Country()
shipping_address = ShippingAddress(
line1=self.request.POST.get('SHIPTOSTREET', ''),
line2=self.request.POST.get('SHIPTOSTREET2', ''),
line4=self.request.POST.get('SHIPTOCITY', ''),
state=self.request.POST.get('SHIPTOSTATE', ''),
postcode=self.request.POST.get('SHIPTOZIP', ''),
country=country
)
methods = Repository().get_shipping_methods(
basket=basket, shipping_addr=shipping_address,
request=self.request, user=user)
return self.render_to_response(methods, basket)
def render_to_response(self, methods, basket):
pairs = [
('METHOD', 'CallbackResponse'),
('CALLBACKVERSION', '61.0'),
('CURRENCYCODE', self.request.POST.get('CURRENCYCODE', 'GBP')),
]
if methods:
for index, method in enumerate(methods):
charge = method.calculate(basket).incl_tax
pairs.append(('L_SHIPPINGOPTIONNAME%d' % index,
str(method.name)))
pairs.append(('L_SHIPPINGOPTIONLABEL%d' % index,
str(method.description)))
pairs.append(('L_SHIPPINGOPTIONAMOUNT%d' % index, charge))
# For now, we assume tax and insurance to be zero
pairs.append(('L_TAXAMT%d' % index, D('0.00')))
pairs.append(('L_INSURANCEAMT%d' % index, D('0.00')))
# We assume that the first returned method is the default one
pairs.append(('L_SHIPPINGOPTIONISDEFAULT%d' % index,
1 if index == 0 else 0))
else:
# No shipping methods available - we flag this up to PayPal indicating that we
# do not ship to the shipping address.
pairs.append(('NO_SHIPPING_OPTION_DETAILS', 1))
payload = urlencode(pairs)
logger.debug("Basket #%s - returning postage costs payload = '%s'", basket.id, payload)
return HttpResponse(payload)
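# Illustrative NVP payload returned to PayPal (values made up, wrapped for readability):
#   METHOD=CallbackResponse&CALLBACKVERSION=61.0&CURRENCYCODE=GBP
#   &L_SHIPPINGOPTIONNAME0=Standard&L_SHIPPINGOPTIONLABEL0=Royal+Mail&L_SHIPPINGOPTIONAMOUNT0=3.99
#   &L_TAXAMT0=0.00&L_INSURANCEAMT0=0.00&L_SHIPPINGOPTIONISDEFAULT0=1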
avg_line_length: 40.991984 | max_line_length: 110 | alphanum_fraction: 0.629919

hexsha: 4a17a970c13474241c609c4f473e7d5b5a9948de | size: 12,355 | ext: py | lang: Python
repo_path: src/sentry/plugins/bases/issue.py | repo_name: AlexWayfer/sentry
repo_head_hexsha: ef935cda2b2e960bd602fda590540882d1b0712d | licenses: ["BSD-3-Clause"]
stars: null | issues: null | forks: null
"""
sentry.plugins.bases.issue
~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import six
from django import forms
from django.conf import settings
from django.utils.html import format_html
from social_auth.models import UserSocialAuth
from sentry.models import (
Activity,
Event,
GroupMeta,
)
from sentry.plugins import Plugin
from sentry.signals import issue_tracker_used
from sentry.utils.auth import get_auth_providers
from sentry.utils.http import absolute_uri
from sentry.utils.safe import safe_execute
class NewIssueForm(forms.Form):
title = forms.CharField(max_length=200, widget=forms.TextInput(attrs={'class': 'span9'}))
description = forms.CharField(widget=forms.Textarea(attrs={'class': 'span9'}))
class IssueTrackingPlugin(Plugin):
# project_conf_form = BaseIssueOptionsForm
new_issue_form = NewIssueForm
link_issue_form = None
create_issue_template = 'sentry/plugins/bases/issue/create_issue.html'
not_configured_template = 'sentry/plugins/bases/issue/not_configured.html'
needs_auth_template = 'sentry/plugins/bases/issue/needs_auth.html'
auth_provider = None
can_unlink_issues = False
can_link_existing_issues = False
def get_plugin_type(self):
return 'issue-tracking'
def _get_group_body(self, request, group, event, **kwargs):
result = []
for interface in six.itervalues(event.interfaces):
output = safe_execute(interface.to_string, event, _with_transaction=False)
if output:
result.append(output)
return '\n\n'.join(result)
def _get_group_description(self, request, group, event):
output = [
absolute_uri(group.get_absolute_url()),
]
body = self._get_group_body(request, group, event)
if body:
output.extend([
'',
'```',
body,
'```',
])
return '\n'.join(output)
def _get_group_title(self, request, group, event):
return event.error()
def is_configured(self, request, project, **kwargs):
raise NotImplementedError
def get_auth_for_user(self, user, **kwargs):
"""
        Return a ``UserSocialAuth`` object for the given user based on this plugin's ``auth_provider``.
"""
assert self.auth_provider, 'There is no auth provider configured for this plugin.'
if not user.is_authenticated():
return None
try:
return UserSocialAuth.objects.filter(user=user, provider=self.auth_provider)[0]
except IndexError:
return None
def needs_auth(self, request, project, **kwargs):
"""
Return ``True`` if the authenticated user needs to associate an auth service before
performing actions with this plugin.
"""
if self.auth_provider is None:
return False
if not request.user.is_authenticated():
return True
return bool(
not UserSocialAuth.objects.filter(user=request.user, provider=self.auth_provider
).exists()
)
def get_new_issue_title(self, **kwargs):
"""
Return a string for the "Create new issue" action label.
"""
return 'Create %s Issue' % self.get_title()
def get_unlink_issue_title(self, **kwargs):
"""
Return a string for the "Unlink plugin issue" action label.
"""
return 'Unlink %s Issue' % self.get_title()
def get_new_issue_form(self, request, group, event, **kwargs):
"""
Return a Form for the "Create new issue" page.
"""
return self.new_issue_form(
request.POST or None, initial=self.get_initial_form_data(request, group, event)
)
def get_new_issue_read_only_fields(self, *args, **kwargs):
"""
Return a list of additional read only fields that are helpful to
know when filing the issue.
"""
return []
def get_link_existing_issue_form(self, request, group, event, **kwargs):
if not self.link_issue_form:
return None
return self.link_issue_form(
request.POST or None, initial=self.get_initial_link_form_data(request, group, event)
)
def get_issue_url(self, group, issue_id, **kwargs):
"""
Given an issue_id (string) return an absolute URL to the issue's details
page.
"""
raise NotImplementedError
def get_issue_title_by_id(self, request, group, issue_id):
"""
Given an issue_id return the issue's title.
"""
raise NotImplementedError
def get_issue_label(self, group, issue_id, **kwargs):
"""
Given an issue_id (string) return a string representing the issue.
e.g. GitHub represents issues as GH-XXX
"""
return '#%s' % issue_id
def create_issue(self, request, group, form_data, **kwargs):
"""
Creates the issue on the remote service and returns an issue ID.
"""
raise NotImplementedError
def link_issue(self, request, group, form_data, **kwargs):
"""
Can be overridden for any actions needed when linking issues
(like adding a comment to an existing issue).
"""
pass
def get_initial_form_data(self, request, group, event, **kwargs):
return {
'description': self._get_group_description(request, group, event),
'title': self._get_group_title(request, group, event),
}
def get_initial_link_form_data(self, request, group, event, **kwargs):
return {}
def has_auth_configured(self, **kwargs):
if not self.auth_provider:
return True
return self.auth_provider in get_auth_providers()
def handle_unlink_issue(self, request, group, **kwargs):
GroupMeta.objects.unset_value(group, '%s:tid' % self.get_conf_key())
return self.redirect(group.get_absolute_url())
def view(self, request, group, **kwargs):
has_auth_configured = self.has_auth_configured()
if not (has_auth_configured and self.is_configured(
project=group.project, request=request)):
if self.auth_provider:
required_auth_settings = settings.AUTH_PROVIDERS[self.auth_provider]
else:
required_auth_settings = None
return self.render(
self.not_configured_template, {
'title': self.get_title(),
'project': group.project,
'has_auth_configured': has_auth_configured,
'required_auth_settings': required_auth_settings,
}
)
if self.needs_auth(project=group.project, request=request):
return self.render(
self.needs_auth_template, {
'title': self.get_title(),
'project': group.project,
}
)
if GroupMeta.objects.get_value(group, '%s:tid' % self.get_conf_key(), None):
if self.can_unlink_issues and request.GET.get('unlink'):
return self.handle_unlink_issue(request, group, **kwargs)
return None
prefix = self.get_conf_key()
event = group.get_latest_event()
Event.objects.bind_nodes([event], 'data')
op = request.POST.get('op', 'create')
create_form = self.get_new_issue_form(request, group, event)
link_form = None
if self.can_link_existing_issues:
link_form = self.get_link_existing_issue_form(request, group, event)
if op == 'create':
if create_form.is_valid():
try:
issue_id = self.create_issue(
group=group,
form_data=create_form.cleaned_data,
request=request,
)
except forms.ValidationError as e:
create_form.errors['__all__'] = [u'Error creating issue: %s' % e]
if create_form.is_valid():
GroupMeta.objects.set_value(group, '%s:tid' % prefix, issue_id)
issue_information = {
'title': create_form.cleaned_data['title'],
'provider': self.get_title(),
'location': self.get_issue_url(group, issue_id),
'label': self.get_issue_label(group=group, issue_id=issue_id),
}
Activity.objects.create(
project=group.project,
group=group,
type=Activity.CREATE_ISSUE,
user=request.user,
data=issue_information,
)
issue_tracker_used.send_robust(
plugin=self,
project=group.project,
user=request.user,
sender=IssueTrackingPlugin
)
return self.redirect(group.get_absolute_url())
elif op == 'link':
if link_form.is_valid():
try:
self.link_issue(
group=group,
form_data=link_form.cleaned_data,
request=request,
)
except forms.ValidationError as e:
                    link_form.errors['__all__'] = [u'Error linking issue: %s' % e]
if link_form.is_valid():
issue_id = int(link_form.cleaned_data['issue_id'])
GroupMeta.objects.set_value(group, '%s:tid' % prefix, issue_id)
issue_information = {
'title': self.get_issue_title_by_id(request, group, issue_id),
'provider': self.get_title(),
'location': self.get_issue_url(group, issue_id),
'label': self.get_issue_label(group=group, issue_id=issue_id),
}
Activity.objects.create(
project=group.project,
group=group,
type=Activity.CREATE_ISSUE,
user=request.user,
data=issue_information,
)
return self.redirect(group.get_absolute_url())
context = {
'create_form': create_form,
# pass in 'form' for legacy compat
'form': create_form,
'title': self.get_new_issue_title(),
'read_only_fields': self.get_new_issue_read_only_fields(group=group),
'can_link_existing_issues': self.can_link_existing_issues,
'link_form': link_form,
'op': op
}
return self.render(self.create_issue_template, context)
def actions(self, request, group, action_list, **kwargs):
if not self.is_configured(request=request, project=group.project):
return action_list
prefix = self.get_conf_key()
if not GroupMeta.objects.get_value(group, '%s:tid' % prefix, None):
action_list.append((self.get_new_issue_title(), self.get_url(group)))
elif self.can_unlink_issues:
action_list.append(
(self.get_unlink_issue_title(), '%s?unlink=1' % self.get_url(group).rstrip('/'))
)
return action_list
def tags(self, request, group, tag_list, **kwargs):
if not self.is_configured(request=request, project=group.project):
return tag_list
prefix = self.get_conf_key()
issue_id = GroupMeta.objects.get_value(group, '%s:tid' % prefix)
if not issue_id:
return tag_list
tag_list.append(
format_html(
'<a href="{}" rel="noreferrer">{}</a>',
self.get_issue_url(group=group, issue_id=issue_id),
self.get_issue_label(group=group, issue_id=issue_id),
)
)
return tag_list
def get_issue_doc_html(self, **kwargs):
return ""
IssuePlugin = IssueTrackingPlugin
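# Minimal subclass sketch (illustrative only: the tracker name, option key and URL
# below are hypothetical, not part of this module):
#
# class MyTrackerPlugin(IssueTrackingPlugin):
#     slug = 'mytracker'
#     title = 'MyTracker'
#
#     def is_configured(self, request, project, **kwargs):
#         # plugins normally gate on their saved per-project options
#         return bool(self.get_option('api_key', project))
#
#     def create_issue(self, request, group, form_data, **kwargs):
#         # call the remote tracker's API here and return the new issue's id
#         ...
#
#     def get_issue_url(self, group, issue_id, **kwargs):
#         return 'https://mytracker.example.com/issues/%s' % issue_id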
avg_line_length: 35.19943 | max_line_length: 102 | alphanum_fraction: 0.584945

hexsha: 4a17a9d8adb3e5be2f0cd4d147a9430a0f7e93bb | size: 8,813 | ext: py | lang: Python
repo_path: forms_builder/forms/admin.py | repo_name: barsch/django-forms-builder
repo_head_hexsha: eb634dade42933dadc045818d5840719fef2d913 | licenses: ["BSD-2-Clause"]
stars: null | issues: null | forks: null
from __future__ import unicode_literals
from future.builtins import bytes, open
from csv import writer
from mimetypes import guess_type
from os.path import join
from datetime import datetime
from io import BytesIO, StringIO
from django.contrib import admin
from django.core.files.storage import FileSystemStorage
try:
from django.urls import reverse, re_path
except ImportError:
    # For django 1.8 compatibility
from django.conf.urls import url as re_path
from django.core.urlresolvers import reverse
from django.db.models import Count
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render, get_object_or_404
from django.utils.translation import ngettext, gettext_lazy as _
from forms_builder.forms.forms import EntriesForm
from forms_builder.forms.models import Form, Field, FormEntry, FieldEntry
from forms_builder.forms.settings import CSV_DELIMITER, UPLOAD_ROOT
from forms_builder.forms.settings import USE_SITES, EDITABLE_SLUGS
from forms_builder.forms.utils import now, slugify
try:
import xlwt
XLWT_INSTALLED = True
XLWT_DATETIME_STYLE = xlwt.easyxf(num_format_str='MM/DD/YYYY HH:MM:SS')
except ImportError:
XLWT_INSTALLED = False
fs = FileSystemStorage(location=UPLOAD_ROOT)
form_admin_filter_horizontal = ()
form_admin_fieldsets = [
(None, {"fields": ("title", ("status", "login_required",),
("publish_date", "expiry_date",),
"intro", "button_text", "response", "redirect_url")}),
(_("Email"), {"fields": ("send_email", "email_from", "email_copies",
"email_subject", "email_message")}),]
if EDITABLE_SLUGS:
form_admin_fieldsets.append(
(_("Slug"), {"fields": ("slug",), "classes": ("collapse",)}))
if USE_SITES:
form_admin_fieldsets.append((_("Sites"), {"fields": ("sites",),
"classes": ("collapse",)}))
form_admin_filter_horizontal = ("sites",)
class FieldAdmin(admin.TabularInline):
model = Field
exclude = ('slug', )
class FormAdmin(admin.ModelAdmin):
formentry_model = FormEntry
fieldentry_model = FieldEntry
inlines = (FieldAdmin,)
list_display = ("title", "status", "email_copies", "publish_date",
"expiry_date", "total_entries", "admin_links")
list_display_links = ("title",)
list_editable = ("status", "email_copies", "publish_date", "expiry_date")
list_filter = ("status",)
filter_horizontal = form_admin_filter_horizontal
search_fields = ("title", "intro", "response", "email_from",
"email_copies")
radio_fields = {"status": admin.HORIZONTAL}
fieldsets = form_admin_fieldsets
def get_queryset(self, request):
"""
Annotate the queryset with the entries count for use in the
admin list view.
"""
qs = super(FormAdmin, self).get_queryset(request)
return qs.annotate(total_entries=Count("entries"))
def get_urls(self):
"""
Add the entries view to urls.
"""
urls = super(FormAdmin, self).get_urls()
extra_urls = [
re_path(r"^(?P<form_id>\d+)/entries/$",
self.admin_site.admin_view(self.entries_view),
name="form_entries"),
re_path(r"^(?P<form_id>\d+)/entries/show/$",
self.admin_site.admin_view(self.entries_view),
{"show": True}, name="form_entries_show"),
re_path(r"^(?P<form_id>\d+)/entries/export/$",
self.admin_site.admin_view(self.entries_view),
{"export": True}, name="form_entries_export"),
re_path(r"^file/(?P<field_entry_id>\d+)/$",
self.admin_site.admin_view(self.file_view),
name="form_file"),
]
return extra_urls + urls
def entries_view(self, request, form_id, show=False, export=False,
export_xls=False):
"""
        Displays the form entries in an HTML table with an option to
        export them as a CSV file.
"""
if request.POST.get("back"):
bits = (self.model._meta.app_label, self.model.__name__.lower())
change_url = reverse("admin:%s_%s_change" % bits, args=(form_id,))
return HttpResponseRedirect(change_url)
form = get_object_or_404(self.model, id=form_id)
post = request.POST or None
args = form, request, self.formentry_model, self.fieldentry_model, post
entries_form = EntriesForm(*args)
delete = "%s.delete_formentry" % self.formentry_model._meta.app_label
can_delete_entries = request.user.has_perm(delete)
submitted = entries_form.is_valid() or show or export or export_xls
export = export or request.POST.get("export")
export_xls = export_xls or request.POST.get("export_xls")
if submitted:
if export:
response = HttpResponse(content_type="text/csv")
fname = "%s-%s.csv" % (form.slug, slugify(now().ctime()))
attachment = "attachment; filename=%s" % fname
response["Content-Disposition"] = attachment
queue = StringIO()
try:
csv = writer(queue, delimiter=CSV_DELIMITER)
writerow = csv.writerow
except TypeError:
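                    # Python 2's csv writer expects bytes, so fall back to a
                    # BytesIO queue and encode each cell by hand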
queue = BytesIO()
delimiter = bytes(CSV_DELIMITER, encoding="utf-8")
csv = writer(queue, delimiter=delimiter)
writerow = lambda row: csv.writerow([c.encode("utf-8")
if hasattr(c, "encode") else c for c in row])
writerow(entries_form.columns())
for row in entries_form.rows(csv=True):
writerow(row)
data = queue.getvalue()
response.write(data)
return response
elif XLWT_INSTALLED and export_xls:
response = HttpResponse(content_type="application/vnd.ms-excel")
fname = "%s-%s.xls" % (form.slug, slugify(now().ctime()))
attachment = "attachment; filename=%s" % fname
response["Content-Disposition"] = attachment
queue = BytesIO()
workbook = xlwt.Workbook(encoding='utf8')
sheet = workbook.add_sheet(form.title[:31])
for c, col in enumerate(entries_form.columns()):
sheet.write(0, c, col)
for r, row in enumerate(entries_form.rows(csv=True)):
for c, item in enumerate(row):
if isinstance(item, datetime):
item = item.replace(tzinfo=None)
sheet.write(r + 2, c, item, XLWT_DATETIME_STYLE)
else:
sheet.write(r + 2, c, item)
workbook.save(queue)
data = queue.getvalue()
response.write(data)
return response
elif request.POST.get("delete") and can_delete_entries:
selected = request.POST.getlist("selected")
if selected:
try:
from django.contrib.messages import info
except ImportError:
def info(request, message, fail_silently=True):
request.user.message_set.create(message=message)
entries = self.formentry_model.objects.filter(id__in=selected)
count = entries.count()
if count > 0:
entries.delete()
message = ngettext("1 entry deleted",
"%(count)s entries deleted", count)
info(request, message % {"count": count})
template = "admin/forms/entries.html"
context = {"title": _("View Entries"), "entries_form": entries_form,
"opts": self.model._meta, "original": form,
"can_delete_entries": can_delete_entries,
"submitted": submitted,
"xlwt_installed": XLWT_INSTALLED}
return render(request, template, context)
def file_view(self, request, field_entry_id):
"""
Output the file for the requested field entry.
"""
model = self.fieldentry_model
field_entry = get_object_or_404(model, id=field_entry_id)
path = join(fs.location, field_entry.value)
response = HttpResponse(content_type=guess_type(path)[0])
f = open(path, "r+b")
response["Content-Disposition"] = "attachment; filename=%s" % f.name
response.write(f.read())
f.close()
return response
admin.site.register(Form, FormAdmin)
avg_line_length: 42.574879 | max_line_length: 82 | alphanum_fraction: 0.593782

hexsha: 4a17a9e0dd403f2c98c53462b5a1efff3ca7287f | size: 982 | ext: py | lang: Python
repo_path: tests/clvm/test_serialized_program.py | repo_name: 13767849/chia-blockchain
repo_head_hexsha: ad7d7e0cced7f2f6deddc9e006dbaeee6dab8f66 | licenses: ["Apache-2.0"]
stars: 1 (2021-05-28T01:38:23.000Z) | issues: null | forks: null
from unittest import TestCase
from src.types.blockchain_format.program import Program, SerializedProgram
from src.wallet.puzzles.load_clvm import load_clvm
SHA256TREE_MOD = load_clvm("sha256tree_module.clvm")
# TODO: test multiple args
class TestSerializedProgram(TestCase):
def test_tree_hash(self):
p = SHA256TREE_MOD
s = SerializedProgram.from_bytes(bytes(SHA256TREE_MOD))
self.assertEqual(s.get_tree_hash(), p.get_tree_hash())
def test_program_execution(self):
p_result = SHA256TREE_MOD.run(SHA256TREE_MOD)
sp = SerializedProgram.from_bytes(bytes(SHA256TREE_MOD))
cost, sp_result = sp.run_with_cost(sp)
self.assertEqual(p_result, sp_result)
def test_serialization(self):
s0 = SerializedProgram.from_bytes(b"\x00")
p0 = Program.from_bytes(b"\x00")
print(s0, p0)
# TODO: enable when clvm updated for minimal encoding of zero
# self.assertEqual(bytes(p0), bytes(s0))
| 35.071429
| 74
| 0.718941
|
4a17aa01bfcd38111b4677980d4a9c504ccc966a
| 29,988
|
py
|
Python
|
src/transformers/modeling_tf_openai.py
|
josecannete/transformers
|
c76c3cebed3c707178d9f721349c5abd5206a57f
|
[
"Apache-2.0"
] | 75
|
2020-10-07T04:55:48.000Z
|
2022-03-31T09:06:18.000Z
|
src/transformers/modeling_tf_openai.py
|
rpowalski/transformers
|
dfe012ad9d6b6f0c9d30bc508b9f1e4c42280c07
|
[
"Apache-2.0"
] | 14
|
2020-10-26T11:44:55.000Z
|
2022-03-25T07:36:48.000Z
|
src/transformers/modeling_tf_openai.py
|
rpowalski/transformers
|
dfe012ad9d6b6f0c9d30bc508b9f1e4c42280c07
|
[
"Apache-2.0"
] | 19
|
2020-10-10T23:08:24.000Z
|
2022-01-14T09:44:17.000Z
|
# coding=utf-8
# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" TF 2.0 OpenAI GPT model."""
import logging
import numpy as np
import tensorflow as tf
from .configuration_openai import OpenAIGPTConfig
from .file_utils import add_start_docstrings
from .modeling_tf_utils import (
TFConv1D,
TFPreTrainedModel,
TFSequenceSummary,
TFSharedEmbeddings,
get_initializer,
shape_list,
)
logger = logging.getLogger(__name__)
TF_OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_MAP = {
"openai-gpt": "https://s3.amazonaws.com/models.huggingface.co/bert/openai-gpt-tf_model.h5"
}
def gelu(x):
"""Gaussian Error Linear Unit.
    This is a smoother version of the ReLU.
Original paper: https://arxiv.org/abs/1606.08415
Args:
x: float Tensor to perform activation.
Returns:
`x` with the GELU activation applied.
"""
cdf = 0.5 * (1.0 + tf.tanh((np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3)))))
return x * cdf
def swish(x):
return x * tf.math.sigmoid(x)
ACT_FNS = {
"gelu": tf.keras.layers.Activation(gelu),
"relu": tf.keras.activations.relu,
"swish": tf.keras.layers.Activation(swish),
}
class TFAttention(tf.keras.layers.Layer):
def __init__(self, nx, n_ctx, config, scale=False, **kwargs):
super(TFAttention, self).__init__(**kwargs)
self.output_attentions = config.output_attentions
n_state = nx # in Attention: n_state=768 (nx=n_embd)
# [switch nx => n_state from Block to Attention to keep identical to TF implem]
assert n_state % config.n_head == 0
self.n_ctx = n_ctx
self.n_head = config.n_head
self.split_size = n_state
self.scale = scale
self.c_attn = TFConv1D(n_state * 3, nx, initializer_range=config.initializer_range, name="c_attn")
self.c_proj = TFConv1D(n_state, nx, initializer_range=config.initializer_range, name="c_proj")
self.attn_dropout = tf.keras.layers.Dropout(config.attn_pdrop)
self.resid_dropout = tf.keras.layers.Dropout(config.resid_pdrop)
self.pruned_heads = set()
def prune_heads(self, heads):
pass
@staticmethod
def causal_attention_mask(nd, ns, dtype):
"""1's in the lower triangle, counting from the lower right corner.
Same as tf.matrix_band_part(tf.ones([nd, ns]), -1, ns-nd), but doesn't produce garbage on TPUs.
"""
i = tf.range(nd)[:, None]
j = tf.range(ns)
m = i >= j - ns + nd
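        # e.g. nd=2, ns=3 gives [[1, 1, 0], [1, 1, 1]]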
return tf.cast(m, dtype)
def _attn(self, inputs, training=False):
q, k, v, attention_mask, head_mask = inputs
# q, k, v have shape [batch, heads, sequence, features]
w = tf.matmul(q, k, transpose_b=True)
if self.scale:
dk = tf.cast(shape_list(k)[-1], tf.float32) # scale attention_scores
w = w / tf.math.sqrt(dk)
# w has shape [batch, heads, dst_sequence, src_sequence], where information flows from src to dst.
_, _, nd, ns = shape_list(w)
b = self.causal_attention_mask(nd, ns, dtype=w.dtype)
b = tf.reshape(b, [1, 1, nd, ns])
w = w * b - 1e4 * (1 - b)
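        # masked (future) positions get a large negative bias, which softmax maps to ~0 weight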
if attention_mask is not None:
# Apply the attention mask
w = w + attention_mask
w = tf.nn.softmax(w, axis=-1)
w = self.attn_dropout(w, training=training)
# Mask heads if we want to
if head_mask is not None:
w = w * head_mask
outputs = [tf.matmul(w, v)]
if self.output_attentions:
outputs.append(w)
return outputs
def merge_heads(self, x):
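        # (batch, head, seq_length, head_features) -> (batch, seq_length, n_state)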
x = tf.transpose(x, [0, 2, 1, 3])
x_shape = shape_list(x)
new_x_shape = x_shape[:-2] + [x_shape[-2] * x_shape[-1]]
return tf.reshape(x, new_x_shape)
def split_heads(self, x):
x_shape = shape_list(x)
new_x_shape = x_shape[:-1] + [self.n_head, x_shape[-1] // self.n_head]
x = tf.reshape(x, new_x_shape)
return tf.transpose(x, (0, 2, 1, 3)) # (batch, head, seq_length, head_features)
def call(self, inputs, training=False):
x, attention_mask, head_mask = inputs
x = self.c_attn(x)
query, key, value = tf.split(x, 3, axis=2)
query = self.split_heads(query)
key = self.split_heads(key)
value = self.split_heads(value)
attn_outputs = self._attn([query, key, value, attention_mask, head_mask], training=training)
a = attn_outputs[0]
a = self.merge_heads(a)
a = self.c_proj(a)
a = self.resid_dropout(a, training=training)
outputs = [a] + attn_outputs[1:]
return outputs # a, (attentions)
class TFMLP(tf.keras.layers.Layer):
def __init__(self, n_state, config, **kwargs):
super(TFMLP, self).__init__(**kwargs)
nx = config.n_embd
self.c_fc = TFConv1D(n_state, nx, initializer_range=config.initializer_range, name="c_fc")
self.c_proj = TFConv1D(nx, n_state, initializer_range=config.initializer_range, name="c_proj")
self.act = gelu
self.dropout = tf.keras.layers.Dropout(config.resid_pdrop)
def call(self, x, training=False):
h = self.act(self.c_fc(x))
h2 = self.c_proj(h)
h2 = self.dropout(h2, training=training)
return h2
class TFBlock(tf.keras.layers.Layer):
def __init__(self, n_ctx, config, scale=False, **kwargs):
super(TFBlock, self).__init__(**kwargs)
nx = config.n_embd
self.attn = TFAttention(nx, n_ctx, config, scale, name="attn")
self.ln_1 = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name="ln_1")
self.mlp = TFMLP(4 * nx, config, name="mlp")
self.ln_2 = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name="ln_2")
def call(self, inputs, training=False):
x, attention_mask, head_mask = inputs
output_attn = self.attn([x, attention_mask, head_mask], training=training)
a = output_attn[0] # output_attn: a, (attentions)
n = self.ln_1(x + a)
m = self.mlp(n, training=training)
h = self.ln_2(n + m)
outputs = [h] + output_attn[1:]
return outputs # x, (attentions)
class TFOpenAIGPTMainLayer(tf.keras.layers.Layer):
def __init__(self, config, *inputs, **kwargs):
super(TFOpenAIGPTMainLayer, self).__init__(config, *inputs, **kwargs)
self.output_hidden_states = config.output_hidden_states
self.output_attentions = config.output_attentions
self.num_hidden_layers = config.n_layer
self.vocab_size = config.vocab_size
self.n_embd = config.n_embd
self.tokens_embed = TFSharedEmbeddings(
config.vocab_size, config.n_embd, initializer_range=config.initializer_range, name="tokens_embed"
)
self.positions_embed = tf.keras.layers.Embedding(
config.n_positions,
config.n_embd,
embeddings_initializer=get_initializer(config.initializer_range),
name="positions_embed",
)
self.drop = tf.keras.layers.Dropout(config.embd_pdrop)
self.h = [TFBlock(config.n_ctx, config, scale=True, name="h_._{}".format(i)) for i in range(config.n_layer)]
def get_input_embeddings(self):
return self.tokens_embed
def _resize_token_embeddings(self, new_num_tokens):
raise NotImplementedError
def _prune_heads(self, heads_to_prune):
""" Prunes heads of the model.
heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
"""
raise NotImplementedError
def call(
self,
inputs,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
training=False,
):
if isinstance(inputs, (tuple, list)):
input_ids = inputs[0]
attention_mask = inputs[1] if len(inputs) > 1 else attention_mask
token_type_ids = inputs[2] if len(inputs) > 2 else token_type_ids
position_ids = inputs[3] if len(inputs) > 3 else position_ids
head_mask = inputs[4] if len(inputs) > 4 else head_mask
inputs_embeds = inputs[5] if len(inputs) > 5 else inputs_embeds
assert len(inputs) <= 6, "Too many inputs."
elif isinstance(inputs, dict):
input_ids = inputs.get("input_ids")
attention_mask = inputs.get("attention_mask", attention_mask)
token_type_ids = inputs.get("token_type_ids", token_type_ids)
position_ids = inputs.get("position_ids", position_ids)
head_mask = inputs.get("head_mask", head_mask)
inputs_embeds = inputs.get("inputs_embeds", inputs_embeds)
assert len(inputs) <= 6, "Too many inputs."
else:
input_ids = inputs
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = shape_list(input_ids)
input_ids = tf.reshape(input_ids, [-1, input_shape[-1]])
elif inputs_embeds is not None:
input_shape = shape_list(inputs_embeds)[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if position_ids is None:
position_ids = tf.range(input_shape[-1], dtype=tf.int32)[tf.newaxis, :]
if attention_mask is not None:
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
attention_mask = attention_mask[:, tf.newaxis, tf.newaxis, :]
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
attention_mask = tf.cast(attention_mask, tf.float32)
attention_mask = (1.0 - attention_mask) * -10000.0
else:
attention_mask = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
if head_mask is not None:
raise NotImplementedError
else:
head_mask = [None] * self.num_hidden_layers
# head_mask = tf.constant([0] * self.num_hidden_layers)
position_ids = tf.reshape(position_ids, [-1, shape_list(position_ids)[-1]])
if inputs_embeds is None:
inputs_embeds = self.tokens_embed(input_ids, mode="embedding")
position_embeds = self.positions_embed(position_ids)
if token_type_ids is not None:
token_type_ids = tf.reshape(token_type_ids, [-1, shape_list(token_type_ids)[-1]])
token_type_embeds = self.tokens_embed(token_type_ids, mode="embedding")
else:
token_type_embeds = 0
hidden_states = inputs_embeds + position_embeds + token_type_embeds
hidden_states = self.drop(hidden_states, training=training)
output_shape = input_shape + [shape_list(hidden_states)[-1]]
all_attentions = []
all_hidden_states = ()
for i, block in enumerate(self.h):
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (tf.reshape(hidden_states, output_shape),)
outputs = block([hidden_states, attention_mask, head_mask[i]], training=training)
hidden_states = outputs[0]
if self.output_attentions:
all_attentions.append(outputs[1])
hidden_states = tf.reshape(hidden_states, output_shape)
# Add last hidden state
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
outputs = (hidden_states,)
if self.output_hidden_states:
outputs = outputs + (all_hidden_states,)
if self.output_attentions:
# let the number of heads free (-1) so we can extract attention even after head pruning
attention_output_shape = input_shape[:-1] + [-1] + shape_list(all_attentions[0])[-2:]
all_attentions = tuple(tf.reshape(t, attention_output_shape) for t in all_attentions)
outputs = outputs + (all_attentions,)
return outputs # last hidden state, (all hidden_states), (attentions)
class TFOpenAIGPTPreTrainedModel(TFPreTrainedModel):
""" An abstract class to handle weights initialization and
a simple interface for downloading and loading pretrained models.
"""
config_class = OpenAIGPTConfig
pretrained_model_archive_map = TF_OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_MAP
base_model_prefix = "transformer"
OPENAI_GPT_START_DOCSTRING = r""" OpenAI GPT model was proposed in
`Improving Language Understanding by Generative Pre-Training`_
by Alec Radford, Karthik Narasimhan, Tim Salimans and Ilya Sutskever.
    It's a causal (unidirectional) transformer pre-trained using language modeling on a large
    corpus with long range dependencies, the Toronto Book Corpus.
This model is a tf.keras.Model `tf.keras.Model`_ sub-class. Use it as a regular TF 2.0 Keras Model and
refer to the TF 2.0 documentation for all matter related to general usage and behavior.
.. _`Improving Language Understanding by Generative Pre-Training`:
https://openai.com/blog/language-unsupervised/
.. _`tf.keras.Model`:
https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/keras/Model
Note on the model inputs:
TF 2.0 models accepts two formats as inputs:
- having all inputs as keyword arguments (like PyTorch models), or
- having all inputs as a list, tuple or dict in the first positional arguments.
This second option is usefull when using `tf.keras.Model.fit()` method which currently requires having all the tensors in the first argument of the model call function: `model(inputs)`.
If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :
- a single Tensor with input_ids only and nothing else: `model(inputs_ids)
- a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
`model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
        - a dictionary with one or several input Tensors associated to the input names given in the docstring:
`model({'input_ids': input_ids, 'token_type_ids': token_type_ids})`
Parameters:
config (:class:`~transformers.OpenAIGPTConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
OPENAI_GPT_INPUTS_DOCSTRING = r""" Inputs:
        **input_ids**: ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length)``:
Indices of input sequence tokens in the vocabulary.
GPT is a model with absolute position embeddings so it's usually advised to pad the inputs on
the right rather than the left.
            Indices can be obtained using :class:`transformers.OpenAIGPTTokenizer`.
See :func:`transformers.PreTrainedTokenizer.encode` and
:func:`transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details.
**attention_mask**: (`optional`) ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length)``:
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
        **token_type_ids**: (`optional`) ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length)``:
A parallel sequence of tokens (can be used to indicate various portions of the inputs).
The embeddings from these tokens will be summed with the respective token embeddings.
Indices are selected in the vocabulary (unlike BERT which has a specific vocabulary for segment indices)
        **position_ids**: (`optional`) ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length)``:
Indices of positions of each input sequence tokens in the position embeddings.
Selected in the range ``[0, config.max_position_embeddings - 1]``.
**head_mask**: (`optional`) ``Numpy array`` or ``tf.Tensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``:
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**.
**inputs_embeds**: (`optional`) ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length, embedding_dim)``:
Optionally, instead of passing ``input_ids`` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
"""
@add_start_docstrings(
"The bare OpenAI GPT transformer model outputing raw hidden-states without any specific head on top.",
OPENAI_GPT_START_DOCSTRING,
OPENAI_GPT_INPUTS_DOCSTRING,
)
class TFOpenAIGPTModel(TFOpenAIGPTPreTrainedModel):
r"""
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **last_hidden_state**: ``tf.Tensor`` of shape ``(batch_size, sequence_length, hidden_size)``
            Sequence of hidden-states at the last layer of the model.
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``tf.Tensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``tf.Tensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
import tensorflow as tf
from transformers import OpenAIGPTTokenizer, TFOpenAIGPTModel
tokenizer = OpenAIGPTTokenizer.from_pretrained('openai-gpt')
model = TFOpenAIGPTModel.from_pretrained('openai-gpt')
input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :] # Batch size 1
outputs = model(input_ids)
last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple
"""
def __init__(self, config, *inputs, **kwargs):
super(TFOpenAIGPTModel, self).__init__(config, *inputs, **kwargs)
self.transformer = TFOpenAIGPTMainLayer(config, name="transformer")
def call(self, inputs, **kwargs):
outputs = self.transformer(inputs, **kwargs)
return outputs
@add_start_docstrings(
"""OpenAI GPT Model transformer with a language modeling head on top
(linear layer with weights tied to the input embeddings). """,
OPENAI_GPT_START_DOCSTRING,
OPENAI_GPT_INPUTS_DOCSTRING,
)
class TFOpenAIGPTLMHeadModel(TFOpenAIGPTPreTrainedModel):
r"""
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **prediction_scores**: ``tf.Tensor`` of shape ``(batch_size, sequence_length, config.vocab_size)``
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``tf.Tensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``tf.Tensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
import tensorflow as tf
from transformers import OpenAIGPTTokenizer, TFOpenAIGPTLMHeadModel
tokenizer = OpenAIGPTTokenizer.from_pretrained('openai-gpt')
model = TFOpenAIGPTLMHeadModel.from_pretrained('openai-gpt')
input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :] # Batch size 1
outputs = model(input_ids)
logits = outputs[0]
"""
def __init__(self, config, *inputs, **kwargs):
super(TFOpenAIGPTLMHeadModel, self).__init__(config, *inputs, **kwargs)
self.transformer = TFOpenAIGPTMainLayer(config, name="transformer")
def get_output_embeddings(self):
return self.transformer.tokens_embed
def call(self, inputs, **kwargs):
transformer_outputs = self.transformer(inputs, **kwargs)
hidden_states = transformer_outputs[0]
lm_logits = self.transformer.tokens_embed(hidden_states, mode="linear")
outputs = (lm_logits,) + transformer_outputs[1:]
return outputs # lm_logits, (all hidden_states), (attentions)
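# --- illustrative sketch (not part of the original file) ---
# The LM head above reuses the token embedding matrix in "linear" mode, i.e.
# logits = hidden_states @ E^T, which ties input and output embeddings. A
# minimal self-contained demonstration of that matmul (assumes only TF 2.x;
# the shapes and names below are made up for illustration):
import tensorflow as tf

vocab, hidden = 7, 4
E = tf.random.normal((vocab, hidden))              # shared embedding matrix
h = tf.random.normal((2, 5, hidden))               # (batch, seq_len, hidden)
flat = tf.reshape(h, (-1, hidden))                 # flatten batch and time
logits = tf.reshape(tf.matmul(flat, E, transpose_b=True), (2, 5, vocab))
assert logits.shape == (2, 5, vocab)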
@add_start_docstrings(
"""OpenAI GPT Model transformer with a language modeling and a multiple-choice classification
head on top e.g. for RocStories/SWAG tasks. The two heads are two linear layers.
The language modeling head has its weights tied to the input embeddings,
the classification head takes as input the input of a specified classification token index in the input sequence).
""",
OPENAI_GPT_START_DOCSTRING,
OPENAI_GPT_INPUTS_DOCSTRING,
)
class TFOpenAIGPTDoubleHeadsModel(TFOpenAIGPTPreTrainedModel):
r"""
**mc_token_ids**: (`optional`, default to index of the last token of the input) ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, num_choices)``:
Index of the classification token in each input sequence.
        Selected in the range ``[0, input_ids.size(-1) - 1]``.
    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **lm_prediction_scores**: ``tf.Tensor`` of shape ``(batch_size, num_choices, sequence_length, config.vocab_size)``
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        **mc_prediction_scores**: ``tf.Tensor`` of shape ``(batch_size, num_choices)``
            Prediction scores of the multiple-choice classification head (scores for each choice before SoftMax).
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``tf.Tensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``tf.Tensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
import tensorflow as tf
from transformers import OpenAIGPTTokenizer, TFOpenAIGPTDoubleHeadsModel
tokenizer = OpenAIGPTTokenizer.from_pretrained('openai-gpt')
model = TFOpenAIGPTDoubleHeadsModel.from_pretrained('openai-gpt')
# Add a [CLS] to the vocabulary (we should train it also!)
# This option is currently not implemented in TF 2.0
raise NotImplementedError
tokenizer.add_special_tokens({'cls_token': '[CLS]'})
model.resize_token_embeddings(len(tokenizer)) # Update the model embeddings with the new vocabulary size
        print(tokenizer.cls_token_id, len(tokenizer))  # The new token is the last token of the vocabulary
choices = ["Hello, my dog is cute [CLS]", "Hello, my cat is cute [CLS]"]
input_ids = tf.constant([tokenizer.encode(s) for s in choices])[None, :] # Batch size 1, 2 choices
mc_token_ids = tf.constant([input_ids.size(-1), input_ids.size(-1)])[None, :] # Batch size 1
outputs = model(input_ids, mc_token_ids=mc_token_ids)
lm_prediction_scores, mc_prediction_scores = outputs[:2]
"""
def __init__(self, config, *inputs, **kwargs):
super(TFOpenAIGPTDoubleHeadsModel, self).__init__(config, *inputs, **kwargs)
config.num_labels = 1
self.transformer = TFOpenAIGPTMainLayer(config, name="transformer")
self.multiple_choice_head = TFSequenceSummary(
config, initializer_range=config.initializer_range, name="multiple_choice_head"
)
def get_output_embeddings(self):
return self.transformer.tokens_embed
def call(
self,
inputs,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
mc_token_ids=None,
training=False,
):
if isinstance(inputs, (tuple, list)):
input_ids = inputs[0]
attention_mask = inputs[1] if len(inputs) > 1 else attention_mask
token_type_ids = inputs[2] if len(inputs) > 2 else token_type_ids
position_ids = inputs[3] if len(inputs) > 3 else position_ids
head_mask = inputs[4] if len(inputs) > 4 else head_mask
inputs_embeds = inputs[5] if len(inputs) > 5 else inputs_embeds
mc_token_ids = inputs[6] if len(inputs) > 6 else mc_token_ids
assert len(inputs) <= 7, "Too many inputs."
elif isinstance(inputs, dict):
input_ids = inputs.get("input_ids")
attention_mask = inputs.get("attention_mask", attention_mask)
token_type_ids = inputs.get("token_type_ids", token_type_ids)
position_ids = inputs.get("position_ids", position_ids)
head_mask = inputs.get("head_mask", head_mask)
inputs_embeds = inputs.get("inputs_embeds", inputs_embeds)
mc_token_ids = inputs.get("mc_token_ids", mc_token_ids)
assert len(inputs) <= 7, "Too many inputs."
else:
input_ids = inputs
if input_ids is not None:
input_shapes = shape_list(input_ids)
else:
input_shapes = shape_list(inputs_embeds)[:-1]
seq_length = input_shapes[-1]
flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None
flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None
flat_token_type_ids = tf.reshape(token_type_ids, (-1, seq_length)) if token_type_ids is not None else None
flat_position_ids = tf.reshape(position_ids, (-1, seq_length)) if position_ids is not None else None
flat_inputs = [
flat_input_ids,
flat_attention_mask,
flat_token_type_ids,
flat_position_ids,
head_mask,
inputs_embeds,
]
transformer_outputs = self.transformer(flat_inputs, training=training)
hidden_states = transformer_outputs[0]
hidden_states = tf.reshape(hidden_states, input_shapes + shape_list(hidden_states)[-1:])
lm_logits = self.transformer.tokens_embed(hidden_states, mode="linear")
mc_logits = self.multiple_choice_head([hidden_states, mc_token_ids], training=training)
mc_logits = tf.squeeze(mc_logits, axis=-1)
outputs = (lm_logits, mc_logits) + transformer_outputs[1:]
return outputs # lm logits, mc logits, (all hidden_states), (attentions)
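# --- illustrative sketch (not part of the original file) ---
# The double-heads `call` above flattens (batch, num_choices, seq_len) inputs
# into (batch * num_choices, seq_len) before the transformer runs, then folds
# the choice axis back into the hidden states. A self-contained demonstration
# of that reshape round-trip (assumes only TensorFlow 2.x):
import tensorflow as tf

batch, num_choices, seq_len = 2, 3, 5
ids = tf.reshape(tf.range(batch * num_choices * seq_len), (batch, num_choices, seq_len))
flat = tf.reshape(ids, (-1, seq_len))                        # what the transformer sees
restored = tf.reshape(flat, (batch, num_choices, seq_len))   # choice axis folded back
assert bool(tf.reduce_all(restored == ids))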
| 46.71028
| 193
| 0.668667
|
4a17aa8afee64e06dcd3b3e9237e5d029c468b78
| 683
|
py
|
Python
|
qmacroThinkingAloud.py
|
yasinnaal/Code-Python
|
81828dbb959368a62a901dbc38f966ed5f3f8783
|
[
"CC0-1.0"
] | 1
|
2021-03-30T10:47:09.000Z
|
2021-03-30T10:47:09.000Z
|
qmacroThinkingAloud.py
|
yasinnaal/Python-YN
|
81828dbb959368a62a901dbc38f966ed5f3f8783
|
[
"CC0-1.0"
] | null | null | null |
qmacroThinkingAloud.py
|
yasinnaal/Python-YN
|
81828dbb959368a62a901dbc38f966ed5f3f8783
|
[
"CC0-1.0"
] | null | null | null |
#qmacro Thinking Aloud
categories = ["Major", "Minor", "Mini", "Micro"]
blog = ["qmacro.org SAP Community", "autodidactics", "(something missing)", "Twitter"]
blog_e_length = len(blog)
blog_c_length = sum(len(i) for i in blog)
hdash = "-"* (blog_e_length + blog_c_length)
def DrawBoard(rows, cols):
    # rows is currently unused; the board is always two rows high
    print(hdash)
    for r1 in range(cols):
        # pad each category to the width of the blog entry below it
        line1length = len(blog[r1]) - len(categories[r1])
        print(categories[r1] + " " * line1length + "|", end='')
    print()
    for r2 in range(cols):
        print(blog[r2] + "|", end='')
    print()
    print(hdash)
DrawBoard(2, len(categories))
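# --- illustrative sketch (not part of the original script) ---
# The manual padding above can also be written with str.ljust, which pads a
# string to a target column width; with these particular lists the output
# matches the loop above:
widths = [max(len(c), len(b)) for c, b in zip(categories, blog)]
print("|".join(c.ljust(w) for c, w in zip(categories, widths)) + "|")
print("|".join(b.ljust(w) for b, w in zip(blog, widths)) + "|")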
| 35.947368
| 87
| 0.551977
|
4a17aba439f324aa522353bbd6ebe83bc014f15b
| 334
|
py
|
Python
|
Scripts/003_hackerrank/Python/p095.py
|
OrangePeelFX/Python-Tutorial
|
0d47f194553666304765f5bbc928374b7aec8a48
|
[
"MIT"
] | null | null | null |
Scripts/003_hackerrank/Python/p095.py
|
OrangePeelFX/Python-Tutorial
|
0d47f194553666304765f5bbc928374b7aec8a48
|
[
"MIT"
] | 1
|
2021-06-02T00:28:17.000Z
|
2021-06-02T00:28:17.000Z
|
Scripts/003_hackerrank/Python/p095.py
|
florianwns/python-scripts
|
0d47f194553666304765f5bbc928374b7aec8a48
|
[
"MIT"
] | 1
|
2020-01-13T11:08:18.000Z
|
2020-01-13T11:08:18.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Problem 095
String Split and Join
Source : https://www.hackerrank.com/challenges/python-string-split-and-join/problem
"""
def split_and_join(line):
return "-".join(line.split())
if __name__ == '__main__':
line = input()
result = split_and_join(line)
print(result)
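# --- illustrative check (not part of the original solution) ---
# split() with no arguments splits on any run of whitespace, so the join
# collapses repeated spaces into single hyphens:
assert split_and_join("this is  a string") == "this-is-a-string"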
| 19.647059
| 83
| 0.670659
|
4a17ac6ae62b6f8483f7dadac701423c45644934
| 1,065
|
py
|
Python
|
dev/merger/__init__.py
|
ekpo-d/merger
|
a07c595e97b91defbae82bce502286bf51281dbf
|
[
"MIT"
] | null | null | null |
dev/merger/__init__.py
|
ekpo-d/merger
|
a07c595e97b91defbae82bce502286bf51281dbf
|
[
"MIT"
] | 4
|
2020-03-24T15:35:06.000Z
|
2021-02-02T21:42:15.000Z
|
dev/merger/__init__.py
|
ekpo-d/merger
|
a07c595e97b91defbae82bce502286bf51281dbf
|
[
"MIT"
] | null | null | null |
# merger/__init__.py
from flask import Flask, jsonify, make_response
# flask.ext.* was removed in modern Flask; import the extensions directly
from flask_bcrypt import Bcrypt
from flask_sqlalchemy import SQLAlchemy
from merger.config import BaseConfig
# config
app = Flask(__name__)
app.config.from_object(BaseConfig)
bcrypt = Bcrypt(app)
db = SQLAlchemy(app)
@app.route('/')
def index():
return app.send_static_file('index.html')
@app.errorhandler(400)
def bad_request(error):
    return make_response(jsonify({"error": "Bad Request"}), 400)
@app.errorhandler(405)
def method_not_allowed(error):  # renamed: a second `bad_req` would shadow the first
    return make_response(jsonify({"error": "Method Not Allowed"}), 405)
@app.errorhandler(404)
def not_found(error):
return make_response(jsonify({"error" : "Resource not found"}), 404)
@app.errorhandler(500)
def internal_error(error):
return make_response(jsonify({"error" : "Internal Error"}), 500)
from merger.auth import auth as auth_blueprint
from merger.core import core as core_blueprint
app.register_blueprint(auth_blueprint, url_prefix="/api/auth")
app.register_blueprint(core_blueprint, url_prefix="/api/v1.0")
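# --- illustrative sketch (not part of the original module) ---
# Each errorhandler above pairs a JSON body with its status code. A minimal
# check with Flask's built-in test client, run only on direct execution
# (assumes the module and its blueprints import cleanly):
if __name__ == "__main__":
    with app.test_client() as client:
        resp = client.get("/no/such/route")
        assert resp.status_code == 404
        assert resp.get_json() == {"error": "Resource not found"}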
| 24.204545
| 72
| 0.752113
|
4a17acc8f0dca210533216b90eab2663df37257a
| 893
|
py
|
Python
|
Tracker/event.py
|
nordwind80/BT-Tracker
|
558c15b399871c1ca11d0c4ae1eb598e3060931e
|
[
"MIT"
] | 1
|
2019-05-05T06:46:27.000Z
|
2019-05-05T06:46:27.000Z
|
Tracker/event.py
|
nordwind80/BT-Tracker
|
558c15b399871c1ca11d0c4ae1eb598e3060931e
|
[
"MIT"
] | null | null | null |
Tracker/event.py
|
nordwind80/BT-Tracker
|
558c15b399871c1ca11d0c4ae1eb598e3060931e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
#
# Author: eaglewings
# E-Mail: ZWFnbGV3aW5ncy55aUBnbWFpbC5jb20=
# Created Time: 2019-04-14 20:14
# Last Modified:
# Description:
# - Project: BT Trackers Updater
# - File Name: event.py
# - singleton instance of Event
import time
from functools import wraps
class Event(object):
def __init__(self):
self._finish = False
    @classmethod
    def check(cls, func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            result = func(*args, **kwargs)
            # pulse the flag on the shared `status` instance; assigning to
            # `cls.finished` would overwrite the property descriptor itself
            status.finished = True
            time.sleep(0.1)
            status.finished = False
            return result
        return wrapper
    @property
    def finished(self):
        return self._finish
    @finished.setter
    def finished(self, new_status: bool):
        # accept both values so the flag can be reset after a pulse
        self._finish = new_status
status = Event()
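# --- illustrative usage (not part of the original file) ---
# Event.check pulses the shared flag around a tracked call: True for ~0.1 s
# so observers can notice, then back to False. The tracked function below is
# a made-up stand-in:
@Event.check
def fetch_trackers():
    return ["udp://tracker.example:80"]  # hypothetical payload

if __name__ == "__main__":
    print(fetch_trackers(), status.finished)  # flag already reset to False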
| 20.295455
| 42
| 0.596865
|
4a17ad1792b9a31e2b9b1dc82893365e38804c27
| 1,466
|
py
|
Python
|
StardewSpeak/lib/speech-client/speech-client/characters.py
|
evfredericksen/StardewBot
|
f5d80cbeb95840aa4366d9104eb766935491a8e3
|
[
"MIT"
] | 10
|
2021-06-02T18:30:40.000Z
|
2022-01-15T18:11:05.000Z
|
StardewSpeak/lib/speech-client/speech-client/characters.py
|
evfredericksen/StardewBot
|
f5d80cbeb95840aa4366d9104eb766935491a8e3
|
[
"MIT"
] | 1
|
2021-11-12T16:16:23.000Z
|
2021-11-18T18:44:07.000Z
|
StardewSpeak/lib/speech-client/speech-client/characters.py
|
evfredericksen/StardewBot
|
f5d80cbeb95840aa4366d9104eb766935491a8e3
|
[
"MIT"
] | null | null | null |
import constants
npcs = {
'abigail': constants.ABIGAIL,
'alex': constants.ALEX,
'birdie': constants.BIRDIE,
'[the] bouncer': constants.BOUNCER,
'caroline': constants.CAROLINE,
'clint': constants.CLINT,
'demetrius': constants.DEMETRIUS,
'[the] dwarf': constants.DWARF,
'elliott': constants.ELLIOTT,
'emily': constants.EMILY,
'evelyn': constants.EVELYN,
'george': constants.GEORGE,
'gil': constants.GIL,
'[the] governor': constants.GOVERNOR,
'grandpa': constants.GRANDPA,
'gunther': constants.GUNTHER,
'gus': constants.GUS,
'haley': constants.HALEY,
'harvey': constants.HARVEY,
'jas': constants.JAS,
'jodi': constants.JODI,
'kent': constants.KENT,
'krobus': constants.KROBUS,
'leah': constants.LEAH,
'leo': constants.LEO,
'[mayor] lewis': constants.LEWIS,
'linus': constants.LINUS,
'marlon': constants.MARLON,
'marnie': constants.MARNIE,
'(muh roo | mar oo)': constants.MARU,
'morris': constants.MORRIS,
'mister (kwee | key)': constants.MR_QI,
'pam': constants.PAM,
'penny': constants.PENNY,
'pierre': constants.PIERRE,
'professor snail': constants.PROFESSOR_SNAIL,
'robin': constants.ROBIN,
'sam': constants.SAM,
'sandy': constants.SANDY,
'sebastian': constants.SEBASTIAN,
'shane': constants.SHANE,
'vincent': constants.VINCENT,
'willy': constants.WILLY,
'[the] wizard': constants.WIZARD,
}
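# --- illustrative sketch (not part of the original file) ---
# Keys follow a speech-grammar convention where "[word]" marks an optional
# token. A minimal expansion of such a key into its concrete spoken forms
# (assumes only the simple single-bracket-prefix patterns used above):
import re

def spoken_forms(key):
    m = re.match(r"\[(\w+)\] (.+)", key)
    if not m:
        return [key]
    return [m.group(2), "{} {}".format(m.group(1), m.group(2))]

assert spoken_forms('[the] dwarf') == ['dwarf', 'the dwarf']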
| 30.541667
| 49
| 0.643929
|
4a17ad2fc97b21843cbf79aff137d7169b6027ec
| 36,511
|
py
|
Python
|
sabnzbd/nzbqueue.py
|
wfriesen/sabnzbd
|
c2ba998e7b47e28d1c904976293eba518b073d2e
|
[
"MIT",
"PSF-2.0",
"0BSD"
] | null | null | null |
sabnzbd/nzbqueue.py
|
wfriesen/sabnzbd
|
c2ba998e7b47e28d1c904976293eba518b073d2e
|
[
"MIT",
"PSF-2.0",
"0BSD"
] | null | null | null |
sabnzbd/nzbqueue.py
|
wfriesen/sabnzbd
|
c2ba998e7b47e28d1c904976293eba518b073d2e
|
[
"MIT",
"PSF-2.0",
"0BSD"
] | null | null | null |
#!/usr/bin/python3 -OO
# Copyright 2007-2020 The SABnzbd-Team <team@sabnzbd.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
sabnzbd.nzbqueue - nzb queue
"""
import os
import logging
import time
import datetime
import functools
import sabnzbd
from sabnzbd.nzbstuff import NzbObject
from sabnzbd.misc import exit_sab, cat_to_opts, int_conv, caller_name, cmp, safe_lower
from sabnzbd.filesystem import get_admin_path, remove_all, globber_full, remove_file
from sabnzbd.panic import panic_queue
import sabnzbd.database as database
from sabnzbd.decorators import NzbQueueLocker
from sabnzbd.constants import QUEUE_FILE_NAME, QUEUE_VERSION, FUTURE_Q_FOLDER, \
JOB_ADMIN, LOW_PRIORITY, NORMAL_PRIORITY, HIGH_PRIORITY, TOP_PRIORITY, \
REPAIR_PRIORITY, STOP_PRIORITY, VERIFIED_FILE, \
Status, IGNORED_FOLDERS, QNFO, DIRECT_WRITE_TRIGGER
import sabnzbd.cfg as cfg
import sabnzbd.downloader
from sabnzbd.assembler import Assembler, file_has_articles
import sabnzbd.notifier as notifier
from sabnzbd.bpsmeter import BPSMeter
from sabnzbd.dirscanner import process_single_nzb
class NzbQueue:
""" Singleton NzbQueue """
do = None
def __init__(self):
self.__top_only = cfg.top_only()
self.__nzo_list = []
self.__nzo_table = {}
NzbQueue.do = self
def read_queue(self, repair):
""" Read queue from disk, supporting repair modes
0 = no repairs
1 = use existing queue, add missing "incomplete" folders
2 = Discard all queue admin, reconstruct from "incomplete" folders
"""
nzo_ids = []
if repair < 2:
# Try to process the queue file
try:
data = sabnzbd.load_admin(QUEUE_FILE_NAME)
if data:
queue_vers, nzo_ids, _ = data
if not queue_vers == QUEUE_VERSION:
nzo_ids = []
logging.error(T('Incompatible queuefile found, cannot proceed'))
if not repair:
panic_queue(os.path.join(cfg.admin_dir.get_path(), QUEUE_FILE_NAME))
exit_sab(2)
except:
nzo_ids = []
logging.error(T('Error loading %s, corrupt file detected'),
os.path.join(cfg.admin_dir.get_path(), QUEUE_FILE_NAME))
# First handle jobs in the queue file
folders = []
for nzo_id in nzo_ids:
folder, _id = os.path.split(nzo_id)
path = get_admin_path(folder, future=False)
# Try as normal job
nzo = sabnzbd.load_data(_id, path, remove=False)
if not nzo:
# Try as future job
path = get_admin_path(folder, future=True)
nzo = sabnzbd.load_data(_id, path)
if nzo:
self.add(nzo, save=False, quiet=True)
folders.append(folder)
# Scan for any folders in "incomplete" that are not yet in the queue
if repair:
self.scan_jobs(not folders)
# Handle any lost future jobs
for item in globber_full(os.path.join(cfg.admin_dir.get_path(), FUTURE_Q_FOLDER)):
path, nzo_id = os.path.split(item)
if nzo_id not in self.__nzo_table:
if nzo_id.startswith('SABnzbd_nzo'):
nzo = sabnzbd.load_data(nzo_id, path, remove=True)
if nzo:
self.add(nzo, save=True)
else:
try:
remove_file(item)
except:
pass
@NzbQueueLocker
def scan_jobs(self, all_jobs=False, action=True):
""" Scan "incomplete" for missing folders,
'all' is True: Include active folders
'action' is True, do the recovery action
returns list of orphaned folders
"""
result = []
# Folders from the download queue
if all_jobs:
registered = []
else:
registered = [nzo.work_name for nzo in self.__nzo_list]
# Retryable folders from History
items = sabnzbd.api.build_history(output=True)[0]
# Anything waiting or active or retryable is a known item
registered.extend([os.path.basename(item['path'])
for item in items if item['retry'] or item['loaded'] or item['status'] == Status.QUEUED])
# Repair unregistered folders
for folder in globber_full(cfg.download_dir.get_path()):
name = os.path.basename(folder)
if os.path.isdir(folder) and name not in registered and name not in IGNORED_FOLDERS:
if action:
logging.info('Repairing job %s', folder)
self.repair_job(folder)
result.append(os.path.basename(folder))
else:
if action:
logging.info('Skipping repair for job %s', folder)
return result
def repair_job(self, folder, new_nzb=None, password=None):
""" Reconstruct admin for a single job folder, optionally with new NZB """
# Check if folder exists
if not folder or not os.path.exists(folder):
return None
name = os.path.basename(folder)
path = os.path.join(folder, JOB_ADMIN)
if hasattr(new_nzb, 'filename'):
filename = new_nzb.filename
else:
filename = ''
if not filename:
# Was this file already post-processed?
verified = sabnzbd.load_data(VERIFIED_FILE, path, remove=False)
if not verified or not all(verified[x] for x in verified):
filename = globber_full(path, '*.gz')
if len(filename) > 0:
logging.debug('Repair job %s by re-parsing stored NZB', name)
nzo_id = sabnzbd.add_nzbfile(filename[0], pp=None, script=None, cat=None, priority=None, nzbname=name,
reuse=True, password=password)[1]
else:
logging.debug('Repair job %s without stored NZB', name)
nzo = NzbObject(name, pp=None, script=None, nzb='', cat=None, priority=None, nzbname=name, reuse=True)
nzo.password = password
self.add(nzo)
nzo_id = nzo.nzo_id
else:
remove_all(path, '*.gz')
logging.debug('Repair job %s with new NZB (%s)', name, filename)
nzo_id = sabnzbd.add_nzbfile(new_nzb, pp=None, script=None, cat=None, priority=None, nzbname=name,
reuse=True, password=password)[1]
return nzo_id
@NzbQueueLocker
def send_back(self, nzo):
""" Send back job to queue after successful pre-check """
try:
nzb_path = globber_full(nzo.workpath, '*.gz')[0]
except:
logging.debug('Failed to find NZB file after pre-check (%s)', nzo.nzo_id)
return
# Need to remove it first, otherwise it might still be downloading
self.remove(nzo, add_to_history=False, cleanup=False)
res, nzo_ids = process_single_nzb(nzo.work_name, nzb_path, keep=True, reuse=True, nzo_id=nzo.nzo_id)
if res == 0 and nzo_ids:
# Reset reuse flag to make pause/abort on encryption possible
self.__nzo_table[nzo_ids[0]].reuse = False
@NzbQueueLocker
def save(self, save_nzo=None):
""" Save queue, all nzo's or just the specified one """
logging.info("Saving queue")
nzo_ids = []
# Aggregate nzo_ids and save each nzo
for nzo in self.__nzo_list[:]:
if not nzo.is_gone():
nzo_ids.append(os.path.join(nzo.work_name, nzo.nzo_id))
if save_nzo is None or nzo is save_nzo:
if not nzo.futuretype:
# Also includes save_data for NZO
nzo.save_to_disk()
else:
sabnzbd.save_data(nzo, nzo.nzo_id, nzo.workpath)
sabnzbd.save_admin((QUEUE_VERSION, nzo_ids, []), QUEUE_FILE_NAME)
def set_top_only(self, value):
self.__top_only = value
def generate_future(self, msg, pp=None, script=None, cat=None, url=None, priority=NORMAL_PRIORITY, nzbname=None):
""" Create and return a placeholder nzo object """
logging.debug('Creating placeholder NZO')
future_nzo = NzbObject(msg, pp, script, None, futuretype=True, cat=cat, url=url, priority=priority, nzbname=nzbname, status=Status.GRABBING)
self.add(future_nzo)
return future_nzo
def change_opts(self, nzo_ids, pp):
result = 0
for nzo_id in [item.strip() for item in nzo_ids.split(',')]:
if nzo_id in self.__nzo_table:
self.__nzo_table[nzo_id].set_pp(pp)
result += 1
return result
def change_script(self, nzo_ids, script):
result = 0
for nzo_id in [item.strip() for item in nzo_ids.split(',')]:
if nzo_id in self.__nzo_table:
self.__nzo_table[nzo_id].script = script
logging.info('Set script=%s for job %s', script, self.__nzo_table[nzo_id].final_name)
result += 1
return result
def change_cat(self, nzo_ids, cat, explicit_priority=None):
result = 0
for nzo_id in [item.strip() for item in nzo_ids.split(',')]:
if nzo_id in self.__nzo_table:
nzo = self.__nzo_table[nzo_id]
nzo.cat, pp, nzo.script, prio = cat_to_opts(cat)
logging.info('Set cat=%s for job %s', cat, nzo.final_name)
nzo.set_pp(pp)
if explicit_priority is None:
self.set_priority(nzo_id, prio)
# Abort any ongoing unpacking if the category changed
nzo.abort_direct_unpacker()
result += 1
return result
def change_name(self, nzo_id, name, password=None):
if nzo_id in self.__nzo_table:
nzo = self.__nzo_table[nzo_id]
logging.info('Renaming %s to %s', nzo.final_name, name)
# Abort any ongoing unpacking if the name changed (dirs change)
nzo.abort_direct_unpacker()
if not nzo.futuretype:
nzo.set_final_name_and_scan_password(name, password)
else:
# Reset url fetch wait time
nzo.url_wait = None
nzo.url_tries = 0
return True
else:
return False
def get_nzo(self, nzo_id):
if nzo_id in self.__nzo_table:
return self.__nzo_table[nzo_id]
else:
return None
@NzbQueueLocker
def add(self, nzo, save=True, quiet=False):
if not nzo.nzo_id:
nzo.nzo_id = sabnzbd.get_new_id('nzo', nzo.workpath, self.__nzo_table)
# If no files are to be downloaded anymore, send to postproc
if not nzo.files and not nzo.futuretype:
self.end_job(nzo)
return ''
# Reset try_lists
nzo.reset_try_list()
if nzo.nzo_id:
nzo.deleted = False
priority = nzo.priority
if sabnzbd.scheduler.analyse(False, priority):
nzo.status = Status.PAUSED
self.__nzo_table[nzo.nzo_id] = nzo
if priority > HIGH_PRIORITY:
# Top and repair priority items are added to the top of the queue
self.__nzo_list.insert(0, nzo)
elif priority == LOW_PRIORITY:
self.__nzo_list.append(nzo)
else:
# for high priority we need to add the item at the bottom
# of any other high priority items above the normal priority
# for normal priority we need to add the item at the bottom
# of the normal priority items above the low priority
if self.__nzo_list:
pos = 0
added = False
for position in self.__nzo_list:
if position.priority < priority:
self.__nzo_list.insert(pos, nzo)
added = True
break
pos += 1
if not added:
# if there are no other items classed as a lower priority
# then it will be added to the bottom of the queue
self.__nzo_list.append(nzo)
else:
                    # if the queue is empty then simply append the item to the bottom
self.__nzo_list.append(nzo)
if save:
self.save(nzo)
if not (quiet or nzo.status == Status.FETCHING):
notifier.send_notification(T('NZB added to queue'), nzo.filename, 'download', nzo.cat)
if not quiet and cfg.auto_sort():
self.sort_by_avg_age()
return nzo.nzo_id
@NzbQueueLocker
def remove(self, nzo_id, add_to_history=True, cleanup=True, delete_all_data=True):
""" Remove NZO from queue.
It can be added to history directly.
Or, we do some clean-up, sometimes leaving some data.
"""
if nzo_id in self.__nzo_table:
nzo = self.__nzo_table.pop(nzo_id)
logging.info('[%s] Removing job %s', caller_name(), nzo.final_name)
# Set statuses
nzo.deleted = True
if cleanup and not nzo.is_gone():
nzo.status = Status.DELETED
self.__nzo_list.remove(nzo)
if add_to_history:
# Create the history DB instance
history_db = database.HistoryDB()
# Add the nzo to the database. Only the path, script and time taken is passed
# Other information is obtained from the nzo
history_db.add_history_db(nzo)
history_db.close()
sabnzbd.history_updated()
elif cleanup:
nzo.purge_data(delete_all_data=delete_all_data)
self.save(False)
return nzo_id
return None
@NzbQueueLocker
def remove_multiple(self, nzo_ids, delete_all_data=True):
removed = []
for nzo_id in nzo_ids:
if self.remove(nzo_id, add_to_history=False, delete_all_data=delete_all_data):
removed.append(nzo_id)
        # Save with invalid nzo_id, so that only the queue file is saved
self.save(False)
# Any files left? Otherwise let's disconnect
if self.actives(grabs=False) == 0 and cfg.autodisconnect():
# This was the last job, close server connections
sabnzbd.downloader.Downloader.do.disconnect()
return removed
@NzbQueueLocker
def remove_all(self, search=None):
""" Remove NZO's that match the search-pattern """
nzo_ids = []
search = safe_lower(search)
for nzo_id, nzo in self.__nzo_table.items():
if not search or search in nzo.final_name.lower():
nzo_ids.append(nzo_id)
return self.remove_multiple(nzo_ids)
def remove_nzf(self, nzo_id, nzf_id, force_delete=False):
removed = []
if nzo_id in self.__nzo_table:
nzo = self.__nzo_table[nzo_id]
nzf = nzo.get_nzf_by_id(nzf_id)
if nzf:
removed.append(nzf_id)
nzo.abort_direct_unpacker()
post_done = nzo.remove_nzf(nzf)
if post_done:
if nzo.finished_files:
self.end_job(nzo)
else:
                        self.remove(nzo_id, add_to_history=False)  # this `remove` takes no keep_basic argument
elif force_delete:
# Force-remove all trace
nzo.bytes -= nzf.bytes
nzo.bytes_tried -= (nzf.bytes - nzf.bytes_left)
del nzo.files_table[nzf_id]
nzo.finished_files.remove(nzf)
logging.info('Removed NZFs %s from job %s', removed, nzo.final_name)
return removed
def pause_multiple_nzo(self, nzo_ids):
handled = []
for nzo_id in nzo_ids:
self.pause_nzo(nzo_id)
handled.append(nzo_id)
return handled
def pause_nzo(self, nzo_id):
handled = []
if nzo_id in self.__nzo_table:
nzo = self.__nzo_table[nzo_id]
nzo.pause()
logging.info("Paused nzo: %s", nzo_id)
handled.append(nzo_id)
return handled
def resume_multiple_nzo(self, nzo_ids):
handled = []
for nzo_id in nzo_ids:
self.resume_nzo(nzo_id)
handled.append(nzo_id)
return handled
@NzbQueueLocker
def resume_nzo(self, nzo_id):
handled = []
if nzo_id in self.__nzo_table:
nzo = self.__nzo_table[nzo_id]
nzo.resume()
nzo.reset_all_try_lists()
logging.info("Resumed nzo: %s", nzo_id)
handled.append(nzo_id)
return handled
@NzbQueueLocker
def switch(self, item_id_1, item_id_2):
try:
# Allow an index as second parameter, easier for some skins
i = int(item_id_2)
item_id_2 = self.__nzo_list[i].nzo_id
except:
pass
try:
nzo1 = self.__nzo_table[item_id_1]
nzo2 = self.__nzo_table[item_id_2]
except KeyError:
# One or both jobs missing
return -1, 0
if nzo1 == nzo2:
return -1, 0
# get the priorities of the two items
nzo1_priority = nzo1.priority
nzo2_priority = nzo2.priority
try:
# get the item id of the item below to use in priority changing
item_id_3 = self.__nzo_list[i + 1].nzo_id
# if there is an item below the id1 and id2 then we need that too
# to determine whether to change the priority
nzo3 = self.__nzo_table[item_id_3]
nzo3_priority = nzo3.priority
            # if id1 is surrounded by items of a different priority then change its priority to match
if nzo2_priority != nzo1_priority and nzo3_priority != nzo1_priority or nzo2_priority > nzo1_priority:
nzo1.priority = nzo2_priority
except:
nzo1.priority = nzo2_priority
item_id_pos1 = -1
item_id_pos2 = -1
for i in range(len(self.__nzo_list)):
if item_id_1 == self.__nzo_list[i].nzo_id:
item_id_pos1 = i
elif item_id_2 == self.__nzo_list[i].nzo_id:
item_id_pos2 = i
if (item_id_pos1 > -1) and (item_id_pos2 > -1):
item = self.__nzo_list[item_id_pos1]
logging.info('Switching job [%s] %s => [%s] %s', item_id_pos1, item.final_name, item_id_pos2, self.__nzo_list[item_id_pos2].final_name)
del self.__nzo_list[item_id_pos1]
self.__nzo_list.insert(item_id_pos2, item)
return item_id_pos2, nzo1.priority
# If moving failed/no movement took place
return -1, nzo1.priority
@NzbQueueLocker
def move_up_bulk(self, nzo_id, nzf_ids, size):
if nzo_id in self.__nzo_table:
for unused in range(size):
self.__nzo_table[nzo_id].move_up_bulk(nzf_ids)
@NzbQueueLocker
def move_top_bulk(self, nzo_id, nzf_ids):
if nzo_id in self.__nzo_table:
self.__nzo_table[nzo_id].move_top_bulk(nzf_ids)
@NzbQueueLocker
def move_down_bulk(self, nzo_id, nzf_ids, size):
if nzo_id in self.__nzo_table:
for unused in range(size):
self.__nzo_table[nzo_id].move_down_bulk(nzf_ids)
@NzbQueueLocker
def move_bottom_bulk(self, nzo_id, nzf_ids):
if nzo_id in self.__nzo_table:
self.__nzo_table[nzo_id].move_bottom_bulk(nzf_ids)
@NzbQueueLocker
def sort_by_avg_age(self, reverse=False):
logging.info("Sorting by average date... (reversed:%s)", reverse)
self.__nzo_list = sort_queue_function(self.__nzo_list, _nzo_date_cmp, reverse)
@NzbQueueLocker
def sort_by_name(self, reverse=False):
logging.info("Sorting by name... (reversed:%s)", reverse)
self.__nzo_list = sort_queue_function(self.__nzo_list, _nzo_name_cmp, reverse)
@NzbQueueLocker
def sort_by_size(self, reverse=False):
logging.info("Sorting by size... (reversed:%s)", reverse)
self.__nzo_list = sort_queue_function(self.__nzo_list, _nzo_size_cmp, reverse)
def sort_queue(self, field, reverse=None):
if isinstance(reverse, str):
if reverse.lower() == 'desc':
reverse = True
else:
reverse = False
if reverse is None:
reverse = False
if field.lower() == 'name':
self.sort_by_name(reverse)
elif field.lower() == 'size' or field.lower() == 'bytes':
self.sort_by_size(reverse)
elif field.lower() == 'avg_age':
self.sort_by_avg_age(reverse)
else:
logging.debug("Sort: %s not recognized", field)
@NzbQueueLocker
def __set_priority(self, nzo_id, priority):
""" Sets the priority on the nzo and places it in the queue at the appropriate position """
try:
priority = int_conv(priority)
nzo = self.__nzo_table[nzo_id]
nzo_id_pos1 = -1
pos = -1
# If priority == STOP_PRIORITY, then send to queue
if priority == STOP_PRIORITY:
self.end_job(nzo)
return
# Get the current position in the queue
for i in range(len(self.__nzo_list)):
if nzo_id == self.__nzo_list[i].nzo_id:
nzo_id_pos1 = i
break
# Don't change priority and order if priority is the same as asked
if priority == self.__nzo_list[nzo_id_pos1].priority:
return nzo_id_pos1
nzo.set_priority(priority)
if sabnzbd.scheduler.analyse(False, priority) and \
nzo.status in (Status.CHECKING, Status.DOWNLOADING, Status.QUEUED):
nzo.status = Status.PAUSED
elif nzo.status == Status.PAUSED:
nzo.status = Status.QUEUED
nzo.save_to_disk()
if nzo_id_pos1 != -1:
del self.__nzo_list[nzo_id_pos1]
if priority == TOP_PRIORITY:
# A top priority item (usually a completed download fetching pars)
# is added to the top of the queue
self.__nzo_list.insert(0, nzo)
pos = 0
elif priority == LOW_PRIORITY:
pos = len(self.__nzo_list)
self.__nzo_list.append(nzo)
else:
# for high priority we need to add the item at the bottom
# of any other high priority items above the normal priority
# for normal priority we need to add the item at the bottom
# of the normal priority items above the low priority
if self.__nzo_list:
p = 0
added = False
for position in self.__nzo_list:
if position.priority < priority:
self.__nzo_list.insert(p, nzo)
pos = p
added = True
break
p += 1
if not added:
# if there are no other items classed as a lower priority
# then it will be added to the bottom of the queue
pos = len(self.__nzo_list)
self.__nzo_list.append(nzo)
else:
                    # if the queue is empty then simply append the item to the bottom
self.__nzo_list.append(nzo)
pos = 0
logging.info('Set priority=%s for job %s => position=%s ', priority, self.__nzo_table[nzo_id].final_name, pos)
return pos
except:
return -1
@NzbQueueLocker
def set_priority(self, nzo_ids, priority):
try:
n = -1
for nzo_id in [item.strip() for item in nzo_ids.split(',')]:
n = self.__set_priority(nzo_id, priority)
return n
except:
return -1
def reset_try_lists(self, article, article_reset=True):
""" Let article get new fetcher and reset trylists """
article.fetcher = None
if article_reset:
article.reset_try_list()
article.nzf.reset_try_list()
article.nzf.nzo.reset_try_list()
def reset_all_try_lists(self):
for nzo in self.__nzo_list:
nzo.reset_all_try_lists()
def has_forced_items(self):
""" Check if the queue contains any Forced
Priority items to download while paused
"""
for nzo in self.__nzo_list:
if nzo.priority == TOP_PRIORITY and nzo.status not in (Status.PAUSED, Status.GRABBING):
return True
return False
def get_article(self, server, servers):
""" Get next article for jobs in the queue
Not locked for performance, since it only reads the queue
"""
# Pre-calculate propagation delay
        propagation_delay = float(cfg.propagation_delay() * 60)
for nzo in self.__nzo_list:
# Not when queue paused and not a forced item
if nzo.status not in (Status.PAUSED, Status.GRABBING) or nzo.priority == TOP_PRIORITY:
# Check if past propagation delay, or forced
                if not propagation_delay or nzo.priority == TOP_PRIORITY or (nzo.avg_stamp + propagation_delay) < time.time():
if not nzo.server_in_try_list(server):
article = nzo.get_article(server, servers)
if article:
return article
# Stop after first job that wasn't paused/propagating/etc
if self.__top_only:
return
def register_article(self, article, success=True):
""" Register the articles we tried
Not locked for performance, since it only modifies individual NZOs
"""
nzf = article.nzf
nzo = nzf.nzo
if nzf.deleted:
logging.debug("Discarding article %s, no longer in queue", article.article)
return
articles_left, file_done, post_done = nzo.remove_article(article, success)
if nzo.is_gone():
logging.debug('Discarding article for file %s, no longer in queue', nzf.filename)
else:
# Write data if file is done or at trigger time
if file_done or (articles_left and (articles_left % DIRECT_WRITE_TRIGGER) == 0):
if not nzo.precheck:
# Only start decoding if we have a filename and type
# The type is only set if sabyenc could decode the article
if nzf.filename and nzf.type:
Assembler.do.process((nzo, nzf, file_done))
elif nzf.filename.lower().endswith('.par2'):
# Broken par2 file, try to get another one
nzo.promote_par2(nzf)
else:
if file_has_articles(nzf):
logging.warning(T('%s -> Unknown encoding'), nzf.filename)
# Save bookkeeping in case of crash
if file_done and (nzo.next_save is None or time.time() > nzo.next_save):
nzo.save_to_disk()
BPSMeter.do.save()
if nzo.save_timeout is None:
nzo.next_save = None
else:
nzo.next_save = time.time() + nzo.save_timeout
# Remove post from Queue
if post_done:
self.end_job(nzo)
def end_job(self, nzo):
""" Send NZO to the post-processing queue """
logging.info('[%s] Ending job %s', caller_name(), nzo.final_name)
# Notify assembler to call postprocessor
if not nzo.deleted:
nzo.deleted = True
if nzo.precheck:
nzo.save_to_disk()
# Check result
enough, _ratio = nzo.check_availability_ratio()
if enough:
# Enough data present, do real download
self.send_back(nzo)
return
else:
# Not enough data, let postprocessor show it as failed
pass
Assembler.do.process((nzo, None, None))
def actives(self, grabs=True):
""" Return amount of non-paused jobs, optionally with 'grabbing' items
Not locked for performance, only reads the queue
"""
n = 0
for nzo in self.__nzo_list:
# Ignore any items that are paused
if grabs and nzo.status == Status.GRABBING:
n += 1
elif nzo.status not in (Status.PAUSED, Status.GRABBING):
n += 1
return n
def queue_info(self, search=None, start=0, limit=0):
""" Return list of queued jobs,
optionally filtered by 'search' and limited by start and limit.
Not locked for performance, only reads the queue
"""
if search:
search = search.lower()
bytes_left = 0
bytes_total = 0
bytes_left_previous_page = 0
q_size = 0
pnfo_list = []
n = 0
for nzo in self.__nzo_list:
if nzo.status not in (Status.PAUSED, Status.CHECKING) or nzo.priority == TOP_PRIORITY:
b_left = nzo.remaining
bytes_total += nzo.bytes
bytes_left += b_left
q_size += 1
# We need the number of bytes before the current page
if n < start:
bytes_left_previous_page += b_left
if (not search) or search in nzo.final_name.lower():
if (not limit) or (start <= n < start + limit):
pnfo_list.append(nzo.gather_info())
n += 1
if not search:
n = len(self.__nzo_list)
return QNFO(bytes_total, bytes_left, bytes_left_previous_page, pnfo_list, q_size, n)
def remaining(self):
""" Return bytes left in the queue by non-paused items
Not locked for performance, only reads the queue
"""
bytes_left = 0
for nzo in self.__nzo_list:
if nzo.status != Status.PAUSED:
bytes_left += nzo.remaining
return bytes_left
def is_empty(self):
empty = True
for nzo in self.__nzo_list:
if not nzo.futuretype and nzo.status != Status.PAUSED:
empty = False
break
return empty
def stop_idle_jobs(self):
""" Detect jobs that have zero files left and send them to post processing """
empty = []
for nzo in self.__nzo_list:
if not nzo.futuretype and not nzo.files and nzo.status not in (Status.PAUSED, Status.GRABBING):
logging.info('Found idle job %s', nzo.final_name)
empty.append(nzo)
# Stall prevention by checking if all servers are in the trylist
# This is a CPU-cheaper alternative to prevent stalling
if len(nzo.try_list) == sabnzbd.downloader.Downloader.do.server_nr:
# Maybe the NZF's need a reset too?
for nzf in nzo.files:
if len(nzf.try_list) == sabnzbd.downloader.Downloader.do.server_nr:
# We do not want to reset all article trylists, they are good
logging.info('Resetting bad trylist for file %s in job %s', nzf.filename, nzo.final_name)
nzf.reset_try_list()
# Reset main trylist, minimal performance impact
logging.info('Resetting bad trylist for job %s', nzo.final_name)
nzo.reset_try_list()
for nzo in empty:
self.end_job(nzo)
def pause_on_prio(self, priority):
for nzo in self.__nzo_list:
if nzo.priority == priority:
nzo.pause()
@NzbQueueLocker
def resume_on_prio(self, priority):
for nzo in self.__nzo_list:
if nzo.priority == priority:
# Don't use nzo.resume() to avoid resetting job warning flags
nzo.status = Status.QUEUED
def pause_on_cat(self, cat):
for nzo in self.__nzo_list:
if nzo.cat == cat:
nzo.pause()
@NzbQueueLocker
def resume_on_cat(self, cat):
for nzo in self.__nzo_list:
if nzo.cat == cat:
# Don't use nzo.resume() to avoid resetting job warning flags
nzo.status = Status.QUEUED
def get_urls(self):
""" Return list of future-types needing URL """
lst = []
for nzo_id in self.__nzo_table:
nzo = self.__nzo_table[nzo_id]
if nzo.futuretype:
url = nzo.url
                if url.lower().startswith('http'):  # futuretype already checked above
lst.append((url, nzo))
return lst
def __repr__(self):
return "<NzbQueue>"
def _nzo_date_cmp(nzo1, nzo2):
avg_date1 = nzo1.avg_date
avg_date2 = nzo2.avg_date
if avg_date1 is None and avg_date2 is None:
return 0
if avg_date1 is None:
avg_date1 = datetime.datetime.now()
elif avg_date2 is None:
avg_date2 = datetime.datetime.now()
return cmp(avg_date1, avg_date2)
def _nzo_name_cmp(nzo1, nzo2):
return cmp(nzo1.final_name.lower(), nzo2.final_name.lower())
def _nzo_size_cmp(nzo1, nzo2):
return cmp(nzo1.bytes, nzo2.bytes)
def sort_queue_function(nzo_list, method, reverse):
ultra_high_priority = [nzo for nzo in nzo_list if nzo.priority == REPAIR_PRIORITY]
super_high_priority = [nzo for nzo in nzo_list if nzo.priority == TOP_PRIORITY]
high_priority = [nzo for nzo in nzo_list if nzo.priority == HIGH_PRIORITY]
normal_priority = [nzo for nzo in nzo_list if nzo.priority == NORMAL_PRIORITY]
low_priority = [nzo for nzo in nzo_list if nzo.priority == LOW_PRIORITY]
ultra_high_priority.sort(key=functools.cmp_to_key(method), reverse=reverse)
super_high_priority.sort(key=functools.cmp_to_key(method), reverse=reverse)
high_priority.sort(key=functools.cmp_to_key(method), reverse=reverse)
normal_priority.sort(key=functools.cmp_to_key(method), reverse=reverse)
low_priority.sort(key=functools.cmp_to_key(method), reverse=reverse)
new_list = ultra_high_priority
new_list.extend(super_high_priority)
new_list.extend(high_priority)
new_list.extend(normal_priority)
new_list.extend(low_priority)
# Make sure any left-over jobs enter the new list
for item in nzo_list:
if item not in new_list:
new_list.append(item)
return new_list
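# --- illustrative sketch (not part of sabnzbd) ---
# sort_queue_function sorts each priority bucket independently and then
# concatenates the buckets from highest to lowest priority, so a sort can
# never move a low-priority job ahead of a high-priority one. A minimal
# demonstration with stand-in objects and made-up priority values:
import functools

class FakeNzo:
    def __init__(self, name, priority):
        self.final_name = name
        self.priority = priority

HIGH, NORMAL = 1, 0
queue = [FakeNzo("b", NORMAL), FakeNzo("c", HIGH), FakeNzo("a", HIGH)]

def by_name(x, y):
    return (x.final_name > y.final_name) - (x.final_name < y.final_name)

buckets = [sorted((n for n in queue if n.priority == p),
                  key=functools.cmp_to_key(by_name)) for p in (HIGH, NORMAL)]
ordered = [n.final_name for bucket in buckets for n in bucket]
print(ordered)  # ['a', 'c', 'b'] -- high-priority jobs stay in front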
| 39.772331
| 151
| 0.572677
|
4a17ae374709b77704deca0baa98401f9412a475
| 23,347
|
py
|
Python
|
hdf5index.py
|
CodingNowNow/jiaoyi
|
57513f8cf0d282fa70ac9e8e76ff785d7a2a019c
|
[
"MIT"
] | 1
|
2019-03-22T06:36:56.000Z
|
2019-03-22T06:36:56.000Z
|
hdf5index.py
|
nvsnvyu/hikyuu
|
57513f8cf0d282fa70ac9e8e76ff785d7a2a019c
|
[
"MIT"
] | null | null | null |
hdf5index.py
|
nvsnvyu/hikyuu
|
57513f8cf0d282fa70ac9e8e76ff785d7a2a019c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf8 -*-
# cp936
"""
Build the HDF5 index tables.
"""
import datetime
import tables
class IndexRecord(tables.IsDescription):
datetime = tables.UInt64Col() #IGNORE:E1101
start = tables.UInt64Col() #IGNORE:E1101
def UpdateWeekIndex(h5file):
try:
group = h5file.getNode("/","week")
except:
group = h5file.createGroup("/","week")
def getNewDate(olddate):
y = olddate/100000000
m = olddate/1000000 - y*100
d = olddate/10000 - (y*10000+m*100)
tempdate = datetime.date(y,m,d)
tempweekdate = tempdate - datetime.timedelta(tempdate.weekday())
newdate = tempweekdate.year*100000000 + tempweekdate.month*1000000 + tempweekdate.day*10000
return newdate
for table in h5file.walkNodes("/data"):
if type(table) != tables.table.Table:
continue
#print table.name
try:
index_table = h5file.getNode(group,table.name)
except:
index_table = h5file.createTable(group,table.name, IndexRecord)
total = table.nrows
if 0 == total:
continue
index_total = index_table.nrows
index_row = index_table.row
if index_total:
index_last_date = int(index_table[-1]['datetime'])
last_date = getNewDate(int(table[-1]['datetime']))
if index_last_date == last_date:
continue
startix = int(index_table[-1]['start'])
pre_index_date = int(index_table[-1]['datetime'])
else:
startix = 0
pre_index_date = getNewDate(int(table[0]['datetime']))
index_row['datetime'] = pre_index_date
index_row['start'] = 0
index_row.append()
#week_table.flush()
index = startix
for row in table[startix:]:
date = int(row['datetime'])
cur_index_date = getNewDate(date)
if cur_index_date != pre_index_date:
index_row['datetime'] = cur_index_date
index_row['start'] = index
index_row.append()
pre_index_date = cur_index_date
index += 1
index_table.flush()
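# --- illustrative sketch (not part of the original file) ---
# Datetimes are packed into one integer as yyyy*10**8 + mm*10**6 + dd*10**4
# + hhmm; the index builders recover the fields with integer division (the
# original file relies on Python 2's `/` being floor division for ints).
# The same arithmetic with an explicit `//`:
packed = 2019 * 10**8 + 3 * 10**6 + 22 * 10**4 + 1030   # 2019-03-22 10:30
y = packed // 10**8
m = packed // 10**6 - y * 100
d = packed // 10**4 - (y * 10**4 + m * 10**2)
hhmm = packed - packed // 10**4 * 10**4
assert (y, m, d, hhmm) == (2019, 3, 22, 1030)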
def UpdateMonthIndex(h5file):
try:
group = h5file.getNode("/","month")
except:
group = h5file.createGroup("/","month")
def getNewDate(olddate):
y = olddate/100000000
m = olddate/1000000 - y*100
return(y*100000000 + m*1000000 + 10000)
for table in h5file.walkNodes("/data"):
if type(table) != tables.table.Table:
continue
#print table.name
try:
index_table = h5file.getNode(group,table.name)
except:
index_table = h5file.createTable(group,table.name, IndexRecord)
total = table.nrows
if 0 == total:
continue
index_total = index_table.nrows
index_row = index_table.row
if index_total:
index_last_date = int(index_table[-1]['datetime'])
last_date = getNewDate(int(table[-1]['datetime']))
if index_last_date == last_date:
continue
startix = int(index_table[-1]['start'])
pre_index_date = int(index_table[-1]['datetime'])
else:
startix = 0
date = int(table[0]['datetime'])
pre_index_date = getNewDate(date)
index_row['datetime'] = pre_index_date
index_row['start'] = 0
index_row.append()
#week_table.flush()
index = startix
for row in table[startix:]:
date = int(row['datetime'])
cur_index_date = getNewDate(date)
if cur_index_date != pre_index_date:
index_row['datetime'] = cur_index_date
index_row['start'] = index
index_row.append()
pre_index_date = cur_index_date
index += 1
index_table.flush()
def UpdateYearIndex(h5file):
try:
group = h5file.getNode("/","year")
except:
group = h5file.createGroup("/","year")
def getNewDate(olddate):
y = olddate/100000000
return(y*100000000 + 1010000)
for table in h5file.walkNodes("/data"):
if type(table) != tables.table.Table:
continue
#print table.name
try:
index_table = h5file.getNode(group,table.name)
except:
index_table = h5file.createTable(group,table.name, IndexRecord)
total = table.nrows
if 0 == total:
continue
index_total = index_table.nrows
index_row = index_table.row
if index_total:
index_last_date = int(index_table[-1]['datetime'])
last_date = getNewDate(int(table[-1]['datetime']))
if index_last_date == last_date:
continue
startix = int(index_table[-1]['start'])
pre_index_date = int(index_table[-1]['datetime'])
else:
startix = 0
date = int(table[0]['datetime'])
pre_index_date = getNewDate(date)
index_row['datetime'] = pre_index_date
index_row['start'] = 0
index_row.append()
#week_table.flush()
index = startix
for row in table[startix:]:
date = int(row['datetime'])
cur_index_date = getNewDate(date)
if cur_index_date != pre_index_date:
index_row['datetime'] = cur_index_date
index_row['start'] = index
index_row.append()
pre_index_date = cur_index_date
index += 1
index_table.flush()
def UpdateHalfYearIndex(h5file):
try:
group = h5file.getNode("/","halfyear")
except:
group = h5file.createGroup("/","halfyear")
def getNewDate(olddate):
halfyearDict={1:1,2:1,3:1,4:1,5:1,6:1,7:7,8:7,9:7,10:7,11:7,12:7}
y = olddate/100000000
m = olddate/1000000 - y*100
return( y*100000000 + halfyearDict[m]*1000000 + 10000 )
for table in h5file.walkNodes("/data"):
if type(table) != tables.table.Table:
continue
#print table.name
try:
index_table = h5file.getNode(group,table.name)
except:
index_table = h5file.createTable(group,table.name, IndexRecord)
total = table.nrows
if 0 == total:
continue
index_total = index_table.nrows
index_row = index_table.row
if index_total:
index_last_date = int(index_table[-1]['datetime'])
last_date = getNewDate(int(table[-1]['datetime']))
if index_last_date == last_date:
continue
startix = int(index_table[-1]['start'])
pre_index_date = int(index_table[-1]['datetime'])
else:
startix = 0
date = int(table[0]['datetime'])
pre_index_date = getNewDate(date)
index_row['datetime'] = pre_index_date
index_row['start'] = 0
index_row.append()
#week_table.flush()
index = startix
for row in table[startix:]:
date = int(row['datetime'])
cur_index_date = getNewDate(date)
if cur_index_date != pre_index_date:
index_row['datetime'] = cur_index_date
index_row['start'] = index
index_row.append()
pre_index_date = cur_index_date
index += 1
index_table.flush()
def UpdateQuarterIndex(h5file):
try:
group = h5file.getNode("/","quarter")
except:
group = h5file.createGroup("/","quarter")
def getNewDate(olddate):
quarterDict={1:1,2:1,3:1,4:4,5:4,6:4,7:7,8:7,9:7,10:10,11:10,12:10}
y = olddate/100000000
m = olddate/1000000 - y*100
return( y*100000000 + quarterDict[m]*1000000 + 10000 )
for table in h5file.walkNodes("/data"):
if type(table) != tables.table.Table:
continue
#print table.name
try:
index_table = h5file.getNode(group,table.name)
except:
index_table = h5file.createTable(group,table.name, IndexRecord)
total = table.nrows
if 0 == total:
continue
index_total = index_table.nrows
index_row = index_table.row
if index_total:
index_last_date = int(index_table[-1]['datetime'])
last_date = getNewDate(int(table[-1]['datetime']))
if index_last_date == last_date:
continue
startix = int(index_table[-1]['start'])
pre_index_date = int(index_table[-1]['datetime'])
else:
startix = 0
date = int(table[0]['datetime'])
pre_index_date = getNewDate(date)
index_row['datetime'] = pre_index_date
index_row['start'] = 0
index_row.append()
#week_table.flush()
index = startix
for row in table[startix:]:
date = int(row['datetime'])
cur_index_date = getNewDate(date)
if cur_index_date != pre_index_date:
index_row['datetime'] = cur_index_date
index_row['start'] = index
index_row.append()
pre_index_date = cur_index_date
index += 1
index_table.flush()
def UpdateDayIndex(h5file):
try:
group = h5file.getNode("/","day")
except:
group = h5file.createGroup("/","day")
def getNewDate(olddate):
newdate = olddate/10000*10000
return newdate
for table in h5file.walkNodes("/data"):
if type(table) != tables.table.Table:
continue
#print table.name
try:
index_table = h5file.getNode(group,table.name)
except:
index_table = h5file.createTable(group,table.name, IndexRecord)
total = table.nrows
if 0 == total:
continue
index_total = index_table.nrows
index_row = index_table.row
if index_total:
index_last_date = int(index_table[-1]['datetime'])
last_date = getNewDate(int(table[-1]['datetime']))
if index_last_date == last_date:
continue
startix = int(index_table[-1]['start'])
pre_index_date = int(index_table[-1]['datetime'])
else:
startix = 0
date = int(table[0]['datetime'])
pre_index_date = getNewDate(date)
index_row['datetime'] = pre_index_date
index_row['start'] = 0
index_row.append()
#week_table.flush()
index = startix
for row in table[startix:]:
date = int(row['datetime'])
cur_index_date = getNewDate(date)
if cur_index_date != pre_index_date:
index_row['datetime'] = cur_index_date
index_row['start'] = index
index_row.append()
pre_index_date = cur_index_date
index += 1
index_table.flush()
def UpdateHourIndex(h5file):
try:
group = h5file.getNode("/","min60")
except:
group = h5file.createGroup("/","min60")
def getNewDate(olddate):
min = olddate-olddate/10000*10000
if min<=1030:
newdate = olddate/10000*10000 + 1030
elif min<=1130:
newdate = olddate/10000*10000 + 1130
elif min<=1400:
newdate = olddate/10000*10000 + 1400
else:
newdate = olddate/10000*10000 + 1500
return newdate
for table in h5file.walkNodes("/data"):
if type(table) != tables.table.Table:
continue
#print table.name
try:
index_table = h5file.getNode(group,table.name)
except:
index_table = h5file.createTable(group,table.name, IndexRecord)
total = table.nrows
if 0 == total:
continue
index_total = index_table.nrows
index_row = index_table.row
if index_total:
index_last_date = int(index_table[-1]['datetime'])
last_date = getNewDate(int(table[-1]['datetime']))
if index_last_date == last_date:
continue
startix = int(index_table[-1]['start'])
pre_index_date = int(index_table[-1]['datetime'])
else:
startix = 0
date = int(table[0]['datetime'])
pre_index_date = getNewDate(date)
index_row['datetime'] = pre_index_date
index_row['start'] = 0
index_row.append()
index = startix
for row in table[startix:]:
date = int(row['datetime'])
cur_index_date = getNewDate(date)
if cur_index_date != pre_index_date:
index_row['datetime'] = cur_index_date
index_row['start'] = index
index_row.append()
pre_index_date = cur_index_date
index += 1
index_table.flush()
def UpdateFifteenMinIndex(h5file):
try:
group = h5file.getNode("/","min15")
except:
group = h5file.createGroup("/","min15")
def getNewDate(olddate):
min = olddate-olddate/10000*10000
if min<=945:
newdate = olddate/10000*10000 + 945
elif min<=1000:
newdate = olddate/10000*10000 + 1000
elif min<=1015:
newdate = olddate/10000*10000 + 1015
elif min<=1030:
newdate = olddate/10000*10000 + 1030
elif min<=1045:
newdate = olddate/10000*10000 + 1045
elif min<=1100:
newdate = olddate/10000*10000 + 1100
elif min<=1115:
newdate = olddate/10000*10000 + 1115
elif min<=1130:
newdate = olddate/10000*10000 + 1130
elif min<=1315:
newdate = olddate/10000*10000 + 1315
elif min<=1330:
newdate = olddate/10000*10000 + 1330
elif min<=1345:
newdate = olddate/10000*10000 + 1345
elif min<=1400:
newdate = olddate/10000*10000 + 1400
elif min<=1415:
newdate = olddate/10000*10000 + 1415
elif min<=1430:
newdate = olddate/10000*10000 + 1430
elif min<=1445:
newdate = olddate/10000*10000 + 1445
else:
newdate = olddate/10000*10000 + 1500
return newdate
for table in h5file.walkNodes("/data"):
if type(table) != tables.table.Table:
continue
#print table.name
try:
index_table = h5file.getNode(group,table.name)
except:
index_table = h5file.createTable(group,table.name, IndexRecord)
total = table.nrows
if 0 == total:
continue
index_total = index_table.nrows
index_row = index_table.row
if index_total:
index_last_date = int(index_table[-1]['datetime'])
last_date = getNewDate(int(table[-1]['datetime']))
if index_last_date == last_date:
continue
startix = int(index_table[-1]['start'])
pre_index_date = int(index_table[-1]['datetime'])
else:
startix = 0
date = int(table[0]['datetime'])
pre_index_date = getNewDate(date)
index_row['datetime'] = pre_index_date
index_row['start'] = 0
index_row.append()
index = startix
for row in table[startix:]:
date = int(row['datetime'])
cur_index_date = getNewDate(date)
if cur_index_date != pre_index_date:
index_row['datetime'] = cur_index_date
index_row['start'] = index
index_row.append()
pre_index_date = cur_index_date
index += 1
index_table.flush()
def UpdateHalfHourIndex(h5file):
try:
group = h5file.getNode("/","min30")
except:
group = h5file.createGroup("/","min30")
def getNewDate(olddate):
min = olddate-olddate/10000*10000
if min<=1000:
newdate = olddate/10000*10000 + 1000
elif min<=1030:
newdate = olddate/10000*10000 + 1030
elif min<=1100:
newdate = olddate/10000*10000 + 1100
elif min<=1130:
newdate = olddate/10000*10000 + 1130
elif min<=1330:
newdate = olddate/10000*10000 + 1330
elif min<=1400:
newdate = olddate/10000*10000 + 1400
elif min<=1430:
newdate = olddate/10000*10000 + 1430
else:
newdate = olddate/10000*10000 + 1500
return newdate
for table in h5file.walkNodes("/data"):
if type(table) != tables.table.Table:
continue
#print table.name
try:
index_table = h5file.getNode(group,table.name)
except:
index_table = h5file.createTable(group,table.name, IndexRecord)
total = table.nrows
if 0 == total:
continue
index_total = index_table.nrows
index_row = index_table.row
if index_total:
index_last_date = int(index_table[-1]['datetime'])
last_date = getNewDate(int(table[-1]['datetime']))
if index_last_date == last_date:
continue
startix = int(index_table[-1]['start'])
pre_index_date = int(index_table[-1]['datetime'])
else:
startix = 0
date = int(table[0]['datetime'])
pre_index_date = getNewDate(date)
index_row['datetime'] = pre_index_date
index_row['start'] = 0
index_row.append()
index = startix
for row in table[startix:]:
date = int(row['datetime'])
cur_index_date = getNewDate(date)
if cur_index_date != pre_index_date:
index_row['datetime'] = cur_index_date
index_row['start'] = index
index_row.append()
pre_index_date = cur_index_date
index += 1
index_table.flush()
def UpdateFiveMinIndex(h5file):
try:
group = h5file.getNode("/","min5")
except:
group = h5file.createGroup("/","min5")
def getNewDate(olddate):
newdate = olddate/100*100
min = olddate-newdate
if min == 0:
pass
elif min <= 5:
newdate += 5
elif min<=10:
newdate += 10
elif min<=15:
newdate += 15
elif min<=20:
newdate += 20
elif min<=25:
newdate += 25
elif min<=30:
newdate += 30
elif min<=35:
newdate += 35
elif min<=40:
newdate += 40
elif min<=45:
newdate += 45
elif min<=50:
newdate += 50
elif min<=55:
newdate += 55
else:
newdate += 100
return newdate
for table in h5file.walkNodes("/data"):
if type(table) != tables.table.Table:
continue
#print table.name
try:
index_table = h5file.getNode(group,table.name)
except:
index_table = h5file.createTable(group,table.name, IndexRecord)
total = table.nrows
if 0 == total:
continue
index_total = index_table.nrows
index_row = index_table.row
if index_total:
index_last_date = int(index_table[-1]['datetime'])
last_date = getNewDate(int(table[-1]['datetime']))
if index_last_date == last_date:
continue
startix = int(index_table[-1]['start'])
pre_index_date = int(index_table[-1]['datetime'])
else:
startix = 0
date = int(table[0]['datetime'])
pre_index_date = getNewDate(date)
index_row['datetime'] = pre_index_date
index_row['start'] = 0
index_row.append()
#week_table.flush()
index = startix
for row in table[startix:]:
date = int(row['datetime'])
cur_index_date = getNewDate(date)
if cur_index_date != pre_index_date:
index_row['datetime'] = cur_index_date
index_row['start'] = index
index_row.append()
pre_index_date = cur_index_date
index += 1
index_table.flush()
def UpdateDayDataAllIndex(h5file):
UpdateWeekIndex(h5file)
UpdateMonthIndex(h5file)
UpdateQuarterIndex(h5file)
UpdateHalfYearIndex(h5file)
UpdateYearIndex(h5file)
def Update5MinDataAllIndex(h5file):
UpdateFifteenMinIndex(h5file)
UpdateHalfHourIndex(h5file)
UpdateHourIndex(h5file)
#UpdateDayIndex(h5file)
#UpdateWeekIndex(h5file)
#UpdateMonthIndex(h5file)
#UpdateQuarterIndex(h5file)
#UpdateHalfYearIndex(h5file)
#UpdateYearIndex(h5file)
def Update1MinDataAllIndex(h5file):
UpdateFiveMinIndex(h5file)
#UpdateFifteenMinIndex(h5file)
#UpdateHalfHourIndex(h5file)
#UpdateHourIndex(h5file)
#UpdateDayIndex(h5file)
#UpdateWeekIndex(h5file)
#UpdateMonthIndex(h5file)
#UpdateQuarterIndex(h5file)
#UpdateHalfYearIndex(h5file)
#UpdateYearIndex(h5file)
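# A minimal consumption sketch (editor's addition, not part of the original
# tool): every index row stores a bucket's closing datetime plus the offset of
# its first bar in the raw '/data' table, so consecutive 'start' values delimit
# one aggregated bar. Group and table names are illustrative assumptions.
def example_iter_index(h5file, group_name, table_name):
    data_table = h5file.getNode('/data', table_name)
    index_table = h5file.getNode('/' + group_name, table_name)
    for i in xrange(index_table.nrows):
        start = int(index_table[i]['start'])
        if i + 1 < index_table.nrows:
            end = int(index_table[i + 1]['start'])
        else:
            end = data_table.nrows
        yield int(index_table[i]['datetime']), data_table[start:end]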
if __name__ == "__main__":
import time
starttime = time.time()
print "\nUpdate SH Day Data index ================> "
h5file = tables.openFile('d:/workspace/hikyuu/test/data/sh_day.h5', mode='a',
filters=tables.Filters(complevel=9,complib='zlib', shuffle=True))
UpdateDayDataAllIndex(h5file)
h5file.close()
print "\nUpdate SH 5min Data index ================> "
h5file = tables.openFile('d:/workspace/hikyuu/test/data/sh_5min.h5', mode='a',
filters=tables.Filters(complevel=9,complib='zlib', shuffle=True))
Update5MinDataAllIndex(h5file)
h5file.close()
print "\nUpdate SZ Day Data index ================> "
h5file = tables.openFile('d:/workspace/hikyuu/test/data/sz_day.h5', mode='a',
filters=tables.Filters(complevel=9,complib='zlib', shuffle=True))
UpdateDayDataAllIndex(h5file)
h5file.close()
print "\nUpdate SZ 5min Data index ================> "
h5file = tables.openFile('d:/workspace/hikyuu/test/data/sz_5min.h5', mode='a',
filters=tables.Filters(complevel=9,complib='zlib', shuffle=True))
Update5MinDataAllIndex(h5file)
h5file.close()
endtime = time.time()
print "\nTotal time:"
print "%.2fs" % (endtime-starttime)
print "%.2fm" % ((endtime-starttime)/60)
| 32.381415
| 103
| 0.544524
|
4a17afbc52e5d6b325da534ac2ba0e25af4f5e88
| 1,065
|
bzl
|
Python
|
js/js_grpc_web_library.bzl
|
ashwin153/rules_proto_grpc
|
093ffe3d6303bcd848121db4fa347ba6c9d3985a
|
[
"Apache-2.0"
] | 162
|
2019-07-15T22:35:30.000Z
|
2022-03-28T21:16:29.000Z
|
js/js_grpc_web_library.bzl
|
ashwin153/rules_proto_grpc
|
093ffe3d6303bcd848121db4fa347ba6c9d3985a
|
[
"Apache-2.0"
] | 166
|
2019-07-18T16:07:51.000Z
|
2022-03-31T17:35:39.000Z
|
js/js_grpc_web_library.bzl
|
ashwin153/rules_proto_grpc
|
093ffe3d6303bcd848121db4fa347ba6c9d3985a
|
[
"Apache-2.0"
] | 92
|
2019-08-21T04:15:24.000Z
|
2022-03-11T18:42:54.000Z
|
"""Generated definition of js_grpc_web_library."""
load("//js:js_grpc_web_compile.bzl", "js_grpc_web_compile")
load("//internal:compile.bzl", "proto_compile_attrs")
load("@build_bazel_rules_nodejs//:index.bzl", "js_library")
def js_grpc_web_library(name, **kwargs):
# Compile protos
name_pb = name + "_pb"
js_grpc_web_compile(
name = name_pb,
**{
k: v
for (k, v) in kwargs.items()
if k in proto_compile_attrs.keys()
} # Forward args
)
# Resolve deps
deps = [
dep.replace("@npm", kwargs.get("deps_repo", "@npm"))
for dep in GRPC_DEPS
]
# Create js library
js_library(
name = name,
srcs = [name_pb],
deps = deps + kwargs.get("deps", []),
package_name = kwargs.get("package_name", name),
strip_prefix = name_pb if not kwargs.get("legacy_path") else None,
visibility = kwargs.get("visibility"),
tags = kwargs.get("tags"),
)
GRPC_DEPS = [
"@npm//google-protobuf",
"@npm//grpc-web",
]
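# A hypothetical BUILD usage sketch (editor's addition, not from this repo);
# it assumes proto_compile_attrs forwards a "protos" attribute as in current
# rules_proto_grpc releases (older releases exposed "deps" instead):
#
#   load("@rules_proto_grpc//js:defs.bzl", "js_grpc_web_library")
#
#   js_grpc_web_library(
#       name = "greeter_grpc_web",
#       protos = ["//proto:greeter_proto"],
#       visibility = ["//visibility:public"],
#   )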
| 26.625
| 74
| 0.585915
|
4a17afc86c3a9d35002a2ac15fa9b6e0eecdc7d3
| 449
|
py
|
Python
|
website/tracks/migrations/0015_auto_20161209_1248.py
|
abecede753/trax
|
00fb2abdcab5b5ff564e2e97d26e5b16c631b778
|
[
"MIT"
] | null | null | null |
website/tracks/migrations/0015_auto_20161209_1248.py
|
abecede753/trax
|
00fb2abdcab5b5ff564e2e97d26e5b16c631b778
|
[
"MIT"
] | 10
|
2016-11-28T09:32:53.000Z
|
2018-03-18T10:24:14.000Z
|
website/tracks/migrations/0015_auto_20161209_1248.py
|
abecede753/trax
|
00fb2abdcab5b5ff564e2e97d26e5b16c631b778
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-12-09 12:48
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tracks', '0014_auto_20161206_1246'),
]
operations = [
migrations.AlterField(
model_name='laptime',
name='recorded',
field=models.DateField(null=True),
),
]
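# To apply this migration (standard Django workflow, not part of the generated
# file): python manage.py migrate tracks 0015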
| 21.380952
| 48
| 0.614699
|
4a17afd878f17d509df58fc4f883d34fe4506c64
| 709
|
py
|
Python
|
Program_3.py
|
Saumya-09/Machine_learning
|
ac7ea20b6a65efbbd8bad36c5b9512468ca40486
|
[
"MIT"
] | null | null | null |
Program_3.py
|
Saumya-09/Machine_learning
|
ac7ea20b6a65efbbd8bad36c5b9512468ca40486
|
[
"MIT"
] | null | null | null |
Program_3.py
|
Saumya-09/Machine_learning
|
ac7ea20b6a65efbbd8bad36c5b9512468ca40486
|
[
"MIT"
] | null | null | null |
#Write a Python program to perform linear classification with a single
#perceptron. The training data below encodes the OR gate; an AND-gate variant
#is sketched at the end of the file, and both gates are linearly separable.
#Code:
from random import choice
from numpy import array, dot, random
def unit_step(x): return 0 if x < 0.5 else 1
training_data = [
(array([0, 0, 1]), 0),
(array([0, 1, 1]), 1),
(array([1, 0, 1]), 1),
(array([1, 1, 1]), 1),
]
w = random.rand(3)
errors = []
n = 100
try:
xrange
except NameError:
xrange = range
for i in xrange(n):
x, expected = choice(training_data)
result = dot(w, x)
error = expected - unit_step(result)
errors.append(error)
w += error * x
for x, _ in training_data:
result = dot(x, w)
print("{}: {} -> {}".format(x[:2], result, unit_step(result)))
| 19.694444
| 80
| 0.600846
|
4a17b0646716da0295ed8aab0c6e2e2ef68814fc
| 11,146
|
py
|
Python
|
fairseq/tasks/multitask_translation.py
|
wirehack/multitask_transformer
|
a973e3cd43bc7d2dbcda5ffa4b1eafcaba936afe
|
[
"BSD-3-Clause"
] | 1
|
2019-10-26T16:29:10.000Z
|
2019-10-26T16:29:10.000Z
|
fairseq/tasks/multitask_translation.py
|
wirehack/multitask_transformer
|
a973e3cd43bc7d2dbcda5ffa4b1eafcaba936afe
|
[
"BSD-3-Clause"
] | null | null | null |
fairseq/tasks/multitask_translation.py
|
wirehack/multitask_transformer
|
a973e3cd43bc7d2dbcda5ffa4b1eafcaba936afe
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import itertools
import os
import torch
from fairseq import options, utils
from fairseq.data import (
ConcatDataset,
data_utils,
Dictionary,
IndexedCachedDataset,
IndexedDataset,
IndexedRawTextDataset,
LanguagePairDataset,
LanguageTripleDataset
)
from . import FairseqTask, register_task
@register_task('multitask_translation')
class MultitaskTranslationTask(FairseqTask):
"""
Translate from one (source) language to another (target) language.
Args:
src_dict (Dictionary): dictionary for the source language
tgt_dict (Dictionary): dictionary for the target language
.. note::
The translation task is compatible with :mod:`fairseq-train`,
:mod:`fairseq-generate` and :mod:`fairseq-interactive`.
The translation task provides the following additional command-line
arguments:
.. argparse::
:ref: fairseq.tasks.translation_parser
:prog:
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
# fmt: off
        parser.add_argument('data', nargs='+', help='path(s) to data directories')
parser.add_argument('-s', '--source-lang', default=None, metavar='SRC',
help='source language')
parser.add_argument('-t', '--target-lang', default=None, metavar='TARGET',
help='target language')
parser.add_argument('--lazy-load', action='store_true',
help='load the dataset lazily')
parser.add_argument('--raw-text', action='store_true',
help='load raw text dataset')
parser.add_argument('--left-pad-source', default='True', type=str, metavar='BOOL',
help='pad the source on the left')
parser.add_argument('--left-pad-target', default='False', type=str, metavar='BOOL',
help='pad the target on the left')
parser.add_argument('--max-source-positions', default=1024, type=int, metavar='N',
help='max number of tokens in the source sequence')
parser.add_argument('--max-target-positions', default=1024, type=int, metavar='N',
help='max number of tokens in the target sequence')
parser.add_argument('--upsample-primary', default=1, type=int,
help='amount to upsample primary dataset')
# fmt: on
@staticmethod
def load_pretrained_model(path, src_dict_path, tgt_dict_path, arg_overrides=None):
model = utils.load_checkpoint_to_cpu(path)
args = model['args']
state_dict = model['model']
args = utils.override_model_args(args, arg_overrides)
src_dict = Dictionary.load(src_dict_path)
tgt_dict = Dictionary.load(tgt_dict_path)
assert src_dict.pad() == tgt_dict.pad()
assert src_dict.eos() == tgt_dict.eos()
assert src_dict.unk() == tgt_dict.unk()
task = MultitaskTranslationTask(args, src_dict, tgt_dict)
model = task.build_model(args)
model.upgrade_state_dict(state_dict)
model.load_state_dict(state_dict, strict=True)
return model
def __init__(self, args, src_dict, tgt_dict):
super().__init__(args)
self.src_dict = src_dict
self.tgt_dict = tgt_dict
self.translation_datasets = {}
def inference_step(self, generator, models, sample, prefix_tokens=None, bos_token=None,
is_translation=False,
noisy_clean_outs=None):
with torch.no_grad():
return generator.generate(models, sample, prefix_tokens=prefix_tokens, bos_token=bos_token,
is_translation=is_translation,
noisy_clean_outs=noisy_clean_outs
)
@classmethod
def setup_task(cls, args, **kwargs):
"""Setup the task (e.g., load dictionaries).
Args:
args (argparse.Namespace): parsed command-line arguments
"""
args.left_pad_source = options.eval_bool(args.left_pad_source)
args.left_pad_target = options.eval_bool(args.left_pad_target)
# find language pair automatically
if args.source_lang is None or args.target_lang is None:
args.source_lang, args.target_lang = data_utils.infer_language_pair(args.data[0])
if args.source_lang is None or args.target_lang is None:
raise Exception('Could not infer language pair, please provide it explicitly')
# load dictionaries
src_dict = cls.load_dictionary(os.path.join(args.data[0], 'dict.{}.txt'.format(args.source_lang)))
tgt_dict = cls.load_dictionary(os.path.join(args.data[0], 'dict.{}.txt'.format(args.target_lang)))
assert src_dict.pad() == tgt_dict.pad()
assert src_dict.eos() == tgt_dict.eos()
assert src_dict.unk() == tgt_dict.unk()
print('| [{}] dictionary: {} types'.format(args.source_lang, len(src_dict)))
print('| [{}] dictionary: {} types'.format(args.target_lang, len(tgt_dict)))
return cls(args, src_dict, tgt_dict)
def load_dataset(self, split, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
def split_exists(split, src, tgt, lang, data_path):
filename = os.path.join(data_path, '{}.{}-{}.{}'.format(split, src, tgt, lang))
if self.args.raw_text and IndexedRawTextDataset.exists(filename):
return True
elif not self.args.raw_text and IndexedDataset.exists(filename):
return True
return False
def indexed_dataset(path, dictionary):
if self.args.raw_text:
return IndexedRawTextDataset(path, dictionary)
elif IndexedDataset.exists(path):
if self.args.lazy_load:
return IndexedDataset(path, fix_lua_indexing=True)
else:
return IndexedCachedDataset(path, fix_lua_indexing=True)
return None
src_datasets = []
tgt_clean_datasets = []
tgt_translation_datasets = []
data_paths = self.args.data
for dk, data_path in enumerate(data_paths):
for k in itertools.count():
split_k = split + (str(k) if k > 0 else '')
# infer langcode
src, tgt = self.args.source_lang, self.args.target_lang
if split_exists(split_k, src, tgt, src, data_path):
prefix = os.path.join(data_path, '{}.{}-{}.'.format(split_k, src, tgt))
prefix_noisy = os.path.join(data_path, '{}.noisy-{}-{}.'.format(split_k, src, src))
                elif split_exists(split_k, tgt, src, src, data_path):
                    # Loading the reversed pair (prefix
                    # '{}.{}-{}.'.format(split_k, tgt, src)) is not supported
                    # by this task yet.
                    raise NotImplementedError
else:
if k > 0 or dk > 0:
break
else:
raise FileNotFoundError('Dataset not found: {} ({})'.format(split, data_path))
src_datasets.append(indexed_dataset(prefix_noisy + 'noisy-' + src, self.src_dict))
tgt_clean_datasets.append(indexed_dataset(prefix + src, self.src_dict))
tgt_translation_datasets.append(indexed_dataset(prefix + tgt, self.tgt_dict))
print('| {} {} {} examples'.format(data_path, split_k, len(src_datasets[-1])))
if not combine:
break
assert len(src_datasets) == len(tgt_clean_datasets)
assert len(src_datasets) == len(tgt_translation_datasets)
if len(src_datasets) == 1:
src_dataset, tgt_clean_dataset, tgt_trans_dataset = src_datasets[0], \
tgt_clean_datasets[0], \
tgt_translation_datasets[0]
        else:
            # Concatenating multiple dataset shards with upsample_primary
            # ratios (via ConcatDataset) is not implemented for the triple
            # dataset yet.
            raise NotImplementedError
self.datasets[split] = LanguageTripleDataset(
src_dataset, src_dataset.sizes, self.src_dict,
tgt_clean_dataset, tgt_clean_dataset.sizes, self.src_dict,
tgt_trans_dataset, tgt_trans_dataset.sizes, self.tgt_dict,
left_pad_source=self.args.left_pad_source,
left_pad_target=self.args.left_pad_target,
max_source_positions=self.args.max_source_positions,
max_target_positions=self.args.max_target_positions)
    def build_generator(self, args):
        if args.score_reference:
            # Reference scoring via fairseq.sequence_scorer.SequenceScorer is
            # not supported by this task yet.
            raise NotImplementedError
else:
from fairseq.multitask_sequence_generator import MultitaskSequenceGenerator
return MultitaskSequenceGenerator(
self.source_dictionary,
self.target_dictionary,
beam_size=args.beam,
max_len_a=args.max_len_a,
max_len_b=args.max_len_b,
min_len=args.min_len,
stop_early=(not args.no_early_stop),
normalize_scores=(not args.unnormalized),
len_penalty=args.lenpen,
unk_penalty=args.unkpen,
sampling=args.sampling,
sampling_topk=args.sampling_topk,
sampling_temperature=args.sampling_temperature,
diverse_beam_groups=args.diverse_beam_groups,
diverse_beam_strength=args.diverse_beam_strength,
match_source_len=args.match_source_len,
no_repeat_ngram_size=args.no_repeat_ngram_size,
)
def build_dataset_for_inference(self, src_tokens, src_lengths):
return LanguagePairDataset(src_tokens, src_lengths, self.source_dictionary)
def max_positions(self):
"""Return the max sentence length allowed by the task."""
return (self.args.max_source_positions, self.args.max_target_positions)
@property
def source_dictionary(self):
"""Return the source :class:`~fairseq.data.Dictionary`."""
return self.src_dict
@property
def target_dictionary(self):
"""Return the target :class:`~fairseq.data.Dictionary`."""
return self.tgt_dict
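# A hypothetical training invocation (editor's addition); the data path and
# language codes are placeholders, and only flags defined by this task's
# add_args (plus the standard --task selector) are shown:
#
#   fairseq-train data-bin/noisy-en-de \
#       --task multitask_translation \
#       --source-lang en --target-lang de \
#       --left-pad-source True --left-pad-target False \
#       --max-source-positions 1024 --max-target-positions 1024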
| 43.36965
| 106
| 0.611251
|
4a17b0e594052fb06783ed0482621e605c13356a
| 10,840
|
py
|
Python
|
abcpy/NN_utilities/algorithms.py
|
anish-lu-yihe/abcpy
|
be58367c4d7e38ee696238e3d8405e8abe2defb7
|
[
"BSD-3-Clause-Clear"
] | 1
|
2021-08-24T10:40:55.000Z
|
2021-08-24T10:40:55.000Z
|
abcpy/NN_utilities/algorithms.py
|
anish-lu-yihe/abcpy
|
be58367c4d7e38ee696238e3d8405e8abe2defb7
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
abcpy/NN_utilities/algorithms.py
|
anish-lu-yihe/abcpy
|
be58367c4d7e38ee696238e3d8405e8abe2defb7
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
try:
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.utils.data import Dataset
from abcpy.NN_utilities.datasets import Similarities, SiameseSimilarities, TripletSimilarities, \
ParameterSimulationPairs
from abcpy.NN_utilities.losses import ContrastiveLoss, TripletLoss
from abcpy.NN_utilities.networks import SiameseNet, TripletNet
from abcpy.NN_utilities.trainer import fit
except ImportError:
has_torch = False
else:
has_torch = True
def contrastive_training(samples, similarity_set, embedding_net, cuda, batch_size=16, n_epochs=200,
samples_val=None, similarity_set_val=None, early_stopping=False,
epochs_early_stopping_interval=1,
start_epoch_early_stopping=10, positive_weight=None, load_all_data_GPU=False, margin=1.,
lr=None, optimizer=None, scheduler=None, start_epoch_training=0, use_tqdm=True,
optimizer_kwargs={}, scheduler_kwargs={}, loader_kwargs={}):
""" Implements the algorithm for the contrastive distance learning training of a neural network; need to be
provided with a set of samples and the corresponding similarity matrix"""
# If the dataset is small enough, we can speed up training by loading all on the GPU at beginning, by using
# load_all_data_GPU=True. It may crash if the dataset is too large. Note that in some cases using only CPU may still
# be quicker.
# Do all the setups
# need to use the Similarities and SiameseSimilarities datasets
similarities_dataset = Similarities(samples, similarity_set, "cuda" if cuda and load_all_data_GPU else "cpu")
pairs_dataset = SiameseSimilarities(similarities_dataset, positive_weight=positive_weight)
if (samples_val is None) != (similarity_set_val is None):
raise RuntimeError("val samples and similarity set need to be provided together.")
if samples_val is not None:
similarities_dataset_val = Similarities(samples_val, similarity_set_val,
"cuda" if cuda and load_all_data_GPU else "cpu")
pairs_dataset_val = SiameseSimilarities(similarities_dataset_val, positive_weight=positive_weight)
if cuda:
if load_all_data_GPU:
loader_kwargs_2 = {'num_workers': 0, 'pin_memory': False}
else:
loader_kwargs_2 = {'num_workers': 1, 'pin_memory': True}
else:
loader_kwargs_2 = {}
    # Copy before merging so the shared mutable default dict is never mutated
    # across calls.
    loader_kwargs = dict(loader_kwargs, **loader_kwargs_2)
pairs_train_loader = torch.utils.data.DataLoader(pairs_dataset, batch_size=batch_size, shuffle=True,
**loader_kwargs)
if samples_val is not None:
pairs_train_loader_val = torch.utils.data.DataLoader(pairs_dataset_val, batch_size=batch_size, shuffle=False,
**loader_kwargs)
else:
pairs_train_loader_val = None
model_contrastive = SiameseNet(embedding_net)
if cuda:
model_contrastive.cuda()
loss_fn = ContrastiveLoss(margin)
if lr is None:
lr = 1e-3
if optimizer is None: # default value
optimizer = optim.Adam(embedding_net.parameters(), lr=lr, **optimizer_kwargs)
else:
optimizer = optimizer(embedding_net.parameters(), lr=lr, **optimizer_kwargs)
if scheduler is None: # default value, i.e. a dummy scheduler
scheduler = lr_scheduler.StepLR(optimizer, 8, gamma=1, last_epoch=-1)
else:
scheduler = scheduler(optimizer, **scheduler_kwargs)
# now train:
fit(pairs_train_loader, model_contrastive, loss_fn, optimizer, scheduler, n_epochs, cuda,
val_loader=pairs_train_loader_val,
early_stopping=early_stopping, start_epoch_early_stopping=start_epoch_early_stopping,
epochs_early_stopping_interval=epochs_early_stopping_interval, start_epoch_training=start_epoch_training,
use_tqdm=use_tqdm)
return embedding_net
def triplet_training(samples, similarity_set, embedding_net, cuda, batch_size=16, n_epochs=400,
samples_val=None, similarity_set_val=None, early_stopping=False, epochs_early_stopping_interval=1,
start_epoch_early_stopping=10,
load_all_data_GPU=False, margin=1., lr=None, optimizer=None, scheduler=None,
start_epoch_training=0, use_tqdm=True,
optimizer_kwargs={}, scheduler_kwargs={}, loader_kwargs={}):
""" Implements the algorithm for the triplet distance learning training of a neural network; need to be
provided with a set of samples and the corresponding similarity matrix"""
# If the dataset is small enough, we can speed up training by loading all on the GPU at beginning, by using
# load_all_data_GPU=True. It may crash if the dataset is too large. Note that in some cases using only CPU may still
# be quicker.
# Do all the setups
# need to use the Similarities and TripletSimilarities datasets
similarities_dataset = Similarities(samples, similarity_set, "cuda" if cuda and load_all_data_GPU else "cpu")
triplets_dataset = TripletSimilarities(similarities_dataset)
if (samples_val is None) != (similarity_set_val is None):
raise RuntimeError("val samples and similarity set need to be provided together.")
if samples_val is not None:
similarities_dataset_val = Similarities(samples_val, similarity_set_val,
"cuda" if cuda and load_all_data_GPU else "cpu")
triplets_dataset_val = TripletSimilarities(similarities_dataset_val)
if cuda:
if load_all_data_GPU:
loader_kwargs_2 = {'num_workers': 0, 'pin_memory': False}
else:
loader_kwargs_2 = {'num_workers': 1, 'pin_memory': True}
else:
loader_kwargs_2 = {}
    # Copy before merging so the shared mutable default dict is never mutated
    # across calls.
    loader_kwargs = dict(loader_kwargs, **loader_kwargs_2)
triplets_train_loader = torch.utils.data.DataLoader(triplets_dataset, batch_size=batch_size, shuffle=True,
**loader_kwargs)
if samples_val is not None:
triplets_train_loader_val = torch.utils.data.DataLoader(triplets_dataset_val, batch_size=batch_size,
shuffle=False, **loader_kwargs)
else:
triplets_train_loader_val = None
model_triplet = TripletNet(embedding_net)
if cuda:
model_triplet.cuda()
loss_fn = TripletLoss(margin)
if lr is None:
lr = 1e-3
if optimizer is None: # default value
optimizer = optim.Adam(embedding_net.parameters(), lr=lr, **optimizer_kwargs)
else:
optimizer = optimizer(embedding_net.parameters(), lr=lr, **optimizer_kwargs)
if scheduler is None: # default value, i.e. a dummy scheduler
scheduler = lr_scheduler.StepLR(optimizer, 8, gamma=1, last_epoch=-1)
else:
scheduler = scheduler(optimizer, **scheduler_kwargs)
# now train:
fit(triplets_train_loader, model_triplet, loss_fn, optimizer, scheduler, n_epochs, cuda,
val_loader=triplets_train_loader_val,
early_stopping=early_stopping, start_epoch_early_stopping=start_epoch_early_stopping,
epochs_early_stopping_interval=epochs_early_stopping_interval, start_epoch_training=start_epoch_training, use_tqdm=use_tqdm)
return embedding_net
def FP_nn_training(samples, target, embedding_net, cuda, batch_size=1, n_epochs=50, samples_val=None, target_val=None,
early_stopping=False, epochs_early_stopping_interval=1, start_epoch_early_stopping=10,
load_all_data_GPU=False,
lr=1e-3, optimizer=None, scheduler=None, start_epoch_training=0, use_tqdm=True, optimizer_kwargs={},
scheduler_kwargs={}, loader_kwargs={}):
""" Implements the algorithm for the training of a neural network based on regressing the values of the parameters
on the corresponding simulation outcomes; it is effectively a training with a mean squared error loss. Needs to be
provided with a set of samples and the corresponding parameters that generated the samples. Note that in this case
the network has to have same output size as the number of parameters, as the learned summary statistic will have the
same dimension as the parameter."""
# If the dataset is small enough, we can speed up training by loading all on the GPU at beginning, by using
# load_all_data_GPU=True. It may crash if the dataset is too large. Note that in some cases using only CPU may still
# be quicker.
# Do all the setups
dataset_FP_nn = ParameterSimulationPairs(samples, target, "cuda" if cuda and load_all_data_GPU else "cpu")
if (samples_val is None) != (target_val is None):
raise RuntimeError("val samples and similarity set need to be provided together.")
if samples_val is not None:
dataset_FP_nn_val = ParameterSimulationPairs(samples_val, target_val,
"cuda" if cuda and load_all_data_GPU else "cpu")
if cuda:
if load_all_data_GPU:
loader_kwargs_2 = {'num_workers': 0, 'pin_memory': False}
else:
loader_kwargs_2 = {'num_workers': 1, 'pin_memory': True}
else:
loader_kwargs_2 = {}
    # Copy before merging so the shared mutable default dict is never mutated
    # across calls.
    loader_kwargs = dict(loader_kwargs, **loader_kwargs_2)
data_loader_FP_nn = torch.utils.data.DataLoader(dataset_FP_nn, batch_size=batch_size, shuffle=True, **loader_kwargs)
if samples_val is not None:
data_loader_FP_nn_val = torch.utils.data.DataLoader(dataset_FP_nn_val, batch_size=batch_size,
shuffle=False, **loader_kwargs)
else:
data_loader_FP_nn_val = None
if cuda:
embedding_net.cuda()
loss_fn = nn.MSELoss(reduction="mean")
if optimizer is None: # default value
optimizer = optim.Adam(embedding_net.parameters(), lr=lr, **optimizer_kwargs)
else:
optimizer = optimizer(embedding_net.parameters(), lr=lr, **optimizer_kwargs)
if scheduler is None: # default value, i.e. a dummy scheduler
scheduler = lr_scheduler.StepLR(optimizer, 8, gamma=1, last_epoch=-1)
else:
scheduler = scheduler(optimizer, **scheduler_kwargs)
# now train:
fit(data_loader_FP_nn, embedding_net, loss_fn, optimizer, scheduler, n_epochs, cuda,
val_loader=data_loader_FP_nn_val,
early_stopping=early_stopping, start_epoch_early_stopping=start_epoch_early_stopping,
epochs_early_stopping_interval=epochs_early_stopping_interval, start_epoch_training=start_epoch_training, use_tqdm=use_tqdm)
return embedding_net
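# A minimal usage sketch (editor's addition, not from the abcpy docs); the
# network architecture and data shapes are illustrative assumptions, and it
# presumes the Similarities dataset accepts numpy arrays of this form:
#
#   import numpy as np
#   samples = np.random.randn(100, 10)               # 100 simulations, 10 features
#   similarity_set = np.random.rand(100, 100) > 0.5  # boolean similarity matrix
#   net = nn.Sequential(nn.Linear(10, 32), nn.ReLU(), nn.Linear(32, 5))
#   net = contrastive_training(samples, similarity_set, net,
#                              cuda=torch.cuda.is_available(),
#                              batch_size=16, n_epochs=10)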
| 46.926407
| 132
| 0.692066
|
4a17b14345dad7f81bf976984020256402740f5e
| 851
|
py
|
Python
|
CMPE451/practice_app/recommend/tests.py
|
oztasoi/bachelor-projects
|
a3e3a39efc9fed09db7290227848552f9604befa
|
[
"MIT"
] | null | null | null |
CMPE451/practice_app/recommend/tests.py
|
oztasoi/bachelor-projects
|
a3e3a39efc9fed09db7290227848552f9604befa
|
[
"MIT"
] | null | null | null |
CMPE451/practice_app/recommend/tests.py
|
oztasoi/bachelor-projects
|
a3e3a39efc9fed09db7290227848552f9604befa
|
[
"MIT"
] | null | null | null |
from django.contrib.auth.models import User
from django.test import TestCase
from .models import Customer
from .models import Product
from .models import Similarity
from .models import WillBuy
# Create your tests here.
class CustomerTest(TestCase):
def create_customer(self, user, email):
return Customer.objects.create(user=user, email=email)
def setUp(self):
user = User.objects.create(username='menekse')
email = 'menekse@mail.com'
self.create_customer(user, email)
user = User.objects.create(username='altan')
email = 'altan@mail.com'
self.create_customer(user, email)
    def test_customer_creation(self):
        # Django's TestCase already runs setUp() before each test; calling it
        # again here would recreate the same users and violate the unique
        # username constraint.
        menekse = Customer.objects.get(email='menekse@mail.com')
self.assertTrue(isinstance(menekse, Customer))
self.assertEqual(menekse.__unicode__(), menekse.user.username)
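# Run with the standard Django test runner (not part of the original file):
#   python manage.py test recommend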
| 32.730769
| 70
| 0.694477
|