hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f73a592611389f9da17baf034519e5d46934b979 | 166 | py | Python | postproc/ml_tools/pod.py | J-Massey/postproc | 4552b0ad79072f5d217cf62632c08617ea3d2d82 | [
"MIT"
] | null | null | null | postproc/ml_tools/pod.py | J-Massey/postproc | 4552b0ad79072f5d217cf62632c08617ea3d2d82 | [
"MIT"
] | null | null | null | postproc/ml_tools/pod.py | J-Massey/postproc | 4552b0ad79072f5d217cf62632c08617ea3d2d82 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
@author: Jonathan Massey
@description: Compute the PCA of a flow field
@contact: jmom1n15@soton.ac.uk
"""
from torch import pca_lowrank
| 16.6 | 45 | 0.698795 |
from torch import pca_lowrank
| true | true |
f73a5ac8c2568549eccf9a0d3512b7f99bce3dc8 | 1,471 | py | Python | filter_plugins/passwd.py | liweitianux/ansible-dfly-vps | 46c40aa13c9b5e4174ea6a0ff2b6ebe6afbe1e0c | [
"MIT"
] | 7 | 2018-03-16T03:24:58.000Z | 2022-01-12T02:40:36.000Z | filter_plugins/passwd.py | liweitianux/ansible-dfly-vps | 46c40aa13c9b5e4174ea6a0ff2b6ebe6afbe1e0c | [
"MIT"
] | null | null | null | filter_plugins/passwd.py | liweitianux/ansible-dfly-vps | 46c40aa13c9b5e4174ea6a0ff2b6ebe6afbe1e0c | [
"MIT"
] | null | null | null | # Copyright (c) 2018 Aaron LI <aly@aaronly.me>
# MIT License
"""
Custom Ansible template filters to crypt/hash passwords.
"""
import os
import base64
import crypt
import hashlib
def cryptpass(p):
    """Hash the plaintext password *p* using the salted SHA512-crypt
    ("$6$") scheme understood by Linux and the BSDs.

    NOTE: relies on the stdlib ``crypt`` module, which is POSIX-only,
    deprecated since Python 3.11 and removed in 3.13.
    """
    saltlen = 16
    # 16 random bytes -> base64 -> first 16 characters as the salt text.
    rand = base64.b64encode(os.urandom(saltlen))
    salt = rand[:saltlen].decode("utf-8")
    return crypt.crypt(p, "$6$" + salt)
def dovecot_makepass(p):
    """Hash *p* for Dovecot using its SHA512-CRYPT scheme.

    Mirrors ``doveadm pw -s SHA512-CRYPT``; the result looks like
    ``{SHA512-CRYPT}$6$<salt>$<hash>``.
    """
    return "{%s}%s" % ("SHA512-CRYPT", cryptpass(p))
def znc_makepass(p, method="sha256", saltlen=20):
    """Hash *p* the way ``znc --makepass`` does.

    Output format: ``<method>#<hexdigest>#<salt>``.  *method* must name a
    ``hashlib`` constructor (e.g. ``sha256``); the salt is *saltlen*
    characters taken from base64-encoded OS entropy, so it never contains
    the ``#`` separator.
    """
    salt = base64.b64encode(os.urandom(saltlen))[:saltlen].decode("utf-8")
    digest = getattr(hashlib, method)((p + salt).encode("utf-8")).hexdigest()
    return "#".join([method, digest, salt])
class FilterModule(object):
    """Expose the password helpers above as Ansible template filters."""

    def filters(self):
        # Filter names match the function names one-to-one.
        filter_map = {}
        for func in (cryptpass, dovecot_makepass, znc_makepass):
            filter_map[func.__name__] = func
        return filter_map
| 23.725806 | 65 | 0.636302 |
import os
import base64
import crypt
import hashlib
def cryptpass(p):
hashtype = "$6$"
saltlen = 16
salt = os.urandom(saltlen)
salt = base64.b64encode(salt)[:saltlen].decode("utf-8")
return crypt.crypt(p, hashtype+salt)
def dovecot_makepass(p):
scheme = "SHA512-CRYPT"
cp = cryptpass(p)
return "{%s}%s" % (scheme, cp)
def znc_makepass(p, method="sha256", saltlen=20):
salt = os.urandom(saltlen)
salt = base64.b64encode(salt)[:saltlen].decode("utf-8")
s = p + salt
h = getattr(hashlib, method)(s.encode("utf-8"))
return "%s#%s#%s" % (method, h.hexdigest(), salt)
class FilterModule(object):
def filters(self):
return {
"cryptpass": cryptpass,
"dovecot_makepass": dovecot_makepass,
"znc_makepass": znc_makepass,
}
| true | true |
f73a5b13697bc3dd4b6ea921813fea8fb784758a | 2,070 | py | Python | magpie/ui/management/__init__.py | Ouranosinc/Magpie | 24c4ec2d3f98eb71fba7b199281882dbc62b6693 | [
"Apache-2.0"
] | null | null | null | magpie/ui/management/__init__.py | Ouranosinc/Magpie | 24c4ec2d3f98eb71fba7b199281882dbc62b6693 | [
"Apache-2.0"
] | 366 | 2017-10-04T14:58:52.000Z | 2022-03-16T21:44:07.000Z | magpie/ui/management/__init__.py | Ouranosinc/Magpie | 24c4ec2d3f98eb71fba7b199281882dbc62b6693 | [
"Apache-2.0"
] | 3 | 2018-09-05T13:46:04.000Z | 2021-02-05T14:52:15.000Z | from pyramid.settings import asbool
from magpie.constants import get_constant
from magpie.utils import get_logger
LOGGER = get_logger(__name__)
def includeme(config):
    """Register the management UI routes/views on the Pyramid configurator."""
    from magpie.ui.management.views import ManagementViews

    LOGGER.info("Adding UI management...")
    # (route name, URL pattern) pairs; registration order is preserved
    # because Pyramid matches routes in the order they were added.
    ui_routes = [
        (ManagementViews.view_groups.__name__, "/ui/groups"),
        (ManagementViews.add_group.__name__, "/ui/groups/add"),
        (ManagementViews.edit_group.__name__, "/ui/groups/{group_name}/{cur_svc_type}"),
        (ManagementViews.view_users.__name__, "/ui/users"),
        (ManagementViews.add_user.__name__, "/ui/users/add"),
        (ManagementViews.edit_user.__name__, "/ui/users/{user_name}/{cur_svc_type}"),
        (ManagementViews.view_services.__name__, "/ui/services/{cur_svc_type}"),
        (ManagementViews.add_service.__name__, "/ui/services/{cur_svc_type}/add"),
        (ManagementViews.edit_service.__name__, "/ui/services/{cur_svc_type}/{service_name}"),
        (ManagementViews.add_resource.__name__, "/ui/services/{cur_svc_type}/{service_name}/add/{resource_id}"),
    ]
    for route_name, pattern in ui_routes:
        config.add_route(route_name, pattern)

    # The pending-registration page is only exposed when the feature flag is on.
    register_user_enabled = asbool(get_constant("MAGPIE_USER_REGISTRATION_ENABLED", settings_container=config,
                                                default_value=False, print_missing=True,
                                                raise_missing=False, raise_not_set=False))
    if register_user_enabled:
        LOGGER.info("Adding UI pending user registration detail page.")
        config.add_route("view_pending_user", "/ui/register/users/{user_name}")
        config.add_view(ManagementViews, attr="view_pending_user", route_name="view_pending_user",
                        renderer="magpie.ui.management:templates/view_pending_user.mako")
    config.scan()
| 48.139535 | 110 | 0.658937 | from pyramid.settings import asbool
from magpie.constants import get_constant
from magpie.utils import get_logger
LOGGER = get_logger(__name__)
def includeme(config):
from magpie.ui.management.views import ManagementViews
LOGGER.info("Adding UI management...")
config.add_route(ManagementViews.view_groups.__name__,
"/ui/groups")
config.add_route(ManagementViews.add_group.__name__,
"/ui/groups/add")
config.add_route(ManagementViews.edit_group.__name__,
"/ui/groups/{group_name}/{cur_svc_type}")
config.add_route(ManagementViews.view_users.__name__,
"/ui/users")
config.add_route(ManagementViews.add_user.__name__,
"/ui/users/add")
config.add_route(ManagementViews.edit_user.__name__,
"/ui/users/{user_name}/{cur_svc_type}")
config.add_route(ManagementViews.view_services.__name__,
"/ui/services/{cur_svc_type}")
config.add_route(ManagementViews.add_service.__name__,
"/ui/services/{cur_svc_type}/add")
config.add_route(ManagementViews.edit_service.__name__,
"/ui/services/{cur_svc_type}/{service_name}")
config.add_route(ManagementViews.add_resource.__name__,
"/ui/services/{cur_svc_type}/{service_name}/add/{resource_id}")
register_user_enabled = asbool(get_constant("MAGPIE_USER_REGISTRATION_ENABLED", settings_container=config,
default_value=False, print_missing=True,
raise_missing=False, raise_not_set=False))
if register_user_enabled:
LOGGER.info("Adding UI pending user registration detail page.")
config.add_route("view_pending_user", "/ui/register/users/{user_name}")
config.add_view(ManagementViews, attr="view_pending_user", route_name="view_pending_user",
renderer="magpie.ui.management:templates/view_pending_user.mako")
config.scan()
| true | true |
f73a5b4c0823bf2f427d205d1596d0ebdcaea6a8 | 155 | py | Python | report/urls.py | Aimsucks/blue_donut | 4c769e8a9923a120ff5c8b28221b79aca370121d | [
"MIT"
] | 5 | 2020-01-16T20:04:26.000Z | 2020-03-11T23:33:49.000Z | report/urls.py | Aimsucks/blue-donut | 4c769e8a9923a120ff5c8b28221b79aca370121d | [
"MIT"
] | 2 | 2019-12-19T21:07:27.000Z | 2020-02-02T13:51:04.000Z | report/urls.py | Aimsucks/blue_donut | 4c769e8a9923a120ff5c8b28221b79aca370121d | [
"MIT"
] | 9 | 2020-04-12T16:24:53.000Z | 2020-10-30T02:04:57.000Z | from django.urls import path
import report.views
# URL namespace so templates can reverse with {% url 'report:report' %}.
app_name = 'report'
urlpatterns = [
    # The app root serves the report view.
    path('', report.views.ReportView.as_view(), name='report'),
]
| 14.090909 | 63 | 0.696774 | from django.urls import path
import report.views
app_name = 'report'
urlpatterns = [
path('', report.views.ReportView.as_view(), name='report'),
]
| true | true |
f73a5c47fa6697de6313de0e177e490c0e35f948 | 4,334 | py | Python | tests/integration/test_MatchApi.py | physicsninja/Riot-Watcher | 2cd51c2cade112d018dd515f282e248940429ea0 | [
"MIT"
] | null | null | null | tests/integration/test_MatchApi.py | physicsninja/Riot-Watcher | 2cd51c2cade112d018dd515f282e248940429ea0 | [
"MIT"
] | null | null | null | tests/integration/test_MatchApi.py | physicsninja/Riot-Watcher | 2cd51c2cade112d018dd515f282e248940429ea0 | [
"MIT"
] | null | null | null |
import sys
import pytest
if sys.version_info > (3, 0):
import unittest.mock as mock
else:
import mock
class MatchApiContext(object):
    """Wrapper around the shared mock context that cans the HTTP response.

    ``get`` on the wrapped mock is pre-configured so that ``.json()`` on its
    return value yields :attr:`expected_response`; every other attribute
    access falls through to the wrapped mock untouched.
    """

    def __init__(self, mock_context):
        self._mock_context = mock_context
        self._expected_response = {'has_value': 'yes', }
        canned_response = mock.MagicMock()
        canned_response.json.return_value = self._expected_response
        mock_context.get.return_value = canned_response

    @property
    def expected_response(self):
        """Payload every mocked HTTP call will report."""
        return self._expected_response

    def __getattr__(self, item):
        # Delegate anything not defined here to the wrapped mock.
        return getattr(self._mock_context, item)
@pytest.fixture
def match_api_ctx(mock_context):
    """Wrap the shared ``mock_context`` fixture so tests get canned responses.

    NOTE(review): ``mock_context`` is presumably defined in a conftest.py not
    visible here -- confirm.
    """
    return MatchApiContext(mock_context)
@pytest.mark.integration
@pytest.mark.parametrize('region', ['br1', 'eun1', 'euw1', 'jp1', 'kr', 'la1', 'la2', 'na', 'na1', 'oc1', 'tr1', 'ru', 'pbe1', ])
class TestMatchApi(object):
    """Verify that the match API builds the expected Riot URLs and params.

    Parametrized over every platform routing value.  The HTTP layer is
    mocked (see ``match_api_ctx``), so these tests only check request
    construction, not real network behavior.
    """
    @pytest.mark.parametrize('match_id', [12345, 54321, 2, 222222222222222222222])
    def test_by_id(self, match_api_ctx, region, match_id):
        """GET /lol/match/v3/matches/{match_id} with the API-key header."""
        actual_response = match_api_ctx.watcher.match.by_id(region, match_id)

        assert match_api_ctx.expected_response == actual_response
        match_api_ctx.get.assert_called_once_with(
            'https://{region}.api.riotgames.com/lol/match/v3/matches/{match_id}'.format(
                region=region,
                match_id=match_id,
            ),
            params={},
            headers={'X-Riot-Token': match_api_ctx.api_key},
        )

    @pytest.mark.parametrize('account_id', [12345, 3333333333333333333])
    @pytest.mark.parametrize('queue', [None, (5, 4, 3)])
    @pytest.mark.parametrize(
        'begin_end',
        [
            ((None, None), (None, None)),
            ((1234, 4321), (None, None)),
            ((None, None), (1234, 4321)),
        ]
    )
    @pytest.mark.parametrize('season', [None, (1, 11)])
    @pytest.mark.parametrize('champion', [None, (90, 43, 12)])
    def test_matchlist_by_account(
        self,
        match_api_ctx,
        region,
        account_id,
        queue,
        begin_end,
        season,
        champion,
    ):
        """Optional filters must appear as camelCase query params only when set."""
        # begin_end packs (time-window, index-window) pairs so that time and
        # index filters are never combined in the same case.
        begin_end_time, begin_end_index = begin_end
        begin_time, end_time = begin_end_time
        begin_index, end_index = begin_end_index

        actual_response = match_api_ctx.watcher.match.matchlist_by_account(
            region,
            account_id,
            queue=queue,
            begin_time=begin_time,
            end_time=end_time,
            begin_index=begin_index,
            end_index=end_index,
            season=season,
            champion=champion,
        )

        assert match_api_ctx.expected_response == actual_response

        # Only non-None filters should be forwarded, renamed to camelCase.
        expected_params = {}
        if queue is not None:
            expected_params['queue'] = queue
        if begin_time is not None:
            expected_params['beginTime'] = begin_time
        if end_time is not None:
            expected_params['endTime'] = end_time
        if begin_index is not None:
            expected_params['beginIndex'] = begin_index
        if end_index is not None:
            expected_params['endIndex'] = end_index
        if season is not None:
            expected_params['season'] = season
        if champion is not None:
            expected_params['champion'] = champion

        match_api_ctx.get.assert_called_once_with(
            'https://{region}.api.riotgames.com/lol/match/v3/matchlists/by-account/{account_id}'.format(
                region=region,
                account_id=account_id,
            ),
            params=expected_params,
            headers={'X-Riot-Token': match_api_ctx.api_key},
        )

    @pytest.mark.parametrize('match_id', [0, 54321, 3232323232323223])
    def test_timeline_by_match(self, match_api_ctx, region, match_id):
        """GET /lol/match/v3/timelines/by-match/{match_id}."""
        actual_response = match_api_ctx.watcher.match.timeline_by_match(
            region,
            match_id,
        )

        assert match_api_ctx.expected_response == actual_response
        match_api_ctx.get.assert_called_once_with(
            'https://{region}.api.riotgames.com/lol/match/v3/timelines/by-match/{match_id}'.format(
                region=region,
                match_id=match_id,
            ),
            params={},
            headers={'X-Riot-Token': match_api_ctx.api_key},
        )
| 32.586466 | 129 | 0.608214 |
import sys
import pytest
if sys.version_info > (3, 0):
import unittest.mock as mock
else:
import mock
class MatchApiContext(object):
def __init__(self, mock_context):
self._mock_context = mock_context
self._expected_response = {'has_value': 'yes', }
mock_response = mock.MagicMock()
mock_response.json.return_value = self._expected_response
mock_context.get.return_value = mock_response
@property
def expected_response(self):
return self._expected_response
def __getattr__(self, item):
return getattr(self._mock_context, item)
@pytest.fixture
def match_api_ctx(mock_context):
return MatchApiContext(mock_context)
@pytest.mark.integration
@pytest.mark.parametrize('region', ['br1', 'eun1', 'euw1', 'jp1', 'kr', 'la1', 'la2', 'na', 'na1', 'oc1', 'tr1', 'ru', 'pbe1', ])
class TestMatchApi(object):
@pytest.mark.parametrize('match_id', [12345, 54321, 2, 222222222222222222222])
def test_by_id(self, match_api_ctx, region, match_id):
actual_response = match_api_ctx.watcher.match.by_id(region, match_id)
assert match_api_ctx.expected_response == actual_response
match_api_ctx.get.assert_called_once_with(
'https://{region}.api.riotgames.com/lol/match/v3/matches/{match_id}'.format(
region=region,
match_id=match_id,
),
params={},
headers={'X-Riot-Token': match_api_ctx.api_key},
)
@pytest.mark.parametrize('account_id', [12345, 3333333333333333333])
@pytest.mark.parametrize('queue', [None, (5, 4, 3)])
@pytest.mark.parametrize(
'begin_end',
[
((None, None), (None, None)),
((1234, 4321), (None, None)),
((None, None), (1234, 4321)),
]
)
@pytest.mark.parametrize('season', [None, (1, 11)])
@pytest.mark.parametrize('champion', [None, (90, 43, 12)])
def test_matchlist_by_account(
self,
match_api_ctx,
region,
account_id,
queue,
begin_end,
season,
champion,
):
begin_end_time, begin_end_index = begin_end
begin_time, end_time = begin_end_time
begin_index, end_index = begin_end_index
actual_response = match_api_ctx.watcher.match.matchlist_by_account(
region,
account_id,
queue=queue,
begin_time=begin_time,
end_time=end_time,
begin_index=begin_index,
end_index=end_index,
season=season,
champion=champion,
)
assert match_api_ctx.expected_response == actual_response
expected_params = {}
if queue is not None:
expected_params['queue'] = queue
if begin_time is not None:
expected_params['beginTime'] = begin_time
if end_time is not None:
expected_params['endTime'] = end_time
if begin_index is not None:
expected_params['beginIndex'] = begin_index
if end_index is not None:
expected_params['endIndex'] = end_index
if season is not None:
expected_params['season'] = season
if champion is not None:
expected_params['champion'] = champion
match_api_ctx.get.assert_called_once_with(
'https://{region}.api.riotgames.com/lol/match/v3/matchlists/by-account/{account_id}'.format(
region=region,
account_id=account_id,
),
params=expected_params,
headers={'X-Riot-Token': match_api_ctx.api_key},
)
@pytest.mark.parametrize('match_id', [0, 54321, 3232323232323223])
def test_timeline_by_match(self, match_api_ctx, region, match_id):
actual_response = match_api_ctx.watcher.match.timeline_by_match(
region,
match_id,
)
assert match_api_ctx.expected_response == actual_response
match_api_ctx.get.assert_called_once_with(
'https://{region}.api.riotgames.com/lol/match/v3/timelines/by-match/{match_id}'.format(
region=region,
match_id=match_id,
),
params={},
headers={'X-Riot-Token': match_api_ctx.api_key},
)
| true | true |
f73a5ca147bbcf803a45dddf79634a838fd02abf | 1,084 | py | Python | daily_exercise/680. Valid Palindrome II_medium.py | JunzhongLin/leetcode_practice | 47b2f5cc3c87de004ae21a94024e751b40b8f559 | [
"MIT"
] | null | null | null | daily_exercise/680. Valid Palindrome II_medium.py | JunzhongLin/leetcode_practice | 47b2f5cc3c87de004ae21a94024e751b40b8f559 | [
"MIT"
] | null | null | null | daily_exercise/680. Valid Palindrome II_medium.py | JunzhongLin/leetcode_practice | 47b2f5cc3c87de004ae21a94024e751b40b8f559 | [
"MIT"
] | null | null | null | class Solution:
def validPalindrome(self, s: str) -> bool:
deleted = 0
left, right = 0, len(s) - 1
while left < right:
if s[left] == s[right]:
left += 1
right -= 1
else:
deleted += 1
if deleted >= 2:
return False
checkpoint = (left, right)
left += 1
while left < right:
if s[left] == s[right]:
left += 1
right -= 1
if left >= right:
return True
else:
break
left, right = checkpoint
right -= 1
while left < right:
if s[left] == s[right]:
left += 1
right -= 1
if left >= right:
return True
else:
break
return True | 28.526316 | 46 | 0.306273 | class Solution:
def validPalindrome(self, s: str) -> bool:
deleted = 0
left, right = 0, len(s) - 1
while left < right:
if s[left] == s[right]:
left += 1
right -= 1
else:
deleted += 1
if deleted >= 2:
return False
checkpoint = (left, right)
left += 1
while left < right:
if s[left] == s[right]:
left += 1
right -= 1
if left >= right:
return True
else:
break
left, right = checkpoint
right -= 1
while left < right:
if s[left] == s[right]:
left += 1
right -= 1
if left >= right:
return True
else:
break
return True | true | true |
f73a5d3bf783e3bddeecfc74bc1f4d2ba34e5858 | 8,789 | py | Python | qa/rpc-tests/bip9-softforks.py | mirzaei-ce/core-koobit | 7d24e9c554fec6f3631691f456e9873bc4536fbd | [
"MIT"
] | null | null | null | qa/rpc-tests/bip9-softforks.py | mirzaei-ce/core-koobit | 7d24e9c554fec6f3631691f456e9873bc4536fbd | [
"MIT"
] | null | null | null | qa/rpc-tests/bip9-softforks.py | mirzaei-ce/core-koobit | 7d24e9c554fec6f3631691f456e9873bc4536fbd | [
"MIT"
] | null | null | null | #!/usr/bin/env python2
# Copyright (c) 2015 The Bitcoin Core developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from test_framework.test_framework import ComparisonTestFramework
from test_framework.util import *
from test_framework.mininode import CTransaction, NetworkThread
from test_framework.blocktools import create_coinbase, create_block
from test_framework.comptool import TestInstance, TestManager
from test_framework.script import CScript, OP_1NEGATE, OP_NOP3, OP_DROP
from binascii import hexlify, unhexlify
import cStringIO
import time
import itertools
'''
This test is meant to exercise BIP forks
Connect to a single node.
regtest lock-in with 108/144 block signalling
activation after a further 144 blocks
mine 2 block and save coinbases for later use
mine 141 blocks to transition from DEFINED to STARTED
mine 100 blocks signalling readiness and 44 not in order to fail to change state this period
mine 108 blocks signalling readiness and 36 blocks not signalling readiness (STARTED->LOCKED_IN)
mine a further 143 blocks (LOCKED_IN)
test that enforcement has not triggered (which triggers ACTIVE)
test that enforcement has triggered
'''
class BIP9SoftForksTest(ComparisonTestFramework):
    """Exercise BIP9 versionbits state transitions for the 'csv' deployment.

    Walks a regtest chain through DEFINED -> STARTED -> LOCKED_IN -> ACTIVE
    (108-of-144 block signalling, then a 144-block grace period) and checks
    that the new consensus rules are enforced only once ACTIVE.
    """

    def __init__(self):
        self.num_nodes = 1

    def setup_network(self):
        self.nodes = start_nodes(1, self.options.tmpdir,
                                 extra_args=[['-debug', '-whitelist=127.0.0.1']],
                                 binary=[self.options.testbinary])

    def run_test(self):
        self.test = TestManager(self, self.options.tmpdir)
        self.test.add_all_connections(self.nodes)
        NetworkThread().start() # Start up network handling in another thread
        self.test.run()

    def create_transaction(self, node, coinbase, to_address, amount):
        """Build an unsigned version-2 transaction spending *coinbase*."""
        from_txid = node.getblock(coinbase)['tx'][0]
        inputs = [{ "txid" : from_txid, "vout" : 0}]
        outputs = { to_address : amount }
        rawtx = node.createrawtransaction(inputs, outputs)
        tx = CTransaction()
        f = cStringIO.StringIO(unhexlify(rawtx))
        tx.deserialize(f)
        tx.nVersion = 2
        return tx

    def sign_transaction(self, node, tx):
        """Sign *tx* with the node's wallet and return the parsed result."""
        signresult = node.signrawtransaction(hexlify(tx.serialize()))
        tx = CTransaction()
        f = cStringIO.StringIO(unhexlify(signresult['hex']))
        tx.deserialize(f)
        return tx

    def generate_blocks(self, number, version, test_blocks=None):
        """Append *number* solved blocks with nVersion=*version* to
        *test_blocks* (a fresh list when omitted) and advance the tip.

        Fix: the original signature used a mutable default
        (``test_blocks=[]``), so successive no-argument calls kept
        accumulating and re-sending every previously generated block.
        """
        if test_blocks is None:
            test_blocks = []
        for i in xrange(number):
            block = create_block(self.tip, create_coinbase(self.height), self.last_block_time + 1)
            block.nVersion = version
            block.rehash()
            block.solve()
            test_blocks.append([block, True])
            self.last_block_time += 1
            self.tip = block.sha256
            self.height += 1
        return test_blocks

    def get_bip9_status(self, key):
        """Return the bip9_softforks entry whose 'id' equals *key*."""
        info = self.nodes[0].getblockchaininfo()
        for row in info['bip9_softforks']:
            if row['id'] == key:
                return row
        raise IndexError ('key:"%s" not found' % key)

    def test_BIP(self, bipName, activated_version, invalidate, invalidatePostSignature):
        # generate some coins for later
        self.coinbase_blocks = self.nodes[0].generate(2)
        self.height = 3 # height of the next block to build
        self.tip = int ("0x" + self.nodes[0].getbestblockhash() + "L", 0)
        self.nodeaddress = self.nodes[0].getnewaddress()
        self.last_block_time = int(time.time())

        assert_equal(self.get_bip9_status(bipName)['status'], 'defined')

        # Test 1
        # Advance from DEFINED to STARTED
        test_blocks = self.generate_blocks(141, 4)
        yield TestInstance(test_blocks, sync_every_block=False)

        assert_equal(self.get_bip9_status(bipName)['status'], 'started')

        # Test 2
        # Fail to achieve LOCKED_IN 100 out of 144 signal bit 1
        # using a variety of bits to simulate multiple parallel softforks
        test_blocks = self.generate_blocks(50, activated_version) # 0x20000001 (signalling ready)
        test_blocks = self.generate_blocks(20, 4, test_blocks) # 0x00000004 (signalling not)
        test_blocks = self.generate_blocks(50, activated_version, test_blocks) # 0x20000101 (signalling ready)
        test_blocks = self.generate_blocks(24, 4, test_blocks) # 0x20010000 (signalling not)
        yield TestInstance(test_blocks, sync_every_block=False)

        assert_equal(self.get_bip9_status(bipName)['status'], 'started')

        # Test 3
        # 108 out of 144 signal bit 1 to achieve LOCKED_IN
        # using a variety of bits to simulate multiple parallel softforks
        test_blocks = self.generate_blocks(58, activated_version) # 0x20000001 (signalling ready)
        test_blocks = self.generate_blocks(26, 4, test_blocks) # 0x00000004 (signalling not)
        test_blocks = self.generate_blocks(50, activated_version, test_blocks) # 0x20000101 (signalling ready)
        test_blocks = self.generate_blocks(10, 4, test_blocks) # 0x20010000 (signalling not)
        yield TestInstance(test_blocks, sync_every_block=False)

        assert_equal(self.get_bip9_status(bipName)['status'], 'locked_in')

        # Test 4
        # 143 more version 536870913 blocks (waiting period-1)
        test_blocks = self.generate_blocks(143, 4)
        yield TestInstance(test_blocks, sync_every_block=False)

        assert_equal(self.get_bip9_status(bipName)['status'], 'locked_in')

        # Test 5
        # Check that the new rule is enforced
        spendtx = self.create_transaction(self.nodes[0],
                self.coinbase_blocks[0], self.nodeaddress, 1.0)
        invalidate(spendtx)
        spendtx = self.sign_transaction(self.nodes[0], spendtx)
        spendtx.rehash()
        invalidatePostSignature(spendtx)
        spendtx.rehash()
        block = create_block(self.tip, create_coinbase(self.height), self.last_block_time + 1)
        block.nVersion = activated_version
        block.vtx.append(spendtx)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.rehash()
        block.solve()

        self.last_block_time += 1
        self.tip = block.sha256
        self.height += 1
        # This block is still valid: the deployment only becomes ACTIVE here.
        yield TestInstance([[block, True]])

        assert_equal(self.get_bip9_status(bipName)['status'], 'active')

        # Test 6
        # Check that the new sequence lock rules are enforced
        spendtx = self.create_transaction(self.nodes[0],
                self.coinbase_blocks[1], self.nodeaddress, 1.0)
        invalidate(spendtx)
        spendtx = self.sign_transaction(self.nodes[0], spendtx)
        spendtx.rehash()
        invalidatePostSignature(spendtx)
        spendtx.rehash()

        block = create_block(self.tip, create_coinbase(self.height), self.last_block_time + 1)
        block.nVersion = 5
        block.vtx.append(spendtx)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.rehash()
        block.solve()
        self.last_block_time += 1
        # Now ACTIVE: the invalidated transaction must make the block fail.
        yield TestInstance([[block, False]])

        # Restart all
        stop_nodes(self.nodes)
        wait_koobitds()
        shutil.rmtree(self.options.tmpdir)
        self.setup_chain()
        self.setup_network()
        self.test.clear_all_connections()
        self.test.add_all_connections(self.nodes)
        NetworkThread().start() # Start up network handling in another thread

    def get_tests(self):
        # Run the csv deployment three times, invalidating the spend in a
        # different way each time (536870913 == 0x20000001, bit 0 set).
        for test in itertools.chain(
                self.test_BIP('csv', 536870913, self.sequence_lock_invalidate, self.donothing),
                self.test_BIP('csv', 536870913, self.mtp_invalidate, self.donothing),
                self.test_BIP('csv', 536870913, self.donothing, self.csv_invalidate)
        ):
            yield test

    def donothing(self, tx):
        """No-op invalidator used to exercise only one invalidation path."""
        return

    def csv_invalidate(self, tx):
        '''Modify the signature in vin 0 of the tx to fail CSV
        Prepends -1 CSV DROP in the scriptSig itself.
        '''
        tx.vin[0].scriptSig = CScript([OP_1NEGATE, OP_NOP3, OP_DROP] +
                                      list(CScript(tx.vin[0].scriptSig)))

    def sequence_lock_invalidate(self, tx):
        '''Modify the nSequence to make it fails once sequence lock rule is activated (high timespan)
        '''
        tx.vin[0].nSequence = 0x00FFFFFF
        tx.nLockTime = 0

    def mtp_invalidate(self, tx):
        '''Modify the nLockTime to make it fails once MTP rule is activated
        '''
        # Disable Sequence lock, Activate nLockTime
        tx.vin[0].nSequence = 0x90FFFFFF
        tx.nLockTime = self.last_block_time
if __name__ == '__main__':
BIP9SoftForksTest().main() | 39.95 | 110 | 0.664126 |
from test_framework.test_framework import ComparisonTestFramework
from test_framework.util import *
from test_framework.mininode import CTransaction, NetworkThread
from test_framework.blocktools import create_coinbase, create_block
from test_framework.comptool import TestInstance, TestManager
from test_framework.script import CScript, OP_1NEGATE, OP_NOP3, OP_DROP
from binascii import hexlify, unhexlify
import cStringIO
import time
import itertools
class BIP9SoftForksTest(ComparisonTestFramework):
def __init__(self):
self.num_nodes = 1
def setup_network(self):
self.nodes = start_nodes(1, self.options.tmpdir,
extra_args=[['-debug', '-whitelist=127.0.0.1']],
binary=[self.options.testbinary])
def run_test(self):
self.test = TestManager(self, self.options.tmpdir)
self.test.add_all_connections(self.nodes)
NetworkThread().start()
self.test.run()
def create_transaction(self, node, coinbase, to_address, amount):
from_txid = node.getblock(coinbase)['tx'][0]
inputs = [{ "txid" : from_txid, "vout" : 0}]
outputs = { to_address : amount }
rawtx = node.createrawtransaction(inputs, outputs)
tx = CTransaction()
f = cStringIO.StringIO(unhexlify(rawtx))
tx.deserialize(f)
tx.nVersion = 2
return tx
def sign_transaction(self, node, tx):
signresult = node.signrawtransaction(hexlify(tx.serialize()))
tx = CTransaction()
f = cStringIO.StringIO(unhexlify(signresult['hex']))
tx.deserialize(f)
return tx
def generate_blocks(self, number, version, test_blocks = []):
for i in xrange(number):
block = create_block(self.tip, create_coinbase(self.height), self.last_block_time + 1)
block.nVersion = version
block.rehash()
block.solve()
test_blocks.append([block, True])
self.last_block_time += 1
self.tip = block.sha256
self.height += 1
return test_blocks
def get_bip9_status(self, key):
info = self.nodes[0].getblockchaininfo()
for row in info['bip9_softforks']:
if row['id'] == key:
return row
raise IndexError ('key:"%s" not found' % key)
def test_BIP(self, bipName, activated_version, invalidate, invalidatePostSignature):
self.coinbase_blocks = self.nodes[0].generate(2)
self.height = 3
self.tip = int ("0x" + self.nodes[0].getbestblockhash() + "L", 0)
self.nodeaddress = self.nodes[0].getnewaddress()
self.last_block_time = int(time.time())
assert_equal(self.get_bip9_status(bipName)['status'], 'defined')
test_blocks = self.generate_blocks(141, 4)
yield TestInstance(test_blocks, sync_every_block=False)
assert_equal(self.get_bip9_status(bipName)['status'], 'started')
test_blocks = self.generate_blocks(50, activated_version)
test_blocks = self.generate_blocks(20, 4, test_blocks)
test_blocks = self.generate_blocks(50, activated_version, test_blocks)
test_blocks = self.generate_blocks(24, 4, test_blocks)
yield TestInstance(test_blocks, sync_every_block=False)
assert_equal(self.get_bip9_status(bipName)['status'], 'started')
test_blocks = self.generate_blocks(58, activated_version)
test_blocks = self.generate_blocks(26, 4, test_blocks)
test_blocks = self.generate_blocks(50, activated_version, test_blocks)
test_blocks = self.generate_blocks(10, 4, test_blocks)
yield TestInstance(test_blocks, sync_every_block=False)
assert_equal(self.get_bip9_status(bipName)['status'], 'locked_in')
test_blocks = self.generate_blocks(143, 4)
yield TestInstance(test_blocks, sync_every_block=False)
assert_equal(self.get_bip9_status(bipName)['status'], 'locked_in')
spendtx = self.create_transaction(self.nodes[0],
self.coinbase_blocks[0], self.nodeaddress, 1.0)
invalidate(spendtx)
spendtx = self.sign_transaction(self.nodes[0], spendtx)
spendtx.rehash()
invalidatePostSignature(spendtx)
spendtx.rehash()
block = create_block(self.tip, create_coinbase(self.height), self.last_block_time + 1)
block.nVersion = activated_version
block.vtx.append(spendtx)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
self.last_block_time += 1
self.tip = block.sha256
self.height += 1
yield TestInstance([[block, True]])
assert_equal(self.get_bip9_status(bipName)['status'], 'active')
spendtx = self.create_transaction(self.nodes[0],
self.coinbase_blocks[1], self.nodeaddress, 1.0)
invalidate(spendtx)
spendtx = self.sign_transaction(self.nodes[0], spendtx)
spendtx.rehash()
invalidatePostSignature(spendtx)
spendtx.rehash()
block = create_block(self.tip, create_coinbase(self.height), self.last_block_time + 1)
block.nVersion = 5
block.vtx.append(spendtx)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
self.last_block_time += 1
yield TestInstance([[block, False]])
stop_nodes(self.nodes)
wait_koobitds()
shutil.rmtree(self.options.tmpdir)
self.setup_chain()
self.setup_network()
self.test.clear_all_connections()
self.test.add_all_connections(self.nodes)
NetworkThread().start()
def get_tests(self):
for test in itertools.chain(
self.test_BIP('csv', 536870913, self.sequence_lock_invalidate, self.donothing),
self.test_BIP('csv', 536870913, self.mtp_invalidate, self.donothing),
self.test_BIP('csv', 536870913, self.donothing, self.csv_invalidate)
):
yield test
def donothing(self, tx):
return
def csv_invalidate(self, tx):
tx.vin[0].scriptSig = CScript([OP_1NEGATE, OP_NOP3, OP_DROP] +
list(CScript(tx.vin[0].scriptSig)))
def sequence_lock_invalidate(self, tx):
tx.vin[0].nSequence = 0x00FFFFFF
tx.nLockTime = 0
def mtp_invalidate(self, tx):
tx.vin[0].nSequence = 0x90FFFFFF
tx.nLockTime = self.last_block_time
if __name__ == '__main__':
BIP9SoftForksTest().main() | true | true |
f73a5dd14d74340a9e453ad6b782940bfd727359 | 4,299 | py | Python | src/model/loss.py | dmitry-vorobiev/kaggle-deepfake-detection-challenge | d8b545e1944342ba25209f1f62d9ca70314ab73a | [
"Apache-2.0"
] | null | null | null | src/model/loss.py | dmitry-vorobiev/kaggle-deepfake-detection-challenge | d8b545e1944342ba25209f1f62d9ca70314ab73a | [
"Apache-2.0"
] | null | null | null | src/model/loss.py | dmitry-vorobiev/kaggle-deepfake-detection-challenge | d8b545e1944342ba25209f1f62d9ca70314ab73a | [
"Apache-2.0"
] | null | null | null | import torch
import torch.nn.functional as F
from torch import FloatTensor, LongTensor, Tensor
from typing import Dict, Tuple
from . import ModelOut
from .ops import act, ones, zeros, reshape_as
from torch import nn
Batch = Tuple[FloatTensor, LongTensor]
def activation_loss(x: Tensor, y: LongTensor) -> Tensor:
    """Activation loss over the latent code, split by class.

    Splits the batch into negatives (y == 0) and positives (y != 0) and,
    for each group, pushes the "own-class" activation toward 1 and the
    "other-class" activation toward 0.  The exact meaning of ``act`` is
    defined in ``.ops``.  Assumes y holds binary 0/1 labels — TODO confirm.
    """
    device = x.device
    pos_idx = y.nonzero().reshape(-1)
    neg_idx = (y - 1).nonzero().reshape(-1)
    neg_feats, pos_feats = x[neg_idx], x[pos_idx]
    num_neg, num_pos = neg_feats.size(0), pos_feats.size(0)
    act0_neg = act(neg_feats, zeros(num_neg, device))
    act1_neg = act(neg_feats, ones(num_neg, device))
    act1_pos = act(pos_feats, ones(num_pos, device))
    act0_pos = act(pos_feats, zeros(num_pos, device))
    loss_neg = (act0_neg - 1).abs() + act1_neg
    loss_pos = (act1_pos - 1).abs() + act0_pos
    return (loss_neg.sum() + loss_pos.sum()) / y.size(0)
def activation_loss_fixed_shape(x: Tensor, y: LongTensor) -> Tensor:
    """Masked variant of ``activation_loss`` that avoids data-dependent shapes.

    Uses 0/1 masks derived from ``y`` instead of index selection, so the
    computation shape is fixed regardless of the class balance.
    """
    batch_size = y.size(0)
    device = x.device
    act0 = act(x, zeros(batch_size, device))
    act1 = act(x, ones(batch_size, device))
    mask_pos = reshape_as(y, act0)
    mask_neg = 1 - mask_pos
    loss_neg = ((act0 - 1) * mask_neg).abs() + act1 * mask_neg
    loss_pos = ((act1 - 1) * mask_pos).abs() + act0 * mask_pos
    return (loss_neg.sum() + loss_pos.sum()) / batch_size
class ForensicTransferLoss(object):
    """Weighted sum of latent activation loss and L1 reconstruction loss."""

    def __init__(self, act_w: int, rec_w: int):
        # Relative weights for the activation and reconstruction terms.
        self.act_w = act_w
        self.rec_w = rec_w

    def __call__(self, model_outs: Tuple[FloatTensor, FloatTensor],
                 inputs: Batch) -> Dict[str, Tensor]:
        """Compute the combined loss from (latent, reconstruction) outputs."""
        latent, reconstruction = model_outs
        images, labels = inputs
        act_loss = activation_loss(latent, labels)
        rec_loss = F.l1_loss(reconstruction, images, reduction='mean')
        return {
            'loss': act_loss * self.act_w + rec_loss * self.rec_w,
            'act_loss': act_loss,
            'rec_loss': rec_loss,
        }

    @staticmethod
    def keys():
        # Names of the entries produced by __call__, in order.
        return ['loss', 'act_loss', 'rec_loss']
class TripleLoss(ForensicTransferLoss):
    """ForensicTransfer loss plus a weighted BCE term on the classifier head."""

    def __init__(self, act_w: int, rec_w: int, bce_w: int):
        super(TripleLoss, self).__init__(act_w, rec_w)
        self.bce_w = bce_w

    def __call__(self, model_outs: ModelOut, inputs: Batch) -> Dict[str, Tensor]:
        """Extend the parent loss dict with a BCE-with-logits term."""
        latent, reconstruction, logits = model_outs
        _, labels = inputs
        losses = super().__call__((latent, reconstruction), inputs)
        bce_loss = F.binary_cross_entropy_with_logits(
            logits.squeeze(1), labels.float())
        losses['loss'] = losses['loss'] + bce_loss * self.bce_w
        losses['bce_loss'] = bce_loss
        return losses

    @staticmethod
    def keys():
        return ['loss', 'act_loss', 'rec_loss', 'bce_loss']
class BCELoss(object):
    """Plain binary cross-entropy (with logits) on the classifier output."""

    def __call__(self, model_out: Tuple[Tensor, any], batch: Batch) -> Dict[str, Tensor]:
        """model_out[0] is the (N, 1) logit tensor; batch is (inputs, labels)."""
        logits = model_out[0]
        _, labels = batch
        bce_loss = F.binary_cross_entropy_with_logits(
            logits.squeeze(1), labels.float())
        return {'loss': bce_loss}

    @staticmethod
    def keys():
        return ['loss']
# https://github.com/fastai/course-v3/blob/master/nbs/dl2/exp/nb_10b.py
def reduce_loss(loss, reduction='mean'):
    """Apply standard reduction semantics: 'mean', 'sum', or pass-through."""
    if reduction == 'mean':
        return loss.mean()
    if reduction == 'sum':
        return loss.sum()
    return loss
# https://github.com/fastai/course-v3/blob/8faeb66c03fc6719c5a6cf4ef5befa79a424f838/nbs/dl2/exp/nb_09.py#L127
def lin_comb(v1, v2, beta): return beta*v1 + (1-beta)*v2
# https://github.com/fastai/course-v3/blob/master/nbs/dl2/exp/nb_10b.py
class LabelSmoothingCrossEntropy(nn.Module):
    """Cross entropy with label smoothing (ε mass spread uniformly).

    Returns ``ε * (uniform_loss / n_classes) + (1 - ε) * nll_loss``; with
    ε = 0 this is ordinary NLL on log-softmax outputs.
    """

    def __init__(self, ε: float = 0.1, reduction='mean'):
        super().__init__()
        self.ε = ε
        self.reduction = reduction

    def forward(self, output, target):
        n_classes = output.size()[-1]
        log_preds = F.log_softmax(output, dim=-1)
        # Uniform-target term: -sum(log p) per sample, then reduced.
        smooth_all = -log_preds.sum(dim=-1)
        if self.reduction == 'mean':
            smooth = smooth_all.mean()
        elif self.reduction == 'sum':
            smooth = smooth_all.sum()
        else:
            smooth = smooth_all
        nll = F.nll_loss(log_preds, target, reduction=self.reduction)
        # Blend: ε weight on the smoothed term, (1 - ε) on the true-label NLL.
        return self.ε * (smooth / n_classes) + (1 - self.ε) * nll
class SmoothBCELoss(object):
    """Label-smoothed two-class cross entropy over ``[1 - y_hat, y_hat]``.

    NOTE(review): the two-class scores are built as ``1 - y_hat`` and
    ``y_hat`` and then fed through log-softmax inside the criterion —
    confirm upstream that ``y_hat`` is scaled appropriately for this.
    """

    def __init__(self, eps=0.1):
        self.func = LabelSmoothingCrossEntropy(eps)

    def __call__(self, model_out: Tuple[Tensor, any], batch: Batch) -> Dict[str, Tensor]:
        score = model_out[0]
        _, labels = batch
        two_class = torch.cat([(1 - score), score], dim=1)
        return {'loss': self.func(two_class, labels)}

    @staticmethod
    def keys():
        return ['loss']
| 29.854167 | 109 | 0.612235 | import torch
import torch.nn.functional as F
from torch import FloatTensor, LongTensor, Tensor
from typing import Dict, Tuple
from . import ModelOut
from .ops import act, ones, zeros, reshape_as
from torch import nn
Batch = Tuple[FloatTensor, LongTensor]
def activation_loss(x: Tensor, y: LongTensor) -> Tensor:
device = x.device
pos = y.nonzero().reshape(-1)
neg = (y - 1).nonzero().reshape(-1)
x0, x1 = x[neg], x[pos]
n0, n1 = x0.size(0), x1.size(0)
a0_x0 = act(x0, zeros(n0, device))
a1_x0 = act(x0, ones(n0, device))
a1_x1 = act(x1, ones(n1, device))
a0_x1 = act(x1, zeros(n1, device))
neg_loss = (a0_x0 - 1).abs() + a1_x0
pos_loss = (a1_x1 - 1).abs() + a0_x1
return (neg_loss.sum() + pos_loss.sum()) / y.size(0)
def activation_loss_fixed_shape(x: Tensor, y: LongTensor) -> Tensor:
N = y.size(0)
device = x.device
a0 = act(x, zeros(N, device))
a1 = act(x, ones(N, device))
y1 = reshape_as(y, a0)
y0 = 1 - y1
neg_loss = ((a0 - 1) * y0).abs() + a1 * y0
pos_loss = ((a1 - 1) * y1).abs() + a0 * y1
return (neg_loss.sum() + pos_loss.sum()) / y.size(0)
class ForensicTransferLoss(object):
def __init__(self, act_w: int, rec_w: int):
self.act_w = act_w
self.rec_w = rec_w
def __call__(self, model_outs: Tuple[FloatTensor, FloatTensor],
inputs: Batch) -> Dict[str, Tensor]:
h, x_hat = model_outs
x, y = inputs
act_loss = activation_loss(h, y)
rec_loss = F.l1_loss(x_hat, x, reduction='mean')
total_loss = act_loss * self.act_w + rec_loss * self.rec_w
out = dict(
loss=total_loss,
act_loss=act_loss,
rec_loss=rec_loss)
return out
@staticmethod
def keys():
return ['loss', 'act_loss', 'rec_loss']
class TripleLoss(ForensicTransferLoss):
def __init__(self, act_w: int, rec_w: int, bce_w: int):
super(TripleLoss, self).__init__(act_w, rec_w)
self.bce_w = bce_w
def __call__(self, model_outs: ModelOut, inputs: Batch) -> Dict[str, Tensor]:
h, x_hat, y_hat = model_outs
x, y = inputs
out = super().__call__((h, x_hat), inputs)
bce_loss = F.binary_cross_entropy_with_logits(
y_hat.squeeze(1), y.float())
out['loss'] += bce_loss * self.bce_w
out['bce_loss'] = bce_loss
return out
@staticmethod
def keys():
return ['loss', 'act_loss', 'rec_loss', 'bce_loss']
class BCELoss(object):
def __call__(self, model_out: Tuple[Tensor, any], batch: Batch) -> Dict[str, Tensor]:
y_hat = model_out[0]
x, y = batch
bce_loss = F.binary_cross_entropy_with_logits(
y_hat.squeeze(1), y.float())
out = dict(loss=bce_loss)
return out
@staticmethod
def keys():
return ['loss']
def reduce_loss(loss, reduction='mean'):
return loss.mean() if reduction == 'mean' else loss.sum() if reduction == 'sum' else loss
lin_comb(v1, v2, beta): return beta*v1 + (1-beta)*v2
class LabelSmoothingCrossEntropy(nn.Module):
def __init__(self, ε: float = 0.1, reduction='mean'):
super().__init__()
self.ε = ε
self.reduction = reduction
def forward(self, output, target):
c = output.size()[-1]
log_preds = F.log_softmax(output, dim=-1)
loss = reduce_loss(-log_preds.sum(dim=-1), self.reduction)
nll = F.nll_loss(log_preds, target, reduction=self.reduction)
return lin_comb(loss/c, nll, self.ε)
class SmoothBCELoss(object):
def __init__(self, eps=0.1):
self.func = LabelSmoothingCrossEntropy(eps)
def __call__(self, model_out: Tuple[Tensor, any], batch: Batch) -> Dict[str, Tensor]:
y_hat = model_out[0]
x, y = batch
y_hat = torch.cat([(1 - y_hat), y_hat], dim=1)
bce_loss = self.func(y_hat, y)
out = dict(loss=bce_loss)
return out
@staticmethod
def keys():
return ['loss']
| true | true |
f73a5ded717718aafa2af0d5807b1df9f14493ab | 487 | py | Python | rdmo/tasks/migrations/0003_meta.py | Raspeanut/rdmo | 9f785010a499c372a2f8368ccf76d2ea4150adcb | [
"Apache-2.0"
] | 77 | 2016-08-09T11:40:20.000Z | 2022-03-06T11:03:26.000Z | rdmo/tasks/migrations/0003_meta.py | Raspeanut/rdmo | 9f785010a499c372a2f8368ccf76d2ea4150adcb | [
"Apache-2.0"
] | 377 | 2016-07-01T13:59:36.000Z | 2022-03-30T13:53:19.000Z | rdmo/tasks/migrations/0003_meta.py | Raspeanut/rdmo | 9f785010a499c372a2f8368ccf76d2ea4150adcb | [
"Apache-2.0"
] | 47 | 2016-06-23T11:32:19.000Z | 2022-03-01T11:34:37.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-07-26 15:01
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: make Task.conditions optional (blank=True)."""

    # Must run after the migration that introduced the conditions M2M.
    dependencies = [
        ('tasks', '0002_many_to_many_for_conditions'),
    ]

    operations = [
        # Re-declare Task.conditions as an optional many-to-many relation
        # to conditions.Condition.
        migrations.AlterField(
            model_name='task',
            name='conditions',
            field=models.ManyToManyField(blank=True, to='conditions.Condition'),
        ),
    ]
| 23.190476 | 80 | 0.634497 |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tasks', '0002_many_to_many_for_conditions'),
]
operations = [
migrations.AlterField(
model_name='task',
name='conditions',
field=models.ManyToManyField(blank=True, to='conditions.Condition'),
),
]
| true | true |
f73a5e1ac1c2a827569dfec324b74ae376952bd3 | 2,573 | py | Python | setup.py | marvin-manga/wtforms-alchemy | b80a2839f95cab7a60319c6237fd1b52025b76cc | [
"BSD-3-Clause"
] | null | null | null | setup.py | marvin-manga/wtforms-alchemy | b80a2839f95cab7a60319c6237fd1b52025b76cc | [
"BSD-3-Clause"
] | null | null | null | setup.py | marvin-manga/wtforms-alchemy | b80a2839f95cab7a60319c6237fd1b52025b76cc | [
"BSD-3-Clause"
] | null | null | null | """
WTForms-Alchemy
---------------
Generates WTForms forms from SQLAlchemy models.
"""
from setuptools import setup
import os
import re
import sys
# Absolute path of the directory containing this setup.py.
HERE = os.path.dirname(os.path.abspath(__file__))
# True when running under Python 3 (a couple of extras differ by major version).
PY3 = sys.version_info[0] == 3
def get_version():
    """Read ``__version__`` from wtforms_alchemy/__init__.py without importing it."""
    init_path = os.path.join(HERE, 'wtforms_alchemy', '__init__.py')
    with open(init_path) as init_file:
        source = init_file.read()
    match = re.search(r"^__version__ = '(.*?)'$", source, re.MULTILINE)
    return match.group(1)
# Optional feature groups, installable as setuptools extras,
# e.g. ``pip install WTForms-Alchemy[babel]``.
extras_require = {
    # Test tooling; the loop below folds every other extra into this list
    # so the full suite can exercise all optional features.
    'test': [
        'pytest>=2.3',
        'Pygments>=1.2',
        'Jinja2>=2.3',
        'docutils>=0.10',
        'flake8>=2.4.0',
        'flexmock>=0.9.7',
        'isort>=3.9.6',
        'natsort==3.5.6',
        'WTForms-Test>=0.1.1'
    ],
    'babel': ['Babel>=1.3'],
    'arrow': ['arrow>=0.3.4'],
    'phone': ['phonenumbers>=5.9.2'],
    'intervals': ['intervals>=0.2.0'],
    'password': ['passlib >= 1.6, < 2.0'],
    'color': ['colour>=0.0.4'],
    'i18n': ['SQLAlchemy-i18n >= 0.8.2'],
    # ipaddr is a Python 2 backport; Python 3 ships ipaddress in the stdlib.
    'ipaddress': ['ipaddr'] if not PY3 else [],
    'timezone': ['python-dateutil']
}
# Fold every optional dependency group into the testing requirements so the
# test environment covers all optional features.
for extra_name, extra_requirements in extras_require.items():
    if extra_name == 'test':
        continue
    extras_require['test'].extend(extra_requirements)
# Package metadata; long_description reuses this module's docstring.
setup(
    name='WTForms-Alchemy',
    version=get_version(),  # parsed from wtforms_alchemy/__init__.py
    url='https://github.com/kvesteri/wtforms-alchemy',
    license='BSD',
    author='Konsta Vesterinen',
    author_email='konsta@fastmonkeys.com',
    description='Generates WTForms forms from SQLAlchemy models.',
    long_description=__doc__,
    packages=['wtforms_alchemy'],
    zip_safe=False,
    include_package_data=True,
    platforms='any',
    # Hard runtime requirements; optional features live in extras_require.
    install_requires=[
        'SQLAlchemy>=1.0',
        'WTForms>=1.0.4',
        'WTForms-Components>=0.9.2',
        'SQLAlchemy-Utils>=0.32.6',
        'six>=1.4.1',
    ],
    extras_require=extras_require,
    classifiers=[
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
        'Topic :: Software Development :: Libraries :: Python Modules'
    ]
)
| 27.666667 | 70 | 0.586475 |
from setuptools import setup
import os
import re
import sys
HERE = os.path.dirname(os.path.abspath(__file__))
PY3 = sys.version_info[0] == 3
def get_version():
filename = os.path.join(HERE, 'wtforms_alchemy', '__init__.py')
with open(filename) as f:
contents = f.read()
pattern = r"^__version__ = '(.*?)'$"
return re.search(pattern, contents, re.MULTILINE).group(1)
extras_require = {
'test': [
'pytest>=2.3',
'Pygments>=1.2',
'Jinja2>=2.3',
'docutils>=0.10',
'flake8>=2.4.0',
'flexmock>=0.9.7',
'isort>=3.9.6',
'natsort==3.5.6',
'WTForms-Test>=0.1.1'
],
'babel': ['Babel>=1.3'],
'arrow': ['arrow>=0.3.4'],
'phone': ['phonenumbers>=5.9.2'],
'intervals': ['intervals>=0.2.0'],
'password': ['passlib >= 1.6, < 2.0'],
'color': ['colour>=0.0.4'],
'i18n': ['SQLAlchemy-i18n >= 0.8.2'],
'ipaddress': ['ipaddr'] if not PY3 else [],
'timezone': ['python-dateutil']
}
for name, requirements in extras_require.items():
if name != 'test':
extras_require['test'] += requirements
setup(
name='WTForms-Alchemy',
version=get_version(),
url='https://github.com/kvesteri/wtforms-alchemy',
license='BSD',
author='Konsta Vesterinen',
author_email='konsta@fastmonkeys.com',
description='Generates WTForms forms from SQLAlchemy models.',
long_description=__doc__,
packages=['wtforms_alchemy'],
zip_safe=False,
include_package_data=True,
platforms='any',
install_requires=[
'SQLAlchemy>=1.0',
'WTForms>=1.0.4',
'WTForms-Components>=0.9.2',
'SQLAlchemy-Utils>=0.32.6',
'six>=1.4.1',
],
extras_require=extras_require,
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
| true | true |
f73a5e66d774d6e8d3e14d76de95382d308e58bb | 3,464 | py | Python | common.py | colspan/hokkaido-opendata-extractor | e08eae8de02a0b421829556b6b82a893021674de | [
"MIT"
] | null | null | null | common.py | colspan/hokkaido-opendata-extractor | e08eae8de02a0b421829556b6b82a893021674de | [
"MIT"
] | null | null | null | common.py | colspan/hokkaido-opendata-extractor | e08eae8de02a0b421829556b6b82a893021674de | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import luigi
import requests
class downloader(luigi.Task):
    """Luigi task that downloads ``url`` into the local file ``filepath``.

    Parameters:
        filepath: destination path; doubles as the task's output target.
        url: HTTP(S) resource to fetch.
    """
    filepath = luigi.Parameter()
    url = luigi.Parameter()

    def output(self):
        # Binary target: luigi.format.Nop disables newline/encoding handling.
        return luigi.LocalTarget(self.filepath, format=luigi.format.Nop)

    def run(self):
        response = requests.get(self.url)
        # Bug fix: previously an HTTP error response (e.g. a 404 page body)
        # was silently written to the target as if it were the real payload.
        # Fail the task on any non-2xx status instead.
        response.raise_for_status()
        with self.output().open("wb") as f:
            f.write(response.content)
# https://github.com/dilshod/xlsx2csv
# https://github.com/hempalex/xls2csv
import external.xls2csv as xls2csv
import xlsx2csv
def excel2csv(infile, outfile, filetype, sheetid=None, sheetname=None):
    """Convert one worksheet of an Excel workbook to a CSV file.

    Args:
        infile: path to the source workbook.
        outfile: path of the CSV file to write.
        filetype: "xlsx" or "xls"; any other value is silently ignored.
        sheetid: sheet index to convert (indexing semantics are defined by
            the converter libraries -- TODO confirm whether 1-based).
        sheetname: sheet name; for xlsx it is resolved to an id when
            ``sheetid`` is None.
    """
    if filetype == "xlsx":
        parser = xlsx2csv.Xlsx2csv(infile)
        # Resolve a sheet name to its numeric id when only the name is given.
        if sheetid is None and sheetname is not None:
            sheetid = parser.getSheetIdByName(sheetname)
        parser.convert(outfile, sheetid=sheetid)
    elif filetype == "xls":
        # Legacy .xls path; cp932 (Shift-JIS) matches the Japanese source data.
        xls2csv.xls2csv(
            infile, outfile, sheetid=sheetid, sheetname=sheetname, encoding="cp932"
        )
import csv
import re
import param
def extractCommuneStat(infilepath, outfilepath, csv_title):
    """Extract per-commune statistics from a converted spreadsheet CSV.

    Locates the column holding commune names (the column matching the most
    entries of ``param.communes``), determines the rightmost non-empty data
    column, keeps only rows naming a commune, normalizes "-" runs to "0",
    derives header names from the cells above the first commune row, and
    writes the cleaned table to ``outfilepath``.

    Fixes over the original: the output file handle is now closed (it was
    leaked, risking unflushed data), a dead list-comprehension statement is
    removed, and the csv output is opened with ``newline=''`` as the csv
    module requires.
    """
    # Read the whole sheet into memory.
    rows = []
    col_nums = []
    row_num = 0
    with open(infilepath, "r") as f:
        reader = csv.reader(f)
        for row in reader:
            col_nums.append(len(row))
            rows.append(row)
            row_num += 1

    # Per column: commune-name matches, first matching row, empty-cell count.
    # Both ASCII and ideographic (U+3000) spaces are stripped before matching.
    commune_nums = []
    first_row_nums = []
    null_nums = []
    for col in range(col_nums[0]):
        commune_count = 0
        first_row_num = -1
        null_count = 0
        for i, row in enumerate(rows):
            text = row[col].replace(" ", "").replace(" ", "")
            if text == "":
                null_count += 1
            if text in param.communes:
                commune_count += 1
                if first_row_num == -1:
                    first_row_num = i
        commune_nums.append(commune_count)
        first_row_nums.append(first_row_num)
        null_nums.append(null_count)

    # Rightmost column that is not entirely empty bounds the data range.
    for i, x in enumerate(reversed([x != row_num for x in null_nums])):
        if x:
            end_col_num = len(null_nums) - i
            break
    # The commune-name column is the one with the most matches.
    begin_col_num = commune_nums.index(max(commune_nums))

    new_rows = []
    for row in rows:
        text = row[begin_col_num] = row[begin_col_num].replace(" ", "").replace(" ", "")
        if text not in param.communes:
            # Skip rows without a commune name (headers, totals, notes).
            continue
        # Normalize placeholder runs of '-' to literal zero.
        new_rows.append(
            [re.sub("^-+$", "0", x) for x in row[begin_col_num:end_col_num]]
        )

    # Build column names from the non-numeric cells above the first commune row.
    col_names = {}
    re_numeric = re.compile(r"-*[0-9]+(\.[0-9]+)*")
    for col_i in range(begin_col_num + 1, end_col_num):
        tmp = []
        for row_i, row in enumerate(rows):
            if row_i > first_row_nums[begin_col_num]:
                break
            if row[col_i] != "" and not re_numeric.match(row[col_i]):
                output_str = re.sub(" +", "", row[col_i].replace(" ", " "))
                tmp.append(output_str)
        # Prefix with the column index so names stay unique and ordered.
        col_names[col_i] = "{:02d}_{}".format(
            col_i, "".join(tmp).replace("北海道", "").replace("\n", "")
        )
    col_names[0] = csv_title

    # Write the result; newline='' per the csv module's file requirements.
    with open(outfilepath, "w", newline="") as out_f:
        writer = csv.writer(out_f)
        writer.writerow(col_names.values())
        writer.writerows(new_rows)
class SimpleTask(luigi.WrapperTask):
    """Wrapper task that reports complete only after a successful run.

    Luigi decides whether to (re)schedule a task via ``complete()``;
    flipping ``completed`` in ``on_success`` marks this task as done.
    """

    # Class-level default; on_success assigns an instance attribute that
    # shadows it, so instances do not affect each other.
    completed = False

    def complete(self):
        return self.completed

    def on_success(self):
        self.completed = True
| 29.355932 | 88 | 0.577656 |
import luigi
import requests
class downloader(luigi.Task):
filepath = luigi.Parameter()
url = luigi.Parameter()
def output(self):
return luigi.LocalTarget(self.filepath, format=luigi.format.Nop)
def run(self):
r = requests.get(self.url)
with self.output().open("wb") as f:
f.write(r.content)
import external.xls2csv as xls2csv
import xlsx2csv
def excel2csv(infile, outfile, filetype, sheetid=None, sheetname=None):
if filetype == "xlsx":
parser = xlsx2csv.Xlsx2csv(infile)
if sheetid is None and sheetname is not None:
sheetid = parser.getSheetIdByName(sheetname)
parser.convert(outfile, sheetid=sheetid)
elif filetype == "xls":
xls2csv.xls2csv(
infile, outfile, sheetid=sheetid, sheetname=sheetname, encoding="cp932"
)
import csv
import re
import param
def extractCommuneStat(infilepath, outfilepath, csv_title):
rows = []
col_nums = []
row_num = 0
with open(infilepath, "r") as f:
reader = csv.reader(f)
for row in reader:
col_nums.append(len(row))
rows.append(row)
row_num += 1
commune_nums = []
first_row_nums = []
null_nums = []
for col in range(col_nums[0]):
commune_count = 0
first_row_num = -1
null_count = 0
for i, row in enumerate(rows):
text = row[col].replace(" ", "").replace(" ", "")
if text == "":
null_count += 1
if text in param.communes:
commune_count += 1
if first_row_num == -1:
first_row_num = i
commune_nums.append(commune_count)
first_row_nums.append(first_row_num)
null_nums.append(null_count)
for i, x in enumerate(reversed([x != row_num for x in null_nums])):
if x:
end_col_num = len(null_nums) - i
break
begin_col_num = commune_nums.index(max(commune_nums))
[x != row_num for x in null_nums]
new_rows = []
for row in rows:
text = row[begin_col_num] = row[begin_col_num].replace(" ", "").replace(" ", "")
if not text in param.communes:
continue
new_rows.append(
[re.sub("^-+$", "0", x) for x in row[begin_col_num:end_col_num]]
)
col_names = {}
re_numeric = re.compile("-*[0-9]+(\.[0-9]+)*")
for col_i in range(begin_col_num + 1, end_col_num):
tmp = []
for row_i, row in enumerate(rows):
if row_i > first_row_nums[begin_col_num]:
break
if row[col_i] != "" and not re_numeric.match(row[col_i]):
output_str = re.sub(" +", "", row[col_i].replace(" ", " "))
tmp.append(output_str)
col_names[col_i] = "{:02d}_{}".format(
col_i, "".join(tmp).replace("北海道", "").replace("\n", "")
)
col_names[0] = csv_title
writer = csv.writer(open(outfilepath, "w"))
writer.writerow(col_names.values())
for row in new_rows:
writer.writerow(row)
class SimpleTask(luigi.WrapperTask):
completed = False
def complete(self):
return self.completed
def on_success(self):
self.completed = True
| true | true |
f73a5e6f9a94f4764b7e43763cbb76786aa2260c | 921 | py | Python | basic_test.py | lzhyu/GridWorld | 43669fb1dd01df1a94bc8671d4ee6a466f6f49d0 | [
"MIT"
] | null | null | null | basic_test.py | lzhyu/GridWorld | 43669fb1dd01df1a94bc8671d4ee6a466f6f49d0 | [
"MIT"
] | null | null | null | basic_test.py | lzhyu/GridWorld | 43669fb1dd01df1a94bc8671d4ee6a466f6f49d0 | [
"MIT"
] | 2 | 2021-03-21T06:05:04.000Z | 2021-09-22T06:51:55.000Z | # to run basic test for all envs
import inspect
from gridworld.utils.wrapper.wrappers import ImageInputWarpper
from gridworld.utils.test_util import *
import gridworld
import os
import importlib
from gridworld.envs.fourrooms import FourroomsBase
# Discover every env module under gridworld/envs and smoke-test each
# FourroomsBase subclass that can be constructed without arguments.
path = os.path.dirname(__file__)
envs_path = os.path.join(path, 'gridworld', 'envs')
envfiles = [f for f in os.listdir(envs_path)
            if os.path.isfile(os.path.join(envs_path, f))]
envfiles = [f.split('.')[0] for f in envfiles if f.endswith('.py')]
for f in envfiles:
    module = importlib.import_module('gridworld.envs.' + f)
    for name, obj in inspect.getmembers(module, inspect.isclass):
        try:
            candidate = obj()
        except Exception:
            # Class is not default-constructible (helper/abstract): skip it.
            continue
        if isinstance(candidate, FourroomsBase):
            # Bug fix: the original wrapped the checks in a bare
            # ``except Exception: pass``, silently swallowing real check
            # failures; now only construction errors are tolerated and
            # genuine check failures propagate.
            env = ImageInputWarpper(candidate)
            check_render(env)
            check_run(env)
            print(f"basic check for {name} finished")
| 31.758621 | 91 | 0.67101 |
import inspect
from gridworld.utils.wrapper.wrappers import ImageInputWarpper
from gridworld.utils.test_util import *
import gridworld
import os
import importlib
from gridworld.envs.fourrooms import FourroomsBase
path = os.path.dirname(__file__)
envs_path = os.path.join(path, 'gridworld', 'envs')
envfiles = [ f for f in os.listdir(envs_path) if os.path.isfile(os.path.join(envs_path,f))]
envfiles = [f.split('.')[0] for f in envfiles if f.endswith('.py')]
for f in envfiles:
module = importlib.import_module('gridworld.envs.'+ f )
for name, obj in inspect.getmembers(module, inspect.isclass):
try:
if isinstance(obj(), FourroomsBase):
env = ImageInputWarpper(obj())
check_render(env)
check_run(env)
print(f"basic check for {name} finished")
except Exception as e:
pass
| true | true |
f73a5f9b6f94a686b2f7b30de9d2ab26fb9aeb2a | 3,908 | py | Python | server/core/formgen/FormData.py | legionem/pdfgen | 0bd2b82e7834a78b6cb6066cfe6be98fc0dcba6e | [
"MIT"
] | 1 | 2020-08-23T22:14:27.000Z | 2020-08-23T22:14:27.000Z | server/core/formgen/FormData.py | legionem/pdfgen | 0bd2b82e7834a78b6cb6066cfe6be98fc0dcba6e | [
"MIT"
] | 1 | 2018-10-03T15:01:04.000Z | 2018-10-03T15:01:04.000Z | server/core/formgen/FormData.py | legionem/pdfgen | 0bd2b82e7834a78b6cb6066cfe6be98fc0dcba6e | [
"MIT"
] | null | null | null | # -*- coding: utf8 -*-
__author__ = 'Aleksandrov Oleg, 4231'
from PDFGenDAOPostgres import PDFGenDAOPostgres
from PDFGenDAOMySQL import PDFGenDAOMySQL
import settings
class FormData:
    """Collects the data needed to render one participant's PDF voting form.

    Pulls owner/premise details, the meeting's questions and the QR-code
    payloads from the database (PostgreSQL or MySQL, per ``settings.DB``).
    Fields default to underscore placeholders so a blank form can still be
    rendered when the user has no premise on record.
    """

    # Data-access object; the backend is chosen in __init__ from settings.DB.
    __dao = None
    # Question strings ("<number> <text>") for the meeting.
    __qs = []
    # Small per-question QR payloads: 's' + question id.
    __small_qr = []
    # QR payload format version.
    __version = "0.1"
    __id_user = "null"
    __id_owner = "null"
    __id_premise = "null"
    __id_meeting = "null"
    # Underscore runs below are blank-fill placeholders printed on the form.
    __fio = "______________________"
    __phoneNumber = "______________________"
    __city = '__________'
    __street = '___________'
    __houseNumb = '_____'
    __apartment = '_______'
    __form = '_____________'
    __share = '____________'
    __formDate = '_________'
    __propertyS = '___________'
    __css = ''

    def __init__(self, id_user, id_meeting):
        """Load the questions and, if the user owns a premise, title data."""
        # Choose the DAO implementation for the configured database backend.
        if settings.DB == "mysql":
            self.__dao = PDFGenDAOMySQL()
        else:
            self.__dao = PDFGenDAOPostgres()
        # Reset per-instance state (the class-level defaults are shared lists).
        self.__small_qr = []
        self.__qs = []
        # Fetch the meeting's questions and their small-QR payloads.
        self.__id_meeting = str(id_meeting)
        self.__id_user = str(id_user)
        qs_small_qr = self.__dao.get_question(id_meeting)
        for value in qs_small_qr:
            # value fields: (question number, question text, question id)
            # -- TODO confirm against the DAO's get_question result shape.
            self.__small_qr.append('s' + str(value[2]))
            self.__qs.append(str(value[0]) + " " + value[1])
        # Only users with a premise on record get real title data.
        if str(self.__dao.check_premise(self.__id_user)[0][0]) != 'None':
            result = self.__dao.get_title(id_meeting, id_user)
            self.__fio = result[0][2] + " " + result[0][0] + " " + result[0][1]
            self.__city = result[0][3]
            self.__street = result[0][4]
            self.__houseNumb = result[0][5]
            self.__apartment = str(result[0][6])
            self.__form = str(result[0][8])
            # Ownership share as a percentage of the building total.
            self.__share = str(round(result[0][9] * 100 / result[0][10], 2)) + '%'
            self.__formDate = str(result[0][11])
            self.__propertyS = str(result[0][12])
            self.__id_premise = str(result[0][13])
            self.__id_owner = str(result[0][14])
        self.__css = self.__dao.get_css(id_meeting)

    def get_date(self):
        """Return the field values used to fill the printed form."""
        return {
            "fio": self.__fio,
            "city": self.__city,
            "street": self.__street,
            "houseNumb": self.__houseNumb,
            "apartment": self.__apartment,
            "phoneNumber": self.__phoneNumber,
            "formSeries": self.__form,
            "formDateOfIssue": self.__formDate,
            "propertyS": self.__propertyS,
            "share": self.__share
        }

    # Big-QR payload layout (translated from the original comment):
    # version | 0 or 1 | id_user | id_owner | id_premise | id_meeting |
    # page count | current page number
    # (only the first five fields are emitted by __get_big_qr_code_date).
    def get_big_qr_code_date(self):
        """Payload for the big QR code with the 'b0' page marker."""
        return 'b0|' + self.__get_big_qr_code_date()

    def get_big_qr_code_date2(self):
        """Payload for the big QR code with the 'b1' page marker."""
        return 'b1|' + self.__get_big_qr_code_date()

    def __get_big_qr_code_date(self):
        # Fixed-width (10-char, space-padded) fields joined by '|'.
        return self.__version.ljust(10, ' ') + '|' \
            + self.__id_user.ljust(10, ' ') + '|' \
            + self.__id_owner.ljust(10, ' ') + '|' \
            + self.__id_premise.ljust(10, ' ') + '|' \
            + self.__id_meeting.ljust(10, ' ')

    def get_questions(self):
        """Return the meeting's question strings."""
        return self.__qs

    def get_small_qr_code_date(self):
        """Return the per-question small-QR payloads."""
        return self.__small_qr

    def get_css(self):
        """Return the meeting's stylesheet (first element of the DAO result)."""
        return self.__css[0]

    def end(self):
        """Reset all fields back to their blank-form defaults."""
        self.__qs = []
        self.__small_qr = []
        self.__version = "0.1"
        self.__id_user = "null"
        self.__id_owner = "null"
        self.__id_premise = "null"
        self.__id_meeting = "null"
        self.__fio = "______________________"
        self.__phoneNumber = "______________________"
        self.__city = '__________'
        self.__street = '___________'
        self.__houseNumb = '_____'
        self.__apartment = '_______'
        self.__form = '_____________'
        self.__share = '____________'
        self.__formDate = '_________'
        self.__propertyS = '___________'
| 32.297521 | 106 | 0.574463 |
__author__ = 'Aleksandrov Oleg, 4231'
from PDFGenDAOPostgres import PDFGenDAOPostgres
from PDFGenDAOMySQL import PDFGenDAOMySQL
import settings
class FormData:
__dao = None
__qs = []
__small_qr = []
__version = "0.1"
__id_user = "null"
__id_owner = "null"
__id_premise = "null"
__id_meeting = "null"
__fio = "______________________"
__phoneNumber = "______________________"
__city = '__________'
__street = '___________'
__houseNumb = '_____'
__apartment = '_______'
__form = '_____________'
__share = '____________'
__formDate = '_________'
__propertyS = '___________'
__css = ''
def __init__(self, id_user, id_meeting):
if settings.DB == "mysql":
self.__dao = PDFGenDAOMySQL()
else:
self.__dao = PDFGenDAOPostgres()
self.__small_qr = []
self.__qs = []
self.__id_meeting = str(id_meeting)
self.__id_user = str(id_user)
qs_small_qr = self.__dao.get_question(id_meeting)
for value in qs_small_qr:
self.__small_qr.append('s' + str(value[2]))
self.__qs.append(str(value[0]) + " " +value[1])
if str(self.__dao.check_premise(self.__id_user)[0][0]) != 'None':
result = self.__dao.get_title(id_meeting, id_user)
self.__fio = result[0][2] + " " + result[0][0] + " " + result[0][1]
self.__city = result[0][3]
self.__street = result[0][4]
self.__houseNumb = result[0][5]
self.__apartment = str(result[0][6])
self.__form = str(result[0][8])
self.__share = str(round(result[0][9] * 100 / result[0][10], 2)) + '%'
self.__formDate = str(result[0][11])
self.__propertyS = str(result[0][12])
self.__id_premise = str(result[0][13])
self.__id_owner = str(result[0][14])
self.__css = self.__dao.get_css(id_meeting)
def get_date(self):
return {
"fio": self.__fio,
"city": self.__city,
"street": self.__street,
"houseNumb": self.__houseNumb,
"apartment": self.__apartment,
"phoneNumber": self.__phoneNumber,
"formSeries": self.__form,
"formDateOfIssue": self.__formDate,
"propertyS": self.__propertyS,
"share": self.__share
}
def get_big_qr_code_date(self):
return 'b0|' + self.__get_big_qr_code_date()
def get_big_qr_code_date2(self):
return 'b1|' + self.__get_big_qr_code_date()
def __get_big_qr_code_date(self):
return self.__version.ljust(10, ' ') + '|' \
+ self.__id_user.ljust(10, ' ') + '|' \
+ self.__id_owner.ljust(10, ' ') + '|' \
+ self.__id_premise.ljust(10, ' ') + '|' \
+ self.__id_meeting.ljust(10, ' ')
def get_questions(self):
return self.__qs
def get_small_qr_code_date(self):
return self.__small_qr
def get_css(self):
return self.__css[0]
def end(self):
self.__qs = []
self.__small_qr = []
self.__version = "0.1"
self.__id_user = "null"
self.__id_owner = "null"
self.__id_premise = "null"
self.__id_meeting = "null"
self.__fio = "______________________"
self.__phoneNumber = "______________________"
self.__city = '__________'
self.__street = '___________'
self.__houseNumb = '_____'
self.__apartment = '_______'
self.__form = '_____________'
self.__share = '____________'
self.__formDate = '_________'
self.__propertyS = '___________'
| true | true |
f73a5fb7698d2bbec96be49335d57d0c3df1411b | 5,877 | py | Python | tensorflow/python/compat/compat.py | srisharaan/tensorflow | c787e6cdbbf57434599a42bbdc5e4d4df98ed045 | [
"Apache-2.0"
] | null | null | null | tensorflow/python/compat/compat.py | srisharaan/tensorflow | c787e6cdbbf57434599a42bbdc5e4d4df98ed045 | [
"Apache-2.0"
] | null | null | null | tensorflow/python/compat/compat.py | srisharaan/tensorflow | c787e6cdbbf57434599a42bbdc5e4d4df98ed045 | [
"Apache-2.0"
] | 1 | 2020-08-28T07:24:37.000Z | 2020-08-28T07:24:37.000Z | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for API compatibility between TensorFlow release versions.
See [Version
Compatibility](https://tensorflow.org/guide/version_compat#backward_forward)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import os
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util.tf_export import tf_export
# This value changes every day with an automatic CL. It can be modified in code
# via `forward_compatibility_horizon()` or with the environment variable
# TF_FORWARD_COMPATIBILITY_DELTA_DAYS, which is added to the compatibility date.
_FORWARD_COMPATIBILITY_HORIZON = datetime.date(2020, 1, 28)
# Name of the env var holding an integer day offset added to the horizon.
_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME = "TF_FORWARD_COMPATIBILITY_DELTA_DAYS"
# Packed (year, month, day) integer; populated below by
# _update_forward_compatibility_date_number().
_FORWARD_COMPATIBILITY_DATE_NUMBER = None
def _date_to_date_number(year, month, day):
return (year << 9) | (month << 5) | day
def _update_forward_compatibility_date_number(date_to_override=None):
  """Recompute the packed horizon date used by forward_compatible()."""
  global _FORWARD_COMPATIBILITY_DATE_NUMBER

  if not date_to_override:
    # Start from the compiled-in horizon, optionally shifted by the
    # TF_FORWARD_COMPATIBILITY_DELTA_DAYS environment variable.
    date = _FORWARD_COMPATIBILITY_HORIZON
    delta_days = os.getenv(_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME)
    if delta_days:
      date += datetime.timedelta(days=int(delta_days))
  else:
    # An explicit override (used by forward_compatibility_horizon) wins
    # and ignores the environment delta.
    date = date_to_override
  _FORWARD_COMPATIBILITY_DATE_NUMBER = _date_to_date_number(
      date.year, date.month, date.day)
_update_forward_compatibility_date_number()
@tf_export("compat.forward_compatible")
def forward_compatible(year, month, day):
  """Return true if the forward compatibility window has expired.

  See [Version
  compatibility](https://tensorflow.org/guide/version_compat#backward_forward).

  Forward-compatibility refers to scenarios where the producer of a TensorFlow
  model (a GraphDef or SavedModel) is compiled against a version of the
  TensorFlow library newer than what the consumer was compiled against. The
  "producer" is typically a Python program that constructs and trains a model
  while the "consumer" is typically another program that loads and serves the
  model.

  TensorFlow has been supporting a 3 week forward-compatibility window for
  programs compiled from source at HEAD.

  For example, consider the case where a new operation `MyNewAwesomeAdd` is
  created with the intent of replacing the implementation of an existing Python
  wrapper - `tf.add`.  The Python wrapper implementation should change from
  something like:

  ```python
  def add(inputs, name=None):
    return gen_math_ops.add(inputs, name)
  ```

  to:

  ```python
  from tensorflow.python.compat import compat

  def add(inputs, name=None):
    if compat.forward_compatible(year, month, day):
      # Can use the awesome new implementation.
      return gen_math_ops.my_new_awesome_add(inputs, name)
    # To maintain forward compatibiltiy, use the old implementation.
    return gen_math_ops.add(inputs, name)
  ```

  Where `year`, `month`, and `day` specify the date beyond which binaries
  that consume a model are expected to have been updated to include the
  new operations. This date is typically at least 3 weeks beyond the date
  the code that adds the new operation is committed.

  Args:
    year:  A year (e.g., 2018). Must be an `int`.
    month: A month (1 <= month <= 12) in year. Must be an `int`.
    day:   A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an
      `int`.

  Returns:
    True if the caller can expect that serialized TensorFlow graphs produced
    can be consumed by programs that are compiled with the TensorFlow library
    source code after (year, month, day).
  """
  # The horizon is precomputed as a packed integer (see
  # _update_forward_compatibility_date_number), so this hot call is a
  # single integer comparison.
  return _FORWARD_COMPATIBILITY_DATE_NUMBER > _date_to_date_number(
      year, month, day)
@tf_export("compat.forward_compatibility_horizon")
@tf_contextlib.contextmanager
def forward_compatibility_horizon(year, month, day):
"""Context manager for testing forward compatibility of generated graphs.
See [Version
compatibility](https://tensorflow.org/guide/version_compat#backward_forward).
To ensure forward compatibility of generated graphs (see `forward_compatible`)
with older binaries, new features can be gated with:
```python
if compat.forward_compatible(year=2018, month=08, date=01):
generate_graph_with_new_features()
else:
generate_graph_so_older_binaries_can_consume_it()
```
However, when adding new features, one may want to unittest it before
the forward compatibility window expires. This context manager enables
such tests. For example:
```python
from tensorflow.python.compat import compat
def testMyNewFeature(self):
with compat.forward_compatibility_horizon(2018, 08, 02):
# Test that generate_graph_with_new_features() has an effect
```
Args:
year: A year (e.g., 2018). Must be an `int`.
month: A month (1 <= month <= 12) in year. Must be an `int`.
day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an
`int`.
Yields:
Nothing.
"""
try:
_update_forward_compatibility_date_number(datetime.date(year, month, day))
yield
finally:
_update_forward_compatibility_date_number()
| 35.191617 | 82 | 0.74766 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import os
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util.tf_export import tf_export
# Default forward-compatibility horizon date.
_FORWARD_COMPATIBILITY_HORIZON = datetime.date(2020, 1, 28)
# Env var adding an integer number of days to the default horizon.
_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME = "TF_FORWARD_COMPATIBILITY_DELTA_DAYS"
# Cached packed horizon date number; set at import time below.
_FORWARD_COMPATIBILITY_DATE_NUMBER = None
def _date_to_date_number(year, month, day):
return (year << 9) | (month << 5) | day
def _update_forward_compatibility_date_number(date_to_override=None):
  """Recompute the cached horizon date number, optionally from an override."""
  global _FORWARD_COMPATIBILITY_DATE_NUMBER

  if date_to_override:
    date = date_to_override
  else:
    date = _FORWARD_COMPATIBILITY_HORIZON
    # Env-var day delta only shifts the default horizon, not overrides.
    delta_days = os.getenv(_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME)
    if delta_days:
      date += datetime.timedelta(days=int(delta_days))

  _FORWARD_COMPATIBILITY_DATE_NUMBER = _date_to_date_number(
      date.year, date.month, date.day)


# Initialize the cache once at import time.
_update_forward_compatibility_date_number()
@tf_export("compat.forward_compatible")
def forward_compatible(year, month, day):
return _FORWARD_COMPATIBILITY_DATE_NUMBER > _date_to_date_number(
year, month, day)
@tf_export("compat.forward_compatibility_horizon")
@tf_contextlib.contextmanager
def forward_compatibility_horizon(year, month, day):
try:
_update_forward_compatibility_date_number(datetime.date(year, month, day))
yield
finally:
_update_forward_compatibility_date_number()
| true | true |
f73a60e6875aa5553a206b1da9c9f3f341b43114 | 1,304 | py | Python | utils.py | tkipf/gym-gridworld | 807c88373023dc4240e8688e2744ea3dccd560bc | [
"MIT"
] | 13 | 2019-06-12T15:16:44.000Z | 2021-05-23T07:04:34.000Z | utils.py | tkipf/gym-gridworld | 807c88373023dc4240e8688e2744ea3dccd560bc | [
"MIT"
] | null | null | null | utils.py | tkipf/gym-gridworld | 807c88373023dc4240e8688e2744ea3dccd560bc | [
"MIT"
] | 1 | 2019-11-07T05:34:26.000Z | 2019-11-07T05:34:26.000Z | """Utility functions."""
import h5py
import numpy as np
from torch.utils import data
def save_dict_h5py(data, fname):
    """Save dictionary containing numpy arrays to h5py file.

    One dataset is created per dictionary key; an existing file is truncated.
    """
    with h5py.File(fname, 'w') as hf:
        for key, array in data.items():
            hf.create_dataset(key, data=array)
def load_dict_h5py(fname):
    """Restore dictionary containing numpy arrays from h5py file.

    Each dataset is fully materialized in memory (via `[:]`) before the
    file handle is closed.
    """
    with h5py.File(fname, 'r') as hf:
        return {key: hf[key][:] for key in hf.keys()}
def to_float(np_array):
    """Return a float32 numpy copy of the given array-like."""
    # np.array (not asarray) so the result never aliases the input buffer.
    converted = np.array(np_array, dtype=np.float32)
    return converted
class TrajectoryDataset(data.Dataset):
    """Create dataset of (o_t, a_t) trajectories from replay buffer."""

    def __init__(self, hdf5_file):
        """
        Args:
            hdf5_file (string): Path to the hdf5 file that contains experience
                buffer
        """
        # Loads the entire buffer into memory; assumes it fits in RAM — confirm.
        self.experience_buffer = load_dict_h5py(hdf5_file)

    def __len__(self):
        # One sample per stored action; assumes 'actions' and 'observations'
        # arrays are index-aligned.
        return len(self.experience_buffer['actions'])

    def __getitem__(self, idx):
        # Observations are cast to float32 for use as model inputs.
        sample = {
            'obs': to_float(self.experience_buffer['observations'][idx]),
            'action': self.experience_buffer['actions'][idx],
        }
        return sample
| 26.08 | 78 | 0.621166 |
import h5py
import numpy as np
from torch.utils import data
def save_dict_h5py(data, fname):
    """Save a dict of numpy arrays to an h5py file (one dataset per key)."""
    with h5py.File(fname, 'w') as hf:
        for key in data.keys():
            hf.create_dataset(key, data=data[key])
def load_dict_h5py(fname):
    """Restore a dict of in-memory numpy arrays from an h5py file."""
    # NOTE: the local name `data` shadows the `torch.utils.data` module import.
    data = dict()
    with h5py.File(fname, 'r') as hf:
        for key in hf.keys():
            # `[:]` copies the dataset into memory before the file closes.
            data[key] = hf[key][:]
    return data
def to_float(np_array):
    """Return a float32 numpy copy of the given array-like."""
    return np.array(np_array, dtype=np.float32)
class TrajectoryDataset(data.Dataset):
    """Dataset of (observation, action) pairs loaded from an hdf5 replay buffer."""

    def __init__(self, hdf5_file):
        # hdf5_file: path to the experience-buffer hdf5 file; loaded eagerly.
        self.experience_buffer = load_dict_h5py(hdf5_file)

    def __len__(self):
        # One sample per stored action.
        return len(self.experience_buffer['actions'])

    def __getitem__(self, idx):
        # Observations are cast to float32; actions are returned as stored.
        sample = {
            'obs': to_float(self.experience_buffer['observations'][idx]),
            'action': self.experience_buffer['actions'][idx],
        }
        return sample
| true | true |
f73a615a9787da2bebfe0da449462b75ff706ff1 | 22,548 | py | Python | saleor/graphql/shipping/mutations/shippings.py | victor-abz/saleor | f8e2b49703d995d4304d5a690dbe9c83631419d0 | [
"CC-BY-4.0"
] | 1 | 2022-03-25T00:21:11.000Z | 2022-03-25T00:21:11.000Z | saleor/graphql/shipping/mutations/shippings.py | victor-abz/saleor | f8e2b49703d995d4304d5a690dbe9c83631419d0 | [
"CC-BY-4.0"
] | 8 | 2022-03-21T04:41:54.000Z | 2022-03-28T04:51:59.000Z | saleor/graphql/shipping/mutations/shippings.py | victor-abz/saleor | f8e2b49703d995d4304d5a690dbe9c83631419d0 | [
"CC-BY-4.0"
] | 1 | 2021-12-28T18:02:49.000Z | 2021-12-28T18:02:49.000Z | from collections import defaultdict
import graphene
from django.core.exceptions import ValidationError
from django.db.utils import IntegrityError
from ....core.permissions import ShippingPermissions
from ....core.tracing import traced_atomic_transaction
from ....product import models as product_models
from ....shipping import models
from ....shipping.error_codes import ShippingErrorCode
from ....shipping.tasks import (
drop_invalid_shipping_methods_relations_for_given_channels,
)
from ....shipping.utils import (
default_shipping_zone_exists,
get_countries_without_shipping_zone,
)
from ...channel.types import ChannelContext
from ...core.fields import JSONString
from ...core.mutations import BaseMutation, ModelDeleteMutation, ModelMutation
from ...core.scalars import WeightScalar
from ...core.types.common import ShippingError
from ...product import types as product_types
from ...shipping import types as shipping_types
from ...utils import resolve_global_ids_to_primary_keys
from ...utils.validators import check_for_duplicates
from ..enums import PostalCodeRuleInclusionTypeEnum, ShippingMethodTypeEnum
from ..types import ShippingMethodPostalCodeRule, ShippingMethodType, ShippingZone
class ShippingPostalCodeRulesCreateInputRange(graphene.InputObjectType):
    """Start/end range of postal codes for a shipping-method rule."""

    start = graphene.String(
        required=True, description="Start range of the postal code."
    )
    end = graphene.String(required=False, description="End range of the postal code.")
class ShippingPriceInput(graphene.InputObjectType):
    """Input fields shared by shipping price create and update mutations."""

    name = graphene.String(description="Name of the shipping method.")
    description = JSONString(description="Shipping method description.")
    minimum_order_weight = WeightScalar(
        description="Minimum order weight to use this shipping method."
    )
    maximum_order_weight = WeightScalar(
        description="Maximum order weight to use this shipping method."
    )
    maximum_delivery_days = graphene.Int(
        description="Maximum number of days for delivery."
    )
    minimum_delivery_days = graphene.Int(
        description="Minimal number of days for delivery."
    )
    type = ShippingMethodTypeEnum(description="Shipping type: price or weight based.")
    shipping_zone = graphene.ID(
        description="Shipping zone this method belongs to.", name="shippingZone"
    )
    add_postal_code_rules = graphene.List(
        graphene.NonNull(ShippingPostalCodeRulesCreateInputRange),
        description="Postal code rules to add.",
    )
    delete_postal_code_rules = graphene.List(
        graphene.NonNull(graphene.ID),
        description="Postal code rules to delete.",
    )
    # Required whenever add_postal_code_rules is given (enforced in clean_input).
    inclusion_type = PostalCodeRuleInclusionTypeEnum(
        description="Inclusion type for currently assigned postal code rules.",
    )
class ShippingZoneCreateInput(graphene.InputObjectType):
    """Input fields for creating a shipping zone."""

    name = graphene.String(
        description="Shipping zone's name. Visible only to the staff."
    )
    description = graphene.String(description="Description of the shipping zone.")
    countries = graphene.List(
        graphene.String, description="List of countries in this shipping zone."
    )
    default = graphene.Boolean(
        description=(
            "Default shipping zone will be used for countries not covered by other "
            "zones."
        )
    )
    add_warehouses = graphene.List(
        graphene.ID,
        description="List of warehouses to assign to a shipping zone",
    )
    add_channels = graphene.List(
        graphene.NonNull(graphene.ID),
        description="List of channels to assign to the shipping zone.",
    )
class ShippingZoneUpdateInput(ShippingZoneCreateInput):
    """Create-input fields plus remove lists for warehouses and channels."""

    remove_warehouses = graphene.List(
        graphene.ID,
        description="List of warehouses to unassign from a shipping zone",
    )
    remove_channels = graphene.List(
        graphene.NonNull(graphene.ID),
        description="List of channels to unassign from the shipping zone.",
    )
class ShippingZoneMixin:
    """Shared validation and m2m-save logic for zone create/update mutations."""

    @classmethod
    def clean_input(cls, info, instance, data, input_cls=None):
        """Reject add/remove overlaps, then apply default-zone handling."""
        errors = defaultdict(list)
        cls.check_duplicates(
            errors, data, "add_warehouses", "remove_warehouses", "warehouses"
        )
        cls.check_duplicates(
            errors, data, "add_channels", "remove_channels", "channels"
        )

        if errors:
            raise ValidationError(errors)

        cleaned_input = super().clean_input(info, instance, data)
        cleaned_input = cls.clean_default(instance, cleaned_input)
        return cleaned_input

    @classmethod
    def check_duplicates(
        cls,
        errors: dict,
        input_data: dict,
        add_field: str,
        remove_field: str,
        error_class_field: str,
    ):
        """Check if any items are on both input field.

        Raise error if some of items are duplicated.
        """
        error = check_for_duplicates(
            input_data, add_field, remove_field, error_class_field
        )
        if error:
            error.code = ShippingErrorCode.DUPLICATED_INPUT_ITEM.value
            errors[error_class_field].append(error)

    @classmethod
    def clean_default(cls, instance, data):
        """Enforce a single default zone; a default zone absorbs all
        countries not covered by any other zone."""
        default = data.get("default")
        if default:
            if default_shipping_zone_exists(instance.pk):
                raise ValidationError(
                    {
                        "default": ValidationError(
                            "Default shipping zone already exists.",
                            code=ShippingErrorCode.ALREADY_EXISTS.value,
                        )
                    }
                )
            else:
                countries = get_countries_without_shipping_zone()
                data["countries"].extend([country for country in countries])
        else:
            data["default"] = False
        return data

    @classmethod
    @traced_atomic_transaction()
    def _save_m2m(cls, info, instance, cleaned_data):
        """Apply warehouse/channel m2m changes; clean up stale listings."""
        super()._save_m2m(info, instance, cleaned_data)

        add_warehouses = cleaned_data.get("add_warehouses")
        if add_warehouses:
            instance.warehouses.add(*add_warehouses)

        remove_warehouses = cleaned_data.get("remove_warehouses")
        if remove_warehouses:
            instance.warehouses.remove(*remove_warehouses)

        add_channels = cleaned_data.get("add_channels")
        if add_channels:
            instance.channels.add(*add_channels)

        remove_channels = cleaned_data.get("remove_channels")
        if remove_channels:
            instance.channels.remove(*remove_channels)
            # Listings for unassigned channels are no longer valid: delete
            # them synchronously, then asynchronously drop dependent
            # shipping-method relations for those channels.
            shipping_channel_listings = (
                models.ShippingMethodChannelListing.objects.filter(
                    shipping_method__shipping_zone=instance, channel__in=remove_channels
                )
            )
            shipping_method_ids = list(
                shipping_channel_listings.values_list("shipping_method_id", flat=True)
            )
            shipping_channel_listings.delete()
            channel_ids = [channel.id for channel in remove_channels]
            drop_invalid_shipping_methods_relations_for_given_channels.delay(
                shipping_method_ids, channel_ids
            )
class ShippingZoneCreate(ShippingZoneMixin, ModelMutation):
    """Create a shipping zone; validation comes from ShippingZoneMixin."""

    class Arguments:
        input = ShippingZoneCreateInput(
            description="Fields required to create a shipping zone.", required=True
        )

    class Meta:
        description = "Creates a new shipping zone."
        model = models.ShippingZone
        object_type = ShippingZone
        permissions = (ShippingPermissions.MANAGE_SHIPPING,)
        error_type_class = ShippingError
        error_type_field = "shipping_errors"

    @classmethod
    def success_response(cls, instance):
        # Zones are channel-agnostic here, so wrap with an empty channel slug.
        instance = ChannelContext(node=instance, channel_slug=None)
        response = super().success_response(instance)
        return response
class ShippingZoneUpdate(ShippingZoneMixin, ModelMutation):
    """Update a shipping zone; validation comes from ShippingZoneMixin."""

    class Arguments:
        id = graphene.ID(description="ID of a shipping zone to update.", required=True)
        input = ShippingZoneUpdateInput(
            description="Fields required to update a shipping zone.", required=True
        )

    class Meta:
        description = "Updates a new shipping zone."
        model = models.ShippingZone
        object_type = ShippingZone
        permissions = (ShippingPermissions.MANAGE_SHIPPING,)
        error_type_class = ShippingError
        error_type_field = "shipping_errors"

    @classmethod
    def success_response(cls, instance):
        # Wrap the zone in a channel-agnostic context for the payload.
        instance = ChannelContext(node=instance, channel_slug=None)
        response = super().success_response(instance)
        return response
class ShippingZoneDelete(ModelDeleteMutation):
    """Delete a shipping zone by ID."""

    class Arguments:
        id = graphene.ID(required=True, description="ID of a shipping zone to delete.")

    class Meta:
        description = "Deletes a shipping zone."
        model = models.ShippingZone
        object_type = ShippingZone
        permissions = (ShippingPermissions.MANAGE_SHIPPING,)
        error_type_class = ShippingError
        error_type_field = "shipping_errors"

    @classmethod
    def success_response(cls, instance):
        # Wrap the (deleted) zone in a channel-agnostic context for the payload.
        instance = ChannelContext(node=instance, channel_slug=None)
        response = super().success_response(instance)
        return response
class ShippingMethodTypeMixin:
    """Resolve ShippingMethod model instances for shipping price mutations."""

    @classmethod
    def get_type_for_model(cls):
        # Shipping methods are exposed in the schema as ShippingMethodType.
        return shipping_types.ShippingMethodType

    @classmethod
    def get_instance(cls, info, **data):
        """Fetch the method by global ID, or start a fresh model instance."""
        object_id = data.get("id")
        if object_id:
            instance = cls.get_node_or_error(
                info, object_id, qs=models.ShippingMethod.objects
            )
        else:
            instance = cls._meta.model()
        return instance
class ShippingPriceMixin:
    """Shared validation and persistence logic for shipping price mutations."""

    @classmethod
    def get_type_for_model(cls):
        return ShippingMethodType

    @classmethod
    def clean_input(cls, info, instance, data, input_cls=None):
        """Validate weights, delivery days and postal-code rule input."""
        cleaned_input = super().clean_input(info, instance, data)

        errors = {}
        cls.clean_weight(cleaned_input, errors)
        if (
            "minimum_delivery_days" in cleaned_input
            or "maximum_delivery_days" in cleaned_input
        ):
            cls.clean_delivery_time(instance, cleaned_input, errors)
        if errors:
            raise ValidationError(errors)

        if cleaned_input.get("delete_postal_code_rules"):
            # Translate global IDs into DB primary keys for the delete step.
            _, postal_code_rules_db_ids = resolve_global_ids_to_primary_keys(
                data["delete_postal_code_rules"], ShippingMethodPostalCodeRule
            )
            cleaned_input["delete_postal_code_rules"] = postal_code_rules_db_ids
        if cleaned_input.get("add_postal_code_rules") and not cleaned_input.get(
            "inclusion_type"
        ):
            raise ValidationError(
                {
                    "inclusion_type": ValidationError(
                        "This field is required.",
                        # NOTE(review): other codes in this module use
                        # ShippingErrorCode.<X>.value; confirm the bare enum
                        # member here is intentional.
                        code=ShippingErrorCode.REQUIRED,
                    )
                }
            )

        return cleaned_input

    @classmethod
    def clean_weight(cls, cleaned_input, errors):
        """Reject negative weights and a max weight not above the min."""
        min_weight = cleaned_input.get("minimum_order_weight")
        max_weight = cleaned_input.get("maximum_order_weight")

        if min_weight and min_weight.value < 0:
            errors["minimum_order_weight"] = ValidationError(
                "Shipping can't have negative weight.",
                code=ShippingErrorCode.INVALID,
            )
        if max_weight and max_weight.value < 0:
            errors["maximum_order_weight"] = ValidationError(
                "Shipping can't have negative weight.",
                code=ShippingErrorCode.INVALID,
            )

        if errors:
            return

        # Note: equality is also rejected — max must be strictly greater.
        if (
            min_weight is not None
            and max_weight is not None
            and max_weight <= min_weight
        ):
            raise ValidationError(
                {
                    "maximum_order_weight": ValidationError(
                        (
                            "Maximum order weight should be larger than the "
                            "minimum order weight."
                        ),
                        code=ShippingErrorCode.MAX_LESS_THAN_MIN,
                    )
                }
            )

    @classmethod
    def clean_delivery_time(cls, instance, cleaned_input, errors):
        """Validate delivery days.

        - check if minimum_delivery_days is not higher than maximum_delivery_days
        - check if minimum_delivery_days and maximum_delivery_days are positive values
        """
        # Fall back to values already on the instance so a partial update is
        # still validated against the untouched field.
        min_delivery_days = (
            cleaned_input.get("minimum_delivery_days") or instance.minimum_delivery_days
        )
        max_delivery_days = (
            cleaned_input.get("maximum_delivery_days") or instance.maximum_delivery_days
        )

        if not min_delivery_days and not max_delivery_days:
            return

        error_occurred = False
        if min_delivery_days and min_delivery_days < 0:
            errors["minimum_delivery_days"] = ValidationError(
                "Minimum delivery days must be positive.",
                code=ShippingErrorCode.INVALID.value,
            )
            error_occurred = True
        if max_delivery_days and max_delivery_days < 0:
            errors["maximum_delivery_days"] = ValidationError(
                "Maximum delivery days must be positive.",
                code=ShippingErrorCode.INVALID.value,
            )
            error_occurred = True

        if error_occurred:
            return

        if (
            min_delivery_days is not None
            and max_delivery_days is not None
            and min_delivery_days > max_delivery_days
        ):
            # Attribute the error to whichever field the user actually sent.
            if cleaned_input.get("minimum_delivery_days") is not None:
                error_msg = (
                    "Minimum delivery days should be lower "
                    "than maximum delivery days."
                )
                field = "minimum_delivery_days"
            else:
                error_msg = (
                    "Maximum delivery days should be higher than "
                    "minimum delivery days."
                )
                field = "maximum_delivery_days"
            errors[field] = ValidationError(
                error_msg, code=ShippingErrorCode.INVALID.value
            )

    @classmethod
    @traced_atomic_transaction()
    def save(cls, info, instance, cleaned_input):
        """Persist the method, then apply postal-code rule deletions/additions."""
        super().save(info, instance, cleaned_input)

        delete_postal_code_rules = cleaned_input.get("delete_postal_code_rules")
        if delete_postal_code_rules:
            instance.postal_code_rules.filter(id__in=delete_postal_code_rules).delete()

        if cleaned_input.get("add_postal_code_rules"):
            inclusion_type = cleaned_input["inclusion_type"]
            for postal_code_rule in cleaned_input["add_postal_code_rules"]:
                start = postal_code_rule["start"]
                end = postal_code_rule.get("end")
                try:
                    instance.postal_code_rules.create(
                        start=start, end=end, inclusion_type=inclusion_type
                    )
                except IntegrityError:
                    # Unique constraint on (method, start, end) — surface as a
                    # validation error instead of a 500.
                    raise ValidationError(
                        {
                            "addPostalCodeRules": ValidationError(
                                f"Entry start: {start}, end: {end} already exists.",
                                code=ShippingErrorCode.ALREADY_EXISTS.value,
                            )
                        }
                    )
class ShippingPriceCreate(ShippingPriceMixin, ShippingMethodTypeMixin, ModelMutation):
    """Create a shipping method (price); validation via ShippingPriceMixin."""

    shipping_zone = graphene.Field(
        ShippingZone,
        description="A shipping zone to which the shipping method belongs.",
    )
    shipping_method = graphene.Field(
        ShippingMethodType, description="A shipping method to create."
    )

    class Arguments:
        input = ShippingPriceInput(
            description="Fields required to create a shipping price.", required=True
        )

    class Meta:
        description = "Creates a new shipping price."
        model = models.ShippingMethod
        object_type = ShippingMethodType
        permissions = (ShippingPermissions.MANAGE_SHIPPING,)
        error_type_class = ShippingError
        error_type_field = "shipping_errors"
        # Model field "price_amount" is exposed to clients as "price".
        errors_mapping = {"price_amount": "price"}

    @classmethod
    def success_response(cls, instance):
        # Expose both the method and its zone wrapped in channel contexts.
        shipping_method = ChannelContext(node=instance, channel_slug=None)
        response = super().success_response(shipping_method)

        response.shipping_zone = ChannelContext(
            node=instance.shipping_zone, channel_slug=None
        )
        return response
class ShippingPriceUpdate(ShippingPriceMixin, ShippingMethodTypeMixin, ModelMutation):
    """Update a shipping method (price); validation via ShippingPriceMixin."""

    shipping_zone = graphene.Field(
        ShippingZone,
        description="A shipping zone to which the shipping method belongs.",
    )
    shipping_method = graphene.Field(
        ShippingMethodType, description="A shipping method."
    )

    class Arguments:
        id = graphene.ID(description="ID of a shipping price to update.", required=True)
        input = ShippingPriceInput(
            description="Fields required to update a shipping price.", required=True
        )

    class Meta:
        description = "Updates a new shipping price."
        model = models.ShippingMethod
        object_type = ShippingMethodType
        permissions = (ShippingPermissions.MANAGE_SHIPPING,)
        error_type_class = ShippingError
        error_type_field = "shipping_errors"
        # Model field "price_amount" is exposed to clients as "price".
        errors_mapping = {"price_amount": "price"}

    @classmethod
    def success_response(cls, instance):
        # Expose both the method and its zone wrapped in channel contexts.
        shipping_method = ChannelContext(node=instance, channel_slug=None)
        response = super().success_response(shipping_method)
        response.shipping_zone = ChannelContext(
            node=instance.shipping_zone, channel_slug=None
        )
        return response
class ShippingPriceDelete(BaseMutation):
    """Delete a shipping method (price) and return it with its zone."""

    shipping_method = graphene.Field(
        ShippingMethodType, description="A shipping method to delete."
    )
    shipping_zone = graphene.Field(
        ShippingZone,
        description="A shipping zone to which the shipping method belongs.",
    )

    class Arguments:
        id = graphene.ID(required=True, description="ID of a shipping price to delete.")

    class Meta:
        description = "Deletes a shipping price."
        permissions = (ShippingPermissions.MANAGE_SHIPPING,)
        error_type_class = ShippingError
        error_type_field = "shipping_errors"

    @classmethod
    def perform_mutation(cls, _root, info, **data):
        shipping_method = cls.get_node_or_error(
            info, data.get("id"), qs=models.ShippingMethod.objects
        )
        shipping_method_id = shipping_method.id
        shipping_zone = shipping_method.shipping_zone
        shipping_method.delete()
        # Django clears the pk on delete; restore it so the payload still
        # carries the original global ID of the removed method.
        shipping_method.id = shipping_method_id
        return ShippingPriceDelete(
            shipping_method=ChannelContext(node=shipping_method, channel_slug=None),
            shipping_zone=ChannelContext(node=shipping_zone, channel_slug=None),
        )
class ShippingPriceExcludeProductsInput(graphene.InputObjectType):
    """Input for excluding products from a shipping price."""

    products = graphene.List(
        graphene.ID,
        description="List of products which will be excluded.",
        required=True,
    )
class ShippingPriceExcludeProducts(BaseMutation):
    """Add products to a shipping method's excluded-products list."""

    shipping_method = graphene.Field(
        ShippingMethodType,
        description="A shipping method with new list of excluded products.",
    )

    class Arguments:
        id = graphene.ID(required=True, description="ID of a shipping price.")
        input = ShippingPriceExcludeProductsInput(
            description="Exclude products input.", required=True
        )

    class Meta:
        description = "Exclude products from shipping price."
        permissions = (ShippingPermissions.MANAGE_SHIPPING,)
        error_type_class = ShippingError
        error_type_field = "shipping_errors"

    @classmethod
    def perform_mutation(cls, _root, info, **data):
        shipping_method = cls.get_node_or_error(
            info, data.get("id"), qs=models.ShippingMethod.objects
        )
        input = data.get("input")
        product_ids = input.get("products", [])
        product_db_ids = cls.get_global_ids_or_error(
            product_ids, product_types.Product, field="products"
        )
        product_to_exclude = product_models.Product.objects.filter(
            id__in=product_db_ids
        )
        # Union new exclusions with the existing ones — additive, not a reset.
        current_excluded_products = shipping_method.excluded_products.all()
        shipping_method.excluded_products.set(
            (current_excluded_products | product_to_exclude).distinct()
        )
        return ShippingPriceExcludeProducts(
            shipping_method=ChannelContext(node=shipping_method, channel_slug=None)
        )
class ShippingPriceRemoveProductFromExclude(BaseMutation):
    """Remove products from a shipping method's excluded-products list."""

    shipping_method = graphene.Field(
        ShippingMethodType,
        description="A shipping method with new list of excluded products.",
    )

    class Arguments:
        id = graphene.ID(required=True, description="ID of a shipping price.")
        products = graphene.List(
            graphene.ID,
            required=True,
            description="List of products which will be removed from excluded list.",
        )

    class Meta:
        description = "Remove product from excluded list for shipping price."
        permissions = (ShippingPermissions.MANAGE_SHIPPING,)
        error_type_class = ShippingError
        error_type_field = "shipping_errors"

    @classmethod
    def perform_mutation(cls, _root, info, **data):
        """Unassign the given products from the method's exclusion list."""
        shipping_method = cls.get_node_or_error(
            info, data.get("id"), qs=models.ShippingMethod.objects
        )
        product_ids = data.get("products")
        if product_ids:
            product_db_ids = cls.get_global_ids_or_error(
                product_ids, product_types.Product, field="products"
            )
            shipping_method.excluded_products.set(
                shipping_method.excluded_products.exclude(id__in=product_db_ids)
            )
        # Bug fix: previously returned a ShippingPriceExcludeProducts payload;
        # return this mutation's own payload class so the response object
        # matches the schema type of this mutation.
        return ShippingPriceRemoveProductFromExclude(
            shipping_method=ChannelContext(node=shipping_method, channel_slug=None)
        )
| 35.904459 | 88 | 0.646177 | from collections import defaultdict
import graphene
from django.core.exceptions import ValidationError
from django.db.utils import IntegrityError
from ....core.permissions import ShippingPermissions
from ....core.tracing import traced_atomic_transaction
from ....product import models as product_models
from ....shipping import models
from ....shipping.error_codes import ShippingErrorCode
from ....shipping.tasks import (
drop_invalid_shipping_methods_relations_for_given_channels,
)
from ....shipping.utils import (
default_shipping_zone_exists,
get_countries_without_shipping_zone,
)
from ...channel.types import ChannelContext
from ...core.fields import JSONString
from ...core.mutations import BaseMutation, ModelDeleteMutation, ModelMutation
from ...core.scalars import WeightScalar
from ...core.types.common import ShippingError
from ...product import types as product_types
from ...shipping import types as shipping_types
from ...utils import resolve_global_ids_to_primary_keys
from ...utils.validators import check_for_duplicates
from ..enums import PostalCodeRuleInclusionTypeEnum, ShippingMethodTypeEnum
from ..types import ShippingMethodPostalCodeRule, ShippingMethodType, ShippingZone
class ShippingPostalCodeRulesCreateInputRange(graphene.InputObjectType):
    """Start/end range of postal codes for a shipping-method rule."""

    start = graphene.String(
        required=True, description="Start range of the postal code."
    )
    end = graphene.String(required=False, description="End range of the postal code.")
class ShippingPriceInput(graphene.InputObjectType):
    """Input fields shared by shipping price create and update mutations."""

    name = graphene.String(description="Name of the shipping method.")
    description = JSONString(description="Shipping method description.")
    minimum_order_weight = WeightScalar(
        description="Minimum order weight to use this shipping method."
    )
    maximum_order_weight = WeightScalar(
        description="Maximum order weight to use this shipping method."
    )
    maximum_delivery_days = graphene.Int(
        description="Maximum number of days for delivery."
    )
    minimum_delivery_days = graphene.Int(
        description="Minimal number of days for delivery."
    )
    type = ShippingMethodTypeEnum(description="Shipping type: price or weight based.")
    shipping_zone = graphene.ID(
        description="Shipping zone this method belongs to.", name="shippingZone"
    )
    add_postal_code_rules = graphene.List(
        graphene.NonNull(ShippingPostalCodeRulesCreateInputRange),
        description="Postal code rules to add.",
    )
    delete_postal_code_rules = graphene.List(
        graphene.NonNull(graphene.ID),
        description="Postal code rules to delete.",
    )
    # Required whenever add_postal_code_rules is given.
    inclusion_type = PostalCodeRuleInclusionTypeEnum(
        description="Inclusion type for currently assigned postal code rules.",
    )
class ShippingZoneCreateInput(graphene.InputObjectType):
    """Input fields for creating a shipping zone."""

    name = graphene.String(
        description="Shipping zone's name. Visible only to the staff."
    )
    description = graphene.String(description="Description of the shipping zone.")
    countries = graphene.List(
        graphene.String, description="List of countries in this shipping zone."
    )
    default = graphene.Boolean(
        description=(
            "Default shipping zone will be used for countries not covered by other "
            "zones."
        )
    )
    add_warehouses = graphene.List(
        graphene.ID,
        description="List of warehouses to assign to a shipping zone",
    )
    add_channels = graphene.List(
        graphene.NonNull(graphene.ID),
        description="List of channels to assign to the shipping zone.",
    )
class ShippingZoneUpdateInput(ShippingZoneCreateInput):
    """Create-input fields plus remove lists for warehouses and channels."""

    remove_warehouses = graphene.List(
        graphene.ID,
        description="List of warehouses to unassign from a shipping zone",
    )
    remove_channels = graphene.List(
        graphene.NonNull(graphene.ID),
        description="List of channels to unassign from the shipping zone.",
    )
class ShippingZoneMixin:
    """Shared validation and m2m-save logic for zone create/update mutations."""

    @classmethod
    def clean_input(cls, info, instance, data, input_cls=None):
        """Reject add/remove overlaps, then apply default-zone handling."""
        errors = defaultdict(list)
        cls.check_duplicates(
            errors, data, "add_warehouses", "remove_warehouses", "warehouses"
        )
        cls.check_duplicates(
            errors, data, "add_channels", "remove_channels", "channels"
        )

        if errors:
            raise ValidationError(errors)

        cleaned_input = super().clean_input(info, instance, data)
        cleaned_input = cls.clean_default(instance, cleaned_input)
        return cleaned_input

    @classmethod
    def check_duplicates(
        cls,
        errors: dict,
        input_data: dict,
        add_field: str,
        remove_field: str,
        error_class_field: str,
    ):
        """Record an error when an item appears in both add and remove lists."""
        error = check_for_duplicates(
            input_data, add_field, remove_field, error_class_field
        )
        if error:
            error.code = ShippingErrorCode.DUPLICATED_INPUT_ITEM.value
            errors[error_class_field].append(error)

    @classmethod
    def clean_default(cls, instance, data):
        """Enforce a single default zone; it absorbs uncovered countries."""
        default = data.get("default")
        if default:
            if default_shipping_zone_exists(instance.pk):
                raise ValidationError(
                    {
                        "default": ValidationError(
                            "Default shipping zone already exists.",
                            code=ShippingErrorCode.ALREADY_EXISTS.value,
                        )
                    }
                )
            else:
                countries = get_countries_without_shipping_zone()
                data["countries"].extend([country for country in countries])
        else:
            data["default"] = False
        return data

    @classmethod
    @traced_atomic_transaction()
    def _save_m2m(cls, info, instance, cleaned_data):
        """Apply warehouse/channel m2m changes; clean up stale listings."""
        super()._save_m2m(info, instance, cleaned_data)

        add_warehouses = cleaned_data.get("add_warehouses")
        if add_warehouses:
            instance.warehouses.add(*add_warehouses)

        remove_warehouses = cleaned_data.get("remove_warehouses")
        if remove_warehouses:
            instance.warehouses.remove(*remove_warehouses)

        add_channels = cleaned_data.get("add_channels")
        if add_channels:
            instance.channels.add(*add_channels)

        remove_channels = cleaned_data.get("remove_channels")
        if remove_channels:
            instance.channels.remove(*remove_channels)
            # Listings for unassigned channels are stale: delete them, then
            # asynchronously drop dependent shipping-method relations.
            shipping_channel_listings = (
                models.ShippingMethodChannelListing.objects.filter(
                    shipping_method__shipping_zone=instance, channel__in=remove_channels
                )
            )
            shipping_method_ids = list(
                shipping_channel_listings.values_list("shipping_method_id", flat=True)
            )
            shipping_channel_listings.delete()
            channel_ids = [channel.id for channel in remove_channels]
            drop_invalid_shipping_methods_relations_for_given_channels.delay(
                shipping_method_ids, channel_ids
            )
class ShippingZoneCreate(ShippingZoneMixin, ModelMutation):
    """Create a shipping zone."""

    class Arguments:
        input = ShippingZoneCreateInput(
            description="Fields required to create a shipping zone.", required=True
        )

    class Meta:
        description = "Creates a new shipping zone."
        model = models.ShippingZone
        object_type = ShippingZone
        permissions = (ShippingPermissions.MANAGE_SHIPPING,)
        error_type_class = ShippingError
        error_type_field = "shipping_errors"

    @classmethod
    def success_response(cls, instance):
        # Wrap the zone in a channel-agnostic context for the payload.
        instance = ChannelContext(node=instance, channel_slug=None)
        response = super().success_response(instance)
        return response
class ShippingZoneUpdate(ShippingZoneMixin, ModelMutation):
    """Update a shipping zone."""

    class Arguments:
        id = graphene.ID(description="ID of a shipping zone to update.", required=True)
        input = ShippingZoneUpdateInput(
            description="Fields required to update a shipping zone.", required=True
        )

    class Meta:
        description = "Updates a new shipping zone."
        model = models.ShippingZone
        object_type = ShippingZone
        permissions = (ShippingPermissions.MANAGE_SHIPPING,)
        error_type_class = ShippingError
        error_type_field = "shipping_errors"

    @classmethod
    def success_response(cls, instance):
        # Wrap the zone in a channel-agnostic context for the payload.
        instance = ChannelContext(node=instance, channel_slug=None)
        response = super().success_response(instance)
        return response
class ShippingZoneDelete(ModelDeleteMutation):
    """Delete a shipping zone by ID."""

    class Arguments:
        id = graphene.ID(required=True, description="ID of a shipping zone to delete.")

    class Meta:
        description = "Deletes a shipping zone."
        model = models.ShippingZone
        object_type = ShippingZone
        permissions = (ShippingPermissions.MANAGE_SHIPPING,)
        error_type_class = ShippingError
        error_type_field = "shipping_errors"

    @classmethod
    def success_response(cls, instance):
        # Wrap the (deleted) zone in a channel-agnostic context for the payload.
        instance = ChannelContext(node=instance, channel_slug=None)
        response = super().success_response(instance)
        return response
class ShippingMethodTypeMixin:
    """Shared helpers for mutations operating on shipping methods."""

    @classmethod
    def get_type_for_model(cls):
        # Shipping methods are exposed via the ShippingMethodType schema type.
        return shipping_types.ShippingMethodType

    @classmethod
    def get_instance(cls, info, **data):
        """Resolve a shipping method by global ID, or start a blank instance."""
        object_id = data.get("id")
        if not object_id:
            # No ID supplied: the mutation is creating a fresh object.
            return cls._meta.model()
        return cls.get_node_or_error(
            info, object_id, qs=models.ShippingMethod.objects
        )
class ShippingPriceMixin:
    """Shared validation and persistence logic for shipping price mutations."""

    @classmethod
    def get_type_for_model(cls):
        # Shipping prices are represented by ShippingMethodType in the schema.
        return ShippingMethodType

    @classmethod
    def clean_input(cls, info, instance, data, input_cls=None):
        """Validate weight limits, delivery days and postal code rule input.

        Raises:
            ValidationError: when weights/delivery days are invalid, or when
                postal code rules are added without an ``inclusion_type``.
        """
        cleaned_input = super().clean_input(info, instance, data)
        errors = {}
        cls.clean_weight(cleaned_input, errors)
        # Delivery-time validation runs only when either bound is supplied.
        if (
            "minimum_delivery_days" in cleaned_input
            or "maximum_delivery_days" in cleaned_input
        ):
            cls.clean_delivery_time(instance, cleaned_input, errors)
        if errors:
            raise ValidationError(errors)
        if cleaned_input.get("delete_postal_code_rules"):
            # Translate global GraphQL IDs into database primary keys.
            _, postal_code_rules_db_ids = resolve_global_ids_to_primary_keys(
                data["delete_postal_code_rules"], ShippingMethodPostalCodeRule
            )
            cleaned_input["delete_postal_code_rules"] = postal_code_rules_db_ids
        # New postal code rules are meaningless without an inclusion type.
        if cleaned_input.get("add_postal_code_rules") and not cleaned_input.get(
            "inclusion_type"
        ):
            raise ValidationError(
                {
                    "inclusion_type": ValidationError(
                        "This field is required.",
                        code=ShippingErrorCode.REQUIRED,
                    )
                }
            )
        return cleaned_input

    @classmethod
    def clean_weight(cls, cleaned_input, errors):
        """Collect errors for negative weights; reject max <= min."""
        min_weight = cleaned_input.get("minimum_order_weight")
        max_weight = cleaned_input.get("maximum_order_weight")
        if min_weight and min_weight.value < 0:
            errors["minimum_order_weight"] = ValidationError(
                "Shipping can't have negative weight.",
                code=ShippingErrorCode.INVALID,
            )
        if max_weight and max_weight.value < 0:
            errors["maximum_order_weight"] = ValidationError(
                "Shipping can't have negative weight.",
                code=ShippingErrorCode.INVALID,
            )
        if errors:
            # Skip the range check when individual values are already invalid.
            return
        if (
            min_weight is not None
            and max_weight is not None
            and max_weight <= min_weight
        ):
            raise ValidationError(
                {
                    "maximum_order_weight": ValidationError(
                        (
                            "Maximum order weight should be larger than the "
                            "minimum order weight."
                        ),
                        code=ShippingErrorCode.MAX_LESS_THAN_MIN,
                    )
                }
            )

    @classmethod
    def clean_delivery_time(cls, instance, cleaned_input, errors):
        """Validate that delivery-day bounds are positive and min <= max.

        Falls back to values already stored on ``instance`` when a bound is
        absent from the input (relevant for update mutations).
        """
        min_delivery_days = (
            cleaned_input.get("minimum_delivery_days") or instance.minimum_delivery_days
        )
        max_delivery_days = (
            cleaned_input.get("maximum_delivery_days") or instance.maximum_delivery_days
        )
        if not min_delivery_days and not max_delivery_days:
            return
        error_occurred = False
        if min_delivery_days and min_delivery_days < 0:
            errors["minimum_delivery_days"] = ValidationError(
                "Minimum delivery days must be positive.",
                code=ShippingErrorCode.INVALID.value,
            )
            error_occurred = True
        if max_delivery_days and max_delivery_days < 0:
            errors["maximum_delivery_days"] = ValidationError(
                "Maximum delivery days must be positive.",
                code=ShippingErrorCode.INVALID.value,
            )
            error_occurred = True
        if error_occurred:
            # Don't compare bounds that are individually invalid.
            return
        if (
            min_delivery_days is not None
            and max_delivery_days is not None
            and min_delivery_days > max_delivery_days
        ):
            # Attach the error to whichever bound the caller actually sent.
            if cleaned_input.get("minimum_delivery_days") is not None:
                error_msg = (
                    "Minimum delivery days should be lower "
                    "than maximum delivery days."
                )
                field = "minimum_delivery_days"
            else:
                error_msg = (
                    "Maximum delivery days should be higher than "
                    "minimum delivery days."
                )
                field = "maximum_delivery_days"
            errors[field] = ValidationError(
                error_msg, code=ShippingErrorCode.INVALID.value
            )

    @classmethod
    @traced_atomic_transaction()
    def save(cls, info, instance, cleaned_input):
        """Persist the instance plus postal code rule additions/removals.

        Runs inside a traced atomic transaction so rule changes roll back with
        the instance save on failure.
        """
        super().save(info, instance, cleaned_input)
        delete_postal_code_rules = cleaned_input.get("delete_postal_code_rules")
        if delete_postal_code_rules:
            instance.postal_code_rules.filter(id__in=delete_postal_code_rules).delete()
        if cleaned_input.get("add_postal_code_rules"):
            inclusion_type = cleaned_input["inclusion_type"]
            for postal_code_rule in cleaned_input["add_postal_code_rules"]:
                start = postal_code_rule["start"]
                end = postal_code_rule.get("end")
                try:
                    instance.postal_code_rules.create(
                        start=start, end=end, inclusion_type=inclusion_type
                    )
                except IntegrityError:
                    # Unique-constraint violation: the rule already exists.
                    raise ValidationError(
                        {
                            "addPostalCodeRules": ValidationError(
                                f"Entry start: {start}, end: {end} already exists.",
                                code=ShippingErrorCode.ALREADY_EXISTS.value,
                            )
                        }
                    )
class ShippingPriceCreate(ShippingPriceMixin, ShippingMethodTypeMixin, ModelMutation):
    """Mutation that creates a shipping price (shipping method)."""

    shipping_zone = graphene.Field(
        ShippingZone,
        description="A shipping zone to which the shipping method belongs.",
    )
    shipping_method = graphene.Field(
        ShippingMethodType, description="A shipping method to create."
    )

    class Arguments:
        input = ShippingPriceInput(
            description="Fields required to create a shipping price.", required=True
        )

    class Meta:
        description = "Creates a new shipping price."
        model = models.ShippingMethod
        object_type = ShippingMethodType
        permissions = (ShippingPermissions.MANAGE_SHIPPING,)
        error_type_class = ShippingError
        error_type_field = "shipping_errors"
        errors_mapping = {"price_amount": "price"}

    @classmethod
    def success_response(cls, instance):
        """Build the payload, exposing both the method and its parent zone."""
        wrapped_method = ChannelContext(node=instance, channel_slug=None)
        response = super().success_response(wrapped_method)
        # Also expose the owning zone, wrapped in a channel-agnostic context.
        response.shipping_zone = ChannelContext(
            node=instance.shipping_zone, channel_slug=None
        )
        return response
class ShippingPriceUpdate(ShippingPriceMixin, ShippingMethodTypeMixin, ModelMutation):
    """Mutation that updates a shipping price (shipping method)."""

    shipping_zone = graphene.Field(
        ShippingZone,
        description="A shipping zone to which the shipping method belongs.",
    )
    shipping_method = graphene.Field(
        ShippingMethodType, description="A shipping method."
    )

    class Arguments:
        id = graphene.ID(description="ID of a shipping price to update.", required=True)
        input = ShippingPriceInput(
            description="Fields required to update a shipping price.", required=True
        )

    class Meta:
        # NOTE(review): "Updates a new shipping price." mirrors the create
        # mutation's wording; left unchanged as it is public schema text.
        description = "Updates a new shipping price."
        model = models.ShippingMethod
        object_type = ShippingMethodType
        permissions = (ShippingPermissions.MANAGE_SHIPPING,)
        error_type_class = ShippingError
        error_type_field = "shipping_errors"
        errors_mapping = {"price_amount": "price"}

    @classmethod
    def success_response(cls, instance):
        """Build the payload, exposing both the method and its parent zone."""
        wrapped_method = ChannelContext(node=instance, channel_slug=None)
        response = super().success_response(wrapped_method)
        response.shipping_zone = ChannelContext(
            node=instance.shipping_zone, channel_slug=None
        )
        return response
class ShippingPriceDelete(BaseMutation):
    """Mutation that deletes a shipping price (shipping method)."""

    shipping_method = graphene.Field(
        ShippingMethodType, description="A shipping method to delete."
    )
    shipping_zone = graphene.Field(
        ShippingZone,
        description="A shipping zone to which the shipping method belongs.",
    )

    class Arguments:
        id = graphene.ID(required=True, description="ID of a shipping price to delete.")

    class Meta:
        description = "Deletes a shipping price."
        permissions = (ShippingPermissions.MANAGE_SHIPPING,)
        error_type_class = ShippingError
        error_type_field = "shipping_errors"

    @classmethod
    def perform_mutation(cls, _root, info, **data):
        """Delete the shipping method and return it (with zone) in the payload."""
        shipping_method = cls.get_node_or_error(
            info, data.get("id"), qs=models.ShippingMethod.objects
        )
        # Capture the PK and the owning zone before deleting.
        shipping_method_id = shipping_method.id
        shipping_zone = shipping_method.shipping_zone
        shipping_method.delete()
        # Django clears ``id`` on delete; restore it so the deleted object can
        # still be serialized into the mutation payload.
        shipping_method.id = shipping_method_id
        return ShippingPriceDelete(
            shipping_method=ChannelContext(node=shipping_method, channel_slug=None),
            shipping_zone=ChannelContext(node=shipping_zone, channel_slug=None),
        )
class ShippingPriceExcludeProductsInput(graphene.InputObjectType):
    """Input payload listing products to exclude from a shipping price."""

    products = graphene.List(
        graphene.ID,
        description="List of products which will be excluded.",
        required=True,
    )
class ShippingPriceExcludeProducts(BaseMutation):
    """Mutation that adds products to a shipping method's exclusion list."""

    shipping_method = graphene.Field(
        ShippingMethodType,
        description="A shipping method with new list of excluded products.",
    )

    class Arguments:
        id = graphene.ID(required=True, description="ID of a shipping price.")
        input = ShippingPriceExcludeProductsInput(
            description="Exclude products input.", required=True
        )

    class Meta:
        description = "Exclude products from shipping price."
        permissions = (ShippingPermissions.MANAGE_SHIPPING,)
        error_type_class = ShippingError
        error_type_field = "shipping_errors"

    @classmethod
    def perform_mutation(cls, _root, info, **data):
        """Union the requested products into the method's exclusion set."""
        shipping_method = cls.get_node_or_error(
            info, data.get("id"), qs=models.ShippingMethod.objects
        )
        # Renamed from ``input`` to avoid shadowing the builtin.
        input_data = data.get("input")
        product_ids = input_data.get("products", [])
        product_db_ids = cls.get_global_ids_or_error(
            product_ids, product_types.Product, field="products"
        )
        product_to_exclude = product_models.Product.objects.filter(
            id__in=product_db_ids
        )
        current_excluded_products = shipping_method.excluded_products.all()
        # Merge existing exclusions with the new ones; ``distinct`` guards
        # against duplicates in the combined queryset.
        shipping_method.excluded_products.set(
            (current_excluded_products | product_to_exclude).distinct()
        )
        return ShippingPriceExcludeProducts(
            shipping_method=ChannelContext(node=shipping_method, channel_slug=None)
        )
class ShippingPriceRemoveProductFromExclude(BaseMutation):
    """Mutation that removes products from a shipping method's exclusion list."""

    shipping_method = graphene.Field(
        ShippingMethodType,
        description="A shipping method with new list of excluded products.",
    )

    class Arguments:
        id = graphene.ID(required=True, description="ID of a shipping price.")
        products = graphene.List(
            graphene.ID,
            required=True,
            description="List of products which will be removed from excluded list.",
        )

    class Meta:
        description = "Remove product from excluded list for shipping price."
        permissions = (ShippingPermissions.MANAGE_SHIPPING,)
        error_type_class = ShippingError
        error_type_field = "shipping_errors"

    @classmethod
    def perform_mutation(cls, _root, info, **data):
        """Drop the given products from the exclusion set and return the method."""
        shipping_method = cls.get_node_or_error(
            info, data.get("id"), qs=models.ShippingMethod.objects
        )
        product_ids = data.get("products")
        if product_ids:
            product_db_ids = cls.get_global_ids_or_error(
                product_ids, product_types.Product, field="products"
            )
            shipping_method.excluded_products.set(
                shipping_method.excluded_products.exclude(id__in=product_db_ids)
            )
        # Bug fix: this previously returned ShippingPriceExcludeProducts —
        # the payload class of a *different* mutation. Return our own payload
        # so the response type matches the schema for this mutation.
        return ShippingPriceRemoveProductFromExclude(
            shipping_method=ChannelContext(node=shipping_method, channel_slug=None)
        )
| true | true |
f73a61a0ad6e9b3ccd18054a82008f2e8e8c1a0c | 2,401 | py | Python | ktrain/tests/test_chinese_text.py | sanidhya-singh/ktrain | f91f703e3ecd189c035a532590e6c6ec26a733a3 | [
"MIT"
] | null | null | null | ktrain/tests/test_chinese_text.py | sanidhya-singh/ktrain | f91f703e3ecd189c035a532590e6c6ec26a733a3 | [
"MIT"
] | null | null | null | ktrain/tests/test_chinese_text.py | sanidhya-singh/ktrain | f91f703e3ecd189c035a532590e6c6ec26a733a3 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
Tests of ktrain text classification flows
"""
import sys
import os
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID";
os.environ["CUDA_VISIBLE_DEVICES"]="0"
sys.path.insert(0,'../..')
import IPython
from unittest import TestCase, main, skip
import numpy as np
import ktrain
from ktrain import text as txt
TEST_DOC = '还好,床很大而且很干净,前台很友好,很满意,下次还来。'
class TestTextClassification(TestCase):
    """End-to-end test of ktrain text classification on Chinese hotel reviews."""

    def test_fasttext_chinese(self):
        """Train a fasttext classifier and exercise the full ktrain workflow:

        training, top-loss inspection, weight decay, save/load, validation,
        and predictor save/load/explain.
        """
        trn, val, preproc = txt.texts_from_csv('./text_data/chinese_hotel_reviews.csv',
                                               'content',
                                               label_columns = ["pos", "neg"],
                                               max_features=30000, maxlen=75,
                                               preprocess_mode='standard', sep='|')
        model = txt.text_classifier('fasttext', train_data=trn)
        learner = ktrain.get_learner(model, train_data=trn, val_data=val, batch_size=32)
        lr = 5e-3
        hist = learner.autofit(lr, 10)

        # test training results
        self.assertAlmostEqual(max(hist.history['lr']), lr)
        self.assertGreater(max(hist.history['val_acc']), 0.85)

        # test top losses
        obs = learner.top_losses(n=1, val_data=None)
        self.assertIn(obs[0][0], list(range(len(val[0]))))
        learner.view_top_losses(preproc=preproc, n=1, val_data=None)

        # test weight decay
        self.assertEqual(len(learner.get_weight_decay()), 2)
        self.assertEqual(learner.get_weight_decay()[0], None)
        learner.set_weight_decay(1e-4)
        self.assertAlmostEqual(learner.get_weight_decay()[0], 1e-4)

        # test load and save model
        learner.save_model('/tmp/test_model')
        learner.load_model('/tmp/test_model')

        # test validate
        cm = learner.validate()
        print(cm)
        for i, row in enumerate(cm):
            # the diagonal entry should dominate each confusion-matrix row
            self.assertEqual(np.argmax(row), i)

        # test predictor
        p = ktrain.get_predictor(learner.model, preproc)
        self.assertEqual(p.predict([TEST_DOC])[0], 'pos')
        p.save('/tmp/test_predictor')
        p = ktrain.load_predictor('/tmp/test_predictor')
        self.assertEqual(p.predict(TEST_DOC), 'pos')
        self.assertEqual(np.argmax(p.predict_proba([TEST_DOC])[0]), 0)
        self.assertEqual(type(p.explain(TEST_DOC)), IPython.core.display.HTML)
        #self.assertEqual(type(p.explain(TEST_DOC)), type(None))
if __name__ == "__main__":
    # Run the test suite when the module is executed directly.
    main()
| 34.3 | 88 | 0.638484 |
import sys
import os
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID";
os.environ["CUDA_VISIBLE_DEVICES"]="0"
sys.path.insert(0,'../..')
import IPython
from unittest import TestCase, main, skip
import numpy as np
import ktrain
from ktrain import text as txt
TEST_DOC = '还好,床很大而且很干净,前台很友好,很满意,下次还来。'
class TestTextClassification(TestCase):
    """End-to-end test of ktrain text classification on Chinese hotel reviews."""

    def test_fasttext_chinese(self):
        """Train a fasttext classifier and exercise the full ktrain workflow."""
        trn, val, preproc = txt.texts_from_csv('./text_data/chinese_hotel_reviews.csv',
                                               'content',
                                               label_columns = ["pos", "neg"],
                                               max_features=30000, maxlen=75,
                                               preprocess_mode='standard', sep='|')
        model = txt.text_classifier('fasttext', train_data=trn)
        learner = ktrain.get_learner(model, train_data=trn, val_data=val, batch_size=32)
        lr = 5e-3
        hist = learner.autofit(lr, 10)
        self.assertAlmostEqual(max(hist.history['lr']), lr)
        self.assertGreater(max(hist.history['val_acc']), 0.85)
        obs = learner.top_losses(n=1, val_data=None)
        self.assertIn(obs[0][0], list(range(len(val[0]))))
        learner.view_top_losses(preproc=preproc, n=1, val_data=None)
        self.assertEqual(len(learner.get_weight_decay()), 2)
        self.assertEqual(learner.get_weight_decay()[0], None)
        learner.set_weight_decay(1e-4)
        self.assertAlmostEqual(learner.get_weight_decay()[0], 1e-4)
        learner.save_model('/tmp/test_model')
        learner.load_model('/tmp/test_model')
        cm = learner.validate()
        print(cm)
        for i, row in enumerate(cm):
            # the diagonal entry should dominate each confusion-matrix row
            self.assertEqual(np.argmax(row), i)
        p = ktrain.get_predictor(learner.model, preproc)
        self.assertEqual(p.predict([TEST_DOC])[0], 'pos')
        p.save('/tmp/test_predictor')
        p = ktrain.load_predictor('/tmp/test_predictor')
        self.assertEqual(p.predict(TEST_DOC), 'pos')
        self.assertEqual(np.argmax(p.predict_proba([TEST_DOC])[0]), 0)
        self.assertEqual(type(p.explain(TEST_DOC)), IPython.core.display.HTML)
if __name__ == "__main__":
    # Run the test suite when the module is executed directly.
    main()
| true | true |
f73a6273326d909f787929426158b94030c8a19b | 21,479 | py | Python | test/integration/ggrc/converters/test_export_csv.py | ks-manish/ggrc-core | f9499236e0c6d2e29ff9d2acf403fdecd9c8a173 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | test/integration/ggrc/converters/test_export_csv.py | ks-manish/ggrc-core | f9499236e0c6d2e29ff9d2acf403fdecd9c8a173 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | test/integration/ggrc/converters/test_export_csv.py | ks-manish/ggrc-core | f9499236e0c6d2e29ff9d2acf403fdecd9c8a173 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # Copyright (C) 2019 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
from os.path import abspath, dirname, join
import collections
import ddt
from flask.json import dumps
from ggrc.converters import get_importables
from ggrc.models import inflector, all_models
from ggrc.models.mixins import ScopeObject
from ggrc.models.reflection import AttributeInfo
from integration.ggrc import TestCase
from integration.ggrc.models import factories
THIS_ABS_PATH = abspath(dirname(__file__))
CSV_DIR = join(THIS_ABS_PATH, 'test_csvs/')
@ddt.ddt
class TestExportEmptyTemplate(TestCase):
    """Tests for export of import templates."""

    def setUp(self):
        # Log in and prepare the headers the export service requires.
        self.client.get("/login")
        self.headers = {
            'Content-Type': 'application/json',
            "X-Requested-By": "GGRC",
            "X-export-view": "blocks",
        }

    def test_basic_policy_template(self):
        """An empty template for one object type contains its column headers."""
        data = {
            "export_to": "csv",
            "objects": [{"object_name": "Policy", "fields": "all"}]
        }
        response = self.client.post("/_service/export_csv",
                                    data=dumps(data), headers=self.headers)
        self.assertEqual(response.status_code, 200)
        self.assertIn("Title*", response.data)
        self.assertIn("Policy", response.data)

    def test_multiple_empty_objects(self):
        """An empty template for several types contains blocks for all of them."""
        data = {
            "export_to": "csv",
            "objects": [
                {"object_name": "Policy", "fields": "all"},
                {"object_name": "Regulation", "fields": "all"},
                {"object_name": "Requirement", "fields": "all"},
                {"object_name": "OrgGroup", "fields": "all"},
                {"object_name": "Contract", "fields": "all"},
            ],
        }
        response = self.client.post("/_service/export_csv",
                                    data=dumps(data), headers=self.headers)
        self.assertEqual(response.status_code, 200)
        self.assertIn("Title*", response.data)
        self.assertIn("Policy", response.data)
        self.assertIn("Regulation", response.data)
        self.assertIn("Contract", response.data)
        self.assertIn("Requirement", response.data)
        self.assertIn("Org Group", response.data)

    @ddt.data("Assessment", "Issue")
    def test_ticket_tracker_field_order(self, model):
        """Tests if Ticket Tracker fields come before mapped objects."""
        data = {
            "export_to": "csv",
            "objects": [
                {"object_name": model, "fields": "all"},
            ],
        }
        response = self.client.post("/_service/export_csv",
                                    data=dumps(data), headers=self.headers)
        ticket_tracker_fields = ["Ticket Tracker", "Component ID",
                                 "Integration Enabled", "Hotlist ID",
                                 "Priority", "Severity", "Issue Title",
                                 "Issue Type"]
        # NOTE(review): str.find returns -1 for a missing field, which would
        # also satisfy "< first_mapping_field_pos" — assumes every listed
        # field is present in the response.
        first_mapping_field_pos = response.data.find("map:")
        for field in ticket_tracker_fields:
            self.assertEquals(response.data.find(field) < first_mapping_field_pos,
                              True)
class TestExportSingleObject(TestCase):
    """Export tests filtering a single object type per query block."""

    def setUp(self):
        super(TestExportSingleObject, self).setUp()
        # Log in and prepare the headers the export service requires.
        self.client.get("/login")
        self.headers = {
            'Content-Type': 'application/json',
            "X-Requested-By": "GGRC",
            "X-export-view": "blocks",
        }

    def test_simple_export_query(self):
        """Test simple export query."""
        response = self._import_file("data_for_export_testing_program.csv")
        self._check_csv_response(response, {})
        # Exact-match filter: only one program should be exported.
        data = [{
            "object_name": "Program",
            "filters": {
                "expression": {
                    "left": "title",
                    "op": {"name": "="},
                    "right": "Cat ipsum 1",
                },
            },
            "fields": "all",
        }]
        response = self.export_csv(data)
        expected = set([1])
        for i in range(1, 24):
            if i in expected:
                self.assertIn(",Cat ipsum {},".format(i), response.data)
            else:
                self.assertNotIn(",Cat ipsum {},".format(i), response.data)
        # Contains filter ("~"): every title containing "1" should match.
        data = [{
            "object_name": "Program",
            "filters": {
                "expression": {
                    "left": "title",
                    "op": {"name": "~"},
                    "right": "1",
                },
            },
            "fields": "all",
        }]
        response = self.export_csv(data)
        expected = set([1, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 21])
        for i in range(1, 24):
            if i in expected:
                self.assertIn(",Cat ipsum {},".format(i), response.data)
            else:
                self.assertNotIn(",Cat ipsum {},".format(i), response.data)

    def test_and_export_query(self):
        """Test export query with AND clause."""
        response = self._import_file("data_for_export_testing_program.csv")
        self._check_csv_response(response, {})
        # Titles that contain "1" but do not contain "2".
        data = [{
            "object_name": "Program",
            "filters": {
                "expression": {
                    "left": {
                        "left": "title",
                        "op": {"name": "!~"},
                        "right": "2",
                    },
                    "op": {"name": "AND"},
                    "right": {
                        "left": "title",
                        "op": {"name": "~"},
                        "right": "1",
                    },
                },
            },
            "fields": "all",
        }]
        response = self.export_csv(data)
        expected = set([1, 10, 11, 13, 14, 15, 16, 17, 18, 19])
        for i in range(1, 24):
            if i in expected:
                self.assertIn(",Cat ipsum {},".format(i), response.data)
            else:
                self.assertNotIn(",Cat ipsum {},".format(i), response.data)

    def test_simple_relevant_query(self):
        """Test simple relevant query"""
        self.import_file("data_for_export_testing_program_contract.csv")
        # Programs mapped to either of the two contracts.
        data = [{
            "object_name": "Program",
            "filters": {
                "expression": {
                    "op": {"name": "relevant"},
                    "object_name": "Contract",
                    "slugs": ["contract-25", "contract-40"],
                },
            },
            "fields": "all",
        }]
        response = self.export_csv(data)
        expected = set([1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 13, 14, 16])
        for i in range(1, 24):
            if i in expected:
                self.assertIn(",Cat ipsum {},".format(i), response.data)
            else:
                self.assertNotIn(",Cat ipsum {},".format(i), response.data)

    def test_program_audit_relevant_query(self):
        """Chained query: audits relevant to a program found via an audit."""
        response = self._import_file("data_for_export_testing_program_audit.csv")
        self._check_csv_response(response, {})
        data = [{  # should return just program prog-1
            "object_name": "Program",
            "filters": {
                "expression": {
                    "op": {"name": "relevant"},
                    "object_name": "Audit",
                    "slugs": ["au-1"],
                },
            },
            "fields": "all",
        }, {  # Audits : au-1, au-3, au-5, au-7,
            "object_name": "Audit",
            "filters": {
                "expression": {
                    "op": {"name": "relevant"},
                    # "__previous__" references the result of an earlier block.
                    "object_name": "__previous__",
                    "ids": ["0"],
                },
            },
            "fields": "all",
        }]
        response = self.export_csv(data)
        self.assertIn(",Cat ipsum 1,", response.data)
        expected = set([1, 3, 5, 7])
        for i in range(1, 14):
            if i in expected:
                self.assertIn(",Audit {},".format(i), response.data)
            else:
                self.assertNotIn(",Audit {},".format(i), response.data)

    def test_requirement_policy_relevant_query(self):
        """Test requirement policy relevant query"""
        response = self._import_file("data_for_export_testing_directives.csv")
        self._check_csv_response(response, {})
        # Six independent blocks; the inline comments note the expected hit.
        data = [{  # sec-1
            "object_name": "Requirement",
            "filters": {
                "expression": {
                    "op": {"name": "relevant"},
                    "object_name": "Policy",
                    "slugs": ["p1"],
                },
            },
            "fields": "all",
        }, {  # p3
            "object_name": "Policy",
            "filters": {
                "expression": {
                    "op": {"name": "relevant"},
                    "object_name": "Requirement",
                    "slugs": ["sec-3"],
                },
            },
            "fields": "all",
        }, {  # sec-8
            "object_name": "Requirement",
            "filters": {
                "expression": {
                    "op": {"name": "relevant"},
                    "object_name": "Standard",
                    "slugs": ["std-1"],
                },
            },
            "fields": "all",
        }, {  # std-3
            "object_name": "Standard",
            "filters": {
                "expression": {
                    "op": {"name": "relevant"},
                    "object_name": "Requirement",
                    "slugs": ["sec-10"],
                },
            },
            "fields": "all",
        }, {  # sec-5
            "object_name": "Requirement",
            "filters": {
                "expression": {
                    "op": {"name": "relevant"},
                    "object_name": "Regulation",
                    "slugs": ["reg-2"],
                },
            },
            "fields": "all",
        }, {  # reg-1
            "object_name": "Regulation",
            "filters": {
                "expression": {
                    "op": {"name": "relevant"},
                    "object_name": "Requirement",
                    "slugs": ["sec-4"],
                },
            },
            "fields": "all",
        }]
        response = self.export_csv(data)
        titles = [",mapped section {},".format(i) for i in range(1, 11)]
        titles.extend([",mapped reg {},".format(i) for i in range(1, 11)])
        titles.extend([",mapped policy {},".format(i) for i in range(1, 11)])
        titles.extend([",mapped standard {},".format(i) for i in range(1, 11)])
        expected = set([
            ",mapped section 1,",
            ",mapped section 5,",
            ",mapped section 8,",
            ",mapped reg 1,",
            ",mapped standard 3,",
            ",mapped policy 3,",
        ])
        for title in titles:
            if title in expected:
                self.assertIn(title, response.data, "'{}' not found".format(title))
            else:
                self.assertNotIn(title, response.data, "'{}' was found".format(title))

    def test_multiple_relevant_query(self):
        """Programs relevant to a policy AND to one of two contracts."""
        response = self._import_file(
            "data_for_export_testing_program_policy_contract.csv")
        self._check_csv_response(response, {})
        data = [{
            "object_name": "Program",
            "filters": {
                "expression": {
                    "left": {
                        "op": {"name": "relevant"},
                        "object_name": "Policy",
                        "slugs": ["policy-3"],
                    },
                    "op": {"name": "AND"},
                    "right": {
                        "op": {"name": "relevant"},
                        "object_name": "Contract",
                        "slugs": ["contract-25", "contract-40"],
                    },
                },
            },
            "fields": "all",
        }]
        response = self.export_csv(data)
        expected = set([1, 2, 4, 8, 10, 11, 13])
        for i in range(1, 24):
            if i in expected:
                self.assertIn(",Cat ipsum {},".format(i), response.data)
            else:
                self.assertNotIn(",Cat ipsum {},".format(i), response.data)

    def test_query_all_aliases(self):
        """Smoke-test an "=" export filter for every alias of every model."""
        def rhs(model, attr):
            # Pick a comparison value matching the column type: "1" for
            # booleans, a date string otherwise.
            attr = getattr(model, attr, None)
            if attr is not None and hasattr(attr, "_query_clause_element"):
                class_name = attr._query_clause_element().type.__class__.__name__
                if class_name == "Boolean":
                    return "1"
            return "1/1/2015"

        def data(model, attr, field):
            # Build a single-block export request filtering on ``field``.
            return [{
                "object_name": model.__name__,
                "fields": "all",
                "filters": {
                    "expression": {
                        "left": field.lower(),
                        "op": {"name": "="},
                        "right": rhs(model, attr)
                    },
                }
            }]

        failed = set()
        for model in set(get_importables().values()):
            for attr, field in AttributeInfo(model)._aliases.items():
                if field is None:
                    continue
                try:
                    # Aliases may be plain strings or dicts with display names.
                    field = field["display_name"] if type(field) is dict else field
                    res = self.export_csv(data(model, attr, field))
                    self.assertEqual(res.status_code, 200)
                except Exception as e:
                    # Collect all failures so a single run reports every
                    # broken alias at once.
                    failed.add((model, attr, field, e))
        self.assertEqual(sorted(failed), [])
@ddt.ddt
class TestExportMultipleObjects(TestCase):
    """Export tests combining several object types in one request."""

    def setUp(self):
        super(TestExportMultipleObjects, self).setUp()
        # Log in and prepare the headers the export service requires.
        self.client.get("/login")
        self.headers = {
            'Content-Type': 'application/json',
            "X-Requested-By": "GGRC",
            "X-export-view": "blocks",
        }

    def test_simple_multi_export(self):
        """Test basic import of multiple objects"""
        match = 1
        with factories.single_commit():
            programs = [factories.ProgramFactory().title for i in range(3)]
            regulations = [factories.RegulationFactory().title for i in range(3)]
        data = [{
            "object_name": "Program",  # prog-1
            "filters": {
                "expression": {
                    "left": "title",
                    "op": {"name": "="},
                    "right": programs[match]
                },
            },
            "fields": "all",
        }, {
            "object_name": "Regulation",  # regulation-9000
            "filters": {
                "expression": {
                    "left": "title",
                    "op": {"name": "="},
                    "right": regulations[match]
                },
            },
            "fields": "all",
        }]
        response = self.export_csv(data)
        # Only the object at index ``match`` of each type should be exported.
        for i in range(3):
            if i == match:
                self.assertIn(programs[i], response.data)
                self.assertIn(regulations[i], response.data)
            else:
                self.assertNotIn(programs[i], response.data)
                self.assertNotIn(regulations[i], response.data)

    def test_exportable_items(self):
        """Test multi export with exportable items."""
        with factories.single_commit():
            program = factories.ProgramFactory()
            regulation = factories.RegulationFactory()
        data = [{
            "object_name": "Program",
            "filters": {
                "expression": {
                    "left": "title",
                    "op": {"name": "="},
                    "right": program.title
                },
            },
            "fields": "all",
        }, {
            "object_name": "Regulation",
            "filters": {
                "expression": {
                    "left": "title",
                    "op": {"name": "="},
                    "right": regulation.title
                },
            },
            "fields": "all",
        }]
        # Only the block at index 1 (Regulation) is requested for export.
        response = self.export_csv(
            data,
            exportable_objects=[1]
        )
        response_data = response.data
        self.assertIn(regulation.title, response_data)
        self.assertNotIn(program.title, response_data)

    def test_exportable_items_incorrect(self):
        """Test export with exportable items and incorrect index"""
        with factories.single_commit():
            program = factories.ProgramFactory()
            regulation = factories.RegulationFactory()
        data = [{
            "object_name": "Program",
            "filters": {
                "expression": {
                    "left": "title",
                    "op": {"name": "="},
                    "right": program.title
                },
            },
            "fields": "all",
        }, {
            "object_name": "Regulation",
            "filters": {
                "expression": {
                    "left": "title",
                    "op": {"name": "="},
                    "right": regulation.title
                },
            },
            "fields": "all",
        }]
        # Index 3 is out of range for the two blocks -> empty export.
        response = self.export_csv(
            data,
            exportable_objects=[3]
        )
        response_data = response.data
        self.assertEquals(response_data, "")

    def test_relevant_to_previous_export(self):
        """Test relevant to previous export"""
        res = self._import_file("data_for_export_testing_relevant_previous.csv")
        self._check_csv_response(res, {})
        data = [{
            "object_name": "Program",  # prog-1, prog-23
            "filters": {
                "expression": {
                    "left": {
                        "left": "title",
                        "op": {"name": "="},
                        "right": "cat ipsum 1"
                    },
                    "op": {"name": "OR"},
                    "right": {
                        "left": "title",
                        "op": {"name": "="},
                        "right": "cat ipsum 23"
                    },
                },
            },
            "fields": ["slug", "title", "description"],
        }, {
            "object_name": "Contract",  # contract-25, contract-27, contract-47
            "filters": {
                "expression": {
                    "op": {"name": "relevant"},
                    # relevant to the Program block above (index 0)
                    "object_name": "__previous__",
                    "ids": ["0"],
                },
            },
            "fields": ["slug", "title", "description"],
        }, {
            "object_name": "Risk",  # risk-3, risk-4, risk-5
            "filters": {
                "expression": {
                    "left": {
                        "op": {"name": "relevant"},
                        "object_name": "__previous__",
                        "ids": ["0"],
                    },
                    "op": {"name": "AND"},
                    "right": {
                        "left": {
                            "left": "code",
                            "op": {"name": "!~"},
                            "right": "1"
                        },
                        "op": {"name": "AND"},
                        "right": {
                            "left": "code",
                            "op": {"name": "!~"},
                            "right": "2"
                        },
                    },
                },
            },
            "fields": ["slug", "title", "description"],
        }, {
            "object_name": "Policy",  # policy - 3, 4, 5, 6, 15, 16
            "filters": {
                "expression": {
                    "left": {
                        "op": {"name": "relevant"},
                        "object_name": "__previous__",
                        "ids": ["0"],
                    },
                    "op": {"name": "AND"},
                    "right": {
                        "op": {"name": "relevant"},
                        "object_name": "__previous__",
                        "ids": ["2"],
                    },
                },
            },
            "fields": ["slug", "title", "description"],
        }
        ]
        response = self.export_csv(data)
        # programs
        for i in range(1, 24):
            if i in (1, 23):
                self.assertIn(",Cat ipsum {},".format(i), response.data)
            else:
                self.assertNotIn(",Cat ipsum {},".format(i), response.data)
        # contracts
        # NOTE(review): the block comment above names contract-25/27/47 while
        # the assertions expect con 5/15/115 — slugs vs. titles, presumably.
        for i in range(5, 121, 5):
            if i in (5, 15, 115):
                self.assertIn(",con {},".format(i), response.data)
            else:
                self.assertNotIn(",con {},".format(i), response.data)
        # risks (this loop checks the Risk block; an older comment said
        # "controls")
        for i in range(115, 140):
            if i in (117, 118, 119):
                self.assertIn(",Startupsum {},".format(i), response.data)
            else:
                self.assertNotIn(",Startupsum {},".format(i), response.data)
        # policies
        for i in range(5, 25):
            if i in (7, 8, 9, 10, 19, 20):
                self.assertIn(",Cheese ipsum ch {},".format(i), response.data)
            else:
                self.assertNotIn(",Cheese ipsum ch {},".format(i), response.data)

    # Names of all models mixing in ScopeObject; used to decide which import
    # rows need Assignee/Verifier columns.
    SCOPING_MODELS_NAMES = [m.__name__ for m in all_models.all_models
                            if issubclass(m, ScopeObject)]

    @ddt.data(
        "Assessment",
        "Policy",
        "Regulation",
        "Standard",
        "Contract",
        "Requirement",
        "Objective",
        "Product",
        "System",
        "Process",
        "Access Group",
        "Data Asset",
        "Facility",
        "Market",
        "Org Group",
        "Project",
        "Vendor",
        "Risk Assessment",
        "Risk",
        "Threat",
        "Key Report",
    )
    def test_asmnt_procedure_export(self, model):
        """Test export of Assessment Procedure. {}"""
        with factories.single_commit():
            program = factories.ProgramFactory()
            audit = factories.AuditFactory(program=program)
        import_queries = []
        for i in range(3):
            import_queries.append(collections.OrderedDict([
                ("object_type", model),
                ("Assessment Procedure", "Procedure-{}".format(i)),
                ("Title", "Title {}".format(i)),
                ("Code*", "{}-{}".format(model, i)),
                ("Admin", "user@example.com"),
                ("Assignees", "user@example.com"),
                ("Creators", "user@example.com"),
                ("Description", "{} description".format(model)),
                ("Program", program.slug),
                ("Audit", audit.slug),
                ("Start Date", ""),
                ("End Date", ""),
            ]))
            if model == "Risk":
                # Risk rows require a Risk Type column.
                import_queries[-1]["Risk Type"] = "Risk type"
            if model.replace(" ", "") in self.SCOPING_MODELS_NAMES:
                # Scope objects additionally require Assignee/Verifier.
                import_queries[-1]["Assignee"] = "user@example.com"
                import_queries[-1]["Verifier"] = "user@example.com"
        self.check_import_errors(self.import_data(*import_queries))
        model_cls = inflector.get_model(model)
        objects = model_cls.query.order_by(model_cls.test_plan).all()
        self.assertEqual(len(objects), 3)
        for num, obj in enumerate(objects):
            self.assertEqual(obj.test_plan, "Procedure-{}".format(num))
        obj_dicts = [
            {
                "Code*": obj.slug,
                "Assessment Procedure": "Procedure-{}".format(i)
            } for i, obj in enumerate(objects)
        ]
        search_request = [{
            "object_name": model_cls.__name__,
            "filters": {
                "expression": {},
                "order_by": {"name": "id"}
            },
            "fields": ["slug", "test_plan"],
        }]
        exported_data = self.export_parsed_csv(search_request)[model]
        self.assertEqual(exported_data, obj_dicts)
| 31.128986 | 78 | 0.492528 |
from os.path import abspath, dirname, join
import collections
import ddt
from flask.json import dumps
from ggrc.converters import get_importables
from ggrc.models import inflector, all_models
from ggrc.models.mixins import ScopeObject
from ggrc.models.reflection import AttributeInfo
from integration.ggrc import TestCase
from integration.ggrc.models import factories
THIS_ABS_PATH = abspath(dirname(__file__))
CSV_DIR = join(THIS_ABS_PATH, 'test_csvs/')
@ddt.ddt
class TestExportEmptyTemplate(TestCase):
    """Tests for export of import templates."""

    def setUp(self):
        # Log in and prepare the headers the export service requires.
        self.client.get("/login")
        self.headers = {
            'Content-Type': 'application/json',
            "X-Requested-By": "GGRC",
            "X-export-view": "blocks",
        }

    def test_basic_policy_template(self):
        """An empty template for one object type contains its column headers."""
        data = {
            "export_to": "csv",
            "objects": [{"object_name": "Policy", "fields": "all"}]
        }
        response = self.client.post("/_service/export_csv",
                                    data=dumps(data), headers=self.headers)
        self.assertEqual(response.status_code, 200)
        self.assertIn("Title*", response.data)
        self.assertIn("Policy", response.data)

    def test_multiple_empty_objects(self):
        """An empty template for several types contains blocks for all of them."""
        data = {
            "export_to": "csv",
            "objects": [
                {"object_name": "Policy", "fields": "all"},
                {"object_name": "Regulation", "fields": "all"},
                {"object_name": "Requirement", "fields": "all"},
                {"object_name": "OrgGroup", "fields": "all"},
                {"object_name": "Contract", "fields": "all"},
            ],
        }
        response = self.client.post("/_service/export_csv",
                                    data=dumps(data), headers=self.headers)
        self.assertEqual(response.status_code, 200)
        self.assertIn("Title*", response.data)
        self.assertIn("Policy", response.data)
        self.assertIn("Regulation", response.data)
        self.assertIn("Contract", response.data)
        self.assertIn("Requirement", response.data)
        self.assertIn("Org Group", response.data)

    @ddt.data("Assessment", "Issue")
    def test_ticket_tracker_field_order(self, model):
        """Ticket Tracker fields must precede mapped-object columns."""
        data = {
            "export_to": "csv",
            "objects": [
                {"object_name": model, "fields": "all"},
            ],
        }
        response = self.client.post("/_service/export_csv",
                                    data=dumps(data), headers=self.headers)
        ticket_tracker_fields = ["Ticket Tracker", "Component ID",
                                 "Integration Enabled", "Hotlist ID",
                                 "Priority", "Severity", "Issue Title",
                                 "Issue Type"]
        first_mapping_field_pos = response.data.find("map:")
        for field in ticket_tracker_fields:
            self.assertEquals(response.data.find(field) < first_mapping_field_pos,
                              True)
class TestExportSingleObject(TestCase):
  """Tests for exporting a single object type with various query filters."""

  def setUp(self):
    super(TestExportSingleObject, self).setUp()
    self.client.get("/login")
    self.headers = {
        'Content-Type': 'application/json',
        "X-Requested-By": "GGRC",
        "X-export-view": "blocks",
    }

  def test_simple_export_query(self):
    """Exact-match ("=") and contains ("~") title filters."""
    response = self._import_file("data_for_export_testing_program.csv")
    self._check_csv_response(response, {})
    data = [{
        "object_name": "Program",
        "filters": {
            "expression": {
                "left": "title",
                "op": {"name": "="},
                "right": "Cat ipsum 1",
            },
        },
        "fields": "all",
    }]
    response = self.export_csv(data)
    expected = set([1])
    for i in range(1, 24):
      if i in expected:
        self.assertIn(",Cat ipsum {},".format(i), response.data)
      else:
        self.assertNotIn(",Cat ipsum {},".format(i), response.data)
    data = [{
        "object_name": "Program",
        "filters": {
            "expression": {
                "left": "title",
                "op": {"name": "~"},
                "right": "1",
            },
        },
        "fields": "all",
    }]
    response = self.export_csv(data)
    # Every program whose title contains the digit "1".
    expected = set([1, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 21])
    for i in range(1, 24):
      if i in expected:
        self.assertIn(",Cat ipsum {},".format(i), response.data)
      else:
        self.assertNotIn(",Cat ipsum {},".format(i), response.data)

  def test_and_export_query(self):
    """Combined filter: title contains "1" AND does not contain "2"."""
    response = self._import_file("data_for_export_testing_program.csv")
    self._check_csv_response(response, {})
    data = [{
        "object_name": "Program",
        "filters": {
            "expression": {
                "left": {
                    "left": "title",
                    "op": {"name": "!~"},
                    "right": "2",
                },
                "op": {"name": "AND"},
                "right": {
                    "left": "title",
                    "op": {"name": "~"},
                    "right": "1",
                },
            },
        },
        "fields": "all",
    }]
    response = self.export_csv(data)
    expected = set([1, 10, 11, 13, 14, 15, 16, 17, 18, 19])
    for i in range(1, 24):
      if i in expected:
        self.assertIn(",Cat ipsum {},".format(i), response.data)
      else:
        self.assertNotIn(",Cat ipsum {},".format(i), response.data)

  def test_simple_relevant_query(self):
    """Export programs relevant to (mapped to) given contracts."""
    self.import_file("data_for_export_testing_program_contract.csv")
    data = [{
        "object_name": "Program",
        "filters": {
            "expression": {
                "op": {"name": "relevant"},
                "object_name": "Contract",
                "slugs": ["contract-25", "contract-40"],
            },
        },
        "fields": "all",
    }]
    response = self.export_csv(data)
    expected = set([1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 13, 14, 16])
    for i in range(1, 24):
      if i in expected:
        self.assertIn(",Cat ipsum {},".format(i), response.data)
      else:
        self.assertNotIn(",Cat ipsum {},".format(i), response.data)

  def test_program_audit_relevant_query(self):
    """Chained relevant filters: audits relevant to a previous block."""
    response = self._import_file("data_for_export_testing_program_audit.csv")
    self._check_csv_response(response, {})
    data = [{
        "object_name": "Program",
        "filters": {
            "expression": {
                "op": {"name": "relevant"},
                "object_name": "Audit",
                "slugs": ["au-1"],
            },
        },
        "fields": "all",
    }, {
        "object_name": "Audit",
        "filters": {
            "expression": {
                "op": {"name": "relevant"},
                # "__previous__" refers to an earlier block in the same
                # export request, by zero-based index in "ids".
                "object_name": "__previous__",
                "ids": ["0"],
            },
        },
        "fields": "all",
    }]
    response = self.export_csv(data)
    self.assertIn(",Cat ipsum 1,", response.data)
    expected = set([1, 3, 5, 7])
    for i in range(1, 14):
      if i in expected:
        self.assertIn(",Audit {},".format(i), response.data)
      else:
        self.assertNotIn(",Audit {},".format(i), response.data)

  def test_requirement_policy_relevant_query(self):
    """Relevant filters between requirements and each directive type."""
    response = self._import_file("data_for_export_testing_directives.csv")
    self._check_csv_response(response, {})
    data = [{
        "object_name": "Requirement",
        "filters": {
            "expression": {
                "op": {"name": "relevant"},
                "object_name": "Policy",
                "slugs": ["p1"],
            },
        },
        "fields": "all",
    }, {
        "object_name": "Policy",
        "filters": {
            "expression": {
                "op": {"name": "relevant"},
                "object_name": "Requirement",
                "slugs": ["sec-3"],
            },
        },
        "fields": "all",
    }, {
        "object_name": "Requirement",
        "filters": {
            "expression": {
                "op": {"name": "relevant"},
                "object_name": "Standard",
                "slugs": ["std-1"],
            },
        },
        "fields": "all",
    }, {
        "object_name": "Standard",
        "filters": {
            "expression": {
                "op": {"name": "relevant"},
                "object_name": "Requirement",
                "slugs": ["sec-10"],
            },
        },
        "fields": "all",
    }, {
        "object_name": "Requirement",
        "filters": {
            "expression": {
                "op": {"name": "relevant"},
                "object_name": "Regulation",
                "slugs": ["reg-2"],
            },
        },
        "fields": "all",
    }, {
        "object_name": "Regulation",
        "filters": {
            "expression": {
                "op": {"name": "relevant"},
                "object_name": "Requirement",
                "slugs": ["sec-4"],
            },
        },
        "fields": "all",
    }]
    response = self.export_csv(data)
    titles = [",mapped section {},".format(i) for i in range(1, 11)]
    titles.extend([",mapped reg {},".format(i) for i in range(1, 11)])
    titles.extend([",mapped policy {},".format(i) for i in range(1, 11)])
    titles.extend([",mapped standard {},".format(i) for i in range(1, 11)])
    expected = set([
        ",mapped section 1,",
        ",mapped section 5,",
        ",mapped section 8,",
        ",mapped reg 1,",
        ",mapped standard 3,",
        ",mapped policy 3,",
    ])
    for title in titles:
      if title in expected:
        self.assertIn(title, response.data, "'{}' not found".format(title))
      else:
        self.assertNotIn(title, response.data, "'{}' was found".format(title))

  def test_multiple_relevant_query(self):
    """AND of two relevant filters on different mapped object types."""
    response = self._import_file(
        "data_for_export_testing_program_policy_contract.csv")
    self._check_csv_response(response, {})
    data = [{
        "object_name": "Program",
        "filters": {
            "expression": {
                "left": {
                    "op": {"name": "relevant"},
                    "object_name": "Policy",
                    "slugs": ["policy-3"],
                },
                "op": {"name": "AND"},
                "right": {
                    "op": {"name": "relevant"},
                    "object_name": "Contract",
                    "slugs": ["contract-25", "contract-40"],
                },
            },
        },
        "fields": "all",
    }]
    response = self.export_csv(data)
    expected = set([1, 2, 4, 8, 10, 11, 13])
    for i in range(1, 24):
      if i in expected:
        self.assertIn(",Cat ipsum {},".format(i), response.data)
      else:
        self.assertNotIn(",Cat ipsum {},".format(i), response.data)

  def test_query_all_aliases(self):
    """Filtering by every exportable alias of every model must not 500."""
    def rhs(model, attr):
      # Pick a right-hand-side value that parses for the column type:
      # "1" for booleans, a date string for everything else.
      attr = getattr(model, attr, None)
      if attr is not None and hasattr(attr, "_query_clause_element"):
        class_name = attr._query_clause_element().type.__class__.__name__
        if class_name == "Boolean":
          return "1"
      return "1/1/2015"

    def data(model, attr, field):
      return [{
          "object_name": model.__name__,
          "fields": "all",
          "filters": {
              "expression": {
                  "left": field.lower(),
                  "op": {"name": "="},
                  "right": rhs(model, attr)
              },
          }
      }]

    failed = set()
    for model in set(get_importables().values()):
      for attr, field in AttributeInfo(model)._aliases.items():
        if field is None:
          continue
        try:
          field = field["display_name"] if type(field) is dict else field
          res = self.export_csv(data(model, attr, field))
          self.assertEqual(res.status_code, 200)
        except Exception as e:
          # Collect all failures so the assertion below reports every
          # broken alias at once instead of stopping at the first.
          failed.add((model, attr, field, e))
    self.assertEqual(sorted(failed), [])
@ddt.ddt
class TestExportMultipleObjects(TestCase):
  """Tests for exporting several object types in one request."""

  def setUp(self):
    super(TestExportMultipleObjects, self).setUp()
    self.client.get("/login")
    self.headers = {
        'Content-Type': 'application/json',
        "X-Requested-By": "GGRC",
        "X-export-view": "blocks",
    }

  def test_simple_multi_export(self):
    """Two independent export blocks only return their matching rows."""
    match = 1
    with factories.single_commit():
      programs = [factories.ProgramFactory().title for i in range(3)]
      regulations = [factories.RegulationFactory().title for i in range(3)]
    data = [{
        "object_name": "Program",
        "filters": {
            "expression": {
                "left": "title",
                "op": {"name": "="},
                "right": programs[match]
            },
        },
        "fields": "all",
    }, {
        "object_name": "Regulation",
        "filters": {
            "expression": {
                "left": "title",
                "op": {"name": "="},
                "right": regulations[match]
            },
        },
        "fields": "all",
    }]
    response = self.export_csv(data)
    for i in range(3):
      if i == match:
        self.assertIn(programs[i], response.data)
        self.assertIn(regulations[i], response.data)
      else:
        self.assertNotIn(programs[i], response.data)
        self.assertNotIn(regulations[i], response.data)

  def test_exportable_items(self):
    """exportable_objects selects which blocks of the request are exported."""
    with factories.single_commit():
      program = factories.ProgramFactory()
      regulation = factories.RegulationFactory()
    data = [{
        "object_name": "Program",
        "filters": {
            "expression": {
                "left": "title",
                "op": {"name": "="},
                "right": program.title
            },
        },
        "fields": "all",
    }, {
        "object_name": "Regulation",
        "filters": {
            "expression": {
                "left": "title",
                "op": {"name": "="},
                "right": regulation.title
            },
        },
        "fields": "all",
    }]
    response = self.export_csv(
        data,
        exportable_objects=[1]
    )
    response_data = response.data
    # Only block index 1 (the regulation) should be exported.
    self.assertIn(regulation.title, response_data)
    self.assertNotIn(program.title, response_data)

  def test_exportable_items_incorrect(self):
    """An out-of-range exportable_objects index yields an empty export."""
    with factories.single_commit():
      program = factories.ProgramFactory()
      regulation = factories.RegulationFactory()
    data = [{
        "object_name": "Program",
        "filters": {
            "expression": {
                "left": "title",
                "op": {"name": "="},
                "right": program.title
            },
        },
        "fields": "all",
    }, {
        "object_name": "Regulation",
        "filters": {
            "expression": {
                "left": "title",
                "op": {"name": "="},
                "right": regulation.title
            },
        },
        "fields": "all",
    }]
    response = self.export_csv(
        data,
        exportable_objects=[3]
    )
    response_data = response.data
    self.assertEquals(response_data, "")

  def test_relevant_to_previous_export(self):
    """"__previous__" relevant filters can chain across export blocks."""
    res = self._import_file("data_for_export_testing_relevant_previous.csv")
    self._check_csv_response(res, {})
    data = [{
        "object_name": "Program",  # block 0
        "filters": {
            "expression": {
                "left": {
                    "left": "title",
                    "op": {"name": "="},
                    "right": "cat ipsum 1"
                },
                "op": {"name": "OR"},
                "right": {
                    "left": "title",
                    "op": {"name": "="},
                    "right": "cat ipsum 23"
                },
            },
        },
        "fields": ["slug", "title", "description"],
    }, {
        "object_name": "Contract",  # block 1: relevant to block 0
        "filters": {
            "expression": {
                "op": {"name": "relevant"},
                "object_name": "__previous__",
                "ids": ["0"],
            },
        },
        "fields": ["slug", "title", "description"],
    }, {
        "object_name": "Risk",  # block 2: relevant to block 0, code filters
        "filters": {
            "expression": {
                "left": {
                    "op": {"name": "relevant"},
                    "object_name": "__previous__",
                    "ids": ["0"],
                },
                "op": {"name": "AND"},
                "right": {
                    "left": {
                        "left": "code",
                        "op": {"name": "!~"},
                        "right": "1"
                    },
                    "op": {"name": "AND"},
                    "right": {
                        "left": "code",
                        "op": {"name": "!~"},
                        "right": "2"
                    },
                },
            },
        },
        "fields": ["slug", "title", "description"],
    }, {
        "object_name": "Policy",  # block 3: relevant to blocks 0 and 2
        "filters": {
            "expression": {
                "left": {
                    "op": {"name": "relevant"},
                    "object_name": "__previous__",
                    "ids": ["0"],
                },
                "op": {"name": "AND"},
                "right": {
                    "op": {"name": "relevant"},
                    "object_name": "__previous__",
                    "ids": ["2"],
                },
            },
        },
        "fields": ["slug", "title", "description"],
    }
    ]
    response = self.export_csv(data)
    for i in range(1, 24):  # programs
      if i in (1, 23):
        self.assertIn(",Cat ipsum {},".format(i), response.data)
      else:
        self.assertNotIn(",Cat ipsum {},".format(i), response.data)
    for i in range(5, 121, 5):  # contracts
      if i in (5, 15, 115):
        self.assertIn(",con {},".format(i), response.data)
      else:
        self.assertNotIn(",con {},".format(i), response.data)
    for i in range(115, 140):  # risks
      if i in (117, 118, 119):
        self.assertIn(",Startupsum {},".format(i), response.data)
      else:
        self.assertNotIn(",Startupsum {},".format(i), response.data)
    for i in range(5, 25):  # policies
      if i in (7, 8, 9, 10, 19, 20):
        self.assertIn(",Cheese ipsum ch {},".format(i), response.data)
      else:
        self.assertNotIn(",Cheese ipsum ch {},".format(i), response.data)

  # Names of all scoping models; they carry extra Assignee/Verifier columns.
  SCOPING_MODELS_NAMES = [m.__name__ for m in all_models.all_models
                          if issubclass(m, ScopeObject)]

  @ddt.data(
      "Assessment",
      "Policy",
      "Regulation",
      "Standard",
      "Contract",
      "Requirement",
      "Objective",
      "Product",
      "System",
      "Process",
      "Access Group",
      "Data Asset",
      "Facility",
      "Market",
      "Org Group",
      "Project",
      "Vendor",
      "Risk Assessment",
      "Risk",
      "Threat",
      "Key Report",
  )
  def test_asmnt_procedure_export(self, model):
    """Test export of Assessment Procedure for each importable model."""
    with factories.single_commit():
      program = factories.ProgramFactory()
      audit = factories.AuditFactory(program=program)
    import_queries = []
    for i in range(3):
      import_queries.append(collections.OrderedDict([
          ("object_type", model),
          ("Assessment Procedure", "Procedure-{}".format(i)),
          ("Title", "Title {}".format(i)),
          ("Code*", "{}-{}".format(model, i)),
          ("Admin", "user@example.com"),
          ("Assignees", "user@example.com"),
          ("Creators", "user@example.com"),
          ("Description", "{} description".format(model)),
          ("Program", program.slug),
          ("Audit", audit.slug),
          ("Start Date", ""),
          ("End Date", ""),
      ]))
      if model == "Risk":
        import_queries[-1]["Risk Type"] = "Risk type"
      if model.replace(" ", "") in self.SCOPING_MODELS_NAMES:
        import_queries[-1]["Assignee"] = "user@example.com"
        import_queries[-1]["Verifier"] = "user@example.com"

    self.check_import_errors(self.import_data(*import_queries))

    model_cls = inflector.get_model(model)
    objects = model_cls.query.order_by(model_cls.test_plan).all()
    self.assertEqual(len(objects), 3)
    for num, obj in enumerate(objects):
      self.assertEqual(obj.test_plan, "Procedure-{}".format(num))

    obj_dicts = [
        {
            "Code*": obj.slug,
            "Assessment Procedure": "Procedure-{}".format(i)
        } for i, obj in enumerate(objects)
    ]
    search_request = [{
        "object_name": model_cls.__name__,
        "filters": {
            "expression": {},
            "order_by": {"name": "id"}
        },
        "fields": ["slug", "test_plan"],
    }]
    exported_data = self.export_parsed_csv(search_request)[model]
    self.assertEqual(exported_data, obj_dicts)
| true | true |
f73a62d4245f9cbc2e9632560d6ca5d3bcc4a269 | 1,406 | py | Python | Accidents/3_import_excel_tables.py | jp7492code/multiple-linear-regression | 7ec6b47554d238f6c5c52adde71d75893c83a6c1 | [
"BSD-3-Clause"
] | null | null | null | Accidents/3_import_excel_tables.py | jp7492code/multiple-linear-regression | 7ec6b47554d238f6c5c52adde71d75893c83a6c1 | [
"BSD-3-Clause"
] | null | null | null | Accidents/3_import_excel_tables.py | jp7492code/multiple-linear-regression | 7ec6b47554d238f6c5c52adde71d75893c83a6c1 | [
"BSD-3-Clause"
] | null | null | null | #!/bin/env python2.7
# import the two lookup tables from the excel file
# This requires xlrd.
# 1) open terminal
# 2) pip install xlrd
# if you get "ImportError", pip install -U pip setuptools then repeat
import sqlite3 # provides python with a library for sqlite
import xlrd
SQLITE_FILE = "UKRoadData.sqlite"
# opens sqlite and a database file
conn = sqlite3.connect(SQLITE_FILE)
# provides a connection to the database
myCursor = conn.cursor()
# open "Road-Accident-Safety-Data-Guide.xls"
roadDataXLS = xlrd.open_workbook(filename="Road-Accident-Safety-Data-Guide.xls")
##########################
# load the accident severity into the database
myCursor.execute("CREATE TABLE `accident_severity` ('Code','Label')")
accidentRows = roadDataXLS.sheet_by_name("Accident Severity").get_rows()
for arow in accidentRows:
if arow[0].value == "code": continue
theValues = (int(arow[0].value),arow[1].value)
myCursor.execute("INSERT INTO 'accident_severity' VALUES (?,?) ",theValues)
##########################
# load the vehicle type into the database
myCursor.execute("CREATE TABLE `vehicle_type` ('Code','Label')")
accidentRows = roadDataXLS.sheet_by_name("Vehicle Type").get_rows()
for arow in accidentRows:
if arow[0].value == "code": continue
theValues = (int(arow[0].value),arow[1].value)
myCursor.execute("INSERT INTO 'vehicle_type' VALUES (?,?) ",theValues)
conn.commit()
| 32.697674 | 80 | 0.711238 |
import sqlite3
import xlrd
SQLITE_FILE = "UKRoadData.sqlite"
conn = sqlite3.connect(SQLITE_FILE)
myCursor = conn.cursor()
roadDataXLS = xlrd.open_workbook(filename="Road-Accident-Safety-Data-Guide.xls")
(?,?) ",theValues)
es)
conn.commit()
| true | true |
f73a630bfb87882f22bb4192e434aec839270e40 | 99,504 | py | Python | airflow/www/views.py | SueSu-Wish/incubator-airflow | 5813c0c8e1e9832d403e5a8f5783d0cb77f2748c | [
"Apache-2.0"
] | null | null | null | airflow/www/views.py | SueSu-Wish/incubator-airflow | 5813c0c8e1e9832d403e5a8f5783d0cb77f2748c | [
"Apache-2.0"
] | null | null | null | airflow/www/views.py | SueSu-Wish/incubator-airflow | 5813c0c8e1e9832d403e5a8f5783d0cb77f2748c | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from past.builtins import basestring, unicode
import ast
import datetime as dt
import logging
import os
import pkg_resources
import socket
from functools import wraps
from datetime import timedelta
import copy
import math
import json
#import bleach
import pendulum
import codecs
from collections import defaultdict
import inspect
from textwrap import dedent
import traceback
import sqlalchemy as sqla
from sqlalchemy import or_, desc, and_, union_all
from flask import (
abort, redirect, url_for, request, Markup, Response, current_app, render_template,
make_response)
from flask_admin import BaseView, expose, AdminIndexView
from flask_admin.contrib.sqla import ModelView
from flask_admin.actions import action
from flask_admin.babel import lazy_gettext
from flask_admin.tools import iterdecode
from flask import flash
from flask._compat import PY2
from jinja2.sandbox import ImmutableSandboxedEnvironment
from jinja2 import escape
import markdown
import nvd3
from wtforms import (
Form, SelectField, TextAreaField, PasswordField,
StringField, validators)
from flask_admin.form.fields import DateTimeField
from pygments import highlight, lexers
from pygments.formatters import HtmlFormatter
import airflow
from airflow import configuration as conf
from airflow import models
from airflow import settings
from airflow.api.common.experimental.mark_tasks import set_dag_run_state
from airflow.exceptions import AirflowException
from airflow.settings import Session
from airflow.models import XCom, DagRun
from airflow.ti_deps.dep_context import DepContext, QUEUE_DEPS, SCHEDULER_DEPS
from airflow.models import BaseOperator
from airflow.operators.subdag_operator import SubDagOperator
from airflow.utils import timezone
from airflow.utils.json import json_ser
from airflow.utils.state import State
from airflow.utils.db import create_session, provide_session
from airflow.utils.helpers import alchemy_to_dict
from airflow.utils.dates import infer_time_unit, scale_time_units, parse_execution_date
from airflow.utils.timezone import datetime
from airflow.utils.net import get_hostname
from airflow.www import utils as wwwutils
from airflow.www.forms import DateTimeForm, DateTimeWithNumRunsForm
from airflow.www.validators import GreaterEqualThan
# Hard caps on rows fetched for ad-hoc queries and chart data.
QUERY_LIMIT = 100000
CHART_LIMIT = 200000
UTF8_READER = codecs.getreader('utf-8')
# Module-level DagBag shared by all web views.
dagbag = models.DagBag(settings.DAGS_FOLDER)
# Re-exported from the configured auth backend.
login_required = airflow.login.login_required
current_user = airflow.login.current_user
logout_user = airflow.login.logout_user
FILTER_BY_OWNER = False
PAGE_SIZE = conf.getint('webserver', 'page_size')
if conf.getboolean('webserver', 'FILTER_BY_OWNER'):
    # filter_by_owner if authentication is enabled and filter_by_owner is true
    FILTER_BY_OWNER = not current_app.config['LOGIN_DISABLED']
def dag_link(v, c, m, p):
    """Flask-Admin column formatter: render m.dag_id as a graph-view link."""
    dag_id = m.dag_id
    if dag_id is None:
        # Nothing to link to; render an empty cell.
        return Markup()
    graph_url = url_for('airflow.graph',
                        dag_id=dag_id,
                        execution_date=m.execution_date)
    return Markup('<a href="{}">{}</a>'.format(graph_url, dag_id))
def log_url_formatter(v, c, m, p):
    """Render a book icon linking to the task instance's log URL."""
    template = Markup(
        '<a href="{m.log_url}">'
        ' <span class="glyphicon glyphicon-book" aria-hidden="true">'
        '</span></a>')
    # Only {m.log_url} is referenced, so passing m explicitly is equivalent
    # to the original **locals().
    return template.format(m=m)
def dag_run_link(v, c, m, p):
    """Render m.run_id as a link to the graph view of that DAG run."""
    url = url_for('airflow.graph',
                  dag_id=m.dag_id,
                  run_id=m.run_id,
                  execution_date=m.execution_date)
    # Format the plain string first, then wrap: the run_id is deliberately
    # not HTML-escaped, matching the original behavior.
    html = '<a href="{url}">{m.run_id}</a>'.format(url=url, m=m)
    return Markup(html)
def task_instance_link(v, c, m, p):
    """Render a task instance as a link to the task view, plus a filter
    icon that restricts the graph view to this task and its upstream."""
    dag_id = m.dag_id
    task_id = m.task_id
    execution_date = m.execution_date.isoformat()
    url = url_for(
        'airflow.task',
        dag_id=dag_id,
        task_id=task_id,
        execution_date=execution_date)
    url_root = url_for(
        'airflow.graph',
        dag_id=dag_id,
        root=task_id,
        execution_date=execution_date)
    return Markup(
        """
        <span style="white-space: nowrap;">
        <a href="{url}">{task_id}</a>
        <a href="{url_root}" title="Filter on this task and upstream">
        <span class="glyphicon glyphicon-filter" style="margin-left: 0px;"
        aria-hidden="true"></span>
        </a>
        </span>
        """.format(**locals()))
def state_token(state):
    """Return a colored Bootstrap label for the given task/dag state."""
    return Markup(
        '<span class="label" style="background-color:{color};">'
        '{state}</span>'.format(color=State.color(state), state=state))
def parse_datetime_f(value):
    """Make naive datetimes timezone-aware; pass any other value through."""
    if isinstance(value, dt.datetime):
        return timezone.make_aware(value)
    return value
def state_f(v, c, m, p):
    # Column formatter: delegate to state_token for a colored state label.
    return state_token(m.state)
def duration_f(v, c, m, p):
    """Render a row's duration as a timedelta; None when not finished."""
    if not (m.end_date and m.duration):
        # Row has no end date or no duration yet: render nothing.
        return None
    return timedelta(seconds=m.duration)
def datetime_f(v, c, m, p):
    """Render attribute `p` of row `m` as a compact ISO timestamp."""
    value = getattr(m, p)
    text = value.isoformat() if value else ''
    # Drop the "YYYY-" prefix when the timestamp is from the current year,
    # to keep list views compact.
    if text[:4] == timezone.utcnow().isoformat()[:4]:
        text = text[5:]
    return Markup("<nobr>{}</nobr>".format(text))
def nobr_f(v, c, m, p):
    """Wrap attribute `p` of row `m` in <nobr> to prevent line breaks."""
    value = getattr(m, p)
    return Markup("<nobr>{}</nobr>".format(value))
def label_link(v, c, m, p):
    """Render a chart's label as a link to the chart view.

    The chart's default_params (a Python-literal string) are parsed and
    forwarded as query arguments; malformed values fall back to {}.
    """
    try:
        default_params = ast.literal_eval(m.default_params)
    except Exception:
        # ast.literal_eval raises ValueError/SyntaxError/TypeError on bad
        # input. The previous bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt, which must propagate.
        default_params = {}
    url = url_for(
        'airflow.chart', chart_id=m.id, iteration_no=m.iteration_no,
        **default_params)
    return Markup("<a href='{url}'>{m.label}</a>".format(**locals()))
def pool_link(v, c, m, p):
    """Link a pool name to the task instance list filtered on that pool."""
    url = '/admin/taskinstance/?flt1_pool_equals=' + m.pool
    html = "<a href='{url}'>{m.pool}</a>".format(url=url, m=m)
    return Markup(html)
def pygment_html_render(s, lexer=lexers.TextLexer):
    """Syntax-highlight `s` as HTML using the given pygments lexer class."""
    formatter = HtmlFormatter(linenos=True)
    return highlight(s, lexer(), formatter)
def render(obj, lexer):
    """Highlight a string, or each element of a list/tuple/dict, as HTML."""
    parts = []
    if isinstance(obj, basestring):
        parts.append(pygment_html_render(obj, lexer))
    elif isinstance(obj, (tuple, list)):
        for i, item in enumerate(obj):
            parts.append("<div>List item #{}</div>".format(i))
            parts.append("<div>" + pygment_html_render(item, lexer) + "</div>")
    elif isinstance(obj, dict):
        for k, v in obj.items():
            parts.append('<div>Dict item "{}"</div>'.format(k))
            parts.append("<div>" + pygment_html_render(v, lexer) + "</div>")
    # Unknown types render as an empty string, as before.
    return "".join(parts)
def wrapped_markdown(s):
    """Render markdown `s` inside a .rich_doc container div."""
    rendered = markdown.markdown(s)
    return '<div class="rich_doc">' + rendered + "</div>"
# Maps task attribute names to HTML renderers used on the task detail page.
# Keys ending in a known suffix (sql, doc_md, ...) pick a matching lexer.
attr_renderer = {
    'bash_command': lambda x: render(x, lexers.BashLexer),
    'hql': lambda x: render(x, lexers.SqlLexer),
    'sql': lambda x: render(x, lexers.SqlLexer),
    'doc': lambda x: render(x, lexers.TextLexer),
    'doc_json': lambda x: render(x, lexers.JsonLexer),
    'doc_rst': lambda x: render(x, lexers.RstLexer),
    'doc_yaml': lambda x: render(x, lexers.YamlLexer),
    'doc_md': wrapped_markdown,
    'python_callable': lambda x: render(
        inspect.getsource(x), lexers.PythonLexer),
}
def data_profiling_required(f):
    """Decorator for views requiring data profiling access"""
    @wraps(f)
    def decorated_function(*args, **kwargs):
        # Short-circuit: when login is disabled, never touch current_user.
        if current_app.config['LOGIN_DISABLED']:
            return f(*args, **kwargs)
        if not current_user.is_anonymous() and current_user.data_profiling():
            return f(*args, **kwargs)
        flash("This page requires data profiling privileges", "error")
        return redirect(url_for('admin.index'))
    return decorated_function
def fused_slots(v, c, m, p):
    """Show a pool's running-slot count, linked to the filtered TI list."""
    url = ('/admin/taskinstance/'
           '?flt1_pool_equals=' + m.pool +
           '&flt2_state_equals=running')
    return Markup("<a href='{0}'>{1}</a>".format(url, m.used_slots()))
def fqueued_slots(v, c, m, p):
    """Show a pool's queued-slot count, linked to the filtered TI list."""
    url = ('/admin/taskinstance/'
           '?flt1_pool_equals=' + m.pool +
           '&flt2_state_equals=queued&sort=10&desc=1')
    return Markup("<a href='{0}'>{1}</a>".format(url, m.queued_slots()))
def recurse_tasks(tasks, task_ids, dag_ids, task_id_to_dag):
    """Walk `tasks` (a task, a list of tasks, or nested subdags), appending
    task ids to `task_ids`, subdag ids to `dag_ids`, and populating the
    `task_id_to_dag` map in place."""
    if isinstance(tasks, list):
        for item in tasks:
            recurse_tasks(item, task_ids, dag_ids, task_id_to_dag)
        return
    if isinstance(tasks, SubDagOperator):
        subdag = tasks.subdag
        dag_ids.append(subdag.dag_id)
        for subtask in subdag.tasks:
            if subtask.task_id not in task_ids:
                task_ids.append(subtask.task_id)
                task_id_to_dag[subtask.task_id] = subdag
        recurse_tasks(subdag.tasks, task_ids, dag_ids, task_id_to_dag)
    # NOTE: SubDagOperator is itself a BaseOperator, so a subdag operator
    # falls through here too and is mapped to its own dag.
    if isinstance(tasks, BaseOperator):
        task_id_to_dag[tasks.task_id] = tasks.dag
def get_chart_height(dag):
    """
    TODO(aoen): See [AIRFLOW-1263] We use the number of tasks in the DAG as a heuristic to
    approximate the size of generated chart (otherwise the charts are tiny and unreadable
    when DAGs have a large number of tasks). Ideally nvd3 should allow for dynamic-height
    charts, that is charts that take up space based on the size of the components within.
    """
    base_height = 600
    per_task_height = 10
    return base_height + per_task_height * len(dag.tasks)
class Airflow(BaseView):
    def is_visible(self):
        # Hide this view from the Flask-Admin menu; its routes are linked
        # directly from templates instead.
        return False
    @expose('/')
    @login_required
    def index(self):
        # Landing page: the DAG list.
        return self.render('airflow/dags.html')
    @expose('/chart_data')
    @data_profiling_required
    @wwwutils.gzipped
    # @cache.cached(timeout=3600, key_prefix=wwwutils.make_cache_key)
    def chart_data(self):
        """Run a chart's (templated) SQL and return data for nvd3 as JSON.

        With ?csv=true the raw query result is returned as CSV instead.
        Disabled entirely when [core] secure_mode is on, since charts can
        contain arbitrary SQL.
        """
        from airflow import macros
        import pandas as pd
        if conf.getboolean('core', 'secure_mode'):
            abort(404)
        with create_session() as session:
            chart_id = request.args.get('chart_id')
            csv = request.args.get('csv') == "true"
            chart = session.query(models.Chart).filter_by(id=chart_id).first()
            db = session.query(
                models.Connection).filter_by(conn_id=chart.conn_id).first()
            # The payload starts in ERROR state and is upgraded to SUCCESS
            # only once rendering completes.
            payload = {
                "state": "ERROR",
                "error": ""
            }
            # Processing templated fields
            try:
                args = ast.literal_eval(chart.default_params)
                if type(args) is not type(dict()):
                    raise AirflowException('Not a dict')
            except:
                args = {}
                payload['error'] += (
                    "Default params is not valid, string has to evaluate as "
                    "a Python dictionary. ")
            # URL query args override the chart's stored default params.
            request_dict = {k: request.args.get(k) for k in request.args}
            args.update(request_dict)
            args['macros'] = macros
            # Render sql/label through a sandboxed Jinja environment.
            sandbox = ImmutableSandboxedEnvironment()
            sql = sandbox.from_string(chart.sql).render(**args)
            label = sandbox.from_string(chart.label).render(**args)
            payload['sql_html'] = Markup(highlight(
                sql,
                lexers.SqlLexer(),  # Lexer call
                HtmlFormatter(noclasses=True))
            )
            payload['label'] = label
            pd.set_option('display.max_colwidth', 100)
            hook = db.get_hook()
            try:
                # Cap the result set at CHART_LIMIT rows.
                df = hook.get_pandas_df(
                    wwwutils.limit_sql(sql, CHART_LIMIT, conn_type=db.conn_type))
                df = df.fillna(0)
            except Exception as e:
                payload['error'] += "SQL execution failed. Details: " + str(e)
            if csv:
                return Response(
                    response=df.to_csv(index=False),
                    status=200,
                    mimetype="application/text")
            if not payload['error'] and len(df) == CHART_LIMIT:
                payload['warning'] = (
                    "Data has been truncated to {0}"
                    " rows. Expect incomplete results.").format(CHART_LIMIT)
            if not payload['error'] and len(df) == 0:
                payload['error'] += "Empty result set. "
            elif (
                    not payload['error'] and
                    chart.sql_layout == 'series' and
                    chart.chart_type != "datatable" and
                    len(df.columns) < 3):
                payload['error'] += "SQL needs to return at least 3 columns. "
            elif (
                    not payload['error'] and
                    chart.sql_layout == 'columns' and
                    len(df.columns) < 2):
                payload['error'] += "SQL needs to return at least 2 columns. "
            elif not payload['error']:
                import numpy as np
                chart_type = chart.chart_type
                data = None
                if chart.show_datatable or chart_type == "datatable":
                    data = df.to_dict(orient="split")
                    data['columns'] = [{'title': c} for c in data['columns']]
                    payload['data'] = data
                # Trying to convert time to something Highcharts likes
                x_col = 1 if chart.sql_layout == 'series' else 0
                if chart.x_is_date:
                    try:
                        # From string to datetime
                        df[df.columns[x_col]] = pd.to_datetime(
                            df[df.columns[x_col]])
                        # Then to epoch milliseconds for the JS charting lib.
                        df[df.columns[x_col]] = df[df.columns[x_col]].apply(
                            lambda x: int(x.strftime("%s")) * 1000)
                    except Exception as e:
                        payload['error'] = "Time conversion failed"
                if chart_type == 'datatable':
                    payload['state'] = 'SUCCESS'
                    return wwwutils.json_response(payload)
                else:
                    if chart.sql_layout == 'series':
                        # User provides columns (series, x, y)
                        xaxis_label = df.columns[1]
                        yaxis_label = df.columns[2]
                        df[df.columns[2]] = df[df.columns[2]].astype(np.float)
                        df = df.pivot_table(
                            index=df.columns[1],
                            columns=df.columns[0],
                            values=df.columns[2], aggfunc=np.sum)
                    else:
                        # User provides columns (x, y, metric1, metric2, ...)
                        xaxis_label = df.columns[0]
                        yaxis_label = 'y'
                        df.index = df[df.columns[0]]
                        df = df.sort(df.columns[0])
                        del df[df.columns[0]]
                        for col in df.columns:
                            df[col] = df[col].astype(np.float)
                    df = df.fillna(0)
                    NVd3ChartClass = chart_mapping.get(chart.chart_type)
                    NVd3ChartClass = getattr(nvd3, NVd3ChartClass)
                    nvd3_chart = NVd3ChartClass(x_is_date=chart.x_is_date)
                    for col in df.columns:
                        nvd3_chart.add_serie(name=col, y=df[col].tolist(), x=df[col].index.tolist())
                    try:
                        nvd3_chart.buildcontent()
                        payload['chart_type'] = nvd3_chart.__class__.__name__
                        payload['htmlcontent'] = nvd3_chart.htmlcontent
                    except Exception as e:
                        payload['error'] = str(e)
                    payload['state'] = 'SUCCESS'
                    payload['request_dict'] = request_dict
            return wwwutils.json_response(payload)
    @expose('/chart')
    @data_profiling_required
    def chart(self):
        """Render the chart page shell; data is fetched from /chart_data."""
        if conf.getboolean('core', 'secure_mode'):
            # Charts allow arbitrary SQL, so they are disabled in secure mode.
            abort(404)
        with create_session() as session:
            chart_id = request.args.get('chart_id')
            embed = request.args.get('embed')
            chart = session.query(models.Chart).filter_by(id=chart_id).first()
            NVd3ChartClass = chart_mapping.get(chart.chart_type)
            if not NVd3ChartClass:
                flash(
                    "Not supported anymore as the license was incompatible, "
                    "sorry",
                    "danger")
                redirect('/admin/chart/')
            sql = ""
            if chart.show_sql:
                sql = Markup(highlight(
                    chart.sql,
                    lexers.SqlLexer(),  # Lexer call
                    HtmlFormatter(noclasses=True))
                )
            return self.render(
                'airflow/nvd3.html',
                chart=chart,
                title="Airflow - Chart",
                sql=sql,
                label=chart.label,
                embed=embed)
    @expose('/dag_stats')
    @login_required
    @provide_session
    def dag_stats(self, session=None):
        """Return per-DAG dag-run state counts as JSON for the home page."""
        ds = models.DagStat
        # Refresh the stats table for all top-level DAGs before reading it.
        ds.update(
            dag_ids=[dag.dag_id for dag in dagbag.dags.values() if not dag.is_subdag]
        )
        qry = (
            session.query(ds.dag_id, ds.state, ds.count)
        )
        data = {}
        for dag_id, state, count in qry:
            if dag_id not in data:
                data[dag_id] = {}
            data[dag_id][state] = count
        payload = {}
        for dag in dagbag.dags.values():
            payload[dag.safe_dag_id] = []
            for state in State.dag_states:
                try:
                    count = data[dag.dag_id][state]
                except Exception:
                    # Missing dag/state combination means zero runs.
                    count = 0
                d = {
                    'state': state,
                    'count': count,
                    'dag_id': dag.dag_id,
                    'color': State.color(state)
                }
                payload[dag.safe_dag_id].append(d)
        return wwwutils.json_response(payload)
    @expose('/task_stats')
    @login_required
    @provide_session
    def task_stats(self, session=None):
        """Return per-DAG task-instance state counts as JSON.

        Counts come from all running dag runs plus, for DAGs with no run
        currently running, the most recent finished dag run.
        """
        TI = models.TaskInstance
        DagRun = models.DagRun
        Dag = models.DagModel
        # Most recent non-running dag run per active, top-level DAG.
        LastDagRun = (
            session.query(DagRun.dag_id, sqla.func.max(DagRun.execution_date).label('execution_date'))
            .join(Dag, Dag.dag_id == DagRun.dag_id)
            .filter(DagRun.state != State.RUNNING)
            .filter(Dag.is_active == True)
            .filter(Dag.is_subdag == False)
            .group_by(DagRun.dag_id)
            .subquery('last_dag_run')
        )
        # All currently running dag runs of active, top-level DAGs.
        RunningDagRun = (
            session.query(DagRun.dag_id, DagRun.execution_date)
            .join(Dag, Dag.dag_id == DagRun.dag_id)
            .filter(DagRun.state == State.RUNNING)
            .filter(Dag.is_active == True)
            .filter(Dag.is_subdag == False)
            .subquery('running_dag_run')
        )
        # Select all task_instances from active dag_runs.
        # If no dag_run is active, return task instances from most recent dag_run.
        LastTI = (
            session.query(TI.dag_id.label('dag_id'), TI.state.label('state'))
            .join(LastDagRun, and_(
                LastDagRun.c.dag_id == TI.dag_id,
                LastDagRun.c.execution_date == TI.execution_date))
        )
        RunningTI = (
            session.query(TI.dag_id.label('dag_id'), TI.state.label('state'))
            .join(RunningDagRun, and_(
                RunningDagRun.c.dag_id == TI.dag_id,
                RunningDagRun.c.execution_date == TI.execution_date))
        )
        UnionTI = union_all(LastTI, RunningTI).alias('union_ti')
        qry = (
            session.query(UnionTI.c.dag_id, UnionTI.c.state, sqla.func.count())
            .group_by(UnionTI.c.dag_id, UnionTI.c.state)
        )
        data = {}
        for dag_id, state, count in qry:
            if dag_id not in data:
                data[dag_id] = {}
            data[dag_id][state] = count
        session.commit()
        payload = {}
        for dag in dagbag.dags.values():
            payload[dag.safe_dag_id] = []
            for state in State.task_states:
                try:
                    count = data[dag.dag_id][state]
                except:
                    # Missing dag/state combination means zero task instances.
                    count = 0
                d = {
                    'state': state,
                    'count': count,
                    'dag_id': dag.dag_id,
                    'color': State.color(state)
                }
                payload[dag.safe_dag_id].append(d)
        return wwwutils.json_response(payload)
@expose('/code')
@login_required
def code(self):
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
title = dag_id
try:
with open(dag.fileloc, 'r') as f:
code = f.read()
html_code = highlight(
code, lexers.PythonLexer(), HtmlFormatter(linenos=True))
except IOError as e:
html_code = str(e)
return self.render(
'airflow/dag_code.html', html_code=html_code, dag=dag, title=title,
root=request.args.get('root'),
demo_mode=conf.getboolean('webserver', 'demo_mode'))
@expose('/dag_details')
@login_required
@provide_session
def dag_details(self, session=None):
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
title = "DAG details"
TI = models.TaskInstance
states = (
session.query(TI.state, sqla.func.count(TI.dag_id))
.filter(TI.dag_id == dag_id)
.group_by(TI.state)
.all()
)
return self.render(
'airflow/dag_details.html',
dag=dag, title=title, states=states, State=State)
    @current_app.errorhandler(404)
    def circles(self):
        """Render the custom 404 (page not found) page."""
        return render_template(
            'airflow/circles.html', hostname=get_hostname()), 404
    @current_app.errorhandler(500)
    def show_traceback(self):
        """Render the custom 500 page, including the current traceback."""
        from airflow.utils import asciiart as ascii_
        return render_template(
            'airflow/traceback.html',
            hostname=get_hostname(),
            nukular=ascii_.nukular,
            # format_exc() captures the traceback of the error being handled
            info=traceback.format_exc()), 500
    @expose('/noaccess')
    def noaccess(self):
        """Render the access-denied page."""
        return self.render('airflow/noaccess.html')
@expose('/pickle_info')
@login_required
def pickle_info(self):
d = {}
dag_id = request.args.get('dag_id')
dags = [dagbag.dags.get(dag_id)] if dag_id else dagbag.dags.values()
for dag in dags:
if not dag.is_subdag:
d[dag.dag_id] = dag.pickle_info()
return wwwutils.json_response(d)
    @expose('/login', methods=['GET', 'POST'])
    def login(self):
        """Delegate login to the configured airflow.login backend."""
        return airflow.login.login(self, request)
    @expose('/logout')
    def logout(self):
        """Log the current user out and redirect to the index page."""
        logout_user()
        flash('You have been logged out.')
        return redirect(url_for('admin.index'))
@expose('/rendered')
@login_required
@wwwutils.action_logging
def rendered(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
execution_date = request.args.get('execution_date')
dttm = pendulum.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
dag = dagbag.get_dag(dag_id)
task = copy.copy(dag.get_task(task_id))
ti = models.TaskInstance(task=task, execution_date=dttm)
try:
ti.render_templates()
except Exception as e:
flash("Error rendering template: " + str(e), "error")
title = "Rendered Template"
html_dict = {}
for template_field in task.__class__.template_fields:
content = getattr(task, template_field)
if template_field in attr_renderer:
html_dict[template_field] = attr_renderer[template_field](content)
else:
html_dict[template_field] = (
"<pre><code>" + str(content) + "</pre></code>")
return self.render(
'airflow/ti_code.html',
html_dict=html_dict,
dag=dag,
task_id=task_id,
execution_date=execution_date,
form=form,
title=title, )
    @expose('/log')
    @login_required
    @wwwutils.action_logging
    @provide_session
    def log(self, session=None):
        """Render the task-instance log page, reading log content through the
        handler configured as ``task_log_reader``."""
        dag_id = request.args.get('dag_id')
        task_id = request.args.get('task_id')
        execution_date = request.args.get('execution_date')
        dttm = pendulum.parse(execution_date)
        form = DateTimeForm(data={'execution_date': dttm})
        dag = dagbag.get_dag(dag_id)
        ti = session.query(models.TaskInstance).filter(
            models.TaskInstance.dag_id == dag_id,
            models.TaskInstance.task_id == task_id,
            models.TaskInstance.execution_date == dttm).first()
        if ti is None:
            logs = ["*** Task instance did not exist in the DB\n"]
        else:
            # Find the handler named [core] task_log_reader on the
            # 'airflow.task' logger; it is expected to expose read(ti).
            logger = logging.getLogger('airflow.task')
            task_log_reader = conf.get('core', 'task_log_reader')
            handler = next((handler for handler in logger.handlers
                            if handler.name == task_log_reader), None)
            try:
                ti.task = dag.get_task(ti.task_id)
                logs = handler.read(ti)
            except AttributeError as e:
                # handler is None (not found) or lacks read(); show why.
                logs = ["Task log handler {} does not support read logs.\n{}\n" \
                    .format(task_log_reader, str(e))]
        for i, log in enumerate(logs):
            # Python 2: decode byte strings so the template receives unicode.
            if PY2 and not isinstance(log, unicode):
                logs[i] = log.decode('utf-8')
        return self.render(
            'airflow/ti_log.html',
            logs=logs, dag=dag, title="Log by attempts", task_id=task_id,
            execution_date=execution_date, form=form)
    @expose('/task')
    @login_required
    @wwwutils.action_logging
    def task(self):
        """Render the Task Instance Details page: TI and task attributes,
        rendered "special" (code-like) attributes, and the scheduling
        dependencies that are currently failing for this task instance."""
        TI = models.TaskInstance
        dag_id = request.args.get('dag_id')
        task_id = request.args.get('task_id')
        # Carrying execution_date through, even though it's irrelevant for
        # this context
        execution_date = request.args.get('execution_date')
        dttm = pendulum.parse(execution_date)
        form = DateTimeForm(data={'execution_date': dttm})
        dag = dagbag.get_dag(dag_id)
        if not dag or task_id not in dag.task_ids:
            flash(
                "Task [{}.{}] doesn't seem to exist"
                " at the moment".format(dag_id, task_id),
                "error")
            return redirect('/admin/')
        # Copy so resolve_template_files() cannot mutate the shared DAG task.
        task = copy.copy(dag.get_task(task_id))
        task.resolve_template_files()
        ti = TI(task=task, execution_date=dttm)
        ti.refresh_from_db()
        ti_attrs = []
        for attr_name in dir(ti):
            if not attr_name.startswith('_'):
                attr = getattr(ti, attr_name)
                # type(self.task) is the bound-method type; this filters out
                # methods so only data attributes are listed.
                if type(attr) != type(self.task):
                    ti_attrs.append((attr_name, str(attr)))
        task_attrs = []
        for attr_name in dir(task):
            if not attr_name.startswith('_'):
                attr = getattr(task, attr_name)
                # Same method filter; attr_renderer fields are shown
                # separately below, so they are excluded here.
                if type(attr) != type(self.task) and \
                        attr_name not in attr_renderer:
                    task_attrs.append((attr_name, str(attr)))
        # Color coding the special attributes that are code
        special_attrs_rendered = {}
        for attr_name in attr_renderer:
            if hasattr(task, attr_name):
                source = getattr(task, attr_name)
                special_attrs_rendered[attr_name] = attr_renderer[attr_name](source)
        no_failed_deps_result = [(
            "Unknown",
            dedent("""\
            All dependencies are met but the task instance is not running. In most cases this just means that the task will probably be scheduled soon unless:<br/>
            - The scheduler is down or under heavy load<br/>
            {}
            <br/>
            If this task instance does not start soon please contact your Airflow """
                   """administrator for assistance."""
                   .format(
                       "- This task instance already ran and had its state changed "
                       "manually (e.g. cleared in the UI)<br/>"
                       if ti.state == State.NONE else "")))]
        # Use the scheduler's context to figure out which dependencies are not met
        dep_context = DepContext(SCHEDULER_DEPS)
        failed_dep_reasons = [(dep.dep_name, dep.reason) for dep in
                              ti.get_failed_dep_statuses(
                                  dep_context=dep_context)]
        title = "Task Instance Details"
        return self.render(
            'airflow/task.html',
            task_attrs=task_attrs,
            ti_attrs=ti_attrs,
            failed_dep_reasons=failed_dep_reasons or no_failed_deps_result,
            task_id=task_id,
            execution_date=execution_date,
            special_attrs_rendered=special_attrs_rendered,
            form=form,
            dag=dag, title=title)
@expose('/xcom')
@login_required
@wwwutils.action_logging
@provide_session
def xcom(self, session=None):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
# Carrying execution_date through, even though it's irrelevant for
# this context
execution_date = request.args.get('execution_date')
dttm = pendulum.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
dag = dagbag.get_dag(dag_id)
if not dag or task_id not in dag.task_ids:
flash(
"Task [{}.{}] doesn't seem to exist"
" at the moment".format(dag_id, task_id),
"error")
return redirect('/admin/')
xcomlist = session.query(XCom).filter(
XCom.dag_id == dag_id, XCom.task_id == task_id,
XCom.execution_date == dttm).all()
attributes = []
for xcom in xcomlist:
if not xcom.key.startswith('_'):
attributes.append((xcom.key, xcom.value))
title = "XCom"
return self.render(
'airflow/xcom.html',
attributes=attributes,
task_id=task_id,
execution_date=execution_date,
form=form,
dag=dag, title=title)
    @expose('/run')
    @login_required
    @wwwutils.action_logging
    @wwwutils.notify_owner
    def run(self):
        """Queue one task instance for immediate execution through the
        CeleryExecutor; any other executor is rejected with a flash."""
        dag_id = request.args.get('dag_id')
        task_id = request.args.get('task_id')
        origin = request.args.get('origin')
        dag = dagbag.get_dag(dag_id)
        task = dag.get_task(task_id)
        execution_date = request.args.get('execution_date')
        execution_date = pendulum.parse(execution_date)
        ignore_all_deps = request.args.get('ignore_all_deps') == "true"
        ignore_task_deps = request.args.get('ignore_task_deps') == "true"
        ignore_ti_state = request.args.get('ignore_ti_state') == "true"
        try:
            from airflow.executors import GetDefaultExecutor
            from airflow.executors.celery_executor import CeleryExecutor
            executor = GetDefaultExecutor()
            if not isinstance(executor, CeleryExecutor):
                flash("Only works with the CeleryExecutor, sorry", "error")
                return redirect(origin)
        except ImportError:
            # in case CeleryExecutor cannot be imported it is not active either
            flash("Only works with the CeleryExecutor, sorry", "error")
            return redirect(origin)
        ti = models.TaskInstance(task=task, execution_date=execution_date)
        ti.refresh_from_db()
        # Make sure the task instance can be queued
        dep_context = DepContext(
            deps=QUEUE_DEPS,
            ignore_all_deps=ignore_all_deps,
            ignore_task_deps=ignore_task_deps,
            ignore_ti_state=ignore_ti_state)
        failed_deps = list(ti.get_failed_dep_statuses(dep_context=dep_context))
        if failed_deps:
            failed_deps_str = ", ".join(
                ["{}: {}".format(dep.dep_name, dep.reason) for dep in failed_deps])
            flash("Could not queue task instance for execution, dependencies not met: "
                  "{}".format(failed_deps_str),
                  "error")
            return redirect(origin)
        executor.start()
        executor.queue_task_instance(
            ti,
            ignore_all_deps=ignore_all_deps,
            ignore_task_deps=ignore_task_deps,
            ignore_ti_state=ignore_ti_state)
        # heartbeat() makes the executor actually dispatch the queued task.
        executor.heartbeat()
        flash(
            "Sent {} to the message queue, "
            "it should start any moment now.".format(ti))
        return redirect(origin)
@expose('/trigger')
@login_required
@wwwutils.action_logging
@wwwutils.notify_owner
def trigger(self):
dag_id = request.args.get('dag_id')
origin = request.args.get('origin') or "/admin/"
dag = dagbag.get_dag(dag_id)
if not dag:
flash("Cannot find dag {}".format(dag_id))
return redirect(origin)
execution_date = timezone.utcnow()
run_id = "manual__{0}".format(execution_date.isoformat())
dr = DagRun.find(dag_id=dag_id, run_id=run_id)
if dr:
flash("This run_id {} already exists".format(run_id))
return redirect(origin)
run_conf = {}
dag.create_dagrun(
run_id=run_id,
execution_date=execution_date,
state=State.RUNNING,
conf=run_conf,
external_trigger=True
)
flash(
"Triggered {}, "
"it should start any moment now.".format(dag_id))
return redirect(origin)
def _clear_dag_tis(self, dag, start_date, end_date, origin,
recursive=False, confirmed=False):
if confirmed:
count = dag.clear(
start_date=start_date,
end_date=end_date,
include_subdags=recursive)
flash("{0} task instances have been cleared".format(count))
return redirect(origin)
tis = dag.clear(
start_date=start_date,
end_date=end_date,
include_subdags=recursive,
dry_run=True)
if not tis:
flash("No task instances to clear", 'error')
response = redirect(origin)
else:
details = "\n".join([str(t) for t in tis])
response = self.render(
'airflow/confirm.html',
message=("Here's the list of task instances you are about "
"to clear:"),
details=details)
return response
@expose('/clear')
@login_required
@wwwutils.action_logging
@wwwutils.notify_owner
def clear(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
origin = request.args.get('origin')
dag = dagbag.get_dag(dag_id)
execution_date = request.args.get('execution_date')
execution_date = pendulum.parse(execution_date)
confirmed = request.args.get('confirmed') == "true"
upstream = request.args.get('upstream') == "true"
downstream = request.args.get('downstream') == "true"
future = request.args.get('future') == "true"
past = request.args.get('past') == "true"
recursive = request.args.get('recursive') == "true"
dag = dag.sub_dag(
task_regex=r"^{0}$".format(task_id),
include_downstream=downstream,
include_upstream=upstream)
end_date = execution_date if not future else None
start_date = execution_date if not past else None
return self._clear_dag_tis(dag, start_date, end_date, origin,
recursive=recursive, confirmed=confirmed)
@expose('/dagrun_clear')
@login_required
@wwwutils.action_logging
@wwwutils.notify_owner
def dagrun_clear(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
origin = request.args.get('origin')
execution_date = request.args.get('execution_date')
confirmed = request.args.get('confirmed') == "true"
dag = dagbag.get_dag(dag_id)
execution_date = pendulum.parse(execution_date)
start_date = execution_date
end_date = execution_date
return self._clear_dag_tis(dag, start_date, end_date, origin,
recursive=True, confirmed=confirmed)
@expose('/blocked')
@login_required
@provide_session
def blocked(self, session=None):
DR = models.DagRun
dags = (
session.query(DR.dag_id, sqla.func.count(DR.id))
.filter(DR.state == State.RUNNING)
.group_by(DR.dag_id)
.all()
)
payload = []
for dag_id, active_dag_runs in dags:
max_active_runs = 0
if dag_id in dagbag.dags:
max_active_runs = dagbag.dags[dag_id].max_active_runs
payload.append({
'dag_id': dag_id,
'active_dag_run': active_dag_runs,
'max_active_runs': max_active_runs,
})
return wwwutils.json_response(payload)
    @expose('/dagrun_success')
    @login_required
    @wwwutils.action_logging
    @wwwutils.notify_owner
    def dagrun_success(self):
        """Mark a dag_run's task instances as SUCCESS; without ``confirmed``
        do a dry run and show a confirmation page listing the instances."""
        dag_id = request.args.get('dag_id')
        execution_date = request.args.get('execution_date')
        confirmed = request.args.get('confirmed') == 'true'
        origin = request.args.get('origin')
        if not execution_date:
            flash('Invalid execution date', 'error')
            return redirect(origin)
        execution_date = pendulum.parse(execution_date)
        dag = dagbag.get_dag(dag_id)
        if not dag:
            flash('Cannot find DAG: {}'.format(dag_id), 'error')
            return redirect(origin)
        # commit=False performs a dry run and returns the would-be changes.
        new_dag_state = set_dag_run_state(dag, execution_date, state=State.SUCCESS,
                                          commit=confirmed)
        if confirmed:
            flash('Marked success on {} task instances'.format(len(new_dag_state)))
            return redirect(origin)
        else:
            details = '\n'.join([str(t) for t in new_dag_state])
            response = self.render('airflow/confirm.html',
                                   message=("Here's the list of task instances you are "
                                            "about to mark as successful:"),
                                   details=details)
            return response
    @expose('/success')
    @login_required
    @wwwutils.action_logging
    @wwwutils.notify_owner
    def success(self):
        """Mark one task instance (optionally with upstream/downstream and
        past/future relatives) as SUCCESS; unconfirmed requests show a
        confirmation page from a dry run instead."""
        dag_id = request.args.get('dag_id')
        task_id = request.args.get('task_id')
        origin = request.args.get('origin')
        dag = dagbag.get_dag(dag_id)
        task = dag.get_task(task_id)
        task.dag = dag
        execution_date = request.args.get('execution_date')
        execution_date = pendulum.parse(execution_date)
        confirmed = request.args.get('confirmed') == "true"
        upstream = request.args.get('upstream') == "true"
        downstream = request.args.get('downstream') == "true"
        future = request.args.get('future') == "true"
        past = request.args.get('past') == "true"
        if not dag:
            flash("Cannot find DAG: {}".format(dag_id))
            return redirect(origin)
        if not task:
            flash("Cannot find task {} in DAG {}".format(task_id, dag.dag_id))
            return redirect(origin)
        from airflow.api.common.experimental.mark_tasks import set_state
        if confirmed:
            # commit=True applies the state change immediately.
            altered = set_state(task=task, execution_date=execution_date,
                                upstream=upstream, downstream=downstream,
                                future=future, past=past, state=State.SUCCESS,
                                commit=True)
            flash("Marked success on {} task instances".format(len(altered)))
            return redirect(origin)
        # Dry run: list the instances that would be altered.
        to_be_altered = set_state(task=task, execution_date=execution_date,
                                  upstream=upstream, downstream=downstream,
                                  future=future, past=past, state=State.SUCCESS,
                                  commit=False)
        details = "\n".join([str(t) for t in to_be_altered])
        response = self.render("airflow/confirm.html",
                               message=("Here's the list of task instances you are "
                                        "about to mark as successful:"),
                               details=details)
        return response
    @expose('/tree')
    @login_required
    @wwwutils.gzipped
    @wwwutils.action_logging
    @provide_session
    def tree(self, session=None):
        """Render the tree view: the recursive task structure crossed with
        the last ``num_runs`` dag_runs, serialized to JSON for D3."""
        dag_id = request.args.get('dag_id')
        blur = conf.getboolean('webserver', 'demo_mode')
        dag = dagbag.get_dag(dag_id)
        root = request.args.get('root')
        if root:
            # Restrict the view to the chosen task and its upstream chain.
            dag = dag.sub_dag(
                task_regex=root,
                include_downstream=False,
                include_upstream=True)
        base_date = request.args.get('base_date')
        num_runs = request.args.get('num_runs')
        num_runs = int(num_runs) if num_runs else 25
        if base_date:
            base_date = timezone.parse(base_date)
        else:
            base_date = dag.latest_execution_date or timezone.utcnow()
        # Walk backwards num_runs schedule intervals from base_date.
        dates = dag.date_range(base_date, num=-abs(num_runs))
        min_date = dates[0] if dates else datetime(2000, 1, 1)
        DR = models.DagRun
        dag_runs = (
            session.query(DR)
            .filter(
                DR.dag_id == dag.dag_id,
                DR.execution_date <= base_date,
                DR.execution_date >= min_date)
            .all()
        )
        dag_runs = {
            dr.execution_date: alchemy_to_dict(dr) for dr in dag_runs}
        dates = sorted(list(dag_runs.keys()))
        max_date = max(dates) if dates else None
        tis = dag.get_task_instances(
            session, start_date=min_date, end_date=base_date)
        # Index task instances by (task_id, execution_date) for the tree.
        task_instances = {}
        for ti in tis:
            tid = alchemy_to_dict(ti)
            dr = dag_runs.get(ti.execution_date)
            tid['external_trigger'] = dr['external_trigger'] if dr else False
            task_instances[(ti.task_id, ti.execution_date)] = tid
        expanded = []
        # The default recursion traces every path so that tree view has full
        # expand/collapse functionality. After 5,000 nodes we stop and fall
        # back on a quick DFS search for performance. See PR #320.
        node_count = [0]
        node_limit = 5000 / max(1, len(dag.roots))
        def recurse_nodes(task, visited):
            # Build the D3 node dict for `task` and, recursively, its
            # upstream tasks, attaching per-date instance data.
            visited.add(task)
            node_count[0] += 1
            children = [
                recurse_nodes(t, visited) for t in task.upstream_list
                if node_count[0] < node_limit or t not in visited]
            # D3 tree uses children vs _children to define what is
            # expanded or not. The following block makes it such that
            # repeated nodes are collapsed by default.
            children_key = 'children'
            if task.task_id not in expanded:
                expanded.append(task.task_id)
            elif children:
                children_key = "_children"
            def set_duration(tid):
                # Running instances have no stored duration; compute a
                # live one from start_date so the chart shows progress.
                if (isinstance(tid, dict) and tid.get("state") == State.RUNNING and
                        tid["start_date"] is not None):
                    d = timezone.utcnow() - pendulum.parse(tid["start_date"])
                    tid["duration"] = d.total_seconds()
                return tid
            return {
                'name': task.task_id,
                'instances': [
                    set_duration(task_instances.get((task.task_id, d))) or {
                        'execution_date': d.isoformat(),
                        'task_id': task.task_id
                    }
                    for d in dates],
                children_key: children,
                'num_dep': len(task.upstream_list),
                'operator': task.task_type,
                'retries': task.retries,
                'owner': task.owner,
                'start_date': task.start_date,
                'end_date': task.end_date,
                'depends_on_past': task.depends_on_past,
                'ui_color': task.ui_color,
            }
        data = {
            'name': '[DAG]',
            'children': [recurse_nodes(t, set()) for t in dag.roots],
            'instances': [
                dag_runs.get(d) or {'execution_date': d.isoformat()}
                for d in dates],
        }
        data = json.dumps(data, indent=4, default=json_ser)
        session.commit()
        form = DateTimeWithNumRunsForm(data={'base_date': max_date,
                                             'num_runs': num_runs})
        return self.render(
            'airflow/tree.html',
            operators=sorted(
                list(set([op.__class__ for op in dag.tasks])),
                key=lambda x: x.__name__
            ),
            root=root,
            form=form,
            dag=dag, data=data, blur=blur)
    @expose('/graph')
    @login_required
    @wwwutils.gzipped
    @wwwutils.action_logging
    @provide_session
    def graph(self, session=None):
        """Render the graph view: dagre-d3 nodes/edges for the DAG together
        with task-instance states for one selected dag_run."""
        dag_id = request.args.get('dag_id')
        blur = conf.getboolean('webserver', 'demo_mode')
        dag = dagbag.get_dag(dag_id)
        if dag_id not in dagbag.dags:
            flash('DAG "{0}" seems to be missing.'.format(dag_id), "error")
            return redirect('/admin/')
        root = request.args.get('root')
        if root:
            dag = dag.sub_dag(
                task_regex=root,
                include_upstream=True,
                include_downstream=False)
        arrange = request.args.get('arrange', dag.orientation)
        nodes = []
        edges = []
        for task in dag.tasks:
            nodes.append({
                'id': task.task_id,
                'value': {
                    'label': task.task_id,
                    'labelStyle': "fill:{0};".format(task.ui_fgcolor),
                    'style': "fill:{0};".format(task.ui_color),
                }
            })
        def get_upstream(task):
            # Walk upstream recursively, collecting each edge exactly once.
            for t in task.upstream_list:
                edge = {
                    'u': t.task_id,
                    'v': task.task_id,
                }
                if edge not in edges:
                    edges.append(edge)
                    get_upstream(t)
        for t in dag.roots:
            get_upstream(t)
        dttm = request.args.get('execution_date')
        if dttm:
            dttm = pendulum.parse(dttm)
        else:
            dttm = dag.latest_execution_date or timezone.utcnow()
        DR = models.DagRun
        drs = (
            session.query(DR)
            .filter_by(dag_id=dag_id)
            .order_by(desc(DR.execution_date)).all()
        )
        # Build the run-selector choices and find the selected run's state.
        dr_choices = []
        dr_state = None
        for dr in drs:
            dr_choices.append((dr.execution_date.isoformat(), dr.run_id))
            if dttm == dr.execution_date:
                dr_state = dr.state
        class GraphForm(Form):
            # Defined inline because the choices depend on this request.
            execution_date = SelectField("DAG run", choices=dr_choices)
            arrange = SelectField("Layout", choices=(
                ('LR', "Left->Right"),
                ('RL', "Right->Left"),
                ('TB', "Top->Bottom"),
                ('BT', "Bottom->Top"),
            ))
        form = GraphForm(
            data={'execution_date': dttm.isoformat(), 'arrange': arrange})
        task_instances = {
            ti.task_id: alchemy_to_dict(ti)
            for ti in dag.get_task_instances(session, dttm, dttm)}
        tasks = {
            t.task_id: {
                'dag_id': t.dag_id,
                'task_type': t.task_type,
            }
            for t in dag.tasks}
        if not tasks:
            flash("No tasks found", "error")
        session.commit()
        doc_md = markdown.markdown(dag.doc_md) if hasattr(dag, 'doc_md') and dag.doc_md else ''
        return self.render(
            'airflow/graph.html',
            dag=dag,
            form=form,
            width=request.args.get('width', "100%"),
            height=request.args.get('height', "800"),
            execution_date=dttm.isoformat(),
            state_token=state_token(dr_state),
            doc_md=doc_md,
            arrange=arrange,
            operators=sorted(
                list(set([op.__class__ for op in dag.tasks])),
                key=lambda x: x.__name__
            ),
            blur=blur,
            root=root or '',
            task_instances=json.dumps(task_instances, indent=2),
            tasks=json.dumps(tasks, indent=2),
            nodes=json.dumps(nodes, indent=2),
            edges=json.dumps(edges, indent=2), )
    @expose('/duration')
    @login_required
    @wwwutils.action_logging
    @provide_session
    def duration(self, session=None):
        """Render two nvd3 line charts of task durations over the last
        ``num_runs`` runs: per-run duration and cumulative duration
        (successful run plus preceding failed attempts)."""
        dag_id = request.args.get('dag_id')
        dag = dagbag.get_dag(dag_id)
        base_date = request.args.get('base_date')
        num_runs = request.args.get('num_runs')
        num_runs = int(num_runs) if num_runs else 25
        if base_date:
            base_date = pendulum.parse(base_date)
        else:
            base_date = dag.latest_execution_date or timezone.utcnow()
        dates = dag.date_range(base_date, num=-abs(num_runs))
        min_date = dates[0] if dates else datetime(2000, 1, 1)
        root = request.args.get('root')
        if root:
            dag = dag.sub_dag(
                task_regex=root,
                include_upstream=True,
                include_downstream=False)
        chart_height = get_chart_height(dag)
        chart = nvd3.lineChart(
            name="lineChart", x_is_date=True, height=chart_height, width="1200")
        cum_chart = nvd3.lineChart(
            name="cumLineChart", x_is_date=True, height=chart_height, width="1200")
        y = defaultdict(list)
        x = defaultdict(list)
        cum_y = defaultdict(list)
        tis = dag.get_task_instances(
            session, start_date=min_date, end_date=base_date)
        TF = models.TaskFail
        ti_fails = (
            session
            .query(TF)
            .filter(
                TF.dag_id == dag.dag_id,
                TF.execution_date >= min_date,
                TF.execution_date <= base_date,
                TF.task_id.in_([t.task_id for t in dag.tasks]))
            .all()
        )
        # Sum failed-attempt durations per (dag, task, execution_date).
        fails_totals = defaultdict(int)
        for tf in ti_fails:
            dict_key = (tf.dag_id, tf.task_id, tf.execution_date)
            fails_totals[dict_key] += tf.duration
        for ti in tis:
            if ti.duration:
                dttm = wwwutils.epoch(ti.execution_date)
                x[ti.task_id].append(dttm)
                y[ti.task_id].append(float(ti.duration))
                fails_dict_key = (ti.dag_id, ti.task_id, ti.execution_date)
                fails_total = fails_totals[fails_dict_key]
                cum_y[ti.task_id].append(float(ti.duration + fails_total))
        # determine the most relevant time unit for the set of task instance
        # durations for the DAG
        y_unit = infer_time_unit([d for t in y.values() for d in t])
        cum_y_unit = infer_time_unit([d for t in cum_y.values() for d in t])
        # update the y Axis on both charts to have the correct time units
        chart.create_y_axis('yAxis', format='.02f', custom_format=False,
                            label='Duration ({})'.format(y_unit))
        chart.axislist['yAxis']['axisLabelDistance'] = '40'
        cum_chart.create_y_axis('yAxis', format='.02f', custom_format=False,
                                label='Duration ({})'.format(cum_y_unit))
        cum_chart.axislist['yAxis']['axisLabelDistance'] = '40'
        for task in dag.tasks:
            if x[task.task_id]:
                chart.add_serie(name=task.task_id, x=x[task.task_id],
                                y=scale_time_units(y[task.task_id], y_unit))
                cum_chart.add_serie(name=task.task_id, x=x[task.task_id],
                                    y=scale_time_units(cum_y[task.task_id],
                                                       cum_y_unit))
        dates = sorted(list({ti.execution_date for ti in tis}))
        max_date = max([ti.execution_date for ti in tis]) if dates else None
        session.commit()
        form = DateTimeWithNumRunsForm(data={'base_date': max_date,
                                             'num_runs': num_runs})
        chart.buildcontent()
        cum_chart.buildcontent()
        # Splice a 'chartload' trigger into the generated cumulative-chart
        # JS, just before its final closing '});'.
        s_index = cum_chart.htmlcontent.rfind('});')
        cum_chart.htmlcontent = (cum_chart.htmlcontent[:s_index] +
                                 "$(function() {$( document ).trigger('chartload') })" +
                                 cum_chart.htmlcontent[s_index:])
        return self.render(
            'airflow/duration_chart.html',
            dag=dag,
            demo_mode=conf.getboolean('webserver', 'demo_mode'),
            root=root,
            form=form,
            chart=chart.htmlcontent,
            cum_chart=cum_chart.htmlcontent
        )
    @expose('/tries')
    @login_required
    @wwwutils.action_logging
    @provide_session
    def tries(self, session=None):
        """Render an nvd3 line chart of try_number per task over the last
        ``num_runs`` runs."""
        dag_id = request.args.get('dag_id')
        dag = dagbag.get_dag(dag_id)
        base_date = request.args.get('base_date')
        num_runs = request.args.get('num_runs')
        num_runs = int(num_runs) if num_runs else 25
        if base_date:
            base_date = pendulum.parse(base_date)
        else:
            base_date = dag.latest_execution_date or timezone.utcnow()
        dates = dag.date_range(base_date, num=-abs(num_runs))
        min_date = dates[0] if dates else datetime(2000, 1, 1)
        root = request.args.get('root')
        if root:
            dag = dag.sub_dag(
                task_regex=root,
                include_upstream=True,
                include_downstream=False)
        chart_height = get_chart_height(dag)
        chart = nvd3.lineChart(
            name="lineChart", x_is_date=True, y_axis_format='d', height=chart_height,
            width="1200")
        for task in dag.tasks:
            y = []
            x = []
            for ti in task.get_task_instances(session, start_date=min_date,
                                              end_date=base_date):
                dttm = wwwutils.epoch(ti.execution_date)
                x.append(dttm)
                y.append(ti.try_number)
            if x:
                chart.add_serie(name=task.task_id, x=x, y=y)
        tis = dag.get_task_instances(
            session, start_date=min_date, end_date=base_date)
        tries = sorted(list({ti.try_number for ti in tis}))
        max_date = max([ti.execution_date for ti in tis]) if tries else None
        session.commit()
        form = DateTimeWithNumRunsForm(data={'base_date': max_date,
                                             'num_runs': num_runs})
        chart.buildcontent()
        return self.render(
            'airflow/chart.html',
            dag=dag,
            demo_mode=conf.getboolean('webserver', 'demo_mode'),
            root=root,
            form=form,
            chart=chart.htmlcontent
        )
    @expose('/landing_times')
    @login_required
    @wwwutils.action_logging
    @provide_session
    def landing_times(self, session=None):
        """Render an nvd3 line chart of landing times: seconds between the
        end of each task instance and its scheduled interval close."""
        dag_id = request.args.get('dag_id')
        dag = dagbag.get_dag(dag_id)
        base_date = request.args.get('base_date')
        num_runs = request.args.get('num_runs')
        num_runs = int(num_runs) if num_runs else 25
        if base_date:
            base_date = pendulum.parse(base_date)
        else:
            base_date = dag.latest_execution_date or timezone.utcnow()
        dates = dag.date_range(base_date, num=-abs(num_runs))
        min_date = dates[0] if dates else datetime(2000, 1, 1)
        root = request.args.get('root')
        if root:
            dag = dag.sub_dag(
                task_regex=root,
                include_upstream=True,
                include_downstream=False)
        chart_height = get_chart_height(dag)
        chart = nvd3.lineChart(
            name="lineChart", x_is_date=True, height=chart_height, width="1200")
        y = {}
        x = {}
        for task in dag.tasks:
            y[task.task_id] = []
            x[task.task_id] = []
            for ti in task.get_task_instances(session, start_date=min_date,
                                              end_date=base_date):
                # Measure from the *end* of the schedule interval, when one
                # exists, since that is when the run could first start.
                ts = ti.execution_date
                if dag.schedule_interval and dag.following_schedule(ts):
                    ts = dag.following_schedule(ts)
                if ti.end_date:
                    dttm = wwwutils.epoch(ti.execution_date)
                    secs = (ti.end_date - ts).total_seconds()
                    x[ti.task_id].append(dttm)
                    y[ti.task_id].append(secs)
        # determine the most relevant time unit for the set of landing times
        # for the DAG
        y_unit = infer_time_unit([d for t in y.values() for d in t])
        # update the y Axis to have the correct time units
        chart.create_y_axis('yAxis', format='.02f', custom_format=False,
                            label='Landing Time ({})'.format(y_unit))
        chart.axislist['yAxis']['axisLabelDistance'] = '40'
        for task in dag.tasks:
            if x[task.task_id]:
                chart.add_serie(name=task.task_id, x=x[task.task_id],
                                y=scale_time_units(y[task.task_id], y_unit))
        tis = dag.get_task_instances(
            session, start_date=min_date, end_date=base_date)
        dates = sorted(list({ti.execution_date for ti in tis}))
        max_date = max([ti.execution_date for ti in tis]) if dates else None
        form = DateTimeWithNumRunsForm(data={'base_date': max_date,
                                             'num_runs': num_runs})
        chart.buildcontent()
        return self.render(
            'airflow/chart.html',
            dag=dag,
            chart=chart.htmlcontent,
            height=str(chart_height + 100) + "px",
            demo_mode=conf.getboolean('webserver', 'demo_mode'),
            root=root,
            form=form,
        )
@expose('/paused', methods=['POST'])
@login_required
@wwwutils.action_logging
@provide_session
def paused(self, session=None):
DagModel = models.DagModel
dag_id = request.args.get('dag_id')
orm_dag = session.query(
DagModel).filter(DagModel.dag_id == dag_id).first()
if request.args.get('is_paused') == 'false':
orm_dag.is_paused = True
else:
orm_dag.is_paused = False
session.merge(orm_dag)
session.commit()
dagbag.get_dag(dag_id)
return "OK"
@expose('/refresh')
@login_required
@wwwutils.action_logging
@provide_session
def refresh(self, session=None):
DagModel = models.DagModel
dag_id = request.args.get('dag_id')
orm_dag = session.query(
DagModel).filter(DagModel.dag_id == dag_id).first()
if orm_dag:
orm_dag.last_expired = timezone.utcnow()
session.merge(orm_dag)
session.commit()
dagbag.get_dag(dag_id)
flash("DAG [{}] is now fresh as a daisy".format(dag_id))
return redirect(request.referrer)
    @expose('/refresh_all')
    @login_required
    @wwwutils.action_logging
    def refresh_all(self):
        """Force a full re-collection of all DAG files, then redirect home."""
        dagbag.collect_dags(only_if_updated=False)
        flash("All DAGs are now up to date")
        return redirect('/')
    @expose('/gantt')
    @login_required
    @wwwutils.action_logging
    @provide_session
    def gantt(self, session=None):
        """Render the Gantt chart of task start/end times for one dag_run."""
        dag_id = request.args.get('dag_id')
        dag = dagbag.get_dag(dag_id)
        demo_mode = conf.getboolean('webserver', 'demo_mode')
        root = request.args.get('root')
        if root:
            dag = dag.sub_dag(
                task_regex=root,
                include_upstream=True,
                include_downstream=False)
        dttm = request.args.get('execution_date')
        if dttm:
            dttm = pendulum.parse(dttm)
        else:
            dttm = dag.latest_execution_date or timezone.utcnow()
        form = DateTimeForm(data={'execution_date': dttm})
        # Only instances that actually started can be drawn on the chart.
        tis = [
            ti for ti in dag.get_task_instances(session, dttm, dttm)
            if ti.start_date]
        tis = sorted(tis, key=lambda ti: ti.start_date)
        tasks = []
        for ti in tis:
            # Still-running instances are drawn up to "now".
            end_date = ti.end_date if ti.end_date else timezone.utcnow()
            tasks.append({
                'startDate': wwwutils.epoch(ti.start_date),
                'endDate': wwwutils.epoch(end_date),
                # [:-4] trims sub-millisecond digits for display
                'isoStart': ti.start_date.isoformat()[:-4],
                'isoEnd': end_date.isoformat()[:-4],
                'taskName': ti.task_id,
                'duration': "{}".format(end_date - ti.start_date)[:-4],
                'status': ti.state,
                'executionDate': ti.execution_date.isoformat(),
            })
        states = {ti.state: ti.state for ti in tis}
        data = {
            'taskNames': [ti.task_id for ti in tis],
            'tasks': tasks,
            'taskStatus': states,
            'height': len(tis) * 25 + 25,
        }
        session.commit()
        return self.render(
            'airflow/gantt.html',
            dag=dag,
            execution_date=dttm.isoformat(),
            form=form,
            data=json.dumps(data, indent=2),
            base_date='',
            demo_mode=demo_mode,
            root=root,
        )
@expose('/object/task_instances')
@login_required
@wwwutils.action_logging
@provide_session
def task_instances(self, session=None):
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
dttm = request.args.get('execution_date')
if dttm:
dttm = pendulum.parse(dttm)
else:
return ("Error: Invalid execution_date")
task_instances = {
ti.task_id: alchemy_to_dict(ti)
for ti in dag.get_task_instances(session, dttm, dttm)}
return json.dumps(task_instances)
@expose('/variables/<form>', methods=["GET", "POST"])
@login_required
@wwwutils.action_logging
def variables(self, form):
try:
if request.method == 'POST':
data = request.json
if data:
with create_session() as session:
var = models.Variable(key=form, val=json.dumps(data))
session.add(var)
session.commit()
return ""
else:
return self.render(
'airflow/variables/{}.html'.format(form)
)
except:
# prevent XSS
form = escape(form)
return ("Error: form airflow/variables/{}.html "
"not found.").format(form), 404
@expose('/varimport', methods=["GET", "POST"])
@login_required
@wwwutils.action_logging
def varimport(self):
try:
d = json.load(UTF8_READER(request.files['file']))
except Exception as e:
flash("Missing file or syntax error: {}.".format(e))
else:
for k, v in d.items():
models.Variable.set(k, v, serialize_json=isinstance(v, dict))
flash("{} variable(s) successfully updated.".format(len(d)))
return redirect('/admin/variable')
class HomeView(AdminIndexView):
    """The webserver landing page: a searchable, paginated list of DAGs."""
    @expose("/")
    @login_required
    @provide_session
    def index(self, session=None):
        """Render the DAG list.

        Merges DAGs registered in the DB (orm_dags) with DAGs parsed from
        files (dagbag), applying owner filtering, paused-DAG hiding, search
        and pagination.
        """
        DM = models.DagModel
        # restrict the dags shown if filter_by_owner and current user is not superuser
        do_filter = FILTER_BY_OWNER and (not current_user.is_superuser())
        owner_mode = conf.get('webserver', 'OWNER_MODE').strip().lower()
        hide_paused_dags_by_default = conf.getboolean('webserver',
                                                      'hide_paused_dags_by_default')
        show_paused_arg = request.args.get('showPaused', 'None')
        def get_int_arg(value, default=0):
            # tolerate non-numeric ?page= values
            try:
                return int(value)
            except ValueError:
                return default
        arg_current_page = request.args.get('page', '0')
        arg_search_query = request.args.get('search', None)
        dags_per_page = PAGE_SIZE
        current_page = get_int_arg(arg_current_page, default=0)
        # an explicit ?showPaused= overrides the configured default
        if show_paused_arg.strip().lower() == 'false':
            hide_paused = True
        elif show_paused_arg.strip().lower() == 'true':
            hide_paused = False
        else:
            hide_paused = hide_paused_dags_by_default
        # read orm_dags from the db
        sql_query = session.query(DM)
        if do_filter and owner_mode == 'ldapgroup':
            sql_query = sql_query.filter(
                ~DM.is_subdag,
                DM.is_active,
                DM.owners.in_(current_user.ldap_groups)
            )
        elif do_filter and owner_mode == 'user':
            sql_query = sql_query.filter(
                ~DM.is_subdag, DM.is_active,
                DM.owners == current_user.user.username
            )
        else:
            sql_query = sql_query.filter(
                ~DM.is_subdag, DM.is_active
            )
        # optionally filter out "paused" dags
        if hide_paused:
            sql_query = sql_query.filter(~DM.is_paused)
        orm_dags = {dag.dag_id: dag for dag
                    in sql_query
                    .all()}
        # surface DAG-file parse failures prominently on the front page
        import_errors = session.query(models.ImportError).all()
        for ie in import_errors:
            flash(
                "Broken DAG: [{ie.filename}] {ie.stacktrace}".format(ie=ie),
                "error")
        # get a list of all non-subdag dags visible to everyone
        # optionally filter out "paused" dags
        if hide_paused:
            unfiltered_webserver_dags = [dag for dag in dagbag.dags.values() if
                                         not dag.parent_dag and not dag.is_paused]
        else:
            unfiltered_webserver_dags = [dag for dag in dagbag.dags.values() if
                                         not dag.parent_dag]
        # optionally filter to get only dags that the user should see
        if do_filter and owner_mode == 'ldapgroup':
            # only show dags owned by someone in @current_user.ldap_groups
            webserver_dags = {
                dag.dag_id: dag
                for dag in unfiltered_webserver_dags
                if dag.owner in current_user.ldap_groups
            }
        elif do_filter and owner_mode == 'user':
            # only show dags owned by @current_user.user.username
            webserver_dags = {
                dag.dag_id: dag
                for dag in unfiltered_webserver_dags
            if dag.owner == current_user.user.username
            } if False else {
                dag.dag_id: dag
                for dag in unfiltered_webserver_dags
                if dag.owner == current_user.user.username
            }
        else:
            webserver_dags = {
                dag.dag_id: dag
                for dag in unfiltered_webserver_dags
            }
        if arg_search_query:
            lower_search_query = arg_search_query.lower()
            # filter by dag_id
            webserver_dags_filtered = {
                dag_id: dag
                for dag_id, dag in webserver_dags.items()
                if (lower_search_query in dag_id.lower() or
                    lower_search_query in dag.owner.lower())
            }
            all_dag_ids = (set([dag.dag_id for dag in orm_dags.values()
                                if lower_search_query in dag.dag_id.lower() or
                                lower_search_query in dag.owners.lower()]) |
                           set(webserver_dags_filtered.keys()))
            sorted_dag_ids = sorted(all_dag_ids)
        else:
            webserver_dags_filtered = webserver_dags
            sorted_dag_ids = sorted(set(orm_dags.keys()) | set(webserver_dags.keys()))
        # pagination window over the merged, sorted id list
        start = current_page * dags_per_page
        end = start + dags_per_page
        num_of_all_dags = len(sorted_dag_ids)
        page_dag_ids = sorted_dag_ids[start:end]
        num_of_pages = int(math.ceil(num_of_all_dags / float(dags_per_page)))
        # auto-complete suggestions cover both dag ids and owners
        auto_complete_data = set()
        for dag in webserver_dags_filtered.values():
            auto_complete_data.add(dag.dag_id)
            auto_complete_data.add(dag.owner)
        for dag in orm_dags.values():
            auto_complete_data.add(dag.dag_id)
            auto_complete_data.add(dag.owners)
        return self.render(
            'airflow/dags.html',
            webserver_dags=webserver_dags_filtered,
            orm_dags=orm_dags,
            hide_paused=hide_paused,
            current_page=current_page,
            search_query=arg_search_query if arg_search_query else '',
            page_size=dags_per_page,
            num_of_pages=num_of_pages,
            num_dag_from=start + 1,
            num_dag_to=min(end, num_of_all_dags),
            num_of_all_dags=num_of_all_dags,
            paging=wwwutils.generate_pages(current_page, num_of_pages,
                                           search=arg_search_query,
                                           showPaused=not hide_paused),
            dag_ids_in_page=page_dag_ids,
            auto_complete_data=auto_complete_data)
class QueryView(wwwutils.DataProfilingMixin, BaseView):
    """Ad-hoc SQL query UI: run a query against a configured connection."""
    @expose('/', methods=['POST', 'GET'])
    @wwwutils.gzipped
    @provide_session
    def query(self, session=None):
        """Execute the posted SQL (truncated at QUERY_LIMIT rows) and render
        the result as an HTML table, or return it as CSV when requested."""
        dbs = session.query(models.Connection).order_by(
            models.Connection.conn_id).all()
        session.expunge_all()
        # only connections with a usable hook are offered
        db_choices = list(
            ((db.conn_id, db.conn_id) for db in dbs if db.get_hook()))
        conn_id_str = request.form.get('conn_id')
        csv = request.form.get('csv') == "true"
        sql = request.form.get('sql')
        class QueryForm(Form):
            conn_id = SelectField("Layout", choices=db_choices)
            sql = TextAreaField("SQL", widget=wwwutils.AceEditorWidget())
        data = {
            'conn_id': conn_id_str,
            'sql': sql,
        }
        results = None
        has_data = False
        error = False
        if conn_id_str:
            db = [db for db in dbs if db.conn_id == conn_id_str][0]
            hook = db.get_hook()
            try:
                # limit_sql wraps the query so at most QUERY_LIMIT rows return
                df = hook.get_pandas_df(wwwutils.limit_sql(sql, QUERY_LIMIT, conn_type=db.conn_type))
                # df = hook.get_pandas_df(sql)
                has_data = len(df) > 0
                df = df.fillna('')
                results = df.to_html(
                    classes=[
                        'table', 'table-bordered', 'table-striped', 'no-wrap'],
                    index=False,
                    na_rep='',
                ) if has_data else ''
            except Exception as e:
                flash(str(e), 'error')
                error = True
            if has_data and len(df) == QUERY_LIMIT:
                flash(
                    "Query output truncated at " + str(QUERY_LIMIT) +
                    " rows", 'info')
        if not has_data and error:
            flash('No data', 'error')
        if csv:
            # NOTE(review): if the query raised before `df` was assigned this
            # hits an unbound `df` -- confirm whether the CSV path can be
            # reached after an error.
            return Response(
                response=df.to_csv(index=False),
                status=200,
                mimetype="application/text")
        form = QueryForm(request.form, data=data)
        session.commit()
        return self.render(
            'airflow/query.html', form=form,
            title="Ad Hoc Query",
            results=results or '',
            has_data=has_data)
class AirflowModelView(ModelView):
    """Base Flask-Admin ModelView using Airflow's templates and page size."""
    list_template = 'airflow/model_list.html'
    edit_template = 'airflow/model_edit.html'
    create_template = 'airflow/model_create.html'
    column_display_actions = True
    page_size = PAGE_SIZE
class ModelViewOnly(wwwutils.LoginMixin, AirflowModelView):
    """
    Modifying the base ModelView class for non edit, browse only operations
    """
    named_filter_urls = True
    # read-only: the UI offers no create/edit/delete
    can_create = False
    can_edit = False
    can_delete = False
    column_display_pk = True
class PoolModelView(wwwutils.SuperUserMixin, AirflowModelView):
    """CRUD view over worker pools with linked used/queued slot counts."""
    column_list = ('pool', 'slots', 'used_slots', 'queued_slots')
    # slot counts link to the task-instance list filtered to this pool
    column_formatters = dict(
        pool=pool_link, used_slots=fused_slots, queued_slots=fqueued_slots)
    named_filter_urls = True
    form_args = {
        'pool': {
            'validators': [
                validators.DataRequired(),
            ]
        }
    }
class SlaMissModelView(wwwutils.SuperUserMixin, ModelViewOnly):
    """Read-only browser over recorded SLA misses."""
    verbose_name_plural = "SLA misses"
    verbose_name = "SLA miss"
    column_list = (
        'dag_id', 'task_id', 'execution_date', 'email_sent', 'timestamp')
    column_formatters = dict(
        task_id=task_instance_link,
        execution_date=datetime_f,
        timestamp=datetime_f,
        dag_id=dag_link)
    named_filter_urls = True
    column_searchable_list = ('dag_id', 'task_id',)
    column_filters = (
        'dag_id', 'task_id', 'email_sent', 'timestamp', 'execution_date')
    filter_converter = wwwutils.UtcFilterConverter()
    # these fields are recorded by the scheduler; keep them read-only
    form_widget_args = {
        'email_sent': {'disabled': True},
        'timestamp': {'disabled': True},
    }
@provide_session
def _connection_ids(session=None):
    """Return (conn_id, conn_id) choice tuples for each distinct connection."""
    distinct_conns = (
        session.query(models.Connection.conn_id)
        .group_by(models.Connection.conn_id)
    )
    return [(conn.conn_id, conn.conn_id) for conn in distinct_conns]
class ChartModelView(wwwutils.DataProfilingMixin, AirflowModelView):
    """CRUD view over user-defined charts backed by ad-hoc SQL queries."""
    verbose_name = "chart"
    verbose_name_plural = "charts"
    form_columns = (
        'label',
        'owner',
        'conn_id',
        'chart_type',
        'show_datatable',
        'x_is_date',
        'y_log_scale',
        'show_sql',
        'height',
        'sql_layout',
        'sql',
        'default_params',
    )
    column_list = (
        'label',
        'conn_id',
        'chart_type',
        'owner',
        'last_modified',
    )
    column_sortable_list = (
        'label',
        'conn_id',
        'chart_type',
        ('owner', 'owner.username'),
        'last_modified',
    )
    column_formatters = dict(label=label_link, last_modified=datetime_f)
    column_default_sort = ('last_modified', True)
    create_template = 'airflow/chart/create.html'
    edit_template = 'airflow/chart/edit.html'
    column_filters = ('label', 'owner.username', 'conn_id')
    column_searchable_list = ('owner.username', 'label', 'sql')
    # help text shown next to each form field
    column_descriptions = {
        'label': "Can include {{ templated_fields }} and {{ macros }}",
        'chart_type': "The type of chart to be displayed",
        'sql': "Can include {{ templated_fields }} and {{ macros }}.",
        'height': "Height of the chart, in pixels.",
        'conn_id': "Source database to run the query against",
        'x_is_date': (
            "Whether the X axis should be casted as a date field. Expect most "
            "intelligible date formats to get casted properly."
        ),
        'owner': (
            "The chart's owner, mostly used for reference and filtering in "
            "the list view."
        ),
        'show_datatable':
            "Whether to display an interactive data table under the chart.",
        'default_params': (
            'A dictionary of {"key": "values",} that define what the '
            'templated fields (parameters) values should be by default. '
            'To be valid, it needs to "eval" as a Python dict. '
            'The key values will show up in the url\'s querystring '
            'and can be altered there.'
        ),
        'show_sql': "Whether to display the SQL statement as a collapsible "
                    "section in the chart page.",
        'y_log_scale': "Whether to use a log scale for the Y axis.",
        'sql_layout': (
            "Defines the layout of the SQL that the application should "
            "expect. Depending on the tables you are sourcing from, it may "
            "make more sense to pivot / unpivot the metrics."
        ),
    }
    column_labels = {
        'sql': "SQL",
        'height': "Chart Height",
        'sql_layout': "SQL Layout",
        'show_sql': "Display the SQL Statement",
        'default_params': "Default Parameters",
    }
    form_choices = {
        'chart_type': [
            ('line', 'Line Chart'),
            ('spline', 'Spline Chart'),
            ('bar', 'Bar Chart'),
            ('column', 'Column Chart'),
            ('area', 'Overlapping Area Chart'),
            ('stacked_area', 'Stacked Area Chart'),
            ('percent_area', 'Percent Area Chart'),
            ('datatable', 'No chart, data table only'),
        ],
        'sql_layout': [
            ('series', 'SELECT series, x, y FROM ...'),
            ('columns', 'SELECT x, y (series 1), y (series 2), ... FROM ...'),
        ],
        # evaluated once at class-definition time
        'conn_id': _connection_ids()
    }
    def on_model_change(self, form, model, is_created=True):
        """Stamp ownership and modification metadata before saving a chart."""
        # bump the iteration counter on every save
        if model.iteration_no is None:
            model.iteration_no = 0
        else:
            model.iteration_no += 1
        if not model.user_id and current_user and hasattr(current_user, 'id'):
            model.user_id = current_user.id
        model.last_modified = timezone.utcnow()
# NVD3 chart name for each Airflow chart_type.
chart_mapping = {
    'line': 'lineChart',
    'spline': 'lineChart',
    'bar': 'multiBarChart',
    'column': 'multiBarChart',
    'area': 'stackedAreaChart',
    'stacked_area': 'stackedAreaChart',
    'percent_area': 'stackedAreaChart',
    'datatable': 'datatable',
}
class KnownEventView(wwwutils.DataProfilingMixin, AirflowModelView):
    """CRUD view over known events (outages, deploys, ...) used to annotate charts."""
    verbose_name = "known event"
    verbose_name_plural = "known events"
    form_columns = (
        'label',
        'event_type',
        'start_date',
        'end_date',
        'reported_by',
        'description',
    )
    form_args = {
        'label': {
            'validators': [
                validators.DataRequired(),
            ],
        },
        'event_type': {
            'validators': [
                validators.DataRequired(),
            ],
        },
        'start_date': {
            'validators': [
                validators.DataRequired(),
            ],
            # make the naive form datetime timezone-aware before storing
            'filters': [
                parse_datetime_f,
            ],
        },
        'end_date': {
            'validators': [
                validators.DataRequired(),
                # an event cannot end before it starts
                GreaterEqualThan(fieldname='start_date'),
            ],
            'filters': [
                parse_datetime_f,
            ]
        },
        'reported_by': {
            'validators': [
                validators.DataRequired(),
            ],
        }
    }
    column_list = (
        'label',
        'event_type',
        'start_date',
        'end_date',
        'reported_by',
    )
    column_default_sort = ("start_date", True)
    column_sortable_list = (
        'label',
        # todo: yes this has a spelling error
        ('event_type', 'event_type.know_event_type'),
        'start_date',
        'end_date',
        ('reported_by', 'reported_by.username'),
    )
    filter_converter = wwwutils.UtcFilterConverter()
    form_overrides = dict(start_date=DateTimeField, end_date=DateTimeField)
class KnownEventTypeView(wwwutils.DataProfilingMixin, AirflowModelView):
    """Default CRUD view over known event types; no customization needed."""
    pass
# NOTE: For debugging / troubleshooting
# mv = KnownEventTypeView(
# models.KnownEventType,
# Session, name="Known Event Types", category="Manage")
# admin.add_view(mv)
# class DagPickleView(SuperUserMixin, ModelView):
# pass
# mv = DagPickleView(
# models.DagPickle,
# Session, name="Pickles", category="Manage")
# admin.add_view(mv)
class VariableView(wwwutils.DataProfilingMixin, AirflowModelView):
    """CRUD view over Airflow Variables, masking values for sensitive keys."""
    verbose_name = "Variable"
    verbose_name_plural = "Variables"
    list_template = 'airflow/variable_list.html'
    def hidden_field_formatter(view, context, model, name):
        """Mask values for sensitive-looking keys; flag undecryptable values."""
        if wwwutils.should_hide_value_for_key(model.key):
            return Markup('*' * 8)
        try:
            return getattr(model, name)
        except AirflowException:
            # e.g. value cannot be decrypted with the current fernet key
            return Markup('<span class="label label-danger">Invalid</span>')
    form_columns = (
        'key',
        'val',
    )
    column_list = ('key', 'val', 'is_encrypted',)
    column_filters = ('key', 'val')
    column_searchable_list = ('key', 'val')
    column_default_sort = ('key', False)
    form_widget_args = {
        'is_encrypted': {'disabled': True},
        'val': {
            'rows': 20,
        }
    }
    form_args = {
        'key': {
            'validators': {
                validators.DataRequired(),
            },
        },
    }
    column_sortable_list = (
        'key',
        'val',
        'is_encrypted',
    )
    column_formatters = {
        'val': hidden_field_formatter,
    }
    # Default flask-admin export functionality doesn't handle serialized json
    @action('varexport', 'Export', None)
    @provide_session
    def action_varexport(self, ids, session=None):
        """Export the selected variables as a downloadable JSON file."""
        V = models.Variable
        qry = session.query(V).filter(V.id.in_(ids)).all()
        var_dict = {}
        d = json.JSONDecoder()
        for var in qry:
            val = None
            try:
                val = d.decode(var.val)
            except Exception:
                # not valid JSON: export the raw string value
                # (narrowed from a bare `except:`)
                val = var.val
            var_dict[var.key] = val
        response = make_response(json.dumps(var_dict, sort_keys=True, indent=4))
        response.headers["Content-Disposition"] = "attachment; filename=variables.json"
        return response
    def on_form_prefill(self, form, id):
        """Never echo a sensitive value back into the edit form."""
        if wwwutils.should_hide_value_for_key(form.key.data):
            form.val.data = '*' * 8
class XComView(wwwutils.SuperUserMixin, AirflowModelView):
    """CRUD view over XCom records (cross-task communication values)."""
    verbose_name = "XCom"
    verbose_name_plural = "XComs"
    form_columns = (
        'key',
        'value',
        'execution_date',
        'task_id',
        'dag_id',
    )
    # edit the pickled value through a plain string field
    form_extra_fields = {
        'value': StringField('Value'),
    }
    form_args = {
        'execution_date': {
            # make the naive form datetime timezone-aware before storing
            'filters': [
                parse_datetime_f,
            ]
        }
    }
    column_filters = ('key', 'timestamp', 'execution_date', 'task_id', 'dag_id')
    column_searchable_list = ('key', 'timestamp', 'execution_date', 'task_id', 'dag_id')
    filter_converter = wwwutils.UtcFilterConverter()
    form_overrides = dict(execution_date=DateTimeField)
class JobModelView(ModelViewOnly):
    """Read-only browser over scheduler/backfill/worker job records."""
    verbose_name_plural = "jobs"
    verbose_name = "job"
    column_display_actions = False
    column_default_sort = ('start_date', True)
    column_filters = (
        'job_type', 'dag_id', 'state',
        'unixname', 'hostname', 'start_date', 'end_date', 'latest_heartbeat')
    column_formatters = dict(
        start_date=datetime_f,
        end_date=datetime_f,
        hostname=nobr_f,
        state=state_f,
        latest_heartbeat=datetime_f)
    filter_converter = wwwutils.UtcFilterConverter()
class DagRunModelView(ModelViewOnly):
    """Browser over DAG runs with bulk delete and state-change actions."""
    verbose_name_plural = "DAG Runs"
    can_edit = True
    can_create = True
    column_editable_list = ('state',)
    verbose_name = "dag run"
    column_default_sort = ('execution_date', True)
    form_choices = {
        'state': [
            ('success', 'success'),
            ('running', 'running'),
            ('failed', 'failed'),
        ],
    }
    form_args = dict(
        dag_id=dict(validators=[validators.DataRequired()])
    )
    column_list = (
        'state', 'dag_id', 'execution_date', 'run_id', 'external_trigger')
    column_filters = column_list
    filter_converter = wwwutils.UtcFilterConverter()
    column_searchable_list = ('dag_id', 'state', 'run_id')
    column_formatters = dict(
        execution_date=datetime_f,
        state=state_f,
        start_date=datetime_f,
        dag_id=dag_link,
        run_id=dag_run_link
    )
    @action('new_delete', "Delete", "Are you sure you want to delete selected records?")
    @provide_session
    def action_new_delete(self, ids, session=None):
        """Delete the selected DAG runs and refresh stats of affected DAGs."""
        # capture the rows first so their dag_ids survive the bulk delete
        deleted = set(session.query(models.DagRun)
                      .filter(models.DagRun.id.in_(ids))
                      .all())
        session.query(models.DagRun) \
            .filter(models.DagRun.id.in_(ids)) \
            .delete(synchronize_session='fetch')
        session.commit()
        dirty_ids = []
        for row in deleted:
            dirty_ids.append(row.dag_id)
        models.DagStat.update(dirty_ids, dirty_only=False, session=session)
    @action('set_running', "Set state to 'running'", None)
    def action_set_running(self, ids):
        self.set_dagrun_state(ids, State.RUNNING)
    @action('set_failed', "Set state to 'failed'", None)
    def action_set_failed(self, ids):
        self.set_dagrun_state(ids, State.FAILED)
    @action('set_success', "Set state to 'success'", None)
    def action_set_success(self, ids):
        self.set_dagrun_state(ids, State.SUCCESS)
    @provide_session
    def set_dagrun_state(self, ids, target_state, session=None):
        """Set the selected DAG runs to target_state, adjusting start/end dates."""
        try:
            DR = models.DagRun
            count = 0
            dirty_ids = []
            for dr in session.query(DR).filter(DR.id.in_(ids)).all():
                dirty_ids.append(dr.dag_id)
                count += 1
                dr.state = target_state
                if target_state == State.RUNNING:
                    dr.start_date = timezone.utcnow()
                else:
                    # terminal states close the run
                    dr.end_date = timezone.utcnow()
            session.commit()
            models.DagStat.update(dirty_ids, session=session)
            flash(
                "{count} dag runs were set to '{target_state}'".format(**locals()))
        except Exception as ex:
            if not self.handle_view_exception(ex):
                raise Exception("Ooops")
            flash('Failed to set state', 'error')
class LogModelView(ModelViewOnly):
    """Read-only browser over the audit log table."""
    verbose_name_plural = "logs"
    verbose_name = "log"
    column_display_actions = False
    column_default_sort = ('dttm', True)
    column_filters = ('dag_id', 'task_id', 'execution_date')
    filter_converter = wwwutils.UtcFilterConverter()
    column_formatters = dict(
        dttm=datetime_f, execution_date=datetime_f, dag_id=dag_link)
class TaskInstanceModelView(ModelViewOnly):
    """Browser over task instances with bulk state-change and clear actions."""
    verbose_name_plural = "task instances"
    verbose_name = "task instance"
    column_filters = (
        'state', 'dag_id', 'task_id', 'execution_date', 'hostname',
        'queue', 'pool', 'operator', 'start_date', 'end_date')
    filter_converter = wwwutils.UtcFilterConverter()
    named_filter_urls = True
    column_formatters = dict(
        log_url=log_url_formatter,
        task_id=task_instance_link,
        hostname=nobr_f,
        state=state_f,
        execution_date=datetime_f,
        start_date=datetime_f,
        end_date=datetime_f,
        queued_dttm=datetime_f,
        dag_id=dag_link,
        run_id=dag_run_link,
        duration=duration_f)
    column_searchable_list = ('dag_id', 'task_id', 'state')
    column_default_sort = ('job_id', True)
    form_choices = {
        'state': [
            ('success', 'success'),
            ('running', 'running'),
            ('failed', 'failed'),
        ],
    }
    column_list = (
        'state', 'dag_id', 'task_id', 'execution_date', 'operator',
        'start_date', 'end_date', 'duration', 'job_id', 'hostname',
        'unixname', 'priority_weight', 'queue', 'queued_dttm', 'try_number',
        'pool', 'log_url')
    page_size = PAGE_SIZE
    @action('set_running', "Set state to 'running'", None)
    def action_set_running(self, ids):
        self.set_task_instance_state(ids, State.RUNNING)
    @action('set_failed', "Set state to 'failed'", None)
    def action_set_failed(self, ids):
        self.set_task_instance_state(ids, State.FAILED)
    @action('set_success', "Set state to 'success'", None)
    def action_set_success(self, ids):
        self.set_task_instance_state(ids, State.SUCCESS)
    @action('set_retry', "Set state to 'up_for_retry'", None)
    def action_set_retry(self, ids):
        self.set_task_instance_state(ids, State.UP_FOR_RETRY)
    @provide_session
    @action('clear',
            lazy_gettext('Clear'),
            lazy_gettext(
                'Are you sure you want to clear the state of the selected task instance(s)'
                ' and set their dagruns to the running state?'))
    def action_clear(self, ids, session=None):
        """Clear the selected task instances.

        `ids` are composite strings of the form 'task_id,dag_id,execution_date'.
        """
        try:
            TI = models.TaskInstance
            dag_to_task_details = {}
            dag_to_tis = {}
            # Collect dags upfront as dagbag.get_dag() will reset the session
            for id_str in ids:
                task_id, dag_id, execution_date = id_str.split(',')
                dag = dagbag.get_dag(dag_id)
                task_details = dag_to_task_details.setdefault(dag, [])
                task_details.append((task_id, execution_date))
            for dag, task_details in dag_to_task_details.items():
                for task_id, execution_date in task_details:
                    execution_date = parse_execution_date(execution_date)
                    ti = session.query(TI).filter(TI.task_id == task_id,
                                                  TI.dag_id == dag.dag_id,
                                                  TI.execution_date == execution_date).one()
                    tis = dag_to_tis.setdefault(dag, [])
                    tis.append(ti)
            for dag, tis in dag_to_tis.items():
                models.clear_task_instances(tis, session, dag=dag)
            session.commit()
            flash("{0} task instances have been cleared".format(len(ids)))
        except Exception as ex:
            if not self.handle_view_exception(ex):
                raise Exception("Ooops")
            flash('Failed to clear task instances', 'error')
    @provide_session
    def set_task_instance_state(self, ids, target_state, session=None):
        """Set the selected task instances (composite ids) to target_state."""
        try:
            TI = models.TaskInstance
            count = len(ids)
            for id in ids:
                task_id, dag_id, execution_date = id.split(',')
                execution_date = parse_execution_date(execution_date)
                ti = session.query(TI).filter(TI.task_id == task_id,
                                              TI.dag_id == dag_id,
                                              TI.execution_date == execution_date).one()
                ti.state = target_state
            session.commit()
            flash(
                "{count} task instances were set to '{target_state}'".format(**locals()))
        except Exception as ex:
            if not self.handle_view_exception(ex):
                raise Exception("Ooops")
            flash('Failed to set state', 'error')
    def get_one(self, id):
        """
        As a workaround for AIRFLOW-252, this method overrides Flask-Admin's ModelView.get_one().
        TODO: this method should be removed once the below bug is fixed on Flask-Admin side.
        https://github.com/flask-admin/flask-admin/issues/1226
        """
        task_id, dag_id, execution_date = iterdecode(id)
        execution_date = pendulum.parse(execution_date)
        return self.session.query(self.model).get((task_id, dag_id, execution_date))
class ConnectionModelView(wwwutils.SuperUserMixin, AirflowModelView):
    """CRUD view over Connections; extra__* form fields are folded into `extra`."""
    create_template = 'airflow/conn_create.html'
    edit_template = 'airflow/conn_edit.html'
    list_template = 'airflow/conn_list.html'
    form_columns = (
        'conn_id',
        'conn_type',
        'host',
        'schema',
        'login',
        'password',
        'port',
        'extra',
        'extra__jdbc__drv_path',
        'extra__jdbc__drv_clsname',
        'extra__google_cloud_platform__project',
        'extra__google_cloud_platform__key_path',
        'extra__google_cloud_platform__keyfile_dict',
        'extra__google_cloud_platform__scope',
    )
    verbose_name = "Connection"
    verbose_name_plural = "Connections"
    column_default_sort = ('conn_id', False)
    column_list = ('conn_id', 'conn_type', 'host', 'port', 'is_encrypted', 'is_extra_encrypted',)
    form_overrides = dict(_password=PasswordField, _extra=TextAreaField)
    form_widget_args = {
        'is_extra_encrypted': {'disabled': True},
        'is_encrypted': {'disabled': True},
    }
    # Used to customized the form, the forms elements get rendered
    # and results are stored in the extra field as json. All of these
    # need to be prefixed with extra__ and then the conn_type ___ as in
    # extra__{conn_type}__name. You can also hide form elements and rename
    # others from the connection_form.js file
    form_extra_fields = {
        'extra__jdbc__drv_path': StringField('Driver Path'),
        'extra__jdbc__drv_clsname': StringField('Driver Class'),
        'extra__google_cloud_platform__project': StringField('Project Id'),
        'extra__google_cloud_platform__key_path': StringField('Keyfile Path'),
        'extra__google_cloud_platform__keyfile_dict': PasswordField('Keyfile JSON'),
        'extra__google_cloud_platform__scope': StringField('Scopes (comma separated)'),
    }
    form_choices = {
        'conn_type': models.Connection._types
    }
    def on_model_change(self, form, model, is_created):
        """Serialize conn-type-specific extra__* form fields into model.extra."""
        formdata = form.data
        if formdata['conn_type'] in ['jdbc', 'google_cloud_platform']:
            extra = {
                key: formdata[key]
                for key in self.form_extra_fields.keys() if key in formdata}
            model.extra = json.dumps(extra)
    @classmethod
    def alert_fernet_key(cls):
        """Return True when no fernet key is configured (credentials unencrypted)."""
        fk = None
        try:
            fk = conf.get('core', 'fernet_key')
        except Exception:
            # narrowed from a bare `except:`: a missing option/section simply
            # means no key is configured
            pass
        return fk is None
    @classmethod
    def is_secure(cls):
        """
        Used to display a message in the Connection list view making it clear
        that the passwords and `extra` field can't be encrypted.
        """
        is_secure = False
        try:
            import cryptography
            conf.get('core', 'fernet_key')
            is_secure = True
        except Exception:
            # narrowed from a bare `except:`: either cryptography is missing
            # or no fernet key is configured
            pass
        return is_secure
    def on_form_prefill(self, form, id):
        """Populate the extra__* form fields from the stored `extra` JSON."""
        try:
            d = json.loads(form.data.get('extra', '{}'))
        except Exception:
            d = {}
        for field in list(self.form_extra_fields.keys()):
            value = d.get(field, '')
            if value:
                field = getattr(form, field)
                field.data = value
class UserModelView(wwwutils.SuperUserMixin, AirflowModelView):
    """CRUD view over Airflow users; superuser only."""
    verbose_name = "User"
    verbose_name_plural = "Users"
    column_default_sort = 'username'
class VersionView(wwwutils.SuperUserMixin, BaseView):
    """Show the installed Airflow version and the git hash it was built from."""
    @expose('/')
    def version(self):
        """Render the version page; both values degrade gracefully to None."""
        # Look at the version from setup.py
        try:
            airflow_version = pkg_resources.require("apache-airflow")[0].version
        except Exception as e:
            airflow_version = None
            logging.error(e)
        # Get the Git repo and git hash
        git_version = None
        try:
            with open(os.path.join(*[settings.AIRFLOW_HOME, 'airflow', 'git_version'])) as f:
                git_version = f.readline()
        except Exception as e:
            logging.error(e)
        # Render information
        title = "Version Info"
        return self.render('airflow/version.html',
                           title=title,
                           airflow_version=airflow_version,
                           git_version=git_version)
class ConfigurationView(wwwutils.SuperUserMixin, BaseView):
    """Display airflow.cfg (raw or highlighted), honoring `expose_config`."""
    @expose('/')
    def conf(self):
        """Render the configuration, or a placeholder when it is not exposed."""
        raw = request.args.get('raw') == "true"
        title = "Airflow Configuration"
        subtitle = conf.AIRFLOW_CONFIG
        if conf.getboolean("webserver", "expose_config"):
            with open(conf.AIRFLOW_CONFIG, 'r') as f:
                config = f.read()
            # (section, key, value, source) rows for the table view
            table = [(section, key, value, source)
                     for section, parameters in conf.as_dict(True, True).items()
                     for key, (value, source) in parameters.items()]
        else:
            config = (
                "# Your Airflow administrator chose not to expose the "
                "configuration, most likely for security reasons.")
            table = None
        if raw:
            return Response(
                response=config,
                status=200,
                mimetype="application/text")
        else:
            code_html = Markup(highlight(
                config,
                lexers.IniLexer(),  # Lexer call
                HtmlFormatter(noclasses=True))
            )
            return self.render(
                'airflow/config.html',
                pre_subtitle=settings.HEADER + " v" + airflow.__version__,
                code_html=code_html, title=title, subtitle=subtitle,
                table=table)
class DagModelView(wwwutils.SuperUserMixin, ModelView):
    """Browser over DagModel rows (DAG-level metadata); mostly read-only."""
    column_list = ('dag_id', 'owners')
    column_editable_list = ('is_paused',)
    form_excluded_columns = ('is_subdag', 'is_active')
    column_searchable_list = ('dag_id',)
    column_filters = (
        'dag_id', 'owners', 'is_paused', 'is_active', 'is_subdag',
        'last_scheduler_run', 'last_expired')
    filter_converter = wwwutils.UtcFilterConverter()
    # scheduler-maintained fields are shown but not editable
    form_widget_args = {
        'last_scheduler_run': {'disabled': True},
        'fileloc': {'disabled': True},
        'is_paused': {'disabled': True},
        'last_pickled': {'disabled': True},
        'pickle_id': {'disabled': True},
        'last_loaded': {'disabled': True},
        'last_expired': {'disabled': True},
        'pickle_size': {'disabled': True},
        'scheduler_lock': {'disabled': True},
        'owners': {'disabled': True},
    }
    column_formatters = dict(
        dag_id=dag_link,
    )
    can_delete = False
    can_create = False
    page_size = PAGE_SIZE
    list_template = 'airflow/list_dags.html'
    named_filter_urls = True
    def get_query(self):
        """
        Default filters for model
        """
        return (
            super(DagModelView, self)
            .get_query()
            .filter(or_(models.DagModel.is_active, models.DagModel.is_paused))
            .filter(~models.DagModel.is_subdag)
        )
    def get_count_query(self):
        """
        Default filters for model
        """
        # NOTE(review): unlike get_query, this omits the `or_ is_paused`
        # clause, so the count can disagree with the listed rows -- confirm
        # whether that is intended.
        return (
            super(DagModelView, self)
            .get_count_query()
            .filter(models.DagModel.is_active)
            .filter(~models.DagModel.is_subdag)
        )
| 34.925939 | 163 | 0.569083 |
from past.builtins import basestring, unicode
import ast
import datetime as dt
import logging
import os
import pkg_resources
import socket
from functools import wraps
from datetime import timedelta
import copy
import math
import json
import pendulum
import codecs
from collections import defaultdict
import inspect
from textwrap import dedent
import traceback
import sqlalchemy as sqla
from sqlalchemy import or_, desc, and_, union_all
from flask import (
abort, redirect, url_for, request, Markup, Response, current_app, render_template,
make_response)
from flask_admin import BaseView, expose, AdminIndexView
from flask_admin.contrib.sqla import ModelView
from flask_admin.actions import action
from flask_admin.babel import lazy_gettext
from flask_admin.tools import iterdecode
from flask import flash
from flask._compat import PY2
from jinja2.sandbox import ImmutableSandboxedEnvironment
from jinja2 import escape
import markdown
import nvd3
from wtforms import (
Form, SelectField, TextAreaField, PasswordField,
StringField, validators)
from flask_admin.form.fields import DateTimeField
from pygments import highlight, lexers
from pygments.formatters import HtmlFormatter
import airflow
from airflow import configuration as conf
from airflow import models
from airflow import settings
from airflow.api.common.experimental.mark_tasks import set_dag_run_state
from airflow.exceptions import AirflowException
from airflow.settings import Session
from airflow.models import XCom, DagRun
from airflow.ti_deps.dep_context import DepContext, QUEUE_DEPS, SCHEDULER_DEPS
from airflow.models import BaseOperator
from airflow.operators.subdag_operator import SubDagOperator
from airflow.utils import timezone
from airflow.utils.json import json_ser
from airflow.utils.state import State
from airflow.utils.db import create_session, provide_session
from airflow.utils.helpers import alchemy_to_dict
from airflow.utils.dates import infer_time_unit, scale_time_units, parse_execution_date
from airflow.utils.timezone import datetime
from airflow.utils.net import get_hostname
from airflow.www import utils as wwwutils
from airflow.www.forms import DateTimeForm, DateTimeWithNumRunsForm
from airflow.www.validators import GreaterEqualThan
QUERY_LIMIT = 100000  # max rows returned by the ad-hoc query view
CHART_LIMIT = 200000  # max rows pulled when rendering charts
UTF8_READER = codecs.getreader('utf-8')
# DagBag parsed once at import time; views read DAG structure from it
dagbag = models.DagBag(settings.DAGS_FOLDER)
login_required = airflow.login.login_required
current_user = airflow.login.current_user
logout_user = airflow.login.logout_user
FILTER_BY_OWNER = False
PAGE_SIZE = conf.getint('webserver', 'page_size')
if conf.getboolean('webserver', 'FILTER_BY_OWNER'):
    # filter_by_owner only has effect when authentication is enabled
    FILTER_BY_OWNER = not current_app.config['LOGIN_DISABLED']
def dag_link(v, c, m, p):
    """Render a model's dag_id as a link to the DAG's graph view."""
    dag_id = m.dag_id
    if dag_id is None:
        # nothing to link to
        return Markup()
    target = url_for(
        'airflow.graph',
        dag_id=dag_id,
        execution_date=m.execution_date)
    return Markup('<a href="{}">{}</a>'.format(target, dag_id))
def log_url_formatter(v, c, m, p):
    """Render a book icon linking to the task instance's log page."""
    template = Markup(
        '<a href="{m.log_url}">'
        ' <span class="glyphicon glyphicon-book" aria-hidden="true">'
        '</span></a>')
    return template.format(m=m)
def dag_run_link(v, c, m, p):
    """Render a DAG run's run_id as a link to the graph view for that run."""
    target = url_for(
        'airflow.graph',
        dag_id=m.dag_id,
        run_id=m.run_id,
        execution_date=m.execution_date)
    return Markup('<a href="{url}">{m.run_id}</a>'.format(url=target, m=m))
def task_instance_link(v, c, m, p):
    """Render a task_id as two links: the task view, and the graph view
    filtered on this task and its upstream."""
    dag_id = m.dag_id
    task_id = m.task_id
    when = m.execution_date.isoformat()
    url = url_for(
        'airflow.task',
        dag_id=dag_id,
        task_id=task_id,
        execution_date=when)
    url_root = url_for(
        'airflow.graph',
        dag_id=dag_id,
        root=task_id,
        execution_date=when)
    return Markup(
        """
        <span style="white-space: nowrap;">
        <a href="{url}">{task_id}</a>
        <a href="{url_root}" title="Filter on this task and upstream">
        <span class="glyphicon glyphicon-filter" style="margin-left: 0px;"
            aria-hidden="true"></span>
        </a>
        </span>
        """.format(url=url, task_id=task_id, url_root=url_root))
def state_token(state):
    """Render a task/run state as a colored label pill."""
    color = State.color(state)
    html = ('<span class="label" style="background-color:{color};">'
            '{state}</span>').format(color=color, state=state)
    return Markup(html)
def parse_datetime_f(value):
    """Make a naive form datetime timezone-aware; pass other values through."""
    if isinstance(value, dt.datetime):
        return timezone.make_aware(value)
    return value
def state_f(v, c, m, p):
    # column formatter: render the row's state as a colored label
    return state_token(m.state)
def duration_f(v, c, m, p):
    """Render a finished task's duration as a timedelta; None while running."""
    if not (m.end_date and m.duration):
        return None
    return timedelta(seconds=m.duration)
def datetime_f(v, c, m, p):
    """Format a datetime column compactly, dropping the current year."""
    value = getattr(m, p)
    text = value.isoformat() if value else ''
    current_year = timezone.utcnow().isoformat()[:4]
    if current_year == text[:4]:
        # same year as "now": strip the leading "YYYY-" for brevity
        text = text[5:]
    return Markup("<nobr>{}</nobr>".format(text))
def nobr_f(v, c, m, p):
    """Wrap the attribute value in <nobr> so it won't line-break."""
    value = getattr(m, p)
    return Markup("<nobr>{}</nobr>".format(value))
def label_link(v, c, m, p):
    """Link a chart's label to the chart view, carrying its default params."""
    try:
        default_params = ast.literal_eval(m.default_params)
    except Exception:
        # default_params is free-form user text; fall back to no params when
        # it isn't a valid Python literal. Narrowed from a bare `except:` so
        # SystemExit/KeyboardInterrupt are not swallowed.
        default_params = {}
    url = url_for(
        'airflow.chart', chart_id=m.id, iteration_no=m.iteration_no,
        **default_params)
    return Markup("<a href='{url}'>{m.label}</a>".format(**locals()))
def pool_link(v, c, m, p):
    """Link a pool name to the task-instance list filtered by that pool."""
    target = '/admin/taskinstance/?flt1_pool_equals=' + m.pool
    return Markup("<a href='{url}'>{m.pool}</a>".format(url=target, m=m))
def pygment_html_render(s, lexer=lexers.TextLexer):
    """Highlight *s* into HTML with line numbers using the given Pygments lexer."""
    formatter = HtmlFormatter(linenos=True)
    return highlight(s, lexer(), formatter)
def render(obj, lexer):
    """Highlight a string, a list/tuple of strings, or a dict of strings as
    one concatenated HTML fragment."""
    parts = []
    if isinstance(obj, basestring):
        parts.append(pygment_html_render(obj, lexer))
    elif isinstance(obj, (tuple, list)):
        for i, s in enumerate(obj):
            parts.append("<div>List item #{}</div>".format(i))
            parts.append("<div>" + pygment_html_render(s, lexer) + "</div>")
    elif isinstance(obj, dict):
        for k, v in obj.items():
            parts.append('<div>Dict item "{}"</div>'.format(k))
            parts.append("<div>" + pygment_html_render(v, lexer) + "</div>")
    # join once instead of repeated string concatenation
    return "".join(parts)
def wrapped_markdown(s):
    # Render markdown and wrap it in a div so CSS can style doc blocks.
    return '<div class="rich_doc">' + markdown.markdown(s) + "</div>"
# Maps task attribute names to callables that render that attribute's value
# as HTML for the task-details and rendered-template views.
attr_renderer = {
    'bash_command': lambda x: render(x, lexers.BashLexer),
    'hql': lambda x: render(x, lexers.SqlLexer),
    'sql': lambda x: render(x, lexers.SqlLexer),
    'doc': lambda x: render(x, lexers.TextLexer),
    'doc_json': lambda x: render(x, lexers.JsonLexer),
    'doc_rst': lambda x: render(x, lexers.RstLexer),
    'doc_yaml': lambda x: render(x, lexers.YamlLexer),
    'doc_md': wrapped_markdown,
    'python_callable': lambda x: render(
        inspect.getsource(x), lexers.PythonLexer),
}
def data_profiling_required(f):
    """View decorator: allow access only when login is disabled or the
    current user has data-profiling privileges; otherwise flash an error
    and redirect to the admin index."""
    @wraps(f)
    def decorated_function(*args, **kwargs):
        if (
                current_app.config['LOGIN_DISABLED'] or
                (not current_user.is_anonymous() and current_user.data_profiling())
        ):
            return f(*args, **kwargs)
        else:
            flash("This page requires data profiling privileges", "error")
            return redirect(url_for('admin.index'))
    return decorated_function
def fused_slots(v, c, m, p):
    """Column formatter: link a pool's used-slot count to its running TIs."""
    url = ('/admin/taskinstance/?flt1_pool_equals=' + m.pool +
           '&flt2_state_equals=running')
    return Markup("<a href='{0}'>{1}</a>".format(url, m.used_slots()))
def fqueued_slots(v, c, m, p):
    """Column formatter: link a pool's queued-slot count to its queued TIs."""
    url = ('/admin/taskinstance/?flt1_pool_equals=' + m.pool +
           '&flt2_state_equals=queued&sort=10&desc=1')
    return Markup("<a href='{0}'>{1}</a>".format(url, m.queued_slots()))
def recurse_tasks(tasks, task_ids, dag_ids, task_id_to_dag):
    """Recursively collect task ids and subdag ids from *tasks*, expanding
    SubDagOperators into their subdag's tasks.

    Mutates task_ids, dag_ids and task_id_to_dag in place; returns None.
    """
    # Accept a list of tasks: recurse into each element and stop.
    if isinstance(tasks, list):
        for task in tasks:
            recurse_tasks(task, task_ids, dag_ids, task_id_to_dag)
        return
    # A SubDagOperator contributes its subdag's id and all subdag tasks.
    if isinstance(tasks, SubDagOperator):
        subtasks = tasks.subdag.tasks
        dag_ids.append(tasks.subdag.dag_id)
        for subtask in subtasks:
            if subtask.task_id not in task_ids:
                task_ids.append(subtask.task_id)
                task_id_to_dag[subtask.task_id] = tasks.subdag
        recurse_tasks(subtasks, task_ids, dag_ids, task_id_to_dag)
    # Any concrete operator maps its task_id to its owning DAG.
    if isinstance(tasks, BaseOperator):
        task_id_to_dag[tasks.task_id] = tasks.dag
def get_chart_height(dag):
    """Heuristic chart height in px: 600 base plus 10 per task in the DAG."""
    return 600 + 10 * len(dag.tasks)
class Airflow(BaseView):
    def is_visible(self):
        # Hide this view from the Flask-Admin menu; its routes stay reachable.
        return False
    @expose('/')
    @login_required
    def index(self):
        """Render the DAGs home page."""
        return self.render('airflow/dags.html')
@expose('/chart_data')
@data_profiling_required
@wwwutils.gzipped
def chart_data(self):
from airflow import macros
import pandas as pd
if conf.getboolean('core', 'secure_mode'):
abort(404)
with create_session() as session:
chart_id = request.args.get('chart_id')
csv = request.args.get('csv') == "true"
chart = session.query(models.Chart).filter_by(id=chart_id).first()
db = session.query(
models.Connection).filter_by(conn_id=chart.conn_id).first()
payload = {
"state": "ERROR",
"error": ""
}
try:
args = ast.literal_eval(chart.default_params)
if type(args) is not type(dict()):
raise AirflowException('Not a dict')
except:
args = {}
payload['error'] += (
"Default params is not valid, string has to evaluate as "
"a Python dictionary. ")
request_dict = {k: request.args.get(k) for k in request.args}
args.update(request_dict)
args['macros'] = macros
sandbox = ImmutableSandboxedEnvironment()
sql = sandbox.from_string(chart.sql).render(**args)
label = sandbox.from_string(chart.label).render(**args)
payload['sql_html'] = Markup(highlight(
sql,
lexers.SqlLexer(),
HtmlFormatter(noclasses=True))
)
payload['label'] = label
pd.set_option('display.max_colwidth', 100)
hook = db.get_hook()
try:
df = hook.get_pandas_df(
wwwutils.limit_sql(sql, CHART_LIMIT, conn_type=db.conn_type))
df = df.fillna(0)
except Exception as e:
payload['error'] += "SQL execution failed. Details: " + str(e)
if csv:
return Response(
response=df.to_csv(index=False),
status=200,
mimetype="application/text")
if not payload['error'] and len(df) == CHART_LIMIT:
payload['warning'] = (
"Data has been truncated to {0}"
" rows. Expect incomplete results.").format(CHART_LIMIT)
if not payload['error'] and len(df) == 0:
payload['error'] += "Empty result set. "
elif (
not payload['error'] and
chart.sql_layout == 'series' and
chart.chart_type != "datatable" and
len(df.columns) < 3):
payload['error'] += "SQL needs to return at least 3 columns. "
elif (
not payload['error'] and
chart.sql_layout == 'columns' and
len(df.columns) < 2):
payload['error'] += "SQL needs to return at least 2 columns. "
elif not payload['error']:
import numpy as np
chart_type = chart.chart_type
data = None
if chart.show_datatable or chart_type == "datatable":
data = df.to_dict(orient="split")
data['columns'] = [{'title': c} for c in data['columns']]
payload['data'] = data
x_col = 1 if chart.sql_layout == 'series' else 0
if chart.x_is_date:
try:
df[df.columns[x_col]] = pd.to_datetime(
df[df.columns[x_col]])
df[df.columns[x_col]] = df[df.columns[x_col]].apply(
lambda x: int(x.strftime("%s")) * 1000)
except Exception as e:
payload['error'] = "Time conversion failed"
if chart_type == 'datatable':
payload['state'] = 'SUCCESS'
return wwwutils.json_response(payload)
else:
if chart.sql_layout == 'series':
xaxis_label = df.columns[1]
yaxis_label = df.columns[2]
df[df.columns[2]] = df[df.columns[2]].astype(np.float)
df = df.pivot_table(
index=df.columns[1],
columns=df.columns[0],
values=df.columns[2], aggfunc=np.sum)
else:
xaxis_label = df.columns[0]
yaxis_label = 'y'
df.index = df[df.columns[0]]
df = df.sort(df.columns[0])
del df[df.columns[0]]
for col in df.columns:
df[col] = df[col].astype(np.float)
df = df.fillna(0)
NVd3ChartClass = chart_mapping.get(chart.chart_type)
NVd3ChartClass = getattr(nvd3, NVd3ChartClass)
nvd3_chart = NVd3ChartClass(x_is_date=chart.x_is_date)
for col in df.columns:
nvd3_chart.add_serie(name=col, y=df[col].tolist(), x=df[col].index.tolist())
try:
nvd3_chart.buildcontent()
payload['chart_type'] = nvd3_chart.__class__.__name__
payload['htmlcontent'] = nvd3_chart.htmlcontent
except Exception as e:
payload['error'] = str(e)
payload['state'] = 'SUCCESS'
payload['request_dict'] = request_dict
return wwwutils.json_response(payload)
@expose('/chart')
@data_profiling_required
def chart(self):
if conf.getboolean('core', 'secure_mode'):
abort(404)
with create_session() as session:
chart_id = request.args.get('chart_id')
embed = request.args.get('embed')
chart = session.query(models.Chart).filter_by(id=chart_id).first()
NVd3ChartClass = chart_mapping.get(chart.chart_type)
if not NVd3ChartClass:
flash(
"Not supported anymore as the license was incompatible, "
"sorry",
"danger")
redirect('/admin/chart/')
sql = ""
if chart.show_sql:
sql = Markup(highlight(
chart.sql,
lexers.SqlLexer(),
HtmlFormatter(noclasses=True))
)
return self.render(
'airflow/nvd3.html',
chart=chart,
title="Airflow - Chart",
sql=sql,
label=chart.label,
embed=embed)
@expose('/dag_stats')
@login_required
@provide_session
def dag_stats(self, session=None):
ds = models.DagStat
ds.update(
dag_ids=[dag.dag_id for dag in dagbag.dags.values() if not dag.is_subdag]
)
qry = (
session.query(ds.dag_id, ds.state, ds.count)
)
data = {}
for dag_id, state, count in qry:
if dag_id not in data:
data[dag_id] = {}
data[dag_id][state] = count
payload = {}
for dag in dagbag.dags.values():
payload[dag.safe_dag_id] = []
for state in State.dag_states:
try:
count = data[dag.dag_id][state]
except Exception:
count = 0
d = {
'state': state,
'count': count,
'dag_id': dag.dag_id,
'color': State.color(state)
}
payload[dag.safe_dag_id].append(d)
return wwwutils.json_response(payload)
@expose('/task_stats')
@login_required
@provide_session
def task_stats(self, session=None):
TI = models.TaskInstance
DagRun = models.DagRun
Dag = models.DagModel
LastDagRun = (
session.query(DagRun.dag_id, sqla.func.max(DagRun.execution_date).label('execution_date'))
.join(Dag, Dag.dag_id == DagRun.dag_id)
.filter(DagRun.state != State.RUNNING)
.filter(Dag.is_active == True)
.filter(Dag.is_subdag == False)
.group_by(DagRun.dag_id)
.subquery('last_dag_run')
)
RunningDagRun = (
session.query(DagRun.dag_id, DagRun.execution_date)
.join(Dag, Dag.dag_id == DagRun.dag_id)
.filter(DagRun.state == State.RUNNING)
.filter(Dag.is_active == True)
.filter(Dag.is_subdag == False)
.subquery('running_dag_run')
)
LastTI = (
session.query(TI.dag_id.label('dag_id'), TI.state.label('state'))
.join(LastDagRun, and_(
LastDagRun.c.dag_id == TI.dag_id,
LastDagRun.c.execution_date == TI.execution_date))
)
RunningTI = (
session.query(TI.dag_id.label('dag_id'), TI.state.label('state'))
.join(RunningDagRun, and_(
RunningDagRun.c.dag_id == TI.dag_id,
RunningDagRun.c.execution_date == TI.execution_date))
)
UnionTI = union_all(LastTI, RunningTI).alias('union_ti')
qry = (
session.query(UnionTI.c.dag_id, UnionTI.c.state, sqla.func.count())
.group_by(UnionTI.c.dag_id, UnionTI.c.state)
)
data = {}
for dag_id, state, count in qry:
if dag_id not in data:
data[dag_id] = {}
data[dag_id][state] = count
session.commit()
payload = {}
for dag in dagbag.dags.values():
payload[dag.safe_dag_id] = []
for state in State.task_states:
try:
count = data[dag.dag_id][state]
except:
count = 0
d = {
'state': state,
'count': count,
'dag_id': dag.dag_id,
'color': State.color(state)
}
payload[dag.safe_dag_id].append(d)
return wwwutils.json_response(payload)
@expose('/code')
@login_required
def code(self):
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
title = dag_id
try:
with open(dag.fileloc, 'r') as f:
code = f.read()
html_code = highlight(
code, lexers.PythonLexer(), HtmlFormatter(linenos=True))
except IOError as e:
html_code = str(e)
return self.render(
'airflow/dag_code.html', html_code=html_code, dag=dag, title=title,
root=request.args.get('root'),
demo_mode=conf.getboolean('webserver', 'demo_mode'))
    @expose('/dag_details')
    @login_required
    @provide_session
    def dag_details(self, session=None):
        """Render the DAG details page with per-state task instance counts."""
        dag_id = request.args.get('dag_id')
        dag = dagbag.get_dag(dag_id)
        title = "DAG details"
        TI = models.TaskInstance
        # Count task instances of this DAG grouped by state.
        states = (
            session.query(TI.state, sqla.func.count(TI.dag_id))
            .filter(TI.dag_id == dag_id)
            .group_by(TI.state)
            .all()
        )
        return self.render(
            'airflow/dag_details.html',
            dag=dag, title=title, states=states, State=State)
    @current_app.errorhandler(404)
    def circles(self):
        """Render the custom 404 (page not found) error page."""
        return render_template(
            'airflow/circles.html', hostname=get_hostname()), 404
    @current_app.errorhandler(500)
    def show_traceback(self):
        """Render the custom 500 error page, including the traceback."""
        from airflow.utils import asciiart as ascii_
        return render_template(
            'airflow/traceback.html',
            hostname=get_hostname(),
            nukular=ascii_.nukular,
            info=traceback.format_exc()), 500
    @expose('/noaccess')
    def noaccess(self):
        """Render the access-denied page."""
        return self.render('airflow/noaccess.html')
@expose('/pickle_info')
@login_required
def pickle_info(self):
d = {}
dag_id = request.args.get('dag_id')
dags = [dagbag.dags.get(dag_id)] if dag_id else dagbag.dags.values()
for dag in dags:
if not dag.is_subdag:
d[dag.dag_id] = dag.pickle_info()
return wwwutils.json_response(d)
    @expose('/login', methods=['GET', 'POST'])
    def login(self):
        """Delegate login to the configured airflow.login backend."""
        return airflow.login.login(self, request)
    @expose('/logout')
    def logout(self):
        """Log the user out and redirect to the admin index."""
        logout_user()
        flash('You have been logged out.')
        return redirect(url_for('admin.index'))
    @expose('/rendered')
    @login_required
    @wwwutils.action_logging
    def rendered(self):
        """Show a task's template fields rendered for a given execution date."""
        dag_id = request.args.get('dag_id')
        task_id = request.args.get('task_id')
        execution_date = request.args.get('execution_date')
        dttm = pendulum.parse(execution_date)
        form = DateTimeForm(data={'execution_date': dttm})
        dag = dagbag.get_dag(dag_id)
        # Copy the task so template rendering cannot mutate the shared DAG.
        task = copy.copy(dag.get_task(task_id))
        ti = models.TaskInstance(task=task, execution_date=dttm)
        try:
            ti.render_templates()
        except Exception as e:
            flash("Error rendering template: " + str(e), "error")
        title = "Rendered Template"
        html_dict = {}
        # Render each template field, using a dedicated renderer when one
        # exists (sql, bash_command, ...), otherwise a plain <pre> block.
        for template_field in task.__class__.template_fields:
            content = getattr(task, template_field)
            if template_field in attr_renderer:
                html_dict[template_field] = attr_renderer[template_field](content)
            else:
                html_dict[template_field] = (
                    "<pre><code>" + str(content) + "</pre></code>")
        return self.render(
            'airflow/ti_code.html',
            html_dict=html_dict,
            dag=dag,
            task_id=task_id,
            execution_date=execution_date,
            form=form,
            title=title, )
    @expose('/log')
    @login_required
    @wwwutils.action_logging
    @provide_session
    def log(self, session=None):
        """Render a task instance's logs, read via the handler named by the
        core.task_log_reader configuration option."""
        dag_id = request.args.get('dag_id')
        task_id = request.args.get('task_id')
        execution_date = request.args.get('execution_date')
        dttm = pendulum.parse(execution_date)
        form = DateTimeForm(data={'execution_date': dttm})
        dag = dagbag.get_dag(dag_id)
        ti = session.query(models.TaskInstance).filter(
            models.TaskInstance.dag_id == dag_id,
            models.TaskInstance.task_id == task_id,
            models.TaskInstance.execution_date == dttm).first()
        if ti is None:
            logs = ["*** Task instance did not exist in the DB\n"]
        else:
            # Locate the configured log handler on the 'airflow.task' logger.
            logger = logging.getLogger('airflow.task')
            task_log_reader = conf.get('core', 'task_log_reader')
            handler = next((handler for handler in logger.handlers
                            if handler.name == task_log_reader), None)
            try:
                ti.task = dag.get_task(ti.task_id)
                logs = handler.read(ti)
            except AttributeError as e:
                # handler is None or lacks read(); show a friendly message.
                logs = ["Task log handler {} does not support read logs.\n{}\n" \
                    .format(task_log_reader, str(e))]
        # Under Python 2, decode byte logs so the template gets unicode.
        for i, log in enumerate(logs):
            if PY2 and not isinstance(log, unicode):
                logs[i] = log.decode('utf-8')
        return self.render(
            'airflow/ti_log.html',
            logs=logs, dag=dag, title="Log by attempts", task_id=task_id,
            execution_date=execution_date, form=form)
    @expose('/task')
    @login_required
    @wwwutils.action_logging
    def task(self):
        """Render the task-instance details page: TI/task attributes, rendered
        special attributes, and any failed scheduler dependencies."""
        TI = models.TaskInstance
        dag_id = request.args.get('dag_id')
        task_id = request.args.get('task_id')
        # Carrying execution_date through even though it is not needed to
        # look the task up; it keeps navigation links consistent.
        execution_date = request.args.get('execution_date')
        dttm = pendulum.parse(execution_date)
        form = DateTimeForm(data={'execution_date': dttm})
        dag = dagbag.get_dag(dag_id)
        if not dag or task_id not in dag.task_ids:
            flash(
                "Task [{}.{}] doesn't seem to exist"
                " at the moment".format(dag_id, task_id),
                "error")
            return redirect('/admin/')
        # Copy the task so template resolution cannot mutate the shared DAG.
        task = copy.copy(dag.get_task(task_id))
        task.resolve_template_files()
        ti = TI(task=task, execution_date=dttm)
        ti.refresh_from_db()
        ti_attrs = []
        for attr_name in dir(ti):
            if not attr_name.startswith('_'):
                attr = getattr(ti, attr_name)
                # self.task is itself a bound method here, so this type
                # comparison filters bound methods out of the listing.
                if type(attr) != type(self.task):
                    ti_attrs.append((attr_name, str(attr)))
        task_attrs = []
        for attr_name in dir(task):
            if not attr_name.startswith('_'):
                attr = getattr(task, attr_name)
                if type(attr) != type(self.task) and \
                        attr_name not in attr_renderer:
                    task_attrs.append((attr_name, str(attr)))
        # Render attributes that have dedicated HTML renderers (sql, bash...).
        special_attrs_rendered = {}
        for attr_name in attr_renderer:
            if hasattr(task, attr_name):
                source = getattr(task, attr_name)
                special_attrs_rendered[attr_name] = attr_renderer[attr_name](source)
        # Fallback message shown when no scheduler dependency actually failed.
        no_failed_deps_result = [(
            "Unknown",
            dedent("""\
            All dependencies are met but the task instance is not running. In most cases this just means that the task will probably be scheduled soon unless:<br/>
            - The scheduler is down or under heavy load<br/>
            {}
            <br/>
            If this task instance does not start soon please contact your Airflow """
                   """administrator for assistance."""
                   .format(
                       "- This task instance already ran and had its state changed "
                       "manually (e.g. cleared in the UI)<br/>"
                       if ti.state == State.NONE else "")))]
        dep_context = DepContext(SCHEDULER_DEPS)
        failed_dep_reasons = [(dep.dep_name, dep.reason) for dep in
                              ti.get_failed_dep_statuses(
                                  dep_context=dep_context)]
        title = "Task Instance Details"
        return self.render(
            'airflow/task.html',
            task_attrs=task_attrs,
            ti_attrs=ti_attrs,
            failed_dep_reasons=failed_dep_reasons or no_failed_deps_result,
            task_id=task_id,
            execution_date=execution_date,
            special_attrs_rendered=special_attrs_rendered,
            form=form,
            dag=dag, title=title)
    @expose('/xcom')
    @login_required
    @wwwutils.action_logging
    @provide_session
    def xcom(self, session=None):
        """Render the XCom entries of a task instance for one execution date."""
        dag_id = request.args.get('dag_id')
        task_id = request.args.get('task_id')
        # Carrying execution_date through, even though it's irrelevant for
        # the existence check below; it keeps navigation links consistent.
        execution_date = request.args.get('execution_date')
        dttm = pendulum.parse(execution_date)
        form = DateTimeForm(data={'execution_date': dttm})
        dag = dagbag.get_dag(dag_id)
        if not dag or task_id not in dag.task_ids:
            flash(
                "Task [{}.{}] doesn't seem to exist"
                " at the moment".format(dag_id, task_id),
                "error")
            return redirect('/admin/')
        xcomlist = session.query(XCom).filter(
            XCom.dag_id == dag_id, XCom.task_id == task_id,
            XCom.execution_date == dttm).all()
        # Keys starting with '_' are internal and hidden from the page.
        attributes = []
        for xcom in xcomlist:
            if not xcom.key.startswith('_'):
                attributes.append((xcom.key, xcom.value))
        title = "XCom"
        return self.render(
            'airflow/xcom.html',
            attributes=attributes,
            task_id=task_id,
            execution_date=execution_date,
            form=form,
            dag=dag, title=title)
    @expose('/run')
    @login_required
    @wwwutils.action_logging
    @wwwutils.notify_owner
    def run(self):
        """Queue a single task instance for immediate execution.

        Only supported with the CeleryExecutor; dependency checks can be
        selectively ignored via query flags.
        """
        dag_id = request.args.get('dag_id')
        task_id = request.args.get('task_id')
        origin = request.args.get('origin')
        dag = dagbag.get_dag(dag_id)
        task = dag.get_task(task_id)
        execution_date = request.args.get('execution_date')
        execution_date = pendulum.parse(execution_date)
        ignore_all_deps = request.args.get('ignore_all_deps') == "true"
        ignore_task_deps = request.args.get('ignore_task_deps') == "true"
        ignore_ti_state = request.args.get('ignore_ti_state') == "true"
        try:
            from airflow.executors import GetDefaultExecutor
            from airflow.executors.celery_executor import CeleryExecutor
            executor = GetDefaultExecutor()
            if not isinstance(executor, CeleryExecutor):
                flash("Only works with the CeleryExecutor, sorry", "error")
                return redirect(origin)
        except ImportError:
            # in case CeleryExecutor cannot be imported it is not active either
            flash("Only works with the CeleryExecutor, sorry", "error")
            return redirect(origin)
        ti = models.TaskInstance(task=task, execution_date=execution_date)
        ti.refresh_from_db()
        # Make sure the task instance can be queued
        dep_context = DepContext(
            deps=QUEUE_DEPS,
            ignore_all_deps=ignore_all_deps,
            ignore_task_deps=ignore_task_deps,
            ignore_ti_state=ignore_ti_state)
        failed_deps = list(ti.get_failed_dep_statuses(dep_context=dep_context))
        if failed_deps:
            failed_deps_str = ", ".join(
                ["{}: {}".format(dep.dep_name, dep.reason) for dep in failed_deps])
            flash("Could not queue task instance for execution, dependencies not met: "
                  "{}".format(failed_deps_str),
                  "error")
            return redirect(origin)
        executor.start()
        executor.queue_task_instance(
            ti,
            ignore_all_deps=ignore_all_deps,
            ignore_task_deps=ignore_task_deps,
            ignore_ti_state=ignore_ti_state)
        executor.heartbeat()
        flash(
            "Sent {} to the message queue, "
            "it should start any moment now.".format(ti))
        return redirect(origin)
    @expose('/trigger')
    @login_required
    @wwwutils.action_logging
    @wwwutils.notify_owner
    def trigger(self):
        """Manually trigger a new dag run (external_trigger=True) for a DAG."""
        dag_id = request.args.get('dag_id')
        origin = request.args.get('origin') or "/admin/"
        dag = dagbag.get_dag(dag_id)
        if not dag:
            flash("Cannot find dag {}".format(dag_id))
            return redirect(origin)
        execution_date = timezone.utcnow()
        run_id = "manual__{0}".format(execution_date.isoformat())
        # Refuse to create a duplicate run_id.
        dr = DagRun.find(dag_id=dag_id, run_id=run_id)
        if dr:
            flash("This run_id {} already exists".format(run_id))
            return redirect(origin)
        run_conf = {}
        dag.create_dagrun(
            run_id=run_id,
            execution_date=execution_date,
            state=State.RUNNING,
            conf=run_conf,
            external_trigger=True
        )
        flash(
            "Triggered {}, "
            "it should start any moment now.".format(dag_id))
        return redirect(origin)
    def _clear_dag_tis(self, dag, start_date, end_date, origin,
                       recursive=False, confirmed=False):
        """Clear the DAG's task instances in [start_date, end_date].

        When confirmed, performs the clear and redirects back to *origin*;
        otherwise does a dry run and renders a confirmation page listing the
        task instances that would be cleared.
        """
        if confirmed:
            count = dag.clear(
                start_date=start_date,
                end_date=end_date,
                include_subdags=recursive)
            flash("{0} task instances have been cleared".format(count))
            return redirect(origin)
        tis = dag.clear(
            start_date=start_date,
            end_date=end_date,
            include_subdags=recursive,
            dry_run=True)
        if not tis:
            flash("No task instances to clear", 'error')
            response = redirect(origin)
        else:
            details = "\n".join([str(t) for t in tis])
            response = self.render(
                'airflow/confirm.html',
                message=("Here's the list of task instances you are about "
                         "to clear:"),
                details=details)
        return response
    @expose('/clear')
    @login_required
    @wwwutils.action_logging
    @wwwutils.notify_owner
    def clear(self):
        """Clear a task instance (and selected relatives) so it can re-run."""
        dag_id = request.args.get('dag_id')
        task_id = request.args.get('task_id')
        origin = request.args.get('origin')
        dag = dagbag.get_dag(dag_id)
        execution_date = request.args.get('execution_date')
        execution_date = pendulum.parse(execution_date)
        confirmed = request.args.get('confirmed') == "true"
        upstream = request.args.get('upstream') == "true"
        downstream = request.args.get('downstream') == "true"
        future = request.args.get('future') == "true"
        past = request.args.get('past') == "true"
        recursive = request.args.get('recursive') == "true"
        # Narrow the DAG to exactly the selected task plus requested relatives.
        dag = dag.sub_dag(
            task_regex=r"^{0}$".format(task_id),
            include_downstream=downstream,
            include_upstream=upstream)
        # None leaves the range open-ended in the chosen direction.
        end_date = execution_date if not future else None
        start_date = execution_date if not past else None
        return self._clear_dag_tis(dag, start_date, end_date, origin,
                                   recursive=recursive, confirmed=confirmed)
    @expose('/dagrun_clear')
    @login_required
    @wwwutils.action_logging
    @wwwutils.notify_owner
    def dagrun_clear(self):
        """Clear every task instance of one dag run (subdags included)."""
        dag_id = request.args.get('dag_id')
        task_id = request.args.get('task_id')
        origin = request.args.get('origin')
        execution_date = request.args.get('execution_date')
        confirmed = request.args.get('confirmed') == "true"
        dag = dagbag.get_dag(dag_id)
        execution_date = pendulum.parse(execution_date)
        # A single-run clear: start and end pinned to this execution date.
        start_date = execution_date
        end_date = execution_date
        return self._clear_dag_tis(dag, start_date, end_date, origin,
                                   recursive=True, confirmed=confirmed)
@expose('/blocked')
@login_required
@provide_session
def blocked(self, session=None):
DR = models.DagRun
dags = (
session.query(DR.dag_id, sqla.func.count(DR.id))
.filter(DR.state == State.RUNNING)
.group_by(DR.dag_id)
.all()
)
payload = []
for dag_id, active_dag_runs in dags:
max_active_runs = 0
if dag_id in dagbag.dags:
max_active_runs = dagbag.dags[dag_id].max_active_runs
payload.append({
'dag_id': dag_id,
'active_dag_run': active_dag_runs,
'max_active_runs': max_active_runs,
})
return wwwutils.json_response(payload)
    @expose('/dagrun_success')
    @login_required
    @wwwutils.action_logging
    @wwwutils.notify_owner
    def dagrun_success(self):
        """Mark a dag run as SUCCESS; without confirmation, show the list of
        task instances that would be altered."""
        dag_id = request.args.get('dag_id')
        execution_date = request.args.get('execution_date')
        confirmed = request.args.get('confirmed') == 'true'
        origin = request.args.get('origin')
        if not execution_date:
            flash('Invalid execution date', 'error')
            return redirect(origin)
        execution_date = pendulum.parse(execution_date)
        dag = dagbag.get_dag(dag_id)
        if not dag:
            flash('Cannot find DAG: {}'.format(dag_id), 'error')
            return redirect(origin)
        # commit=False gives a dry run listing the affected task instances.
        new_dag_state = set_dag_run_state(dag, execution_date, state=State.SUCCESS,
                                          commit=confirmed)
        if confirmed:
            flash('Marked success on {} task instances'.format(len(new_dag_state)))
            return redirect(origin)
        else:
            details = '\n'.join([str(t) for t in new_dag_state])
            response = self.render('airflow/confirm.html',
                                   message=("Here's the list of task instances you are "
                                            "about to mark as successful:"),
                                   details=details)
            return response
    @expose('/success')
    @login_required
    @wwwutils.action_logging
    @wwwutils.notify_owner
    def success(self):
        """Mark a task instance (and optionally relatives) as SUCCESS.

        Without confirmation, renders a confirmation page listing the task
        instances that would be altered.
        """
        dag_id = request.args.get('dag_id')
        task_id = request.args.get('task_id')
        origin = request.args.get('origin')
        dag = dagbag.get_dag(dag_id)
        # NOTE(review): dag/task are dereferenced here before the not-found
        # guards below, so a missing DAG raises earlier (pre-existing order).
        task = dag.get_task(task_id)
        task.dag = dag
        execution_date = request.args.get('execution_date')
        execution_date = pendulum.parse(execution_date)
        confirmed = request.args.get('confirmed') == "true"
        upstream = request.args.get('upstream') == "true"
        downstream = request.args.get('downstream') == "true"
        future = request.args.get('future') == "true"
        past = request.args.get('past') == "true"
        if not dag:
            flash("Cannot find DAG: {}".format(dag_id))
            return redirect(origin)
        if not task:
            flash("Cannot find task {} in DAG {}".format(task_id, dag.dag_id))
            return redirect(origin)
        from airflow.api.common.experimental.mark_tasks import set_state
        if confirmed:
            altered = set_state(task=task, execution_date=execution_date,
                                upstream=upstream, downstream=downstream,
                                future=future, past=past, state=State.SUCCESS,
                                commit=True)
            flash("Marked success on {} task instances".format(len(altered)))
            return redirect(origin)
        # Dry run (commit=False): collect what would change and confirm.
        to_be_altered = set_state(task=task, execution_date=execution_date,
                                  upstream=upstream, downstream=downstream,
                                  future=future, past=past, state=State.SUCCESS,
                                  commit=False)
        details = "\n".join([str(t) for t in to_be_altered])
        response = self.render("airflow/confirm.html",
                               message=("Here's the list of task instances you are "
                                        "about to mark as successful:"),
                               details=details)
        return response
    @expose('/tree')
    @login_required
    @wwwutils.gzipped
    @wwwutils.action_logging
    @provide_session
    def tree(self, session=None):
        """Render the tree view: a task x dag-run grid over the last N runs,
        serialized as a nested JSON structure for the D3 tree widget."""
        dag_id = request.args.get('dag_id')
        blur = conf.getboolean('webserver', 'demo_mode')
        dag = dagbag.get_dag(dag_id)
        root = request.args.get('root')
        if root:
            # Restrict the view to the chosen task and its upstream lineage.
            dag = dag.sub_dag(
                task_regex=root,
                include_downstream=False,
                include_upstream=True)
        base_date = request.args.get('base_date')
        num_runs = request.args.get('num_runs')
        num_runs = int(num_runs) if num_runs else 25
        if base_date:
            base_date = timezone.parse(base_date)
        else:
            base_date = dag.latest_execution_date or timezone.utcnow()
        # Walk backwards num_runs schedule intervals from base_date.
        dates = dag.date_range(base_date, num=-abs(num_runs))
        min_date = dates[0] if dates else datetime(2000, 1, 1)
        DR = models.DagRun
        dag_runs = (
            session.query(DR)
            .filter(
                DR.dag_id == dag.dag_id,
                DR.execution_date <= base_date,
                DR.execution_date >= min_date)
            .all()
        )
        dag_runs = {
            dr.execution_date: alchemy_to_dict(dr) for dr in dag_runs}
        dates = sorted(list(dag_runs.keys()))
        max_date = max(dates) if dates else None
        tis = dag.get_task_instances(
            session, start_date=min_date, end_date=base_date)
        # Index task instances by (task_id, execution_date) for the grid.
        task_instances = {}
        for ti in tis:
            tid = alchemy_to_dict(ti)
            dr = dag_runs.get(ti.execution_date)
            tid['external_trigger'] = dr['external_trigger'] if dr else False
            task_instances[(ti.task_id, ti.execution_date)] = tid
        expanded = []
        # Cap the number of rendered nodes so huge DAG histories don't blow
        # up the page; repeated subtrees beyond the limit are collapsed.
        node_count = [0]
        # NOTE(review): true division under Python 3 yields a float limit
        # here, which still compares correctly below.
        node_limit = 5000 / max(1, len(dag.roots))
        def recurse_nodes(task, visited):
            visited.add(task)
            node_count[0] += 1
            children = [
                recurse_nodes(t, visited) for t in task.upstream_list
                if node_count[0] < node_limit or t not in visited]
            # D3 renders 'children' expanded and '_children' collapsed; only
            # the first occurrence of a task is expanded.
            children_key = 'children'
            if task.task_id not in expanded:
                expanded.append(task.task_id)
            elif children:
                children_key = "_children"
            def set_duration(tid):
                # For still-running TIs, compute a live duration client-side
                # data can display; returns its argument (possibly None).
                if (isinstance(tid, dict) and tid.get("state") == State.RUNNING and
                        tid["start_date"] is not None):
                    d = timezone.utcnow() - pendulum.parse(tid["start_date"])
                    tid["duration"] = d.total_seconds()
                return tid
            return {
                'name': task.task_id,
                'instances': [
                    # Falls back to a stub dict when no TI exists for d.
                    set_duration(task_instances.get((task.task_id, d))) or {
                        'execution_date': d.isoformat(),
                        'task_id': task.task_id
                    }
                    for d in dates],
                children_key: children,
                'num_dep': len(task.upstream_list),
                'operator': task.task_type,
                'retries': task.retries,
                'owner': task.owner,
                'start_date': task.start_date,
                'end_date': task.end_date,
                'depends_on_past': task.depends_on_past,
                'ui_color': task.ui_color,
            }
        data = {
            'name': '[DAG]',
            'children': [recurse_nodes(t, set()) for t in dag.roots],
            'instances': [
                dag_runs.get(d) or {'execution_date': d.isoformat()}
                for d in dates],
        }
        data = json.dumps(data, indent=4, default=json_ser)
        session.commit()
        form = DateTimeWithNumRunsForm(data={'base_date': max_date,
                                             'num_runs': num_runs})
        return self.render(
            'airflow/tree.html',
            operators=sorted(
                list(set([op.__class__ for op in dag.tasks])),
                key=lambda x: x.__name__
            ),
            root=root,
            form=form,
            dag=dag, data=data, blur=blur)
    @expose('/graph')
    @login_required
    @wwwutils.gzipped
    @wwwutils.action_logging
    @provide_session
    def graph(self, session=None):
        """Render the graph view: dagre-d3 nodes/edges plus the task states
        of one selected dag run."""
        dag_id = request.args.get('dag_id')
        blur = conf.getboolean('webserver', 'demo_mode')
        dag = dagbag.get_dag(dag_id)
        if dag_id not in dagbag.dags:
            flash('DAG "{0}" seems to be missing.'.format(dag_id), "error")
            return redirect('/admin/')
        root = request.args.get('root')
        if root:
            # Restrict the view to the chosen task and its upstream lineage.
            dag = dag.sub_dag(
                task_regex=root,
                include_upstream=True,
                include_downstream=False)
        arrange = request.args.get('arrange', dag.orientation)
        nodes = []
        edges = []
        for task in dag.tasks:
            nodes.append({
                'id': task.task_id,
                'value': {
                    'label': task.task_id,
                    'labelStyle': "fill:{0};".format(task.ui_fgcolor),
                    'style': "fill:{0};".format(task.ui_color),
                }
            })
        # Walk upstream from the roots, de-duplicating edges along the way.
        def get_upstream(task):
            for t in task.upstream_list:
                edge = {
                    'u': t.task_id,
                    'v': task.task_id,
                }
                if edge not in edges:
                    edges.append(edge)
                    get_upstream(t)
        for t in dag.roots:
            get_upstream(t)
        dttm = request.args.get('execution_date')
        if dttm:
            dttm = pendulum.parse(dttm)
        else:
            dttm = dag.latest_execution_date or timezone.utcnow()
        DR = models.DagRun
        drs = (
            session.query(DR)
            .filter_by(dag_id=dag_id)
            .order_by(desc(DR.execution_date)).all()
        )
        dr_choices = []
        dr_state = None
        for dr in drs:
            dr_choices.append((dr.execution_date.isoformat(), dr.run_id))
            if dttm == dr.execution_date:
                dr_state = dr.state
        # Form letting the user pick a dag run and a layout direction.
        class GraphForm(Form):
            execution_date = SelectField("DAG run", choices=dr_choices)
            arrange = SelectField("Layout", choices=(
                ('LR', "Left->Right"),
                ('RL', "Right->Left"),
                ('TB', "Top->Bottom"),
                ('BT', "Bottom->Top"),
            ))
        form = GraphForm(
            data={'execution_date': dttm.isoformat(), 'arrange': arrange})
        task_instances = {
            ti.task_id: alchemy_to_dict(ti)
            for ti in dag.get_task_instances(session, dttm, dttm)}
        tasks = {
            t.task_id: {
                'dag_id': t.dag_id,
                'task_type': t.task_type,
            }
            for t in dag.tasks}
        if not tasks:
            flash("No tasks found", "error")
        session.commit()
        doc_md = markdown.markdown(dag.doc_md) if hasattr(dag, 'doc_md') and dag.doc_md else ''
        return self.render(
            'airflow/graph.html',
            dag=dag,
            form=form,
            width=request.args.get('width', "100%"),
            height=request.args.get('height', "800"),
            execution_date=dttm.isoformat(),
            state_token=state_token(dr_state),
            doc_md=doc_md,
            arrange=arrange,
            operators=sorted(
                list(set([op.__class__ for op in dag.tasks])),
                key=lambda x: x.__name__
            ),
            blur=blur,
            root=root or '',
            task_instances=json.dumps(task_instances, indent=2),
            tasks=json.dumps(tasks, indent=2),
            nodes=json.dumps(nodes, indent=2),
            edges=json.dumps(edges, indent=2), )
    @expose('/duration')
    @login_required
    @wwwutils.action_logging
    @provide_session
    def duration(self, session=None):
        """Render duration charts: per-task run durations and cumulative
        durations that include time lost to failed attempts."""
        dag_id = request.args.get('dag_id')
        dag = dagbag.get_dag(dag_id)
        base_date = request.args.get('base_date')
        num_runs = request.args.get('num_runs')
        num_runs = int(num_runs) if num_runs else 25
        if base_date:
            base_date = pendulum.parse(base_date)
        else:
            base_date = dag.latest_execution_date or timezone.utcnow()
        # Walk backwards num_runs schedule intervals from base_date.
        dates = dag.date_range(base_date, num=-abs(num_runs))
        min_date = dates[0] if dates else datetime(2000, 1, 1)
        root = request.args.get('root')
        if root:
            dag = dag.sub_dag(
                task_regex=root,
                include_upstream=True,
                include_downstream=False)
        chart_height = get_chart_height(dag)
        chart = nvd3.lineChart(
            name="lineChart", x_is_date=True, height=chart_height, width="1200")
        cum_chart = nvd3.lineChart(
            name="cumLineChart", x_is_date=True, height=chart_height, width="1200")
        y = defaultdict(list)
        x = defaultdict(list)
        cum_y = defaultdict(list)
        tis = dag.get_task_instances(
            session, start_date=min_date, end_date=base_date)
        TF = models.TaskFail
        ti_fails = (
            session
            .query(TF)
            .filter(
                TF.dag_id == dag.dag_id,
                TF.execution_date >= min_date,
                TF.execution_date <= base_date,
                TF.task_id.in_([t.task_id for t in dag.tasks]))
            .all()
        )
        # Total time lost to failed attempts per (dag, task, execution_date).
        fails_totals = defaultdict(int)
        for tf in ti_fails:
            dict_key = (tf.dag_id, tf.task_id, tf.execution_date)
            fails_totals[dict_key] += tf.duration
        for ti in tis:
            if ti.duration:
                dttm = wwwutils.epoch(ti.execution_date)
                x[ti.task_id].append(dttm)
                y[ti.task_id].append(float(ti.duration))
                fails_dict_key = (ti.dag_id, ti.task_id, ti.execution_date)
                fails_total = fails_totals[fails_dict_key]
                cum_y[ti.task_id].append(float(ti.duration + fails_total))
        # Pick a human-friendly unit for each y axis from the observed values.
        y_unit = infer_time_unit([d for t in y.values() for d in t])
        cum_y_unit = infer_time_unit([d for t in cum_y.values() for d in t])
        chart.create_y_axis('yAxis', format='.02f', custom_format=False,
                            label='Duration ({})'.format(y_unit))
        chart.axislist['yAxis']['axisLabelDistance'] = '40'
        cum_chart.create_y_axis('yAxis', format='.02f', custom_format=False,
                                label='Duration ({})'.format(cum_y_unit))
        cum_chart.axislist['yAxis']['axisLabelDistance'] = '40'
        for task in dag.tasks:
            if x[task.task_id]:
                chart.add_serie(name=task.task_id, x=x[task.task_id],
                                y=scale_time_units(y[task.task_id], y_unit))
                cum_chart.add_serie(name=task.task_id, x=x[task.task_id],
                                    y=scale_time_units(cum_y[task.task_id],
                                                       cum_y_unit))
        dates = sorted(list({ti.execution_date for ti in tis}))
        max_date = max([ti.execution_date for ti in tis]) if dates else None
        session.commit()
        form = DateTimeWithNumRunsForm(data={'base_date': max_date,
                                             'num_runs': num_runs})
        chart.buildcontent()
        cum_chart.buildcontent()
        # Splice a 'chartload' jQuery trigger into the generated chart JS so
        # the page can react once the cumulative chart has rendered.
        s_index = cum_chart.htmlcontent.rfind('});')
        cum_chart.htmlcontent = (cum_chart.htmlcontent[:s_index] +
                                 "$(function() {$( document ).trigger('chartload') })" +
                                 cum_chart.htmlcontent[s_index:])
        return self.render(
            'airflow/duration_chart.html',
            dag=dag,
            demo_mode=conf.getboolean('webserver', 'demo_mode'),
            root=root,
            form=form,
            chart=chart.htmlcontent,
            cum_chart=cum_chart.htmlcontent
        )
    @expose('/tries')
    @login_required
    @wwwutils.action_logging
    @provide_session
    def tries(self, session=None):
        """Render a line chart of each task's try number per execution date.

        Query-string args: ``dag_id``, optional ``base_date`` (ISO timestamp),
        optional ``num_runs`` (window size, default 25) and optional ``root``
        (task regex restricting the DAG to a sub-DAG).
        """
        dag_id = request.args.get('dag_id')
        dag = dagbag.get_dag(dag_id)
        base_date = request.args.get('base_date')
        num_runs = request.args.get('num_runs')
        num_runs = int(num_runs) if num_runs else 25
        if base_date:
            base_date = pendulum.parse(base_date)
        else:
            base_date = dag.latest_execution_date or timezone.utcnow()
        # Window covers num_runs schedule intervals ending at base_date.
        dates = dag.date_range(base_date, num=-abs(num_runs))
        min_date = dates[0] if dates else datetime(2000, 1, 1)
        root = request.args.get('root')
        if root:
            dag = dag.sub_dag(
                task_regex=root,
                include_upstream=True,
                include_downstream=False)
        chart_height = get_chart_height(dag)
        chart = nvd3.lineChart(
            name="lineChart", x_is_date=True, y_axis_format='d', height=chart_height,
            width="1200")
        # One serie per task: x = execution date (epoch ms), y = try number.
        for task in dag.tasks:
            y = []
            x = []
            for ti in task.get_task_instances(session, start_date=min_date,
                                              end_date=base_date):
                dttm = wwwutils.epoch(ti.execution_date)
                x.append(dttm)
                y.append(ti.try_number)
            if x:
                chart.add_serie(name=task.task_id, x=x, y=y)
        tis = dag.get_task_instances(
            session, start_date=min_date, end_date=base_date)
        tries = sorted(list({ti.try_number for ti in tis}))
        # Default the form's base_date to the newest execution date found.
        max_date = max([ti.execution_date for ti in tis]) if tries else None
        session.commit()
        form = DateTimeWithNumRunsForm(data={'base_date': max_date,
                                             'num_runs': num_runs})
        chart.buildcontent()
        return self.render(
            'airflow/chart.html',
            dag=dag,
            demo_mode=conf.getboolean('webserver', 'demo_mode'),
            root=root,
            form=form,
            chart=chart.htmlcontent
        )
    @expose('/landing_times')
    @login_required
    @wwwutils.action_logging
    @provide_session
    def landing_times(self, session=None):
        """Render a chart of per-task "landing time": seconds between the
        schedule tick following a run's execution date and the task's end.

        Query-string args: ``dag_id``, optional ``base_date``, optional
        ``num_runs`` (default 25), optional ``root`` task regex.
        """
        dag_id = request.args.get('dag_id')
        dag = dagbag.get_dag(dag_id)
        base_date = request.args.get('base_date')
        num_runs = request.args.get('num_runs')
        num_runs = int(num_runs) if num_runs else 25
        if base_date:
            base_date = pendulum.parse(base_date)
        else:
            base_date = dag.latest_execution_date or timezone.utcnow()
        dates = dag.date_range(base_date, num=-abs(num_runs))
        min_date = dates[0] if dates else datetime(2000, 1, 1)
        root = request.args.get('root')
        if root:
            dag = dag.sub_dag(
                task_regex=root,
                include_upstream=True,
                include_downstream=False)
        chart_height = get_chart_height(dag)
        chart = nvd3.lineChart(
            name="lineChart", x_is_date=True, height=chart_height, width="1200")
        y = {}
        x = {}
        for task in dag.tasks:
            y[task.task_id] = []
            x[task.task_id] = []
            for ti in task.get_task_instances(session, start_date=min_date,
                                              end_date=base_date):
                # A scheduled run's data is only available at the *following*
                # schedule tick, so measure the delay from that point.
                ts = ti.execution_date
                if dag.schedule_interval and dag.following_schedule(ts):
                    ts = dag.following_schedule(ts)
                if ti.end_date:
                    dttm = wwwutils.epoch(ti.execution_date)
                    secs = (ti.end_date - ts).total_seconds()
                    x[ti.task_id].append(dttm)
                    y[ti.task_id].append(secs)
        # Pick a human-friendly unit (seconds/minutes/hours/days) from all values.
        y_unit = infer_time_unit([d for t in y.values() for d in t])
        chart.create_y_axis('yAxis', format='.02f', custom_format=False,
                            label='Landing Time ({})'.format(y_unit))
        chart.axislist['yAxis']['axisLabelDistance'] = '40'
        for task in dag.tasks:
            if x[task.task_id]:
                chart.add_serie(name=task.task_id, x=x[task.task_id],
                                y=scale_time_units(y[task.task_id], y_unit))
        tis = dag.get_task_instances(
            session, start_date=min_date, end_date=base_date)
        dates = sorted(list({ti.execution_date for ti in tis}))
        max_date = max([ti.execution_date for ti in tis]) if dates else None
        form = DateTimeWithNumRunsForm(data={'base_date': max_date,
                                             'num_runs': num_runs})
        chart.buildcontent()
        return self.render(
            'airflow/chart.html',
            dag=dag,
            chart=chart.htmlcontent,
            height=str(chart_height + 100) + "px",
            demo_mode=conf.getboolean('webserver', 'demo_mode'),
            root=root,
            form=form,
        )
@expose('/paused', methods=['POST'])
@login_required
@wwwutils.action_logging
@provide_session
def paused(self, session=None):
DagModel = models.DagModel
dag_id = request.args.get('dag_id')
orm_dag = session.query(
DagModel).filter(DagModel.dag_id == dag_id).first()
if request.args.get('is_paused') == 'false':
orm_dag.is_paused = True
else:
orm_dag.is_paused = False
session.merge(orm_dag)
session.commit()
dagbag.get_dag(dag_id)
return "OK"
    @expose('/refresh')
    @login_required
    @wwwutils.action_logging
    @provide_session
    def refresh(self, session=None):
        """Mark a single DAG as expired so the webserver re-reads its file,
        then redirect back to the referring page.
        """
        DagModel = models.DagModel
        dag_id = request.args.get('dag_id')
        orm_dag = session.query(
            DagModel).filter(DagModel.dag_id == dag_id).first()
        if orm_dag:
            # Setting last_expired invalidates any cached/pickled copy.
            orm_dag.last_expired = timezone.utcnow()
            session.merge(orm_dag)
            session.commit()
        # Force the dagbag to reload the (possibly expired) DAG.
        dagbag.get_dag(dag_id)
        flash("DAG [{}] is now fresh as a daisy".format(dag_id))
        return redirect(request.referrer)
    @expose('/refresh_all')
    @login_required
    @wwwutils.action_logging
    def refresh_all(self):
        """Re-collect every DAG file from disk (unconditionally) and go home."""
        dagbag.collect_dags(only_if_updated=False)
        flash("All DAGs are now up to date")
        return redirect('/')
    @expose('/gantt')
    @login_required
    @wwwutils.action_logging
    @provide_session
    def gantt(self, session=None):
        """Render a Gantt chart of task instance start/end times for a single
        DAG run (identified by ``dag_id`` + ``execution_date``).
        """
        dag_id = request.args.get('dag_id')
        dag = dagbag.get_dag(dag_id)
        demo_mode = conf.getboolean('webserver', 'demo_mode')
        root = request.args.get('root')
        if root:
            dag = dag.sub_dag(
                task_regex=root,
                include_upstream=True,
                include_downstream=False)
        dttm = request.args.get('execution_date')
        if dttm:
            dttm = pendulum.parse(dttm)
        else:
            dttm = dag.latest_execution_date or timezone.utcnow()
        form = DateTimeForm(data={'execution_date': dttm})
        # Only task instances that actually started can be drawn.
        tis = [
            ti for ti in dag.get_task_instances(session, dttm, dttm)
            if ti.start_date]
        tis = sorted(tis, key=lambda ti: ti.start_date)
        tasks = []
        for ti in tis:
            # Still-running tasks get "now" as a provisional end for drawing.
            end_date = ti.end_date if ti.end_date else timezone.utcnow()
            # [:-4] trims sub-millisecond digits for display.
            tasks.append({
                'startDate': wwwutils.epoch(ti.start_date),
                'endDate': wwwutils.epoch(end_date),
                'isoStart': ti.start_date.isoformat()[:-4],
                'isoEnd': end_date.isoformat()[:-4],
                'taskName': ti.task_id,
                'duration': "{}".format(end_date - ti.start_date)[:-4],
                'status': ti.state,
                'executionDate': ti.execution_date.isoformat(),
            })
        states = {ti.state: ti.state for ti in tis}
        data = {
            'taskNames': [ti.task_id for ti in tis],
            'tasks': tasks,
            'taskStatus': states,
            'height': len(tis) * 25 + 25,
        }
        session.commit()
        return self.render(
            'airflow/gantt.html',
            dag=dag,
            execution_date=dttm.isoformat(),
            form=form,
            data=json.dumps(data, indent=2),
            base_date='',
            demo_mode=demo_mode,
            root=root,
        )
@expose('/object/task_instances')
@login_required
@wwwutils.action_logging
@provide_session
def task_instances(self, session=None):
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
dttm = request.args.get('execution_date')
if dttm:
dttm = pendulum.parse(dttm)
else:
return ("Error: Invalid execution_date")
task_instances = {
ti.task_id: alchemy_to_dict(ti)
for ti in dag.get_task_instances(session, dttm, dttm)}
return json.dumps(task_instances)
@expose('/variables/<form>', methods=["GET", "POST"])
@login_required
@wwwutils.action_logging
def variables(self, form):
try:
if request.method == 'POST':
data = request.json
if data:
with create_session() as session:
var = models.Variable(key=form, val=json.dumps(data))
session.add(var)
session.commit()
return ""
else:
return self.render(
'airflow/variables/{}.html'.format(form)
)
except:
form = escape(form)
return ("Error: form airflow/variables/{}.html "
"not found.").format(form), 404
    @expose('/varimport', methods=["GET", "POST"])
    @login_required
    @wwwutils.action_logging
    def varimport(self):
        """Import Airflow Variables from an uploaded JSON file, then redirect
        to the variable list. Dict values are stored serialized as JSON.
        """
        try:
            d = json.load(UTF8_READER(request.files['file']))
        except Exception as e:
            flash("Missing file or syntax error: {}.".format(e))
        else:
            for k, v in d.items():
                # Dicts are round-tripped through JSON so they deserialize
                # back into dicts on read.
                models.Variable.set(k, v, serialize_json=isinstance(v, dict))
            flash("{} variable(s) successfully updated.".format(len(d)))
        return redirect('/admin/variable')
class HomeView(AdminIndexView):
    """Landing page: the paginated, searchable list of DAGs, combining DB
    state (DagModel rows) with in-memory dagbag state, with optional
    owner-based filtering and hide-paused behavior.
    """
    @expose("/")
    @login_required
    @provide_session
    def index(self, session=None):
        DM = models.DagModel
        # Owner filtering applies only to non-superusers when enabled.
        do_filter = FILTER_BY_OWNER and (not current_user.is_superuser())
        owner_mode = conf.get('webserver', 'OWNER_MODE').strip().lower()

        hide_paused_dags_by_default = conf.getboolean('webserver',
                                                      'hide_paused_dags_by_default')
        show_paused_arg = request.args.get('showPaused', 'None')

        def get_int_arg(value, default=0):
            # Defensive parse of paging args coming from the query string.
            try:
                return int(value)
            except ValueError:
                return default

        arg_current_page = request.args.get('page', '0')
        arg_search_query = request.args.get('search', None)

        dags_per_page = PAGE_SIZE
        current_page = get_int_arg(arg_current_page, default=0)

        # Explicit showPaused overrides the configured default.
        if show_paused_arg.strip().lower() == 'false':
            hide_paused = True
        elif show_paused_arg.strip().lower() == 'true':
            hide_paused = False
        else:
            hide_paused = hide_paused_dags_by_default

        # DB-side DAG list, filtered by ownership mode when applicable.
        sql_query = session.query(DM)

        if do_filter and owner_mode == 'ldapgroup':
            sql_query = sql_query.filter(
                ~DM.is_subdag,
                DM.is_active,
                DM.owners.in_(current_user.ldap_groups)
            )
        elif do_filter and owner_mode == 'user':
            sql_query = sql_query.filter(
                ~DM.is_subdag, DM.is_active,
                DM.owners == current_user.user.username
            )
        else:
            sql_query = sql_query.filter(
                ~DM.is_subdag, DM.is_active
            )

        # optionally filter out "paused" dags
        if hide_paused:
            sql_query = sql_query.filter(~DM.is_paused)

        orm_dags = {dag.dag_id: dag for dag
                    in sql_query
                    .all()}

        # Surface DAG-file parse failures prominently on the home page.
        import_errors = session.query(models.ImportError).all()
        for ie in import_errors:
            flash(
                "Broken DAG: [{ie.filename}] {ie.stacktrace}".format(ie=ie),
                "error")

        # In-memory (dagbag) DAG list, mirroring the paused/ownership filters.
        if hide_paused:
            unfiltered_webserver_dags = [dag for dag in dagbag.dags.values() if
                                         not dag.parent_dag and not dag.is_paused]
        else:
            unfiltered_webserver_dags = [dag for dag in dagbag.dags.values() if
                                         not dag.parent_dag]

        if do_filter and owner_mode == 'ldapgroup':
            webserver_dags = {
                dag.dag_id: dag
                for dag in unfiltered_webserver_dags
                if dag.owner in current_user.ldap_groups
            }
        elif do_filter and owner_mode == 'user':
            webserver_dags = {
                dag.dag_id: dag
                for dag in unfiltered_webserver_dags
                if dag.owner == current_user.user.username
            }
        else:
            webserver_dags = {
                dag.dag_id: dag
                for dag in unfiltered_webserver_dags
            }

        if arg_search_query:
            # Case-insensitive match on dag_id or owner, across both sources.
            lower_search_query = arg_search_query.lower()
            webserver_dags_filtered = {
                dag_id: dag
                for dag_id, dag in webserver_dags.items()
                if (lower_search_query in dag_id.lower() or
                    lower_search_query in dag.owner.lower())
            }
            all_dag_ids = (set([dag.dag_id for dag in orm_dags.values()
                           if lower_search_query in dag.dag_id.lower() or
                           lower_search_query in dag.owners.lower()]) |
                           set(webserver_dags_filtered.keys()))
            sorted_dag_ids = sorted(all_dag_ids)
        else:
            webserver_dags_filtered = webserver_dags
            sorted_dag_ids = sorted(set(orm_dags.keys()) | set(webserver_dags.keys()))

        # Pagination over the merged, sorted id list.
        start = current_page * dags_per_page
        end = start + dags_per_page

        num_of_all_dags = len(sorted_dag_ids)
        page_dag_ids = sorted_dag_ids[start:end]
        num_of_pages = int(math.ceil(num_of_all_dags / float(dags_per_page)))

        # Feed the search box autocomplete with every dag_id and owner.
        auto_complete_data = set()
        for dag in webserver_dags_filtered.values():
            auto_complete_data.add(dag.dag_id)
            auto_complete_data.add(dag.owner)
        for dag in orm_dags.values():
            auto_complete_data.add(dag.dag_id)
            auto_complete_data.add(dag.owners)

        return self.render(
            'airflow/dags.html',
            webserver_dags=webserver_dags_filtered,
            orm_dags=orm_dags,
            hide_paused=hide_paused,
            current_page=current_page,
            search_query=arg_search_query if arg_search_query else '',
            page_size=dags_per_page,
            num_of_pages=num_of_pages,
            num_dag_from=start + 1,
            num_dag_to=min(end, num_of_all_dags),
            num_of_all_dags=num_of_all_dags,
            paging=wwwutils.generate_pages(current_page, num_of_pages,
                                           search=arg_search_query,
                                           showPaused=not hide_paused),
            dag_ids_in_page=page_dag_ids,
            auto_complete_data=auto_complete_data)
class QueryView(wwwutils.DataProfilingMixin, BaseView):
    """Ad-hoc SQL query page: run a (row-limited) query against any
    configured connection and show the result as a table or CSV download.
    """
    @expose('/', methods=['POST', 'GET'])
    @wwwutils.gzipped
    @provide_session
    def query(self, session=None):
        """Handle the query form; returns rendered HTML or a CSV Response."""
        dbs = session.query(models.Connection).order_by(
            models.Connection.conn_id).all()
        session.expunge_all()
        db_choices = list(
            ((db.conn_id, db.conn_id) for db in dbs if db.get_hook()))
        conn_id_str = request.form.get('conn_id')
        csv = request.form.get('csv') == "true"
        sql = request.form.get('sql')

        class QueryForm(Form):
            conn_id = SelectField("Layout", choices=db_choices)
            sql = TextAreaField("SQL", widget=wwwutils.AceEditorWidget())

        data = {
            'conn_id': conn_id_str,
            'sql': sql,
        }
        results = None
        has_data = False
        error = False
        # df stays None unless a query actually produced a dataframe.
        df = None
        if conn_id_str:
            db = [db for db in dbs if db.conn_id == conn_id_str][0]
            hook = db.get_hook()
            try:
                df = hook.get_pandas_df(wwwutils.limit_sql(sql, QUERY_LIMIT, conn_type=db.conn_type))
                has_data = len(df) > 0
                df = df.fillna('')
                results = df.to_html(
                    classes=[
                        'table', 'table-bordered', 'table-striped', 'no-wrap'],
                    index=False,
                    na_rep='',
                ) if has_data else ''
            except Exception as e:
                flash(str(e), 'error')
                error = True
                df = None

        if has_data and len(df) == QUERY_LIMIT:
            flash(
                "Query output truncated at " + str(QUERY_LIMIT) +
                " rows", 'info')

        if not has_data and error:
            flash('No data', 'error')

        # Bug fix: previously `df` was referenced unconditionally here, so a
        # CSV request with no connection selected (or a failed query) raised
        # UnboundLocalError and returned a 500. Only stream CSV when a
        # dataframe exists; otherwise fall through and render the page with
        # the flashed error.
        if csv and df is not None:
            return Response(
                response=df.to_csv(index=False),
                status=200,
                mimetype="application/text")

        form = QueryForm(request.form, data=data)
        session.commit()
        return self.render(
            'airflow/query.html', form=form,
            title="Ad Hoc Query",
            results=results or '',
            has_data=has_data)
class AirflowModelView(ModelView):
    """Base Flask-Admin ModelView with Airflow's templates and page size."""
    list_template = 'airflow/model_list.html'
    edit_template = 'airflow/model_edit.html'
    create_template = 'airflow/model_create.html'
    column_display_actions = True
    page_size = PAGE_SIZE
class ModelViewOnly(wwwutils.LoginMixin, AirflowModelView):
    """Read-only base view: login required, no create/edit/delete."""
    named_filter_urls = True
    can_create = False
    can_edit = False
    can_delete = False
    column_display_pk = True
class PoolModelView(wwwutils.SuperUserMixin, AirflowModelView):
    """Admin view for task pools, with slot-usage links in the list."""
    column_list = ('pool', 'slots', 'used_slots', 'queued_slots')
    column_formatters = dict(
        pool=pool_link, used_slots=fused_slots, queued_slots=fqueued_slots)
    named_filter_urls = True
    # A pool must have a non-empty name.
    form_args = {
        'pool': {
            'validators': [
                validators.DataRequired(),
            ]
        }
    }
class SlaMissModelView(wwwutils.SuperUserMixin, ModelViewOnly):
    """Read-only admin view of recorded SLA misses."""
    verbose_name_plural = "SLA misses"
    verbose_name = "SLA miss"
    column_list = (
        'dag_id', 'task_id', 'execution_date', 'email_sent', 'timestamp')
    column_formatters = dict(
        task_id=task_instance_link,
        execution_date=datetime_f,
        timestamp=datetime_f,
        dag_id=dag_link)
    named_filter_urls = True
    column_searchable_list = ('dag_id', 'task_id',)
    column_filters = (
        'dag_id', 'task_id', 'email_sent', 'timestamp', 'execution_date')
    filter_converter = wwwutils.UtcFilterConverter()
    # Recorded facts; not editable through the form.
    form_widget_args = {
        'email_sent': {'disabled': True},
        'timestamp': {'disabled': True},
    }
@provide_session
def _connection_ids(session=None):
    """Return distinct connection ids as (value, label) choice tuples for
    WTForms select fields.
    """
    distinct_ids = (
        session.query(models.Connection.conn_id)
        .group_by(models.Connection.conn_id))
    return [(row.conn_id, row.conn_id) for row in distinct_ids]
class ChartModelView(wwwutils.DataProfilingMixin, AirflowModelView):
    """Admin view for user-defined charts (label, SQL, chart type, etc.)."""
    verbose_name = "chart"
    verbose_name_plural = "charts"
    form_columns = (
        'label',
        'owner',
        'conn_id',
        'chart_type',
        'show_datatable',
        'x_is_date',
        'y_log_scale',
        'show_sql',
        'height',
        'sql_layout',
        'sql',
        'default_params',
    )
    column_list = (
        'label',
        'conn_id',
        'chart_type',
        'owner',
        'last_modified',
    )
    column_sortable_list = (
        'label',
        'conn_id',
        'chart_type',
        ('owner', 'owner.username'),
        'last_modified',
    )
    column_formatters = dict(label=label_link, last_modified=datetime_f)
    column_default_sort = ('last_modified', True)
    create_template = 'airflow/chart/create.html'
    edit_template = 'airflow/chart/edit.html'
    column_filters = ('label', 'owner.username', 'conn_id')
    column_searchable_list = ('owner.username', 'label', 'sql')
    # Help text surfaced next to each form field.
    column_descriptions = {
        'label': "Can include {{ templated_fields }} and {{ macros }}",
        'chart_type': "The type of chart to be displayed",
        'sql': "Can include {{ templated_fields }} and {{ macros }}.",
        'height': "Height of the chart, in pixels.",
        'conn_id': "Source database to run the query against",
        'x_is_date': (
            "Whether the X axis should be casted as a date field. Expect most "
            "intelligible date formats to get casted properly."
        ),
        'owner': (
            "The chart's owner, mostly used for reference and filtering in "
            "the list view."
        ),
        'show_datatable':
            "Whether to display an interactive data table under the chart.",
        'default_params': (
            'A dictionary of {"key": "values",} that define what the '
            'templated fields (parameters) values should be by default. '
            'To be valid, it needs to "eval" as a Python dict. '
            'The key values will show up in the url\'s querystring '
            'and can be altered there.'
        ),
        'show_sql': "Whether to display the SQL statement as a collapsible "
                    "section in the chart page.",
        'y_log_scale': "Whether to use a log scale for the Y axis.",
        'sql_layout': (
            "Defines the layout of the SQL that the application should "
            "expect. Depending on the tables you are sourcing from, it may "
            "make more sense to pivot / unpivot the metrics."
        ),
    }
    column_labels = {
        'sql': "SQL",
        'height': "Chart Height",
        'sql_layout': "SQL Layout",
        'show_sql': "Display the SQL Statement",
        'default_params': "Default Parameters",
    }
    form_choices = {
        'chart_type': [
            ('line', 'Line Chart'),
            ('spline', 'Spline Chart'),
            ('bar', 'Bar Chart'),
            ('column', 'Column Chart'),
            ('area', 'Overlapping Area Chart'),
            ('stacked_area', 'Stacked Area Chart'),
            ('percent_area', 'Percent Area Chart'),
            ('datatable', 'No chart, data table only'),
        ],
        'sql_layout': [
            ('series', 'SELECT series, x, y FROM ...'),
            ('columns', 'SELECT x, y (series 1), y (series 2), ... FROM ...'),
        ],
        'conn_id': _connection_ids()
    }

    def on_model_change(self, form, model, is_created=True):
        """Maintain bookkeeping fields (iteration counter, owner, mtime) on
        every create/update.
        """
        if model.iteration_no is None:
            model.iteration_no = 0
        else:
            model.iteration_no += 1
        # Default the owner to the logged-in user when not set.
        if not model.user_id and current_user and hasattr(current_user, 'id'):
            model.user_id = current_user.id
        model.last_modified = timezone.utcnow()
# Maps each Chart.chart_type value to the NVD3 chart class used to render it.
chart_mapping = {
    'line': 'lineChart',
    'spline': 'lineChart',
    'bar': 'multiBarChart',
    'column': 'multiBarChart',
    'area': 'stackedAreaChart',
    'stacked_area': 'stackedAreaChart',
    'percent_area': 'stackedAreaChart',
    'datatable': 'datatable',
}
class KnownEventView(wwwutils.DataProfilingMixin, AirflowModelView):
    """Admin view for known events (outages, holidays, ...) with required
    fields and an end-after-start validation.
    """
    verbose_name = "known event"
    verbose_name_plural = "known events"
    form_columns = (
        'label',
        'event_type',
        'start_date',
        'end_date',
        'reported_by',
        'description',
    )
    form_args = {
        'label': {
            'validators': [
                validators.DataRequired(),
            ],
        },
        'event_type': {
            'validators': [
                validators.DataRequired(),
            ],
        },
        'start_date': {
            'validators': [
                validators.DataRequired(),
            ],
            # Normalize submitted strings into timezone-aware datetimes.
            'filters': [
                parse_datetime_f,
            ],
        },
        'end_date': {
            'validators': [
                validators.DataRequired(),
                # Reject events that end before they start.
                GreaterEqualThan(fieldname='start_date'),
            ],
            'filters': [
                parse_datetime_f,
            ]
        },
        'reported_by': {
            'validators': [
                validators.DataRequired(),
            ],
        }
    }
    column_list = (
        'label',
        'event_type',
        'start_date',
        'end_date',
        'reported_by',
    )
    column_default_sort = ("start_date", True)
    column_sortable_list = (
        'label',
        ('event_type', 'event_type.know_event_type'),
        'start_date',
        'end_date',
        ('reported_by', 'reported_by.username'),
    )
    filter_converter = wwwutils.UtcFilterConverter()
    form_overrides = dict(start_date=DateTimeField, end_date=DateTimeField)
class KnownEventTypeView(wwwutils.DataProfilingMixin, AirflowModelView):
    """Admin view for known event types; the default ModelView behavior is
    sufficient, so no customization is needed.
    """
    pass
class VariableView(wwwutils.DataProfilingMixin, AirflowModelView):
    """Admin view for Airflow Variables, masking values whose keys look
    sensitive and offering a JSON export action.
    """
    verbose_name = "Variable"
    verbose_name_plural = "Variables"
    list_template = 'airflow/variable_list.html'

    def hidden_field_formatter(view, context, model, name):
        """Render '********' for keys flagged as sensitive; otherwise show
        the value, or an 'Invalid' badge when decryption fails.
        """
        if wwwutils.should_hide_value_for_key(model.key):
            return Markup('*' * 8)
        try:
            return getattr(model, name)
        except AirflowException:
            # Value cannot be decrypted (e.g. fernet key changed).
            return Markup('<span class="label label-danger">Invalid</span>')

    form_columns = (
        'key',
        'val',
    )
    column_list = ('key', 'val', 'is_encrypted',)
    column_filters = ('key', 'val')
    column_searchable_list = ('key', 'val')
    column_default_sort = ('key', False)
    form_widget_args = {
        'is_encrypted': {'disabled': True},
        'val': {
            'rows': 20,
        }
    }
    form_args = {
        'key': {
            'validators': {
                validators.DataRequired(),
            },
        },
    }
    column_sortable_list = (
        'key',
        'val',
        'is_encrypted',
    )
    column_formatters = {
        'val': hidden_field_formatter,
    }

    # Valid values will be converted to JSON
    # Invalid values will be saved as strings
    @action('varexport', 'Export', None)
    @provide_session
    def action_varexport(self, ids, session=None):
        """Download the selected variables as a single JSON document."""
        V = models.Variable
        qry = session.query(V).filter(V.id.in_(ids)).all()

        var_dict = {}
        d = json.JSONDecoder()
        for var in qry:
            val = None
            try:
                val = d.decode(var.val)
            # Fix: the bare `except:` also caught SystemExit and
            # KeyboardInterrupt; only JSON-decode failures should fall back
            # to exporting the raw string.
            except Exception:
                val = var.val
            var_dict[var.key] = val

        response = make_response(json.dumps(var_dict, sort_keys=True, indent=4))
        response.headers["Content-Disposition"] = "attachment; filename=variables.json"
        return response

    def on_form_prefill(self, form, id):
        """Mask sensitive values in the edit form instead of revealing them."""
        if wwwutils.should_hide_value_for_key(form.key.data):
            form.val.data = '*' * 8
class XComView(wwwutils.SuperUserMixin, AirflowModelView):
    """Admin view for XCom entries (cross-task communication records)."""
    verbose_name = "XCom"
    verbose_name_plural = "XComs"

    form_columns = (
        'key',
        'value',
        'execution_date',
        'task_id',
        'dag_id',
    )

    # Expose the (pickled) value as a plain string field in the form.
    form_extra_fields = {
        'value': StringField('Value'),
    }

    form_args = {
        'execution_date': {
            # Normalize the submitted string into a timezone-aware datetime.
            'filters': [
                parse_datetime_f,
            ]
        }
    }

    column_filters = ('key', 'timestamp', 'execution_date', 'task_id', 'dag_id')
    column_searchable_list = ('key', 'timestamp', 'execution_date', 'task_id', 'dag_id')
    filter_converter = wwwutils.UtcFilterConverter()
    form_overrides = dict(execution_date=DateTimeField)
class JobModelView(ModelViewOnly):
    """Read-only admin view of scheduler/worker/backfill job records."""
    verbose_name_plural = "jobs"
    verbose_name = "job"
    column_display_actions = False
    column_default_sort = ('start_date', True)
    column_filters = (
        'job_type', 'dag_id', 'state',
        'unixname', 'hostname', 'start_date', 'end_date', 'latest_heartbeat')
    column_formatters = dict(
        start_date=datetime_f,
        end_date=datetime_f,
        hostname=nobr_f,
        state=state_f,
        latest_heartbeat=datetime_f)
    filter_converter = wwwutils.UtcFilterConverter()
class DagRunModelView(ModelViewOnly):
    """Admin view of DAG runs with bulk actions to delete runs or force
    their state to running/failed/success.
    """
    verbose_name_plural = "DAG Runs"
    can_edit = True
    can_create = True
    column_editable_list = ('state',)
    verbose_name = "dag run"
    column_default_sort = ('execution_date', True)
    form_choices = {
        'state': [
            ('success', 'success'),
            ('running', 'running'),
            ('failed', 'failed'),
        ],
    }
    form_args = dict(
        dag_id=dict(validators=[validators.DataRequired()])
    )
    column_list = (
        'state', 'dag_id', 'execution_date', 'run_id', 'external_trigger')
    column_filters = column_list
    filter_converter = wwwutils.UtcFilterConverter()
    column_searchable_list = ('dag_id', 'state', 'run_id')
    column_formatters = dict(
        execution_date=datetime_f,
        state=state_f,
        start_date=datetime_f,
        dag_id=dag_link,
        run_id=dag_run_link
    )

    @action('new_delete', "Delete", "Are you sure you want to delete selected records?")
    @provide_session
    def action_new_delete(self, ids, session=None):
        """Delete the selected DAG runs and refresh per-DAG run statistics."""
        deleted = set(session.query(models.DagRun)
                      .filter(models.DagRun.id.in_(ids))
                      .all())
        session.query(models.DagRun) \
            .filter(models.DagRun.id.in_(ids)) \
            .delete(synchronize_session='fetch')
        session.commit()
        dirty_ids = []
        for row in deleted:
            dirty_ids.append(row.dag_id)
        # Recompute stats for affected DAGs since rows were removed.
        models.DagStat.update(dirty_ids, dirty_only=False, session=session)

    @action('set_running', "Set state to 'running'", None)
    def action_set_running(self, ids):
        self.set_dagrun_state(ids, State.RUNNING)

    @action('set_failed', "Set state to 'failed'", None)
    def action_set_failed(self, ids):
        self.set_dagrun_state(ids, State.FAILED)

    @action('set_success', "Set state to 'success'", None)
    def action_set_success(self, ids):
        self.set_dagrun_state(ids, State.SUCCESS)

    @provide_session
    def set_dagrun_state(self, ids, target_state, session=None):
        """Force the given DAG runs into target_state, adjusting start/end
        timestamps accordingly, then refresh DAG stats.
        """
        try:
            DR = models.DagRun
            count = 0
            dirty_ids = []
            for dr in session.query(DR).filter(DR.id.in_(ids)).all():
                dirty_ids.append(dr.dag_id)
                count += 1
                dr.state = target_state
                # Running runs restart their clock; terminal states close it.
                if target_state == State.RUNNING:
                    dr.start_date = timezone.utcnow()
                else:
                    dr.end_date = timezone.utcnow()
            session.commit()
            models.DagStat.update(dirty_ids, session=session)
            flash(
                "{count} dag runs were set to '{target_state}'".format(**locals()))
        except Exception as ex:
            if not self.handle_view_exception(ex):
                raise Exception("Ooops")
            flash('Failed to set state', 'error')
class LogModelView(ModelViewOnly):
    """Read-only admin view of the audit/event log table."""
    verbose_name_plural = "logs"
    verbose_name = "log"
    column_display_actions = False
    column_default_sort = ('dttm', True)
    column_filters = ('dag_id', 'task_id', 'execution_date')
    filter_converter = wwwutils.UtcFilterConverter()
    column_formatters = dict(
        dttm=datetime_f, execution_date=datetime_f, dag_id=dag_link)
class TaskInstanceModelView(ModelViewOnly):
    """Admin view of task instances with bulk actions to force state,
    retry, or clear instances (restarting their DAG runs).
    """
    verbose_name_plural = "task instances"
    verbose_name = "task instance"
    column_filters = (
        'state', 'dag_id', 'task_id', 'execution_date', 'hostname',
        'queue', 'pool', 'operator', 'start_date', 'end_date')
    filter_converter = wwwutils.UtcFilterConverter()
    named_filter_urls = True
    column_formatters = dict(
        log_url=log_url_formatter,
        task_id=task_instance_link,
        hostname=nobr_f,
        state=state_f,
        execution_date=datetime_f,
        start_date=datetime_f,
        end_date=datetime_f,
        queued_dttm=datetime_f,
        dag_id=dag_link,
        run_id=dag_run_link,
        duration=duration_f)
    column_searchable_list = ('dag_id', 'task_id', 'state')
    column_default_sort = ('job_id', True)
    form_choices = {
        'state': [
            ('success', 'success'),
            ('running', 'running'),
            ('failed', 'failed'),
        ],
    }
    column_list = (
        'state', 'dag_id', 'task_id', 'execution_date', 'operator',
        'start_date', 'end_date', 'duration', 'job_id', 'hostname',
        'unixname', 'priority_weight', 'queue', 'queued_dttm', 'try_number',
        'pool', 'log_url')
    page_size = PAGE_SIZE

    @action('set_running', "Set state to 'running'", None)
    def action_set_running(self, ids):
        self.set_task_instance_state(ids, State.RUNNING)

    @action('set_failed', "Set state to 'failed'", None)
    def action_set_failed(self, ids):
        self.set_task_instance_state(ids, State.FAILED)

    @action('set_success', "Set state to 'success'", None)
    def action_set_success(self, ids):
        self.set_task_instance_state(ids, State.SUCCESS)

    @action('set_retry', "Set state to 'up_for_retry'", None)
    def action_set_retry(self, ids):
        self.set_task_instance_state(ids, State.UP_FOR_RETRY)

    @provide_session
    @action('clear',
            lazy_gettext('Clear'),
            lazy_gettext(
                'Are you sure you want to clear the state of the selected task instance(s)'
                ' and set their dagruns to the running state?'))
    def action_clear(self, ids, session=None):
        """Clear the selected task instances (ids are 'task_id,dag_id,date'
        strings) so they are re-run; their DAG runs are set back to running.
        """
        try:
            TI = models.TaskInstance

            dag_to_task_details = {}
            dag_to_tis = {}

            # Collect dags upfront as dagbag.get_dag() will reset the session
            for id_str in ids:
                task_id, dag_id, execution_date = id_str.split(',')
                dag = dagbag.get_dag(dag_id)
                task_details = dag_to_task_details.setdefault(dag, [])
                task_details.append((task_id, execution_date))

            for dag, task_details in dag_to_task_details.items():
                for task_id, execution_date in task_details:
                    execution_date = parse_execution_date(execution_date)

                    ti = session.query(TI).filter(TI.task_id == task_id,
                                                  TI.dag_id == dag.dag_id,
                                                  TI.execution_date == execution_date).one()

                    tis = dag_to_tis.setdefault(dag, [])
                    tis.append(ti)

            for dag, tis in dag_to_tis.items():
                models.clear_task_instances(tis, session, dag=dag)

            session.commit()

            flash("{0} task instances have been cleared".format(len(ids)))

        except Exception as ex:
            if not self.handle_view_exception(ex):
                raise Exception("Ooops")
            flash('Failed to clear task instances', 'error')

    @provide_session
    def set_task_instance_state(self, ids, target_state, session=None):
        """Force each selected task instance into target_state."""
        try:
            TI = models.TaskInstance
            count = len(ids)
            for id in ids:
                task_id, dag_id, execution_date = id.split(',')
                execution_date = parse_execution_date(execution_date)

                ti = session.query(TI).filter(TI.task_id == task_id,
                                              TI.dag_id == dag_id,
                                              TI.execution_date == execution_date).one()
                ti.state = target_state
            session.commit()
            flash(
                "{count} task instances were set to '{target_state}'".format(**locals()))
        except Exception as ex:
            if not self.handle_view_exception(ex):
                raise Exception("Ooops")
            flash('Failed to set state', 'error')

    def get_one(self, id):
        """Look up one task instance by its composite primary key encoded in
        the view id.
        """
        task_id, dag_id, execution_date = iterdecode(id)
        execution_date = pendulum.parse(execution_date)
        return self.session.query(self.model).get((task_id, dag_id, execution_date))
class ConnectionModelView(wwwutils.SuperUserMixin, AirflowModelView):
    """Admin view for Connections, packing type-specific extra fields into
    the JSON `extra` column.
    """
    create_template = 'airflow/conn_create.html'
    edit_template = 'airflow/conn_edit.html'
    list_template = 'airflow/conn_list.html'
    form_columns = (
        'conn_id',
        'conn_type',
        'host',
        'schema',
        'login',
        'password',
        'port',
        'extra',
        'extra__jdbc__drv_path',
        'extra__jdbc__drv_clsname',
        'extra__google_cloud_platform__project',
        'extra__google_cloud_platform__key_path',
        'extra__google_cloud_platform__keyfile_dict',
        'extra__google_cloud_platform__scope',
    )
    verbose_name = "Connection"
    verbose_name_plural = "Connections"
    column_default_sort = ('conn_id', False)
    column_list = ('conn_id', 'conn_type', 'host', 'port', 'is_encrypted', 'is_extra_encrypted',)
    form_overrides = dict(_password=PasswordField, _extra=TextAreaField)
    form_widget_args = {
        'is_extra_encrypted': {'disabled': True},
        'is_encrypted': {'disabled': True},
    }
    # Used to customized the form, the forms elements get rendered
    # and results are stored in the extra field as json. All of these
    # need to be prefixed with extra__ and then the conn_type ___ as in
    # extra__{conn_type}__name. You can also hide form elements and rename
    # others from the connection_form.js file
    form_extra_fields = {
        'extra__jdbc__drv_path': StringField('Driver Path'),
        'extra__jdbc__drv_clsname': StringField('Driver Class'),
        'extra__google_cloud_platform__project': StringField('Project Id'),
        'extra__google_cloud_platform__key_path': StringField('Keyfile Path'),
        'extra__google_cloud_platform__keyfile_dict': PasswordField('Keyfile JSON'),
        'extra__google_cloud_platform__scope': StringField('Scopes (comma separated)'),
    }
    form_choices = {
        'conn_type': models.Connection._types
    }

    def on_model_change(self, form, model, is_created):
        """For conn types with dedicated extra fields, serialize those
        fields into the JSON `extra` column on save.
        """
        formdata = form.data
        if formdata['conn_type'] in ['jdbc', 'google_cloud_platform']:
            extra = {
                key: formdata[key]
                for key in self.form_extra_fields.keys() if key in formdata}
            model.extra = json.dumps(extra)

    @classmethod
    def alert_fernet_key(cls):
        """Return True when no fernet key is configured (credentials would
        be stored unencrypted).
        """
        fk = None
        try:
            fk = conf.get('core', 'fernet_key')
        # Fix: bare `except:` narrowed so SystemExit/KeyboardInterrupt
        # are not swallowed; a missing option is treated as "no key".
        except Exception:
            pass
        return fk is None

    @classmethod
    def is_secure(cls):
        """Return True when the cryptography package is importable and a
        fernet key is configured, i.e. secrets can be encrypted.
        """
        is_secure = False
        try:
            import cryptography
            conf.get('core', 'fernet_key')
            is_secure = True
        # Fix: bare `except:` narrowed to Exception (import/config errors).
        except Exception:
            pass
        return is_secure

    def on_form_prefill(self, form, id):
        """Populate the per-type extra fields from the stored JSON extra."""
        try:
            d = json.loads(form.data.get('extra', '{}'))
        except Exception:
            d = {}

        for field in list(self.form_extra_fields.keys()):
            value = d.get(field, '')
            if value:
                field = getattr(form, field)
                field.data = value
class UserModelView(wwwutils.SuperUserMixin, AirflowModelView):
    """Superuser-only admin view of webserver user accounts."""
    verbose_name = "User"
    verbose_name_plural = "Users"
    column_default_sort = 'username'
class VersionView(wwwutils.SuperUserMixin, BaseView):
    """Page showing the installed Airflow package version and git hash."""
    @expose('/')
    def version(self):
        # Look at the version from setup.py
        try:
            airflow_version = pkg_resources.require("apache-airflow")[0].version
        except Exception as e:
            airflow_version = None
            logging.error(e)

        # Get the Git repo and git hash
        git_version = None
        try:
            # The git_version file is written at build/install time.
            with open(os.path.join(*[settings.AIRFLOW_HOME, 'airflow', 'git_version'])) as f:
                git_version = f.readline()
        except Exception as e:
            logging.error(e)

        # Render information
        title = "Version Info"
        return self.render('airflow/version.html',
                           title=title,
                           airflow_version=airflow_version,
                           git_version=git_version)
class ConfigurationView(wwwutils.SuperUserMixin, BaseView):
    """Page showing airflow.cfg contents, honoring the `expose_config`
    setting; supports raw text output via `?raw=true`.
    """
    @expose('/')
    def conf(self):
        raw = request.args.get('raw') == "true"
        title = "Airflow Configuration"
        subtitle = conf.AIRFLOW_CONFIG
        if conf.getboolean("webserver", "expose_config"):
            with open(conf.AIRFLOW_CONFIG, 'r') as f:
                config = f.read()
            # (section, key, value, source) rows for the tabular display.
            table = [(section, key, value, source)
                     for section, parameters in conf.as_dict(True, True).items()
                     for key, (value, source) in parameters.items()]
        else:
            config = (
                "# Your Airflow administrator chose not to expose the "
                "configuration, most likely for security reasons.")
            table = None
        if raw:
            return Response(
                response=config,
                status=200,
                mimetype="application/text")
        else:
            code_html = Markup(highlight(
                config,
                lexers.IniLexer(),  # Lexer call
                HtmlFormatter(noclasses=True))
            )
            return self.render(
                'airflow/config.html',
                pre_subtitle=settings.HEADER + "  v" + airflow.__version__,
                code_html=code_html, title=title, subtitle=subtitle,
                table=table)
class DagModelView(wwwutils.SuperUserMixin, ModelView):
    """Admin listing of DAG records (active or paused, excluding subdags)."""
    column_list = ('dag_id', 'owners')
    column_editable_list = ('is_paused',)
    form_excluded_columns = ('is_subdag', 'is_active')
    column_searchable_list = ('dag_id',)
    column_filters = (
        'dag_id', 'owners', 'is_paused', 'is_active', 'is_subdag',
        'last_scheduler_run', 'last_expired')
    filter_converter = wwwutils.UtcFilterConverter()
    # Scheduler-managed columns are visible but never editable here.
    form_widget_args = {
        'last_scheduler_run': {'disabled': True},
        'fileloc': {'disabled': True},
        'is_paused': {'disabled': True},
        'last_pickled': {'disabled': True},
        'pickle_id': {'disabled': True},
        'last_loaded': {'disabled': True},
        'last_expired': {'disabled': True},
        'pickle_size': {'disabled': True},
        'scheduler_lock': {'disabled': True},
        'owners': {'disabled': True},
    }
    column_formatters = dict(
        dag_id=dag_link,
    )
    can_delete = False
    can_create = False
    page_size = PAGE_SIZE
    list_template = 'airflow/list_dags.html'
    named_filter_urls = True
    def get_query(self):
        # List DAGs that are active OR paused, excluding subdags.
        return (
            super(DagModelView, self)
            .get_query()
            .filter(or_(models.DagModel.is_active, models.DagModel.is_paused))
            .filter(~models.DagModel.is_subdag)
        )
    def get_count_query(self):
        # NOTE(review): this filters only on is_active while get_query also
        # admits paused DAGs, so the reported count can disagree with the
        # listed rows — confirm whether the asymmetry is intentional.
        return (
            super(DagModelView, self)
            .get_count_query()
            .filter(models.DagModel.is_active)
            .filter(~models.DagModel.is_subdag)
        )
| true | true |
f73a633b34d6d4eebbc2b7cb354a5d2ab296d1bb | 89 | py | Python | tritam_cpn_api/models/__init__.py | kenysmile/test_facebook | 844a3ddd53abd319c0115de86909118a37106c67 | [
"Apache-2.0"
] | null | null | null | tritam_cpn_api/models/__init__.py | kenysmile/test_facebook | 844a3ddd53abd319c0115de86909118a37106c67 | [
"Apache-2.0"
] | null | null | null | tritam_cpn_api/models/__init__.py | kenysmile/test_facebook | 844a3ddd53abd319c0115de86909118a37106c67 | [
"Apache-2.0"
] | null | null | null | from . import tritam_tracking
from . import tritam_api_constant
from . import tritam_sms
| 22.25 | 33 | 0.831461 | from . import tritam_tracking
from . import tritam_api_constant
from . import tritam_sms
| true | true |
f73a634f829aa670125b96cd9f28e533ba2f79e3 | 837 | py | Python | glue/core/data_exporters/qt/dialog.py | HPLegion/glue | 1843787ccb4de852dfe103ff58473da13faccf5f | [
"BSD-3-Clause"
] | 550 | 2015-01-08T13:51:06.000Z | 2022-03-31T11:54:47.000Z | glue/core/data_exporters/qt/dialog.py | HPLegion/glue | 1843787ccb4de852dfe103ff58473da13faccf5f | [
"BSD-3-Clause"
] | 1,362 | 2015-01-03T19:15:52.000Z | 2022-03-30T13:23:11.000Z | glue/core/data_exporters/qt/dialog.py | HPLegion/glue | 1843787ccb4de852dfe103ff58473da13faccf5f | [
"BSD-3-Clause"
] | 142 | 2015-01-08T13:08:00.000Z | 2022-03-18T13:25:57.000Z | from qtpy import compat
from glue import config
def export_data(data, components=None, exporter=None):
    """Prompt for an output filename and export `data`.

    When `exporter` is None, one file-type filter is built per registered
    glue data exporter and the exporter matching the user's chosen filter
    is invoked; otherwise the supplied `exporter` callable is used directly.
    Returns without exporting when the dialog is cancelled.
    """
    if exporter is None:
        exporters = {}
        for e in config.data_exporter:
            if e.extension == '':
                fltr = "{0} (*)".format(e.label)
            else:
                fltr = "{0} ({1})".format(e.label, ' '.join('*.' + ext for ext in e.extension))
            exporters[fltr] = e.function
        filters = ';;'.join(sorted(exporters))
    else:
        # NOTE(review): filters stays None and is forwarded to
        # getsavefilename below — confirm qtpy accepts filters=None.
        filters = None
    filename, fltr = compat.getsavefilename(caption="Choose an output filename",
                                            filters=filters)
    filename = str(filename)
    if not filename:
        # Dialog cancelled.
        return
    if filters is not None:
        exporter = exporters[fltr]
    exporter(filename, data, components=components)
| 27.9 | 95 | 0.55675 | from qtpy import compat
from glue import config
def export_data(data, components=None, exporter=None):
    """Ask the user for an output path and export `data` with the chosen
    exporter.

    If `exporter` is None, a save-dialog filter is built for every
    registered glue data exporter and the exporter matching the selected
    filter is called; otherwise the given `exporter` is called directly.
    A cancelled dialog aborts the export.
    """
    exporters = {}
    filters = None
    if exporter is None:
        for e in config.data_exporter:
            if e.extension == '':
                fltr = "{0} (*)".format(e.label)
            else:
                patterns = ' '.join('*.' + ext for ext in e.extension)
                fltr = "{0} ({1})".format(e.label, patterns)
            exporters[fltr] = e.function
        filters = ';;'.join(sorted(exporters))
    filename, fltr = compat.getsavefilename(caption="Choose an output filename",
                                            filters=filters)
    filename = str(filename)
    if not filename:
        return
    if filters is not None:
        exporter = exporters[fltr]
    exporter(filename, data, components=components)
| true | true |
f73a63c74bf1a400e6cb44786318ab12453cd272 | 308 | py | Python | src/emutils/tf/math.py | emanuele-albini/emutils | d5e3939da8a14b629879f06d87d4bd371e7117ab | [
"MIT"
] | null | null | null | src/emutils/tf/math.py | emanuele-albini/emutils | d5e3939da8a14b629879f06d87d4bd371e7117ab | [
"MIT"
] | null | null | null | src/emutils/tf/math.py | emanuele-albini/emutils | d5e3939da8a14b629879f06d87d4bd371e7117ab | [
"MIT"
] | null | null | null | import tensorflow as tf
def cov(x):
    """Return the covariance matrix of `x` (rows are observations).

    Computes E[x^T x] - mean(x)^T mean(x) over axis 0.
    Fixed: the sample-count divisor was always cast to tf.float64, which
    raises a dtype-mismatch error whenever `x` is any other float dtype;
    casting to `x.dtype` is identical for float64 inputs and correct for
    the rest.
    """
    mean_x = tf.reduce_mean(x, axis=0, keepdims=True)
    mx = tf.matmul(tf.transpose(mean_x), mean_x)
    vx = tf.matmul(tf.transpose(x), x) / tf.cast(tf.shape(x)[0], x.dtype)
    cov_xx = vx - mx
    return cov_xx
def inv_cov(x):
    """Return the matrix inverse of the covariance matrix of `x`."""
    return tf.linalg.inv(cov(x))
| 22 | 76 | 0.642857 | import tensorflow as tf
def cov(x):
    """Return the covariance matrix of `x` (rows are observations).

    Computes E[x^T x] - mean(x)^T mean(x) over axis 0.
    Fixed: the sample-count divisor was always cast to tf.float64, which
    raises a dtype-mismatch error whenever `x` is any other float dtype;
    casting to `x.dtype` is identical for float64 inputs and correct for
    the rest.
    """
    mean_x = tf.reduce_mean(x, axis=0, keepdims=True)
    mx = tf.matmul(tf.transpose(mean_x), mean_x)
    vx = tf.matmul(tf.transpose(x), x) / tf.cast(tf.shape(x)[0], x.dtype)
    cov_xx = vx - mx
    return cov_xx
def inv_cov(x):
    """Return the matrix inverse of the covariance matrix of `x`."""
    return tf.linalg.inv(cov(x))
| true | true |
f73a6563f02f126cf396faeb2beb4912e04b4c90 | 209 | py | Python | binproto2/exceptions.py | ghost58400/marlin-binary-protocol | fb93603866ecfce84e887c159bbbb9f9d2f01f17 | [
"MIT"
] | 3 | 2020-06-25T15:23:52.000Z | 2020-12-27T22:00:10.000Z | binproto2/exceptions.py | ghost58400/marlin-binary-protocol | fb93603866ecfce84e887c159bbbb9f9d2f01f17 | [
"MIT"
] | 2 | 2020-07-28T16:50:01.000Z | 2021-03-08T11:27:45.000Z | binproto2/exceptions.py | ghost58400/marlin-binary-protocol | fb93603866ecfce84e887c159bbbb9f9d2f01f17 | [
"MIT"
] | 4 | 2020-07-28T16:25:28.000Z | 2021-02-06T12:46:51.000Z | class ReadTimeout(Exception):
pass
class FatalError(Exception):
    """Unrecoverable protocol failure (semantics inferred from name only)."""
    pass
class SynchronizationError(Exception):
    """Stream synchronization lost (semantics inferred from name only)."""
    pass
class PayloadOverflow(Exception):
    """Payload exceeded the allowed size (semantics inferred from name only)."""
    pass
class ConnectionLost(Exception):
pass | 20.9 | 38 | 0.76555 | class ReadTimeout(Exception):
pass
class FatalError(Exception):
    """Unrecoverable protocol failure (semantics inferred from name only)."""
    pass
class SynchronizationError(Exception):
    """Stream synchronization lost (semantics inferred from name only)."""
    pass
class PayloadOverflow(Exception):
    """Payload exceeded the allowed size (semantics inferred from name only)."""
    pass
class ConnectionLost(Exception):
pass | true | true |
f73a66c7150d81f9d5215661887583cbee26d5a1 | 51,419 | py | Python | polymath/srdfg/base.py | lite-david/polymath | cf1addc75e203fa606ebc6d32bc552fb3975ea99 | [
"Apache-2.0"
] | null | null | null | polymath/srdfg/base.py | lite-david/polymath | cf1addc75e203fa606ebc6d32bc552fb3975ea99 | [
"Apache-2.0"
] | null | null | null | polymath/srdfg/base.py | lite-david/polymath | cf1addc75e203fa606ebc6d32bc552fb3975ea99 | [
"Apache-2.0"
] | null | null | null |
from polymath import UNSET_SHAPE, DEFAULT_SHAPES
import builtins
import operator
from collections import OrderedDict, Mapping, Sequence, deque
import functools
from numbers import Integral, Rational, Real
import contextlib
import traceback
import uuid
import numpy as np
import importlib
from .graph import Graph
from .domain import Domain
from .util import _noop_callback, _flatten_iterable, node_hash, \
_is_node_type_instance, is_iterable
class Node(object):
"""
Base class for nodes.
Parameters
----------
args : tuple
Positional arguments passed to the `_evaluate` method.
name : str or None
Name of the node or `None` to use a random, unique identifier.
shape : tuple or None
Shape of the output for a node. This can be a tuple of integers or parameter node names.
graph : Node or None
Parent graph of this node. If graph is `None`, this is the top-level graph.
op_name : str
Operation name which describes the node functionality.
value : Any or None
If a node has a default value to use for execution, it can be set using `value`.
kwargs : dict
Keyword arguments passed to the `_evaluate` method.
"""
_graph_stack = deque([None])
_eval_stack = []
stack_size = 5
evaluated_nodes = 0
    def __init__(self, *args,
                 name=None,
                 shape=None,
                 graph=None,
                 dependencies=None,
                 op_name=None,
                 value=None,
                 **kwargs):
        # Per-node subgraph of child nodes, keyed by name.
        self.nodes = Graph()
        self.value = value
        self.dependencies = []
        self._args = []
        # NOTE(review): these attribute names are misspelled; they are the
        # actual storage for predecessor/successor links.
        self._predeecessors = []
        self._succesors = []
        # Goes through the `args` setter, which also registers Node args.
        self.args = args
        if "name" in kwargs:
            kwargs.pop("name")
        # Keys added via add_attribute(); excluded from evaluation kwargs.
        self.added_attrs = []
        # TODO: CHange this to underscore private variable
        self.kwargs = kwargs
        self.graph = graph
        self._shape = OrderedDict()
        # Goes through the `shape` setter -> set_shape().
        self.shape = shape or tuple([])
        # Get a list of all dependencies relevant to this node
        self.dependencies = [] if dependencies is None else dependencies
        if self.graph:
            self.dependencies.extend(self.graph.dependencies)
        # Choose a name for the node and add the node to the graph
        self._name = None
        self.name = name or uuid.uuid4().hex
        self._op_name = None
        self.op_name = op_name
        # Get the stack context so we can report where the node was defined
        self._stack = traceback.extract_stack(limit=1)
    @property
    def graph(self):
        """
        polymath.srdfg.graph.Graph : Parent graph of this node, or `None`
        when this is the top-level graph.
        """
        return self._graph
def preds(self):
return self._preds
def succs(self):
return self._preds
def add_predecessor(self, pred):
if isinstance(pred, Node):
self._predecessors.append(pred.gname)
else:
self._predecessors.append(pred)
def add_successor(self, succ):
if isinstance(succ, Node):
self._succesors.append(succ.gname)
else:
self._succesors.append(succ)
    def set_edges(self):
        # Register every argument as a predecessor of this node and, for
        # Node-valued arguments, this node as the argument's successor.
        # NOTE(review): relies on add_predecessor/add_successor, whose
        # backing attributes are created under misspelled names in
        # __init__ — verify the link storage before depending on this.
        for e in self.args:
            self.add_predecessor(e)
            if isinstance(e, Node):
                e.add_successor(self)
    @property
    def domain(self):
        # Base nodes have an empty (scalar) iteration domain; subclasses
        # such as var_index override this with a real index domain.
        return Domain(tuple([]))
    @property
    def args(self):
        """
        tuple : Positional arguments which are used for executing this node.
        """
        return tuple(self._args)
    @property
    def argnames(self):
        """list : Names of Node arguments; non-Node arguments verbatim."""
        return [a.name if isinstance(a, Node) else a for a in self.args]
    @property
    def shape(self):
        """
        tuple : Shape of the output for a node. This can be a tuple of integers or parameter node names.
        """
        return self._shape
    @property
    def var(self):
        # Base nodes are their own variable; var_index overrides this.
        return self
    @property
    def name(self):
        """str : Unique name of the node"""
        return self._name
    @property
    def op_name(self):
        """
        str : Operation name which describes the node functionality.
        """
        return self._op_name
    @op_name.setter
    def op_name(self, op_name):
        # Precedence: explicit op_name, then the node's own name for plain
        # Node instances, then the subclass name.
        if op_name:
            self._op_name = op_name
        elif self.__class__.__name__ == "Node":
            self._op_name = self.name
        else:
            self._op_name = self.__class__.__name__
    @name.setter
    def name(self, name):
        self.set_name(name)
    @args.setter
    def args(self, args):
        new_args = []
        for arg in args:
            if isinstance(arg, Node):
                # Plain Node graphs mirror Node-valued args in their own
                # subgraph so lookups by name succeed.
                if self.__class__.__name__ == "Node":
                    self.nodes[arg.name] = self.graph[arg.name]
            new_args.append(arg)
        self._args = tuple(new_args)
    @shape.setter
    def shape(self, shape):
        self.set_shape(shape, init=True)
    @graph.setter
    def graph(self, graph):
        # Falls back to the graph on top of the global graph stack.
        self._graph = Node.get_active_graph(graph)
    @property
    def gname(self):
        """str : Fully-scoped name ('parent/child/.../name')."""
        scope_names = [self.name]
        cgraph = self.graph
        while cgraph:
            scope_names.append(cgraph.name)
            cgraph = cgraph.graph
        return "/".join(list(reversed(scope_names)))
    def __enter__(self):
        # Make this node the active default graph within a `with` block.
        Node._graph_stack.append(self)
        return self
    def __exit__(self, *args):
        # Pop and sanity-check that the stack discipline was respected.
        assert self == Node._graph_stack.pop()
    def __repr__(self):
        return "<node '%s'>" % self.name
    def add_attribute(self, key, value):
        # Attributes added here are stored in kwargs but excluded from
        # evaluation (see the added_attrs filter in `evaluate`).
        self.added_attrs.append(key)
        self.kwargs[key] = value
def is_shape_finalized(self):
if self.shape == UNSET_SHAPE:
return False
for s in self.shape:
if not isinstance(s, Integral):
return False
return True
def set_shape(self, shape=None, init=False):
if isinstance(shape, float):
self._shape = tuple([np.int(shape)])
elif isinstance(shape, Integral):
self._shape = tuple([shape])
elif isinstance(shape, Node):
self._shape = tuple([shape])
elif not shape or len(shape) == 0:
# TODO: Change in order to enable "is shape finalized" to work
self._shape = UNSET_SHAPE
else:
shapes = []
for dim in shape:
if isinstance(dim, (Node, Integral)):
shapes.append(dim)
elif isinstance(dim, float):
shapes.append(int(dim))
else:
raise TypeError(f"Shape value must be placeholder or integer value for {self.name}\n"
f"\tDim: {dim}"
f"\n\t{self.kwargs} ")
self._shape = tuple(shapes)
@staticmethod
def get_active_graph(graph=None):
"""
Obtain the currently active graph instance by returning the explicitly given graph or using
the default graph.
Parameters
----------
graph : Node or None
Graph to return or `None` to use the default graph.
Raises
------
ValueError
If no `Graph` instance can be obtained.
"""
graph = graph or Node._graph_stack[-1]
return graph
    def instantiate_node(self, node):  # pylint:disable=W0621
        """Resolve `node` (a Node instance or a node name) to a Node owned
        by this graph.

        Parameters
        ----------
        node : Node or str
            Node instance or name of a node in this graph.

        Returns
        -------
        Node
            The resolved node instance.

        Raises
        ------
        ValueError
            If `node` is neither a Node instance nor a node name.
        RuntimeError
            If `node` is a Node instance but belongs to a different graph.
        """
        if isinstance(node, str):
            return self.nodes[node]
        if isinstance(node, Node):
            if node.name not in self.nodes and (node.graph != self):
                raise RuntimeError(f"node '{node}' does not belong to {self} graph, instead belongs to"
                                   f" {node.graph}")
            return node
        raise ValueError(f"'{node}' is not an `Node` instance or node name")
    def instantiate_graph(self, context, **kwargs):
        """Replace node names in `context` with node instances (in place).

        Each key of `context` (Node or name) is resolved via
        `instantiate_node`; `kwargs` entries are looked up by name in this
        graph. Placeholder-like nodes whose shapes are not yet final are
        eagerly evaluated so their shapes materialise before execution.

        Returns
        -------
        dict[Node, object]
            The normalised context, keyed by node instances.

        Raises
        ------
        ValueError
            If more than one value is supplied for the same node, or if
            `context` is not a mapping.
        """
        if context is None:
            context = {}
        elif not isinstance(context, Mapping):
            raise ValueError("`context` must be a mapping.")
        nodes = list(context)
        # Re-key the caller-supplied entries onto Node instances.
        for node in nodes:  # pylint:disable=W0621
            value = context.pop(node)
            node = self.instantiate_node(node)
            if node in context:
                raise ValueError(f"duplicate unequal value for node '{node}'")
            context[node] = value
            if node.op_name in ["placeholder", "state", "input", "output", "temp"] and not node.is_shape_finalized():
                context[node] = node.evaluate(context)
        # Merge the keyword-supplied values, keyed by node name.
        for name, value in kwargs.items():
            node = self.nodes[name]
            if node in context:
                raise ValueError(f"duplicate value for node '{node}'")
            context[node] = value
            if node.op_name in ["placeholder", "state", "input", "output", "temp"] and not node.is_shape_finalized():
                context[node] = node.evaluate(context)
        return context
    def run(self, fetches, context=None, *, callback=None, **kwargs):
        """Evaluate one or more nodes under `context` (modified in place).

        Parameters
        ----------
        fetches : list[str or Node] or str or Node
            Node(s) or node name(s) to evaluate.
        context : dict or None
            Values keyed by node instance or node name.
        callback : callable or None
            Invoked as each node is evaluated.
        kwargs : dict
            Additional context values keyed by variable name.

        Returns
        -------
        object or tuple[object]
            A single value when `fetches` was a single node, else a tuple
            in fetch order.

        Raises
        ------
        ValueError
            If `fetches` is not a Node, node name, or sequence thereof.
        """
        if isinstance(fetches, (str, Node)):
            fetches = [fetches]
            single = True
        elif isinstance(fetches, Sequence):
            single = False
        else:
            raise ValueError("`fetches` must be an `Node` instance, node name, or a "
                             "sequence thereof.")
        fetches = [self.instantiate_node(node) for node in fetches]
        context = self.instantiate_graph(context, **kwargs)
        # Write-once nodes (output/state/temp) are versioned: redirect each
        # such fetch to its latest written version in the graph.
        # NOTE(review): write_count is not set on the base Node class —
        # presumably defined by placeholder subclasses; confirm.
        for c in context:
            if c in fetches and c.op_name in ["output", "state", "temp"]:
                write_name = "/".join([f"{i}{c.write_count-1}" for i in c.name.split("/")]) if c.write_count > 0 else c.name
                fetches[fetches.index(c)] = c.graph.nodes[write_name]
        values = [fetch.evaluate_node(fetch, context, callback=callback) for fetch in fetches]
        return values[0] if single else tuple(values)
    def __getstate__(self):
        # Pickle the full instance dictionary as-is.
        return self.__dict__
    def __setstate__(self, data):
        # Restore by merging rather than replacing the instance dict.
        self.__dict__.update(data)
    def set_name(self, name):
        """Rename this node (or assign a fresh UUID hex name when `name` is
        falsy) and update the owning graph's key for it.

        Returns
        -------
        Node
            This node.

        Raises
        ------
        ValueError
            If another node with `name` already exists in the owning graph.
        """
        name = name or uuid.uuid4().hex
        # TODO: also verify that an existing node under this name is not
        # simply equal to the current node before rejecting it.
        if self.graph and name in self.graph.nodes:
            raise ValueError(f"duplicate name '{name}' in {self.graph.name}:\n\t"
                             f"Existing: {self.graph.nodes[name].args}\n\t"
                             f"New: {self.args}")
        if self.graph:
            graph = self.graph
            if self._name and self._name in graph.nodes:
                # Re-key the existing entry rather than inserting a duplicate.
                graph.update_graph_key(self._name, name)
            else:
                graph.nodes[name] = self
        self._name = name
        return self
def evaluate_dependencies(self, context, callback=None):
"""
Evaluate the dependencies of this node and discard the values.
Parameters
----------
context : dict
Normalised context in which to evaluate the node.
callback : callable or None
Callback to be evaluated when an node is evaluated.
"""
for node in self.dependencies:
node.evaluate(context, callback)
    def evaluate(self, context, callback=None):
        """Evaluate this node under `context`, memoising the result.

        Dependencies are evaluated first; if this node already has a value
        in `context`, that value is returned unchanged. Otherwise args and
        kwargs are evaluated recursively and `_evaluate` produces the
        value, which is cached both in `context` and on ``self.value``.

        Returns
        -------
        object
            Output of the node given the context.
        """
        # Evaluate all explicit dependencies first
        self.evaluate_dependencies(context, callback)
        if self in context:
            return context[self]
        # Evaluate the parents
        partial = functools.partial(self.evaluate_node, context=context, callback=callback)
        args = [partial(arg) for arg in self.args]
        kwargs = {key: partial(value) for key, value in self.kwargs.items() if key not in self.added_attrs}
        # Evaluate the node
        callback = callback or _noop_callback
        with callback(self, context):
            if self.__class__.__name__ == "Node":
                # Plain Node instances receive the context explicitly.
                context[self] = self.value = self._evaluate(*args, context=context, **kwargs)
            else:
                context[self] = self.value = self._evaluate(*args, **kwargs)
        return self.value
    def _evaluate(self, *args, context=None, **kwargs):
        """
        Inheriting nodes should implement this function to evaluate the node.
        """
        # Base implementation treats the node itself as callable (see run()).
        return self(*args, context, **kwargs)
    @classmethod
    def evaluate_node(cls, node, context, **kwargs):
        """Evaluate `node` — a Node, a constant, or a container of either —
        under `context`, recursing into tuples, lists, dicts, and slices.

        On failure the original exception is re-raised, chained with an
        EvaluationError that carries the definition-site stack trace(s) of
        the failing node(s).
        """
        Node.evaluated_nodes += 1
        try:
            if isinstance(node, Node):
                Node._eval_stack.append(node.name)
                return node.evaluate(context, **kwargs)
            partial = functools.partial(cls.evaluate_node, context=context, **kwargs)
            if isinstance(node, tuple):
                return tuple(partial(element) for element in node)
            if isinstance(node, list):
                return [partial(element) for element in node]
            if isinstance(node, dict):
                return {partial(key): partial(value) for key, value in node.items()}
            if isinstance(node, slice):
                return slice(*[partial(getattr(node, attr))
                               for attr in ['start', 'stop', 'step']])
            return node
        except Exception as ex:  # pragma: no cover
            messages = []
            interactive = False
            if isinstance(node, Node) or not is_iterable(node):
                node = [node]
            for n in node:
                stack = []
                if isinstance(n, Node):
                    for frame in reversed(n._stack):  # pylint: disable=protected-access
                        # Do not capture any internal stack traces
                        fname = frame.filename
                        if 'polymath' in fname:
                            continue
                        # Stop tracing at the last interactive cell
                        if interactive and not fname.startswith('<'):
                            break  # pragma: no cover
                        interactive = fname.startswith('<')
                        stack.append(frame)
                stack = "".join(traceback.format_list(reversed(stack)))
                message = "Failed to evaluate node `%s` defined at:\n\n%s" % (n, stack)
                messages.append(message)
            raise ex from EvaluationError("".join(messages))
@classmethod
def init_from_args(cls, *args,
name=None,
shape=None,
graph=None,
dependencies=None,
op_name=None,
value=None,
**kwargs):
if len(args) == 0:
n = cls(name=name,
shape=shape,
graph=graph,
op_name=op_name,
dependencies=dependencies,
value=value,
**kwargs)
else:
n = cls(*args,
name=name,
shape=shape,
graph=graph,
op_name=op_name,
dependencies=dependencies,
value=value,
**kwargs)
return n
    def __bool__(self):
        # Nodes are always truthy; truth testing must never trigger
        # graph evaluation.
        return True
    def __hash__(self):
        # Identity hash; __eq__ below compares by the same identity.
        return id(self)
    def func_hash(self):
        """
        This returns the functional hash of a particular node. The default hash returns an object id, whereas this function
        returns a hash of all attributes and subgraphs of a node.
        """
        return node_hash(self)
def find_node(self, name):
g = self.graph
while g is not None and name not in g.nodes:
g = g.graph
if name in g.nodes:
return g.nodes[name]
raise RuntimeError(f"Cannot find {name} in graph nodes. Graph: {self.graph}")
    def __len__(self):
        # TODO: Update this to check for finalized shape
        if self.shape == UNSET_SHAPE:
            raise TypeError(f'`shape` must be specified explicitly for nodes {self}')
        # Length is the extent of the leading dimension.
        return self.shape[0]
    def __iter__(self):
        # Yield an indexed view of each leading-dimension element.
        num = len(self)
        for i in range(num):
            yield self[i]
    def __eq__(self, other):
        # Consistent with __hash__: equality is object identity.
        return hash(self) == hash(other)
    def __getattr__(self, name):
        # Unknown attributes become lazy attribute-access nodes.
        return getattr_(self, name, graph=self.graph)
    def __getitem__(self, key):
        # On subclasses, integer/slice keys index via `getitem`; any other
        # key becomes a var_index over this node (lists/tuples normalised
        # to a list of index expressions). On a plain Node, keys address
        # the subgraph's nodes by name.
        if self.__class__.__name__ != "Node":
            if isinstance(key, (slice, Integral)):
                return getitem(self, key, graph=self.graph)
            else:
                if isinstance(key, (list)):
                    return var_index(self, key, graph=self)
                elif isinstance(key, tuple):
                    return var_index(self, list(key), graph=self)
                else:
                    return var_index(self, [key], graph=self)
        else:
            return self.nodes[key]
    # Binary operators delegate to module-level op constructors; when the
    # other operand is a slice_op/var_index/index node, dispatch to its
    # mirrored dunder so the richer domain-aware implementation wins.
    def __add__(self, other):
        return add(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__radd__(self)
    def __radd__(self, other):
        return add(other, self, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__add__(self)
    def __sub__(self, other):
        return sub(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__rsub__(self)
    def __rsub__(self, other):
        return sub(other, self, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__sub__(self)
    def __pow__(self, other):
        return pow_(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__rpow__(self)
def __rpow__(self, other):
return pow_(other, self, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__rpow__(self)
    # Matrix-multiply has no slice_op dispatch; mul follows the same
    # reflected-dispatch pattern as __add__ above.
    def __matmul__(self, other):
        return matmul(self, other, graph=self.graph)
    def __rmatmul__(self, other):
        return matmul(other, self, graph=self.graph)
    def __mul__(self, other):
        return mul(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__rmul__(self)
    def __rmul__(self, other):
        return mul(other, self, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__mul__(self)
def __truediv__(self, other):
return truediv(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__truediv__(self)
def __rtruediv__(self, other):
return truediv(other, self, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__rtruediv__(self)
    # Remaining binary operators: same delegation pattern as __add__ —
    # module-level op constructor, or the slice_op/var_index/index
    # operand's mirrored dunder. Unary operators at the bottom always
    # build the corresponding op node directly.
    def __floordiv__(self, other):
        return floordiv(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__rfloordiv__(self)
    def __rfloordiv__(self, other):
        return floordiv(other, self, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__floordiv__(self)
    def __mod__(self, other):
        return mod(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__rmod__(self)
    def __rmod__(self, other):
        return mod(other, self, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__mod__(self)
    def __lshift__(self, other):
        return lshift(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__rlshift__(self)
    def __rlshift__(self, other):
        return lshift(other, self, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__lshift__(self)
    def __rshift__(self, other):
        return rshift(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__rrshift__(self)
    def __rrshift__(self, other):
        return rshift(other, self, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__rshift__(self)
    def __and__(self, other):
        return and_(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__rand__(self)
    def __rand__(self, other):
        return and_(other, self, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__and__(self)
    def __or__(self, other):
        return or_(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__ror__(self)
    def __ror__(self, other):
        return or_(other, self, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__or__(self)
    def __xor__(self, other):
        return xor(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__rxor__(self)
    def __rxor__(self, other):
        return xor(other, self, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__xor__(self)
    def __lt__(self, other):
        return lt(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__gt__(self)
    def __le__(self, other):
        return le(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__ge__(self)
    def __ne__(self, other):
        return ne(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__ne__(self)
    def __gt__(self, other):
        return gt(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__lt__(self)
    def __ge__(self, other):
        return ge(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__le__(self)
    def __invert__(self):
        return inv(self, graph=self.graph)
    def __neg__(self):
        return neg(self, graph=self.graph)
    def __abs__(self):
        return abs_(self, graph=self.graph)
    def __pos__(self):
        return pos(self, graph=self.graph)
    def __reversed__(self):
        return reversed_(self, graph=self.graph)
    def update_graph_key(self, old_key, new_key):
        # Re-key a child node while preserving the insertion order of the
        # graph's node mapping.
        n = list(map(lambda k: (new_key, self.nodes[k]) if k == old_key else (k, self.nodes[k]), self.nodes.keys()))
        self.nodes = Graph(n)
    def insert_node(self, node, idx):
        # Insert `node` at position `idx` in this graph's ordered node map.
        node_list = list(self.nodes.items())
        node_list.insert(idx, (node.name, node))
        self.nodes = Graph(node_list)
    def __call__(self, *args, **kwargs):
        # Calling a graph is shorthand for running it.
        return self.run(*args, **kwargs)
class EvaluationError(RuntimeError):
    """
    Failed to evaluate a node. Raised as the chained cause from
    Node.evaluate_node, carrying the definition-site stack trace(s).
    """
class var_index(Node): # pylint: disable=C0103,W0223
"""
Node representing values of a variable corresponding to input index values.
Parameters
----------
var : Node
The multi-dimensional variable used for indexing into.
idx : tuple
Tuple of either integer values or index/index_op nodes.
"""
    def __init__(self, var, idx, name=None, **kwargs):  # pylint: disable=W0235
        # A caller-supplied domain (list or Domain) wins; otherwise the
        # domain is derived from the index expressions themselves.
        if "domain" in kwargs:
            domain = tuple(kwargs.pop("domain")) if isinstance(kwargs["domain"], list) else kwargs.pop("domain")
        else:
            domain = Domain(idx)
        super(var_index, self).__init__(var, idx, name=name, domain=domain, **kwargs)
    @property
    def domain(self):
        # The index domain captured at construction (stored in kwargs).
        return self.kwargs["domain"]
    @property
    def var(self):
        # The variable being indexed (first positional argument).
        var, index_list = self.args
        return var
def set_name(self, name):
"""
Set the name for a variable index, making sure to replicate the new name with
a unique stringwhich corresponds to the variable, index combination.
Parameters
----------
value : str
Unique name of the node.
Returns
-------
self : Node
This node.
Raises
------
ValueError
If an node with `value` already exists in the associated graph.
KeyError
If the current name of the node cannot be found in the associated graph.
"""
# TODO: Need a way to check if the existing node is not equal to the current ndoe as ewll
if self.graph and name in self.graph.nodes:
raise ValueError(f"duplicate name '{name}' in {self.graph.name}:"
f"Existing: {self.graph.nodes[name].args}\n"
f"New: {self.args}")
if self.graph:
graph = self.graph
if self._name is not None and self._name in graph.nodes:
graph.update_graph_key(self._name, name)
else:
graph.nodes[name] = self
self._name = name
return self
def __getitem__(self, key):
if self.is_shape_finalized() and len(self.nodes) >= np.prod(self.shape):
if isinstance(key, Integral):
key = tuple([key])
idx = np.ravel_multi_index(key, dims=self.shape, order='C')
ret = self.nodes.item_by_index(idx)
return ret
else:
if isinstance(key, (list)):
ret = var_index(self.var, tuple(key), graph=self)
elif isinstance(key, tuple):
ret = var_index(self.var, key, graph=self)
else:
ret = var_index(self.var, tuple([key]), graph=self)
return ret
def is_scalar(self, val=None):
if val is not None and (not isinstance(val, np.ndarray) or (len(val.shape) == 1 and val.shape[0] == 1)):
if self.var.shape != DEFAULT_SHAPES[0] and (len(self.var.shape) == 1 and not isinstance(self.var.shape[0],Node)):
raise ValueError(f"Invalid shape var for var index {self} with variable shape {self.var.shape}")
return True
else:
return self.var.shape == DEFAULT_SHAPES[0]
def scalar_result(self):
return all([isinstance(v, int) for v in self.args[1]])
def _evaluate(self, var, indices, **kwargs):
if self.is_scalar(var):
out_shape = (1,)
indices = (0,)
single = True
else:
out_shape = self.domain.shape_from_indices(indices)
indices = self.domain.compute_pairs()
single = False
if isinstance(var, (Integral, Real, str)):
var = np.asarray([var])
elif not isinstance(var, (np.ndarray, list)):
raise TypeError(f"Variable {var} with type {type(var)} is not a list or numpy array, and cannot be sliced for {self.name}")
elif isinstance(var, list):
var = np.asarray(var)
if len(var.shape) != len(out_shape) and np.prod(var.shape) == np.prod(out_shape):
if len(out_shape) > len(var.shape):
for i in range(len(out_shape)):
if out_shape[i] == 1:
var = np.expand_dims(var, axis=i)
else:
var = np.squeeze(var)
if len(var.shape) != len(out_shape) and np.prod(var.shape) != np.prod(out_shape):
raise ValueError(f"Index list does not match {var.shape} in {self.var.name} - {self.var.op_name}"
f"dimensions for slice {self.args[0].name} with {out_shape}.\n"
f"Domain: {self.domain}\n"
f"Eval Stack: {Node._eval_stack}")
if not single and not all([(idx_val - 1) >= indices[-1][idx] for idx, idx_val in enumerate(var.shape)]):
raise ValueError(f"var_index {self.name} has indices which are greater than the variable shape:\n"
f"\tArgs: {self.args}\n"
f"\tVar shape: {var.shape}\n"
f"\tNode shape: {self.var.shape}\n"
f"\tIndex Upper bounds: {indices[-1]}")
indices = list(map(lambda x: x.tolist() if isinstance(x, np.ndarray) else x, indices))
res = var[indices] if single else np.asarray([var[idx] for idx in indices]).reshape(out_shape)
if out_shape == (1,) and len(indices) == 1:
res = res[0]
self.domain.set_computed(out_shape, indices)
return res
def __add__(self, other):
return slice_op(operator.add, self, other, graph=self.graph)
def __radd__(self, other):
return slice_op(operator.add, other, self, graph=self.graph)
def __sub__(self, other):
return slice_op(operator.sub, self, other, graph=self.graph)
def __rsub__(self, other):
return slice_op(operator.sub, other, self, graph=self.graph)
def __pow__(self, other):
return slice_op(builtins.pow, self, other, graph=self.graph)
def __rpow__(self, other):
return slice_op(builtins.pow, other, self, graph=self.graph)
    def __mul__(self, other):
        # Graph-node form of `self * other`.
        return slice_op(operator.mul, self, other, graph=self.graph)
    def __rmul__(self, other):
        # Reflected form: graph node for `other * self`.
        return slice_op(operator.mul, other, self, graph=self.graph)
    def __truediv__(self, other):
        # Graph-node form of `self / other`.
        return slice_op(operator.truediv, self, other, graph=self.graph)
    def __rtruediv__(self, other):
        # Reflected form: graph node for `other / self`.
        return slice_op(operator.truediv, other, self, graph=self.graph)
    def __floordiv__(self, other):
        # Graph-node form of `self // other`.
        return slice_op(operator.floordiv, self, other, graph=self.graph)
    def __rfloordiv__(self, other):
        # Reflected form: graph node for `other // self`.
        return slice_op(operator.floordiv, other, self, graph=self.graph)
    def __mod__(self, other):
        # Graph-node form of `self % other`.
        return slice_op(operator.mod, self, other, graph=self.graph)
    def __rmod__(self, other):
        # Reflected form: graph node for `other % self`.
        return slice_op(operator.mod, other, self, graph=self.graph)
    def __lshift__(self, other):
        # Graph-node form of `self << other`.
        return slice_op(operator.lshift, self, other, graph=self.graph)
    def __rlshift__(self, other):
        # Reflected form: graph node for `other << self`.
        return slice_op(operator.lshift, other, self, graph=self.graph)
    def __rshift__(self, other):
        # Graph-node form of `self >> other`.
        return slice_op(operator.rshift, self, other, graph=self.graph)
    def __rrshift__(self, other):
        # Reflected form: graph node for `other >> self`.
        return slice_op(operator.rshift, other, self, graph=self.graph)
    def __and__(self, other):
        # Graph-node form of `self & other`.
        return slice_op(operator.and_, self, other, graph=self.graph)
    def __rand__(self, other):
        # Reflected form: graph node for `other & self`.
        return slice_op(operator.and_, other, self, graph=self.graph)
    def __or__(self, other):
        # Graph-node form of `self | other`.
        return slice_op(operator.or_, self, other, graph=self.graph)
    def __ror__(self, other):
        # Reflected form: graph node for `other | self`.
        return slice_op(operator.or_, other, self, graph=self.graph)
    def __xor__(self, other):
        # Graph-node form of `self ^ other`.
        return slice_op(operator.xor, self, other, graph=self.graph)
    def __rxor__(self, other):
        # Reflected form: graph node for `other ^ self`.
        return slice_op(operator.xor, other, self, graph=self.graph)
    def __lt__(self, other):
        # Graph-node form of `self < other`.
        return slice_op(operator.lt, self, other, graph=self.graph)
def __le__(self, other):
return slice_op(operator.lt, other, self, graph=self.graph)
    def __ne__(self, other):
        # Graph-node form of `self != other`.
        # NOTE(review): no matching `__eq__` override is visible in this chunk —
        # confirm that identity equality inherited from Node is intended here.
        return slice_op(operator.ne, self, other, graph=self.graph)
    def __gt__(self, other):
        # Graph-node form of `self > other`.
        return slice_op(operator.gt, self, other, graph=self.graph)
    def __ge__(self, other):
        # Graph-node form of `self >= other`.
        return slice_op(operator.ge, self, other, graph=self.graph)
def __repr__(self):
return "<var_index name=%s, index=%s>" % (self.name, self.args)
class slice_op(Node):
    """
    Node representing an elementwise binary operation applied over the
    (possibly multi-dimensional) domains of its two operands.

    Parameters
    ----------
    target : callable
        Binary function (e.g. ``operator.add``) applied to the evaluated
        operands.
    args : tuple
        The two operands; each may be a node or a scalar.
    """
    def __init__(self, target, *args, **kwargs):
        # An explicitly supplied domain wins; otherwise the domain is the
        # combination of the two operands' domains.
        if "domain" in kwargs:
            domain = tuple(kwargs.pop("domain")) if isinstance(kwargs["domain"], list) else kwargs.pop("domain")
        else:
            all_args = _flatten_iterable(args)
            slice1_var, slice1_idx, slice2_var, slice2_idx = self.get_index_nodes(all_args[0], all_args[1])
            domain = slice1_idx.combine_set_domains(slice2_idx)
        # op_name is always derived from the target; discard any caller value.
        if "op_name" in kwargs:
            kwargs.pop("op_name")
        target_name = f"{target.__module__}.{target.__name__}"
        super(slice_op, self).__init__(*args, target=target_name, domain=domain, op_name=f"slice_{target.__name__}", **kwargs)
        self.target = target

    @property
    def domain(self):
        return self.kwargs["domain"]

    def __getitem__(self, key):
        """Return the element node for `key`, creating a var_index if needed."""
        if isinstance(key, (tuple, list, np.ndarray)) and len(key) == 0:
            return self
        elif self.is_shape_finalized() and len(self.nodes) > 0:
            # Shape is concrete: the element nodes already exist; look one up.
            if isinstance(key, (int, Node)):
                key = tuple([key])
            if len(key) != len(self.shape):
                raise KeyError(f"Invalid key shape for {self.name}:\n"
                               f"Shape: {self.shape}\n"
                               f"Key: {key}")
            if isinstance(key, list):
                key = tuple(key)
            name = f"{self.name}{key}"
            if name not in self.nodes.keys():
                raise KeyError(f"{name} not in {self.name} keys:\n"
                               f"Node keys: {list(self.nodes.keys())}")
            ret = self.nodes[name]
            return ret
        else:
            # Shape not final yet: synthesize a stable name from the key and
            # reuse an existing node when possible.
            name = []
            if isinstance(key, Node):
                name.append(key.name)
            elif hasattr(key, "__len__") and not isinstance(key, str):
                for k in key:
                    if isinstance(k, Node):
                        name.append(k.name)
                    else:
                        name.append(k)
            else:
                name.append(key)
            name = tuple(name)
            name = self.var.name + str(name)
            if name in self.graph.nodes:
                return self.graph.nodes[name]
            elif isinstance(key, (list)):
                return var_index(self, key, name=name, graph=self.graph)
            elif isinstance(key, tuple):
                return var_index(self, list(key), name=name, graph=self.graph)
            else:
                return var_index(self, [key], name=name, graph=self.graph)

    def set_shape(self, shape=None, init=False):
        """Fix the node's shape, falling back to the domain's dimensions."""
        s = []
        assert isinstance(shape, (tuple, list))
        # FIX: np.product is a deprecated alias (removed in recent NumPy);
        # np.prod is the canonical spelling used elsewhere in this file.
        if all([isinstance(sv, Integral) for sv in shape]) and len(self.domain) == np.prod(shape) and len(shape) > 0:
            self._shape = shape if isinstance(shape, tuple) else tuple(shape)
        else:
            for idx, d in enumerate(self.domain.dom_set):
                if shape and isinstance(shape[idx], (func_op, Integral)):
                    s.append(shape[idx])
                elif shape and isinstance(shape[idx], float):
                    s.append(int(shape[idx]))
                elif isinstance(d, float):
                    s.append(int(d))
                elif isinstance(d, var_index):
                    s.append(d.domain)
                else:
                    s.append(d)
            self._shape = tuple(s)

    def is_scalar(self, val):
        # Scalars: non-arrays, or 1-element 1-D arrays.
        return not isinstance(val, np.ndarray) or (len(val.shape) == 1 and val.shape[0] == 1)

    def scalar_result(self):
        return False

    def _evaluate(self, op1, op2, context=None, **kwargs):
        """Apply `target` to the operand values, aligning domains if needed."""
        if self.is_scalar(op1) or self.is_scalar(op2):
            value = self.target(op1, op2)
        else:
            # Map each operand's sub-domain into this node's domain, then
            # gather/reshape so the elementwise op lines up.
            arg0_dom = self.args[0].domain
            arg1_dom = self.args[1].domain
            op1_idx = self.domain.map_sub_domain(arg0_dom) if isinstance(self.args[0], Node) else tuple([])
            op2_idx = self.domain.map_sub_domain(arg1_dom) if isinstance(self.args[1], Node) else tuple([])
            op1 = np.asarray(list(map(lambda x: op1[x], op1_idx))).reshape(self.domain.computed_shape)
            op2 = np.asarray(list(map(lambda x: op2[x], op2_idx))).reshape(self.domain.computed_shape)
            value = self.target(op1, op2)
        return value

    def get_index_nodes(self, slice1_var=None, slice2_var=None):
        """Return (var, domain) pairs for both operands; scalars get an empty domain."""
        if slice1_var is None and slice2_var is None:
            slice1_var, slice2_var = self.args
        if isinstance(slice1_var, (slice_op, var_index)) or _is_node_type_instance(slice1_var, "GroupNode"):
            slice1_idx = slice1_var.domain
        elif _is_node_type_instance(slice1_var, "index"):
            slice1_idx = slice1_var.domain
        else:
            slice1_idx = Domain(tuple([]))
        if isinstance(slice2_var, (slice_op, var_index)) or _is_node_type_instance(slice2_var, "GroupNode"):
            slice2_idx = slice2_var.domain
        elif _is_node_type_instance(slice2_var, "index"):
            slice2_idx = slice2_var.domain
        else:
            slice2_idx = Domain(tuple([]))
        return slice1_var, slice1_idx, slice2_var, slice2_idx

    # Operator overloads: each builds a new slice_op node (lazy evaluation).
    def __add__(self, other):
        return slice_op(operator.add, self, other, graph=self.graph)
    def __radd__(self, other):
        return slice_op(operator.add, other, self, graph=self.graph)
    def __sub__(self, other):
        return slice_op(operator.sub, self, other, graph=self.graph)
    def __rsub__(self, other):
        return slice_op(operator.sub, other, self, graph=self.graph)
    def __pow__(self, other):
        return slice_op(builtins.pow, self, other, graph=self.graph)
    def __rpow__(self, other):
        return slice_op(builtins.pow, other, self, graph=self.graph)
    def __mul__(self, other):
        return slice_op(operator.mul, self, other, graph=self.graph)
    def __rmul__(self, other):
        return slice_op(operator.mul, other, self, graph=self.graph)
    def __truediv__(self, other):
        return slice_op(operator.truediv, self, other, graph=self.graph)
    def __rtruediv__(self, other):
        return slice_op(operator.truediv, other, self, graph=self.graph)
    def __floordiv__(self, other):
        return slice_op(operator.floordiv, self, other, graph=self.graph)
    def __rfloordiv__(self, other):
        return slice_op(operator.floordiv, other, self, graph=self.graph)
    def __mod__(self, other):
        return slice_op(operator.mod, self, other, graph=self.graph)
    def __rmod__(self, other):
        return slice_op(operator.mod, other, self, graph=self.graph)
    def __lshift__(self, other):
        return slice_op(operator.lshift, self, other, graph=self.graph)
    def __rlshift__(self, other):
        return slice_op(operator.lshift, other, self, graph=self.graph)
    def __rshift__(self, other):
        return slice_op(operator.rshift, self, other, graph=self.graph)
    def __rrshift__(self, other):
        return slice_op(operator.rshift, other, self, graph=self.graph)
    def __and__(self, other):
        return slice_op(operator.and_, self, other, graph=self.graph)
    def __rand__(self, other):
        return slice_op(operator.and_, other, self, graph=self.graph)
    def __or__(self, other):
        return slice_op(operator.or_, self, other, graph=self.graph)
    def __ror__(self, other):
        return slice_op(operator.or_, other, self, graph=self.graph)
    def __xor__(self, other):
        return slice_op(operator.xor, self, other, graph=self.graph)
    def __rxor__(self, other):
        return slice_op(operator.xor, other, self, graph=self.graph)
    def __lt__(self, other):
        return slice_op(operator.lt, self, other, graph=self.graph)
    def __le__(self, other):
        # FIX: previously slice_op(operator.lt, other, self), which computes
        # `other < self` (i.e. `>`) rather than `self <= other`.
        return slice_op(operator.le, self, other, graph=self.graph)
    def __ne__(self, other):
        return slice_op(operator.ne, self, other, graph=self.graph)
    def __gt__(self, other):
        return slice_op(operator.gt, self, other, graph=self.graph)
    def __ge__(self, other):
        return slice_op(operator.ge, self, other, graph=self.graph)

    def __repr__(self):
        return "<slice_%s '%s'>" % (self.target.__name__, self.name)
class func_op(Node):  # pylint: disable=C0103,R0903
    """
    Node wrapper for stateless functions.

    Parameters
    ----------
    target : callable
        Function used to evaluate the node.
    args : tuple
        Positional arguments passed to the target.
    kwargs : dict
        Keyword arguments passed to the target.
    """
    def __init__(self, target, *args, **kwargs):
        # Default the op_name to the wrapped function's name.
        kwargs["op_name"] = kwargs["op_name"] if "op_name" in kwargs \
            else f"{target.__name__}"
        # Domain resolution: explicit kwarg wins; for binary ops combine the
        # operands' domains; otherwise the node is scalar (empty domain).
        if "domain" in kwargs:
            domain = tuple(kwargs.pop("domain")) if isinstance(kwargs["domain"], list) else kwargs.pop("domain")
        elif len(args) == 2:
            all_args = _flatten_iterable(args)
            slice1_var, slice1_idx, slice2_var, slice2_idx = self.get_index_nodes(all_args[0], all_args[1])
            domain = slice1_idx.combine_set_domains(slice2_idx)
        else:
            domain = Domain(tuple([]))
        self._target = None
        super(func_op, self).__init__(*args, target=f"{target.__module__}.{target.__name__}", domain=domain, **kwargs)
        self.target = target
        self.added_attrs += ["domain", "target"]
    @property
    def target(self):
        return self._target
    @target.setter
    def target(self, fnc):
        # Keep op_name and the serialized target path in sync with the callable.
        self._target = fnc
        self.op_name = f"{fnc.__name__}"
        self.kwargs["target"] = f"{fnc.__module__}.{fnc.__name__}"
    def __getitem__(self, key):
        # func_op nodes are not indexable; indexing is the identity.
        return self
    @property
    def domain(self):
        return self.kwargs["domain"]
    def get_index_nodes(self, slice1_var=None, slice2_var=None):
        # Return (var, domain) for each operand; non-sliced operands get an
        # empty Domain.
        if slice1_var is None and slice2_var is None:
            slice1_var, slice2_var = self.args
        if isinstance(slice1_var, (slice_op, var_index)) or _is_node_type_instance(slice1_var, "GroupNode"):
            slice1_idx = slice1_var.domain
        else:
            slice1_idx = Domain(tuple([]))
        if isinstance(slice2_var, (slice_op, var_index)) or _is_node_type_instance(slice2_var, "GroupNode"):
            slice2_idx = slice2_var.domain
        else:
            slice2_idx = Domain(tuple([]))
        return slice1_var, slice1_idx, slice2_var, slice2_idx
    def _evaluate(self, *args, **kwargs):
        # Strip bookkeeping attrs (domain/target/...) before calling the target.
        for aa in list(kwargs.keys()):
            if aa in self.added_attrs:
                kwargs.pop(aa)
        return self.target(*args, **kwargs)
    def __call__(self, *args, **kwargs):
        return call(self, *args, **kwargs)
    def __repr__(self):
        return "<func_op '%s' target=%s args=<%d items>>" % \
            (self.name, self.kwargs["target"], len(self.args))
def nodeop(target=None, **kwargs):
    """
    Decorator turning a plain function into a :class:`func_op` node factory.

    Usable both bare (``@nodeop``) and parameterized (``@nodeop(key=value)``);
    the captured ``kwargs`` are forwarded to every created node.
    """
    if target is None:
        # Parameterized use: @nodeop(...) — capture kwargs, wait for the function.
        return functools.partial(nodeop, **kwargs)

    @functools.wraps(target)
    def _wrapper(*call_args, **call_kwargs):
        # Each invocation builds a node instead of executing `target` directly.
        return func_op(target, *call_args, **call_kwargs, **kwargs)

    return _wrapper
@nodeop
def call(func, *args, **kwargs):
    """
    Call `func` with positional arguments `args` and keyword arguments `kwargs`.

    Parameters
    ----------
    func : callable
        Function to call when the node is executed.
    args : list
        Sequence of positional arguments passed to `func`.
    kwargs : dict
        Mapping of keyword arguments passed to `func`.
    """
    # Lazily evaluated: `call` is itself a func_op node, so `func` only runs
    # when the node is evaluated.
    return func(*args, **kwargs)
@contextlib.contextmanager
def control_dependencies(dependencies, graph=None):
    """
    Ensure that all `dependencies` are executed before any nodes in this scope.

    Parameters
    ----------
    dependencies : list
        Sequence of nodes to be evaluated before evaluating any nodes defined
        in this scope.
    graph : Graph, optional
        Graph to attach the dependencies to; defaults to the active graph.
    """
    graph = Node.get_active_graph(graph)
    graph.dependencies.extend(dependencies)
    try:
        yield
    finally:
        # FIX: the previous unconditional `del graph.dependencies[-len(...):]`
        # cleared the ENTIRE list when `dependencies` was empty (del lst[-0:]
        # == del lst[0:]), and an exception in the scope leaked the pushed
        # entries because there was no finally.
        if dependencies:
            del graph.dependencies[-len(dependencies):]
#pylint: disable=C0103
# Node-building wrappers around Python builtins. Each alias carries a trailing
# underscore so the original builtin is not shadowed at import sites.
abs_ = nodeop(builtins.abs)
dict_ = nodeop(builtins.dict)
help_ = nodeop(builtins.help)
min_ = nodeop(builtins.min)
setattr_ = nodeop(builtins.setattr)
all_ = nodeop(builtins.all)
dir_ = nodeop(builtins.dir)
hex_ = nodeop(builtins.hex)
next_ = nodeop(builtins.next)
slice_ = nodeop(builtins.slice)
any_ = nodeop(builtins.any)
divmod_ = nodeop(builtins.divmod)
id_ = nodeop(builtins.id)
object_ = nodeop(builtins.object)
sorted_ = nodeop(builtins.sorted)
ascii_ = nodeop(builtins.ascii)
enumerate_ = nodeop(builtins.enumerate)
input_ = nodeop(builtins.input)
oct_ = nodeop(builtins.oct)
staticmethod_ = nodeop(builtins.staticmethod)
bin_ = nodeop(builtins.bin)
eval_ = nodeop(builtins.eval)
int_ = nodeop(builtins.int)
open_ = nodeop(builtins.open)
str_ = nodeop(builtins.str)
bool_ = nodeop(builtins.bool)
exec_ = nodeop(builtins.exec)
isinstance_ = nodeop(builtins.isinstance)
ord_ = nodeop(builtins.ord)
sum_ = nodeop(builtins.sum)
bytearray_ = nodeop(builtins.bytearray)
filter_ = nodeop(builtins.filter)
issubclass_ = nodeop(builtins.issubclass)
pow_ = nodeop(builtins.pow)
super_ = nodeop(builtins.super)
bytes_ = nodeop(builtins.bytes)
float_ = nodeop(builtins.float)
iter_ = nodeop(builtins.iter)
print_ = nodeop(builtins.print)
tuple_ = nodeop(builtins.tuple)
callable_ = nodeop(builtins.callable)
format_ = nodeop(builtins.format)
len_ = nodeop(builtins.len)
property_ = nodeop(builtins.property)
type_ = nodeop(builtins.type)
chr_ = nodeop(builtins.chr)
frozenset_ = nodeop(builtins.frozenset)
list_ = nodeop(builtins.list)
range_ = nodeop(builtins.range)
vars_ = nodeop(builtins.vars)
classmethod_ = nodeop(builtins.classmethod)
getattr_ = nodeop(builtins.getattr)
locals_ = nodeop(builtins.locals)
repr_ = nodeop(builtins.repr)
zip_ = nodeop(builtins.zip)
compile_ = nodeop(builtins.compile)
globals_ = nodeop(builtins.globals)
map_ = nodeop(builtins.map)
reversed_ = nodeop(builtins.reversed)
complex_ = nodeop(builtins.complex)
hasattr_ = nodeop(builtins.hasattr)
max_ = nodeop(builtins.max)
round_ = nodeop(builtins.round)
delattr_ = nodeop(builtins.delattr)
hash_ = nodeop(builtins.hash)
memoryview_ = nodeop(builtins.memoryview)
set_ = nodeop(builtins.set)
# Node-building wrappers around the `operator` module (no underscore needed
# except where `operator` itself uses one, e.g. and_/or_/is_).
add = nodeop(operator.add)
and_ = nodeop(operator.and_)
attrgetter = nodeop(operator.attrgetter)
concat = nodeop(operator.concat)
contains = nodeop(operator.contains)
countOf = nodeop(operator.countOf)
delitem = nodeop(operator.delitem)
eq = nodeop(operator.eq)
floordiv = nodeop(operator.floordiv)
ge = nodeop(operator.ge)
getitem = nodeop(operator.getitem)
gt = nodeop(operator.gt)
index = nodeop(operator.index)
indexOf = nodeop(operator.indexOf)
inv = nodeop(operator.inv)
invert = nodeop(operator.invert)
ior = nodeop(operator.ior)
ipow = nodeop(operator.ipow)
irshift = nodeop(operator.irshift)
is_ = nodeop(operator.is_)
is_not = nodeop(operator.is_not)
itemgetter = nodeop(operator.itemgetter)
le = nodeop(operator.le)
length_hint = nodeop(operator.length_hint)
lshift = nodeop(operator.lshift)
lt = nodeop(operator.lt)
matmul = nodeop(operator.matmul)
methodcaller = nodeop(operator.methodcaller)
mod = nodeop(operator.mod)
mul = nodeop(operator.mul)
ne = nodeop(operator.ne)
neg = nodeop(operator.neg)
not_ = nodeop(operator.not_)
or_ = nodeop(operator.or_)
pos = nodeop(operator.pos)
rshift = nodeop(operator.rshift)
setitem = nodeop(operator.setitem)
sub = nodeop(operator.sub)
truediv = nodeop(operator.truediv)
truth = nodeop(operator.truth)
xor = nodeop(operator.xor)
# Node-building wrapper for dynamic imports.
import_ = nodeop(importlib.import_module)
# (non-source residue: dataset metadata columns — avg_line_length | max_line_length | alphanum_fraction)
from polymath import UNSET_SHAPE, DEFAULT_SHAPES
import builtins
import operator
# FIX: Mapping and Sequence live in collections.abc; the collections aliases
# were deprecated since Python 3.3 and removed in Python 3.10.
from collections import OrderedDict, deque
from collections.abc import Mapping, Sequence
import functools
from numbers import Integral, Rational, Real
import contextlib
import traceback
import uuid
import numpy as np
import importlib
from .graph import Graph
from .domain import Domain
from .util import _noop_callback, _flatten_iterable, node_hash, \
    _is_node_type_instance, is_iterable
class Node(object):
    """
    Base class for nodes in a polymath computation graph.

    A node records its positional ``args`` (which may themselves be nodes),
    free-form ``kwargs``, an owning :class:`Graph`, a ``shape`` and a unique
    ``name``.  Operators are overloaded to build new graph nodes lazily;
    values are produced via :meth:`evaluate` / :meth:`run` with a ``context``
    mapping nodes to values.
    """
    _graph_stack = deque([None])  # active-graph stack for `with node:` scoping
    _eval_stack = []
    stack_size = 5
    evaluated_nodes = 0

    def __init__(self, *args,
                 name=None,
                 shape=None,
                 graph=None,
                 dependencies=None,
                 op_name=None,
                 value=None,
                 **kwargs):
        self.nodes = Graph()
        self.value = value
        self.dependencies = []
        self._args = []
        # FIX: these were misspelled (`_predeecessors` / `_succesors`);
        # add_predecessor appended to `self._predecessors`, which never
        # existed, raising AttributeError at runtime.
        self._predecessors = []
        self._successors = []
        self.args = args
        if "name" in kwargs:
            kwargs.pop("name")
        self.added_attrs = []
        self.kwargs = kwargs
        self.graph = graph
        self._shape = OrderedDict()
        self.shape = shape or tuple([])
        self.dependencies = [] if dependencies is None else dependencies
        if self.graph:
            self.dependencies.extend(self.graph.dependencies)
        self._name = None
        self.name = name or uuid.uuid4().hex
        self._op_name = None
        self.op_name = op_name
        self._stack = traceback.extract_stack(limit=1)

    @property
    def graph(self):
        return self._graph

    def preds(self):
        # FIX: previously returned the nonexistent `self._preds`.
        return self._predecessors

    def succs(self):
        # FIX: previously returned `self._preds` (copy-paste bug) instead of
        # the successor list.
        return self._successors

    def add_predecessor(self, pred):
        # Predecessors are stored by fully-scoped graph name when possible.
        if isinstance(pred, Node):
            self._predecessors.append(pred.gname)
        else:
            self._predecessors.append(pred)

    def add_successor(self, succ):
        if isinstance(succ, Node):
            self._successors.append(succ.gname)
        else:
            self._successors.append(succ)

    def set_edges(self):
        # Wire this node into the graph: it succeeds each of its args.
        for e in self.args:
            self.add_predecessor(e)
            if isinstance(e, Node):
                e.add_successor(self)

    @property
    def domain(self):
        return Domain(tuple([]))

    @property
    def args(self):
        return tuple(self._args)

    @property
    def argnames(self):
        return [a.name if isinstance(a, Node) else a for a in self.args]

    @property
    def shape(self):
        return self._shape

    @property
    def var(self):
        return self

    @property
    def name(self):
        return self._name

    @property
    def op_name(self):
        return self._op_name

    @op_name.setter
    def op_name(self, op_name):
        if op_name:
            self._op_name = op_name
        elif self.__class__.__name__ == "Node":
            self._op_name = self.name
        else:
            self._op_name = self.__class__.__name__

    @name.setter
    def name(self, name):
        self.set_name(name)

    @args.setter
    def args(self, args):
        new_args = []
        for arg in args:
            if isinstance(arg, Node):
                if self.__class__.__name__ == "Node":
                    self.nodes[arg.name] = self.graph[arg.name]
            new_args.append(arg)
        self._args = tuple(new_args)

    @shape.setter
    def shape(self, shape):
        self.set_shape(shape, init=True)

    @graph.setter
    def graph(self, graph):
        self._graph = Node.get_active_graph(graph)

    @property
    def gname(self):
        # Fully-qualified name: enclosing graph names joined with "/".
        scope_names = [self.name]
        cgraph = self.graph
        while cgraph:
            scope_names.append(cgraph.name)
            cgraph = cgraph.graph
        return "/".join(list(reversed(scope_names)))

    def __enter__(self):
        Node._graph_stack.append(self)
        return self

    def __exit__(self, *args):
        assert self == Node._graph_stack.pop()

    def __repr__(self):
        return "<node '%s'>" % self.name

    def add_attribute(self, key, value):
        self.added_attrs.append(key)
        self.kwargs[key] = value

    def is_shape_finalized(self):
        # A shape is final once every dimension is a concrete integer.
        if self.shape == UNSET_SHAPE:
            return False
        for s in self.shape:
            if not isinstance(s, Integral):
                return False
        return True

    def set_shape(self, shape=None, init=False):
        if isinstance(shape, float):
            # FIX: `np.int` was removed in NumPy 1.24; builtin int is equivalent.
            self._shape = tuple([int(shape)])
        elif isinstance(shape, Integral):
            self._shape = tuple([shape])
        elif isinstance(shape, Node):
            self._shape = tuple([shape])
        elif not shape or len(shape) == 0:
            self._shape = UNSET_SHAPE
        else:
            shapes = []
            for dim in shape:
                if isinstance(dim, (Node, Integral)):
                    shapes.append(dim)
                elif isinstance(dim, float):
                    shapes.append(int(dim))
                else:
                    raise TypeError(f"Shape value must be placeholder or integer value for {self.name}\n"
                                    f"\tDim: {dim}"
                                    f"\n\t{self.kwargs} ")
            self._shape = tuple(shapes)

    @staticmethod
    def get_active_graph(graph=None):
        graph = graph or Node._graph_stack[-1]
        return graph

    def instantiate_node(self, node):
        """Resolve `node` (name or Node) against this graph, validating ownership."""
        if isinstance(node, str):
            return self.nodes[node]
        if isinstance(node, Node):
            if node.name not in self.nodes and (node.graph != self):
                raise RuntimeError(f"node '{node}' does not belong to {self} graph, instead belongs to"
                                   f" {node.graph}")
            return node
        raise ValueError(f"'{node}' is not an `Node` instance or node name")

    def instantiate_graph(self, context, **kwargs):
        """Normalize a context mapping: resolve node names and seed shapes."""
        if context is None:
            context = {}
        elif not isinstance(context, Mapping):
            raise ValueError("`context` must be a mapping.")
        nodes = list(context)
        for node in nodes:
            value = context.pop(node)
            node = self.instantiate_node(node)
            if node in context:
                raise ValueError(f"duplicate unequal value for node '{node}'")
            context[node] = value
            if node.op_name in ["placeholder", "state", "input", "output", "temp"] and not node.is_shape_finalized():
                context[node] = node.evaluate(context)
        for name, value in kwargs.items():
            node = self.nodes[name]
            if node in context:
                raise ValueError(f"duplicate value for node '{node}'")
            context[node] = value
            if node.op_name in ["placeholder", "state", "input", "output", "temp"] and not node.is_shape_finalized():
                context[node] = node.evaluate(context)
        return context

    def run(self, fetches, context=None, *, callback=None, **kwargs):
        """Evaluate `fetches` (node, name, or sequence thereof) under `context`."""
        if isinstance(fetches, (str, Node)):
            fetches = [fetches]
            single = True
        elif isinstance(fetches, Sequence):
            single = False
        else:
            raise ValueError("`fetches` must be an `Node` instance, node name, or a "
                             "sequence thereof.")
        fetches = [self.instantiate_node(node) for node in fetches]
        context = self.instantiate_graph(context, **kwargs)
        for c in context:
            # Written-to nodes are fetched through their latest write alias.
            if c in fetches and c.op_name in ["output", "state", "temp"]:
                write_name = "/".join([f"{i}{c.write_count-1}" for i in c.name.split("/")]) if c.write_count > 0 else c.name
                fetches[fetches.index(c)] = c.graph.nodes[write_name]
        values = [fetch.evaluate_node(fetch, context, callback=callback) for fetch in fetches]
        return values[0] if single else tuple(values)

    def __getstate__(self):
        return self.__dict__

    def __setstate__(self, data):
        self.__dict__.update(data)

    def set_name(self, name):
        """Register this node under `name` in its graph (renaming if needed)."""
        name = name or uuid.uuid4().hex
        if self.graph and name in self.graph.nodes:
            raise ValueError(f"duplicate name '{name}' in {self.graph.name}:\n\t"
                             f"Existing: {self.graph.nodes[name].args}\n\t"
                             f"New: {self.args}")
        if self.graph:
            graph = self.graph
            if self._name and self._name in graph.nodes:
                graph.update_graph_key(self._name, name)
            else:
                graph.nodes[name] = self
        self._name = name
        return self

    def evaluate_dependencies(self, context, callback=None):
        for node in self.dependencies:
            node.evaluate(context, callback)

    def evaluate(self, context, callback=None):
        """Evaluate this node, memoizing the result in `context`."""
        self.evaluate_dependencies(context, callback)
        if self in context:
            return context[self]
        partial = functools.partial(self.evaluate_node, context=context, callback=callback)
        args = [partial(arg) for arg in self.args]
        kwargs = {key: partial(value) for key, value in self.kwargs.items() if key not in self.added_attrs}
        callback = callback or _noop_callback
        with callback(self, context):
            if self.__class__.__name__ == "Node":
                context[self] = self.value = self._evaluate(*args, context=context, **kwargs)
            else:
                context[self] = self.value = self._evaluate(*args, **kwargs)
        return self.value

    def _evaluate(self, *args, context=None, **kwargs):
        return self(*args, context, **kwargs)

    @classmethod
    def evaluate_node(cls, node, context, **kwargs):
        """Recursively evaluate `node` (Node or container of nodes) in `context`."""
        Node.evaluated_nodes += 1
        try:
            if isinstance(node, Node):
                Node._eval_stack.append(node.name)
                return node.evaluate(context, **kwargs)
            partial = functools.partial(cls.evaluate_node, context=context, **kwargs)
            if isinstance(node, tuple):
                return tuple(partial(element) for element in node)
            if isinstance(node, list):
                return [partial(element) for element in node]
            if isinstance(node, dict):
                return {partial(key): partial(value) for key, value in node.items()}
            if isinstance(node, slice):
                return slice(*[partial(getattr(node, attr))
                               for attr in ['start', 'stop', 'step']])
            return node
        except Exception as ex:
            # Reconstruct user-level definition sites for a readable error.
            messages = []
            interactive = False
            if isinstance(node, Node) or not is_iterable(node):
                node = [node]
            for n in node:
                stack = []
                if isinstance(n, Node):
                    for frame in reversed(n._stack):
                        fname = frame.filename
                        if 'polymath' in fname:
                            continue
                        if interactive and not fname.startswith('<'):
                            break
                        interactive = fname.startswith('<')
                        stack.append(frame)
                    stack = "".join(traceback.format_list(reversed(stack)))
                    message = "Failed to evaluate node `%s` defined at:\n\n%s" % (n, stack)
                    messages.append(message)
            raise ex from EvaluationError("".join(messages))

    @classmethod
    def init_from_args(cls, *args,
                       name=None,
                       shape=None,
                       graph=None,
                       dependencies=None,
                       op_name=None,
                       value=None,
                       **kwargs):
        if len(args) == 0:
            n = cls(name=name,
                    shape=shape,
                    graph=graph,
                    op_name=op_name,
                    dependencies=dependencies,
                    value=value,
                    **kwargs)
        else:
            n = cls(*args,
                    name=name,
                    shape=shape,
                    graph=graph,
                    op_name=op_name,
                    dependencies=dependencies,
                    value=value,
                    **kwargs)
        return n

    def __bool__(self):
        return True

    def __hash__(self):
        return id(self)

    def func_hash(self):
        return node_hash(self)

    def find_node(self, name):
        """Search enclosing graphs outward for `name`."""
        g = self.graph
        while g is not None and name not in g.nodes:
            g = g.graph
        # FIX: guard g is not None — walking off the root graph used to raise
        # TypeError on `name in g.nodes` instead of the intended RuntimeError.
        if g is not None and name in g.nodes:
            return g.nodes[name]
        raise RuntimeError(f"Cannot find {name} in graph nodes. Graph: {self.graph}")

    def __len__(self):
        if self.shape == UNSET_SHAPE:
            raise TypeError(f'`shape` must be specified explicitly for nodes {self}')
        return self.shape[0]

    def __iter__(self):
        num = len(self)
        for i in range(num):
            yield self[i]

    def __eq__(self, other):
        # NOTE(review): identity equality (hash is id), while __ne__ below
        # builds a graph node — appears intentional for this DSL; confirm.
        return hash(self) == hash(other)

    def __getattr__(self, name):
        return getattr_(self, name, graph=self.graph)

    def __getitem__(self, key):
        if self.__class__.__name__ != "Node":
            if isinstance(key, (slice, Integral)):
                return getitem(self, key, graph=self.graph)
            else:
                if isinstance(key, (list)):
                    return var_index(self, key, graph=self)
                elif isinstance(key, tuple):
                    return var_index(self, list(key), graph=self)
                else:
                    return var_index(self, [key], graph=self)
        else:
            return self.nodes[key]

    # Operator overloads: build graph nodes; sliced operand types take priority
    # by delegating to their reflected implementation.
    def __add__(self, other):
        return add(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__radd__(self)
    def __radd__(self, other):
        return add(other, self, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__add__(self)
    def __sub__(self, other):
        return sub(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__rsub__(self)
    def __rsub__(self, other):
        return sub(other, self, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__sub__(self)
    def __pow__(self, other):
        return pow_(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__rpow__(self)
    def __rpow__(self, other):
        return pow_(other, self, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__rpow__(self)
    def __matmul__(self, other):
        return matmul(self, other, graph=self.graph)
    def __rmatmul__(self, other):
        return matmul(other, self, graph=self.graph)
    def __mul__(self, other):
        return mul(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__rmul__(self)
    def __rmul__(self, other):
        return mul(other, self, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__mul__(self)
    def __truediv__(self, other):
        return truediv(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__truediv__(self)
    def __rtruediv__(self, other):
        return truediv(other, self, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__rtruediv__(self)
    def __floordiv__(self, other):
        return floordiv(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__rfloordiv__(self)
    def __rfloordiv__(self, other):
        return floordiv(other, self, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__floordiv__(self)
    def __mod__(self, other):
        return mod(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__rmod__(self)
    def __rmod__(self, other):
        return mod(other, self, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__mod__(self)
    def __lshift__(self, other):
        return lshift(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__rlshift__(self)
    def __rlshift__(self, other):
        return lshift(other, self, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__lshift__(self)
    def __rshift__(self, other):
        return rshift(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__rrshift__(self)
    def __rrshift__(self, other):
        return rshift(other, self, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__rshift__(self)
    def __and__(self, other):
        return and_(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__rand__(self)
    def __rand__(self, other):
        return and_(other, self, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__and__(self)
    def __or__(self, other):
        return or_(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__ror__(self)
    def __ror__(self, other):
        return or_(other, self, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__or__(self)
    def __xor__(self, other):
        return xor(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__rxor__(self)
    def __rxor__(self, other):
        return xor(other, self, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__xor__(self)
    def __lt__(self, other):
        return lt(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__gt__(self)
    def __le__(self, other):
        return le(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__ge__(self)
    def __ne__(self, other):
        return ne(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__ne__(self)
    def __gt__(self, other):
        return gt(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__lt__(self)
    def __ge__(self, other):
        return ge(self, other, graph=self.graph) if not _is_node_type_instance(other, ("slice_op", "var_index", "index")) else other.__le__(self)
    def __invert__(self):
        return inv(self, graph=self.graph)
    def __neg__(self):
        return neg(self, graph=self.graph)
    def __abs__(self):
        return abs_(self, graph=self.graph)
    def __pos__(self):
        return pos(self, graph=self.graph)
    def __reversed__(self):
        return reversed_(self, graph=self.graph)

    def update_graph_key(self, old_key, new_key):
        # Rebuild the ordered node map with `old_key` renamed, preserving order.
        n = list(map(lambda k: (new_key, self.nodes[k]) if k == old_key else (k, self.nodes[k]), self.nodes.keys()))
        self.nodes = Graph(n)

    def insert_node(self, node, idx):
        node_list = list(self.nodes.items())
        node_list.insert(idx, (node.name, node))
        self.nodes = Graph(node_list)

    def __call__(self, *args, **kwargs):
        return self.run(*args, **kwargs)
class EvaluationError(RuntimeError):
    # FIX: the class statement had an empty body (its docstring was stripped),
    # which is a SyntaxError; restore a body.
    """Raised when evaluation of a graph node fails."""
class var_index(Node):
def __init__(self, var, idx, name=None, **kwargs):
if "domain" in kwargs:
domain = tuple(kwargs.pop("domain")) if isinstance(kwargs["domain"], list) else kwargs.pop("domain")
else:
domain = Domain(idx)
super(var_index, self).__init__(var, idx, name=name, domain=domain, **kwargs)
@property
def domain(self):
return self.kwargs["domain"]
@property
def var(self):
var, index_list = self.args
return var
def set_name(self, name):
    """Register this node in its graph under ``name``.

    Raises:
        ValueError: if ``name`` is already taken by another node in the graph.

    Returns:
        self, so calls can be chained.
    """
    # A duplicate key would silently shadow an existing node, so fail loudly.
    if self.graph and name in self.graph.nodes:
        raise ValueError(f"duplicate name '{name}' in {self.graph.name}:"
                         f"Existing: {self.graph.nodes[name].args}\n"
                         f"New: {self.args}")
    if self.graph:
        graph = self.graph
        # If this node was already registered, re-key the existing entry
        # rather than inserting a second copy under the new name.
        if self._name is not None and self._name in graph.nodes:
            graph.update_graph_key(self._name, name)
        else:
            graph.nodes[name] = self
    self._name = name
    return self
def __getitem__(self, key):
    """Index into this var_index.

    When the shape is finalized and the child nodes are fully materialized,
    ``key`` is resolved to the concrete child at the corresponding flat
    (C-order) position; otherwise a new symbolic var_index over the same
    variable is created and returned.
    """
    if self.is_shape_finalized() and len(self.nodes) >= np.prod(self.shape):
        if isinstance(key, Integral):
            key = tuple([key])
        # Flatten the multi-dimensional key to a single C-order offset.
        idx = np.ravel_multi_index(key, dims=self.shape, order='C')
        ret = self.nodes.item_by_index(idx)
        return ret
    else:
        # Shape not known yet: defer by wrapping the key (normalized to a
        # tuple) in a fresh symbolic var_index.
        if isinstance(key, (list)):
            ret = var_index(self.var, tuple(key), graph=self)
        elif isinstance(key, tuple):
            ret = var_index(self.var, key, graph=self)
        else:
            ret = var_index(self.var, tuple([key]), graph=self)
        return ret
def is_scalar(self, val=None):
if val is not None and (not isinstance(val, np.ndarray) or (len(val.shape) == 1 and val.shape[0] == 1)):
if self.var.shape != DEFAULT_SHAPES[0] and (len(self.var.shape) == 1 and not isinstance(self.var.shape[0],Node)):
raise ValueError(f"Invalid shape var for var index {self} with variable shape {self.var.shape}")
return True
else:
return self.var.shape == DEFAULT_SHAPES[0]
def scalar_result(self):
return all([isinstance(v, int) for v in self.args[1]])
def _evaluate(self, var, indices, **kwargs):
    """Materialize this indexed view of `var`.

    Scalars collapse to a single element; otherwise the index pairs are
    computed from the node's domain, `var` is coerced to an ndarray whose
    rank matches the target shape, validated against the index bounds, and
    gathered element-by-element into the output shape.
    """
    if self.is_scalar(var):
        # Scalar fast path: a single element at flat index 0.
        out_shape = (1,)
        indices = (0,)
        single = True
    else:
        out_shape = self.domain.shape_from_indices(indices)
        indices = self.domain.compute_pairs()
        single = False
    # Coerce supported Python scalars / lists to ndarrays; reject the rest.
    if isinstance(var, (Integral, Real, str)):
        var = np.asarray([var])
    elif not isinstance(var, (np.ndarray, list)):
        raise TypeError(f"Variable {var} with type {type(var)} is not a list or numpy array, and cannot be sliced for {self.name}")
    elif isinstance(var, list):
        var = np.asarray(var)
    # Reconcile rank when total sizes agree: insert singleton axes to grow
    # the rank, or squeeze to shrink it.
    if len(var.shape) != len(out_shape) and np.prod(var.shape) == np.prod(out_shape):
        if len(out_shape) > len(var.shape):
            for i in range(len(out_shape)):
                if out_shape[i] == 1:
                    var = np.expand_dims(var, axis=i)
        else:
            var = np.squeeze(var)
    if len(var.shape) != len(out_shape) and np.prod(var.shape) != np.prod(out_shape):
        raise ValueError(f"Index list does not match {var.shape} in {self.var.name} - {self.var.op_name}"
                         f"dimensions for slice {self.args[0].name} with {out_shape}.\n"
                         f"Domain: {self.domain}\n"
                         f"Eval Stack: {Node._eval_stack}")
    # Bounds check against the last index pair only — assumes
    # compute_pairs() yields indices in ascending order (TODO confirm).
    if not single and not all([(idx_val - 1) >= indices[-1][idx] for idx, idx_val in enumerate(var.shape)]):
        raise ValueError(f"var_index {self.name} has indices which are greater than the variable shape:\n"
                         f"\tArgs: {self.args}\n"
                         f"\tVar shape: {var.shape}\n"
                         f"\tNode shape: {self.var.shape}\n"
                         f"\tIndex Upper bounds: {indices[-1]}")
    indices = list(map(lambda x: x.tolist() if isinstance(x, np.ndarray) else x, indices))
    # Gather the selected elements and reshape to the output shape.
    res = var[indices] if single else np.asarray([var[idx] for idx in indices]).reshape(out_shape)
    if out_shape == (1,) and len(indices) == 1:
        res = res[0]
    # Record the realized shape/indices on the domain for downstream ops.
    self.domain.set_computed(out_shape, indices)
    return res
def __add__(self, other):
return slice_op(operator.add, self, other, graph=self.graph)
def __radd__(self, other):
return slice_op(operator.add, other, self, graph=self.graph)
def __sub__(self, other):
return slice_op(operator.sub, self, other, graph=self.graph)
def __rsub__(self, other):
return slice_op(operator.sub, other, self, graph=self.graph)
def __pow__(self, other):
return slice_op(builtins.pow, self, other, graph=self.graph)
def __rpow__(self, other):
return slice_op(builtins.pow, other, self, graph=self.graph)
def __mul__(self, other):
return slice_op(operator.mul, self, other, graph=self.graph)
def __rmul__(self, other):
return slice_op(operator.mul, other, self, graph=self.graph)
def __truediv__(self, other):
return slice_op(operator.truediv, self, other, graph=self.graph)
def __rtruediv__(self, other):
return slice_op(operator.truediv, other, self, graph=self.graph)
def __floordiv__(self, other):
return slice_op(operator.floordiv, self, other, graph=self.graph)
def __rfloordiv__(self, other):
return slice_op(operator.floordiv, other, self, graph=self.graph)
def __mod__(self, other):
return slice_op(operator.mod, self, other, graph=self.graph)
def __rmod__(self, other):
return slice_op(operator.mod, other, self, graph=self.graph)
def __lshift__(self, other):
return slice_op(operator.lshift, self, other, graph=self.graph)
def __rlshift__(self, other):
return slice_op(operator.lshift, other, self, graph=self.graph)
def __rshift__(self, other):
return slice_op(operator.rshift, self, other, graph=self.graph)
def __rrshift__(self, other):
return slice_op(operator.rshift, other, self, graph=self.graph)
def __and__(self, other):
return slice_op(operator.and_, self, other, graph=self.graph)
def __rand__(self, other):
return slice_op(operator.and_, other, self, graph=self.graph)
def __or__(self, other):
return slice_op(operator.or_, self, other, graph=self.graph)
def __ror__(self, other):
return slice_op(operator.or_, other, self, graph=self.graph)
def __xor__(self, other):
return slice_op(operator.xor, self, other, graph=self.graph)
def __rxor__(self, other):
return slice_op(operator.xor, other, self, graph=self.graph)
def __lt__(self, other):
return slice_op(operator.lt, self, other, graph=self.graph)
def __le__(self, other):
    """Element-wise ``self <= other`` as a slice_op over this node's domain.

    Bug fix: this previously built ``slice_op(operator.lt, other, self)``,
    which evaluates ``other < self`` — a strict greater-than — instead of
    ``self <= other``. Now uses ``operator.le`` with the ``(self, other)``
    operand order, matching Node.__le__ and every sibling comparison dunder.
    """
    return slice_op(operator.le, self, other, graph=self.graph)
def __ne__(self, other):
return slice_op(operator.ne, self, other, graph=self.graph)
def __gt__(self, other):
return slice_op(operator.gt, self, other, graph=self.graph)
def __ge__(self, other):
return slice_op(operator.ge, self, other, graph=self.graph)
def __repr__(self):
return "<var_index name=%s, index=%s>" % (self.name, self.args)
class slice_op(Node):
def __init__(self, target, *args, **kwargs):
    """Create an element-wise operation node applying `target` over a domain.

    The result domain is supplied explicitly via `domain`, or derived by
    combining the index domains of the two operands.
    """
    if "domain" in kwargs:
        domain = tuple(kwargs.pop("domain")) if isinstance(kwargs["domain"], list) else kwargs.pop("domain")
    else:
        all_args = _flatten_iterable(args)
        slice1_var, slice1_idx, slice2_var, slice2_idx = self.get_index_nodes(all_args[0], all_args[1])
        domain = slice1_idx.combine_set_domains(slice2_idx)
    # op_name is always derived from the target below; drop any caller-supplied one.
    if "op_name" in kwargs:
        kwargs.pop("op_name")
    # Store the fully-qualified path so the target can be re-imported later.
    target_name = f"{target.__module__}.{target.__name__}"
    super(slice_op, self).__init__(*args, target=target_name, domain=domain, op_name=f"slice_{target.__name__}", **kwargs)
    self.target = target
@property
def domain(self):
return self.kwargs["domain"]
def __getitem__(self, key):
    """Index into this slice_op's result.

    An empty key returns the op itself. With a finalized, materialized
    shape, the child node named ``{self.name}{key}`` is looked up directly;
    otherwise a symbolic var_index is built (reusing an existing graph node
    of the same derived name when present).
    """
    if isinstance(key, (tuple, list, np.ndarray)) and len(key) == 0:
        return self
    elif self.is_shape_finalized() and len(self.nodes) > 0:
        if isinstance(key, (int, Node)):
            key = tuple([key])
        if len(key) != len(self.shape):
            raise KeyError(f"Invalid key shape for {self.name}:\n"
                           f"Shape: {self.shape}\n"
                           f"Key: {key}")
        if isinstance(key, list):
            key = tuple(key)
        # Child nodes are registered under "<op name><key tuple>".
        name = f"{self.name}{key}"
        if name not in self.nodes.keys():
            raise KeyError(f"{name} not in {self.name} keys:\n"
                           f"Node keys: {list(self.nodes.keys())}")
        ret = self.nodes[name]
        return ret
    else:
        # Build a printable name from the key, using node names for Node keys.
        name = []
        if isinstance(key, Node):
            name.append(key.name)
        elif hasattr(key, "__len__") and not isinstance(key, str):
            for k in key:
                if isinstance(k, Node):
                    name.append(k.name)
                else:
                    name.append(k)
        else:
            name.append(key)
        name = tuple(name)
        # NOTE(review): relies on self.var — presumably provided by the Node
        # base class; confirm, as slice_op itself defines no `var` property.
        name = self.var.name + str(name)
        if name in self.graph.nodes:
            return self.graph.nodes[name]
        elif isinstance(key, (list)):
            return var_index(self, key, name=name, graph=self.graph)
        elif isinstance(key, tuple):
            return var_index(self, list(key), name=name, graph=self.graph)
        else:
            return var_index(self, [key], name=name, graph=self.graph)
def set_shape(self, shape=None, init=False):
    """Set this node's shape.

    If `shape` is all integers and its total size matches the domain size,
    it is adopted directly (normalized to a tuple). Otherwise the shape is
    rebuilt per-dimension from the domain's dom_set, preferring any usable
    entry of `shape` (func_op or integral kept as-is, floats truncated to
    int); var_index entries contribute their domain.

    Args:
        shape: tuple or list of dimension sizes (required; asserted).
        init: accepted for interface compatibility; unused here.
    """
    s = []
    assert isinstance(shape, (tuple, list))
    # np.prod replaces np.product, a deprecated alias removed in NumPy 2.0.
    if all([isinstance(sv, Integral) for sv in shape]) and len(self.domain) == np.prod(shape) and len(shape) > 0:
        self._shape = shape if isinstance(shape, tuple) else tuple(shape)
    else:
        for idx, d in enumerate(self.domain.dom_set):
            if shape and isinstance(shape[idx], (func_op, Integral)):
                s.append(shape[idx])
            elif shape and isinstance(shape[idx], float):
                s.append(int(shape[idx]))
            elif isinstance(d, float):
                s.append(int(d))
            elif isinstance(d, var_index):
                s.append(d.domain)
            else:
                s.append(d)
        self._shape = tuple(s)
def is_scalar(self, val):
return not isinstance(val, np.ndarray) or (len(val.shape) == 1 and val.shape[0] == 1)
def scalar_result(self):
return False
def _evaluate(self, op1, op2, context=None, **kwargs):
    """Apply the target operator to the two evaluated operands.

    Scalar operands are combined directly; array operands are first
    gathered into the shape implied by this op's combined domain so the
    target sees aligned arrays.
    """
    if self.is_scalar(op1) or self.is_scalar(op2):
        value = self.target(op1, op2)
    else:
        arg0_dom = self.args[0].domain
        arg1_dom = self.args[1].domain
        # Map each operand's own domain onto this op's combined domain;
        # non-Node operands contribute no index mapping.
        op1_idx = self.domain.map_sub_domain(arg0_dom) if isinstance(self.args[0], Node) else tuple([])
        op2_idx = self.domain.map_sub_domain(arg1_dom) if isinstance(self.args[1], Node) else tuple([])
        # Gather operand values in domain order and reshape for the op.
        op1 = np.asarray(list(map(lambda x: op1[x], op1_idx))).reshape(self.domain.computed_shape)
        op2 = np.asarray(list(map(lambda x: op2[x], op2_idx))).reshape(self.domain.computed_shape)
        value = self.target(op1, op2)
    return value
def get_index_nodes(self, slice1_var=None, slice2_var=None):
    """Resolve the two operands and their index domains.

    When both operands are omitted they are taken from ``self.args``.
    Sliced ops, var indices, grouped nodes and raw index nodes contribute
    their own domain; any other operand is treated as domain-less and gets
    an empty Domain.
    """
    if slice1_var is None and slice2_var is None:
        slice1_var, slice2_var = self.args

    def domain_of(operand):
        # Nodes that carry an index domain expose it via `.domain`.
        if isinstance(operand, (slice_op, var_index)) or _is_node_type_instance(operand, "GroupNode"):
            return operand.domain
        if _is_node_type_instance(operand, "index"):
            return operand.domain
        return Domain(tuple([]))

    return slice1_var, domain_of(slice1_var), slice2_var, domain_of(slice2_var)
def __add__(self, other):
return slice_op(operator.add, self, other, graph=self.graph)
def __radd__(self, other):
return slice_op(operator.add, other, self, graph=self.graph)
def __sub__(self, other):
return slice_op(operator.sub, self, other, graph=self.graph)
def __rsub__(self, other):
return slice_op(operator.sub, other, self, graph=self.graph)
def __pow__(self, other):
return slice_op(builtins.pow, self, other, graph=self.graph)
def __rpow__(self, other):
return slice_op(builtins.pow, other, self, graph=self.graph)
def __mul__(self, other):
return slice_op(operator.mul, self, other, graph=self.graph)
def __rmul__(self, other):
return slice_op(operator.mul, other, self, graph=self.graph)
def __truediv__(self, other):
return slice_op(operator.truediv, self, other, graph=self.graph)
def __rtruediv__(self, other):
return slice_op(operator.truediv, other, self, graph=self.graph)
def __floordiv__(self, other):
return slice_op(operator.floordiv, self, other, graph=self.graph)
def __rfloordiv__(self, other):
return slice_op(operator.floordiv, other, self, graph=self.graph)
def __mod__(self, other):
return slice_op(operator.mod, self, other, graph=self.graph)
def __rmod__(self, other):
return slice_op(operator.mod, other, self, graph=self.graph)
def __lshift__(self, other):
return slice_op(operator.lshift, self, other, graph=self.graph)
def __rlshift__(self, other):
return slice_op(operator.lshift, other, self, graph=self.graph)
def __rshift__(self, other):
return slice_op(operator.rshift, self, other, graph=self.graph)
def __rrshift__(self, other):
return slice_op(operator.rshift, other, self, graph=self.graph)
def __and__(self, other):
return slice_op(operator.and_, self, other, graph=self.graph)
def __rand__(self, other):
return slice_op(operator.and_, other, self, graph=self.graph)
def __or__(self, other):
return slice_op(operator.or_, self, other, graph=self.graph)
def __ror__(self, other):
return slice_op(operator.or_, other, self, graph=self.graph)
def __xor__(self, other):
return slice_op(operator.xor, self, other, graph=self.graph)
def __rxor__(self, other):
return slice_op(operator.xor, other, self, graph=self.graph)
def __lt__(self, other):
return slice_op(operator.lt, self, other, graph=self.graph)
def __le__(self, other):
    """Element-wise ``self <= other`` as a slice_op over this node's domain.

    Bug fix: this previously built ``slice_op(operator.lt, other, self)``,
    which evaluates ``other < self`` — a strict greater-than — instead of
    ``self <= other``. Now uses ``operator.le`` with the ``(self, other)``
    operand order, matching Node.__le__ and every sibling comparison dunder.
    """
    return slice_op(operator.le, self, other, graph=self.graph)
def __ne__(self, other):
return slice_op(operator.ne, self, other, graph=self.graph)
def __gt__(self, other):
return slice_op(operator.gt, self, other, graph=self.graph)
def __ge__(self, other):
return slice_op(operator.ge, self, other, graph=self.graph)
def __repr__(self):
return "<slice_%s '%s'>" % (self.target.__name__, self.name)
class func_op(Node):
    """Graph node that lazily applies an arbitrary callable to its arguments."""
    def __init__(self, target, *args, **kwargs):
        """Wrap `target` as a graph node over `args`.

        The op name defaults to the callable's name; for binary calls the
        result domain is derived by combining the operands' index domains.
        """
        kwargs["op_name"] = kwargs["op_name"] if "op_name" in kwargs \
            else f"{target.__name__}"
        if "domain" in kwargs:
            domain = tuple(kwargs.pop("domain")) if isinstance(kwargs["domain"], list) else kwargs.pop("domain")
        elif len(args) == 2:
            # Binary op: combine both operand domains into the result domain.
            all_args = _flatten_iterable(args)
            slice1_var, slice1_idx, slice2_var, slice2_idx = self.get_index_nodes(all_args[0], all_args[1])
            domain = slice1_idx.combine_set_domains(slice2_idx)
        else:
            domain = Domain(tuple([]))
        self._target = None
        super(func_op, self).__init__(*args, target=f"{target.__module__}.{target.__name__}", domain=domain, **kwargs)
        self.target = target
        self.added_attrs += ["domain", "target"]
    @property
    def target(self):
        # The wrapped callable invoked at evaluation time.
        return self._target
    @target.setter
    def target(self, fnc):
        # Keep op_name and the serialized target path in sync with the callable.
        self._target = fnc
        self.op_name = f"{fnc.__name__}"
        self.kwargs["target"] = f"{fnc.__module__}.{fnc.__name__}"
    def __getitem__(self, key):
        # func_op results are opaque to indexing; return the node itself.
        return self
    @property
    def domain(self):
        return self.kwargs["domain"]
    def get_index_nodes(self, slice1_var=None, slice2_var=None):
        """Return both operands and their index domains (empty Domain for scalars)."""
        if slice1_var is None and slice2_var is None:
            slice1_var, slice2_var = self.args
        if isinstance(slice1_var, (slice_op, var_index)) or _is_node_type_instance(slice1_var, "GroupNode"):
            slice1_idx = slice1_var.domain
        else:
            slice1_idx = Domain(tuple([]))
        if isinstance(slice2_var, (slice_op, var_index)) or _is_node_type_instance(slice2_var, "GroupNode"):
            slice2_idx = slice2_var.domain
        else:
            slice2_idx = Domain(tuple([]))
        return slice1_var, slice1_idx, slice2_var, slice2_idx
    def _evaluate(self, *args, **kwargs):
        # Strip graph bookkeeping attrs before delegating to the callable.
        for aa in list(kwargs.keys()):
            if aa in self.added_attrs:
                kwargs.pop(aa)
        return self.target(*args, **kwargs)
    def __call__(self, *args, **kwargs):
        return call(self, *args, **kwargs)
    def __repr__(self):
        return "<func_op '%s' target=%s args=<%d items>>" % \
            (self.name, self.kwargs["target"], len(self.args))
def nodeop(target=None, **kwargs):
    """Decorator turning a plain callable into a graph ``func_op`` factory.

    Usable bare (``@nodeop``) or with keyword options
    (``@nodeop(op_name=...)``); in the latter case the options are captured
    and the decorator re-applied once the target arrives.
    """
    if target is None:
        # Called with options only: defer until the real target is supplied.
        return functools.partial(nodeop, **kwargs)

    @functools.wraps(target)
    def build_node(*call_args, **call_kwargs):
        # Decorator-time kwargs are merged after call-time ones.
        return func_op(target, *call_args, **call_kwargs, **kwargs)

    return build_node
@nodeop
def call(func, *args, **kwargs):
    # Graph op that invokes `func` with the given arguments at evaluation time.
    return func(*args, **kwargs)
@contextlib.contextmanager
def control_dependencies(dependencies, graph=None):
    """Temporarily extend the active graph's control dependencies.

    Bug fixes:
    - The original ``del graph.dependencies[-len(dependencies):]`` cleared
      the ENTIRE dependency list when ``dependencies`` was empty, because
      ``lst[-0:]`` is ``lst[0:]``.
    - Cleanup was skipped when the with-body raised; it now runs in a
      ``finally`` so dependencies never leak past the block.
    """
    graph = Node.get_active_graph(graph)
    num_added = len(dependencies)
    graph.dependencies.extend(dependencies)
    try:
        yield
    finally:
        if num_added:
            del graph.dependencies[-num_added:]
abs_ = nodeop(builtins.abs)
dict_ = nodeop(builtins.dict)
help_ = nodeop(builtins.help)
min_ = nodeop(builtins.min)
setattr_ = nodeop(builtins.setattr)
all_ = nodeop(builtins.all)
dir_ = nodeop(builtins.dir)
hex_ = nodeop(builtins.hex)
next_ = nodeop(builtins.next)
slice_ = nodeop(builtins.slice)
any_ = nodeop(builtins.any)
divmod_ = nodeop(builtins.divmod)
id_ = nodeop(builtins.id)
object_ = nodeop(builtins.object)
sorted_ = nodeop(builtins.sorted)
ascii_ = nodeop(builtins.ascii)
enumerate_ = nodeop(builtins.enumerate)
input_ = nodeop(builtins.input)
oct_ = nodeop(builtins.oct)
staticmethod_ = nodeop(builtins.staticmethod)
bin_ = nodeop(builtins.bin)
eval_ = nodeop(builtins.eval)
int_ = nodeop(builtins.int)
open_ = nodeop(builtins.open)
str_ = nodeop(builtins.str)
bool_ = nodeop(builtins.bool)
exec_ = nodeop(builtins.exec)
isinstance_ = nodeop(builtins.isinstance)
ord_ = nodeop(builtins.ord)
sum_ = nodeop(builtins.sum)
bytearray_ = nodeop(builtins.bytearray)
filter_ = nodeop(builtins.filter)
issubclass_ = nodeop(builtins.issubclass)
pow_ = nodeop(builtins.pow)
super_ = nodeop(builtins.super)
bytes_ = nodeop(builtins.bytes)
float_ = nodeop(builtins.float)
iter_ = nodeop(builtins.iter)
print_ = nodeop(builtins.print)
tuple_ = nodeop(builtins.tuple)
callable_ = nodeop(builtins.callable)
format_ = nodeop(builtins.format)
len_ = nodeop(builtins.len)
property_ = nodeop(builtins.property)
type_ = nodeop(builtins.type)
chr_ = nodeop(builtins.chr)
frozenset_ = nodeop(builtins.frozenset)
list_ = nodeop(builtins.list)
range_ = nodeop(builtins.range)
vars_ = nodeop(builtins.vars)
classmethod_ = nodeop(builtins.classmethod)
getattr_ = nodeop(builtins.getattr)
locals_ = nodeop(builtins.locals)
repr_ = nodeop(builtins.repr)
zip_ = nodeop(builtins.zip)
compile_ = nodeop(builtins.compile)
globals_ = nodeop(builtins.globals)
map_ = nodeop(builtins.map)
reversed_ = nodeop(builtins.reversed)
complex_ = nodeop(builtins.complex)
hasattr_ = nodeop(builtins.hasattr)
max_ = nodeop(builtins.max)
round_ = nodeop(builtins.round)
delattr_ = nodeop(builtins.delattr)
hash_ = nodeop(builtins.hash)
memoryview_ = nodeop(builtins.memoryview)
set_ = nodeop(builtins.set)
add = nodeop(operator.add)
and_ = nodeop(operator.and_)
attrgetter = nodeop(operator.attrgetter)
concat = nodeop(operator.concat)
contains = nodeop(operator.contains)
countOf = nodeop(operator.countOf)
delitem = nodeop(operator.delitem)
eq = nodeop(operator.eq)
floordiv = nodeop(operator.floordiv)
ge = nodeop(operator.ge)
getitem = nodeop(operator.getitem)
gt = nodeop(operator.gt)
index = nodeop(operator.index)
indexOf = nodeop(operator.indexOf)
inv = nodeop(operator.inv)
invert = nodeop(operator.invert)
ior = nodeop(operator.ior)
ipow = nodeop(operator.ipow)
irshift = nodeop(operator.irshift)
is_ = nodeop(operator.is_)
is_not = nodeop(operator.is_not)
itemgetter = nodeop(operator.itemgetter)
le = nodeop(operator.le)
length_hint = nodeop(operator.length_hint)
lshift = nodeop(operator.lshift)
lt = nodeop(operator.lt)
matmul = nodeop(operator.matmul)
methodcaller = nodeop(operator.methodcaller)
mod = nodeop(operator.mod)
mul = nodeop(operator.mul)
ne = nodeop(operator.ne)
neg = nodeop(operator.neg)
not_ = nodeop(operator.not_)
or_ = nodeop(operator.or_)
pos = nodeop(operator.pos)
rshift = nodeop(operator.rshift)
setitem = nodeop(operator.setitem)
sub = nodeop(operator.sub)
truediv = nodeop(operator.truediv)
truth = nodeop(operator.truth)
xor = nodeop(operator.xor)
import_ = nodeop(importlib.import_module)
| true | true |
f73a68514b36d07bee3dc64335fe6985e7003dca | 654 | py | Python | f_monthpython/manage.py | koffi09/Parissssportif | 0ca2a57c27f15ffb389dbd350f0bb746751f2639 | [
"Apache-2.0"
] | null | null | null | f_monthpython/manage.py | koffi09/Parissssportif | 0ca2a57c27f15ffb389dbd350f0bb746751f2639 | [
"Apache-2.0"
] | null | null | null | f_monthpython/manage.py | koffi09/Parissssportif | 0ca2a57c27f15ffb389dbd350f0bb746751f2639 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Entry point: select the project settings module and hand the
    command line over to Django's management utility."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'f_monthpython.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with a chained, actionable message when Django is absent.
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 29.727273 | 78 | 0.663609 |
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'f_monthpython.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| true | true |
f73a6af3a877014f4954b7e193a6f76018104845 | 5,406 | py | Python | dbsp_drp/quicklook.py | finagle29/DBSP_DRP | d2f869f85e1425507dbc84e4e76fa44a6784f9d1 | [
"BSD-3-Clause"
] | 1 | 2021-05-17T23:28:25.000Z | 2021-05-17T23:28:25.000Z | dbsp_drp/quicklook.py | finagle29/DBSP_DRP | d2f869f85e1425507dbc84e4e76fa44a6784f9d1 | [
"BSD-3-Clause"
] | 23 | 2020-12-14T17:25:13.000Z | 2022-02-23T09:30:10.000Z | dbsp_drp/quicklook.py | finagle29/DBSP_DRP | d2f869f85e1425507dbc84e4e76fa44a6784f9d1 | [
"BSD-3-Clause"
] | null | null | null | import argparse
import os
import time
from typing import List, Optional
import glob
from multiprocessing import Process
import numpy as np
from astropy.io import fits
from pkg_resources import resource_filename
from pypeit.pypeitsetup import PypeItSetup
from pypeit.core import framematch
from pypeit import pypeit
from pypeit import fluxcalibrate
from pypeit.scripts import show_2dspec, show_1dspec
def get_cfg_lines(spectrograph: str) -> List[str]:
    """Build the PypeIt configuration lines for a quicklook reduction.

    Args:
        spectrograph: PypeIt spectrograph name, e.g. ``p200_dbsp_red``.

    Returns:
        The config file content, one line per list element: masters go to
        ``Master_<arm>``, cosmic-ray masking and bias frames are disabled,
        and extraction is restricted to fast boxcar with a single object pass.
    """
    arm = spectrograph.split('_')[-1]
    return [
        "[rdx]",
        f"spectrograph = {spectrograph}",
        "[calibrations]",
        f"master_dir = Master_{arm}",
        "raise_chk_error = False",
        "[scienceframe]",
        "[[process]]",
        "mask_cr = False",
        "[baseprocess]",
        "use_biasimage = False",
        "[reduce]",
        "[[extraction]]",
        "skip_optimal = True",
        "[[findobj]]",
        "skip_second_find = True",
    ]
def parse(options: Optional[List[str]] = None) -> argparse.Namespace:
    """Parse quicklook command-line arguments.

    Args:
        options: argument list to parse; ``None`` falls back to
            ``sys.argv`` (argparse's default behavior for ``args=None``).

    Returns:
        Namespace with ``fname`` (str) and ``no_show`` (bool) attributes.
    """
    parser = argparse.ArgumentParser(
        description="Quicklook for P200 DBSP",
        formatter_class=argparse.RawTextHelpFormatter,
    )
    parser.add_argument(
        "fname",
        type=str,
        help="file to take a quick look at, or else red/blue\n"
             "to just perform rough calibrations",
    )
    parser.add_argument(
        "--no-show",
        default=False,
        action="store_true",
        help="Set this flag to suppress opening of plots",
    )
    # parse_args(None) reads sys.argv itself, so no branching is needed.
    return parser.parse_args(options)
def main(args: argparse.Namespace):
    """Run the DBSP quicklook reduction.

    If ``args.fname`` is an existing science frame it is reduced and, when
    1D spectra are produced, flux-calibrated against an archived
    sensitivity function. Otherwise only calibrations are built from an
    arc and a flat found next to ``args.fname``.
    """
    t = time.perf_counter()
    # need an arc frame and a flat frame
    # NOTE(review): rstrip strips a character *set* — trailing digits plus
    # '.', 'f', 'i', 't', 's' — which can remove more than the numeric
    # suffix for some filenames; confirm intended.
    root = args.fname.rstrip('0123456789.fits')
    paths = glob.glob(f'{root}*.fits')
    # Arm is inferred from the filename: names containing 'red' use the red arm.
    spectrograph = 'p200_dbsp_red' if 'red' in os.path.basename(args.fname) else 'p200_dbsp_blue'
    arm = spectrograph.split('_')[-1]
    CFG_LINES = get_cfg_lines(spectrograph)
    flatimg = ""
    arcimg = ""
    sciimg = args.fname
    # If the named science frame does not exist, only build calibrations.
    calib_only = not os.path.isfile(sciimg)
    if calib_only:
        # Scan sibling FITS files for one flat and one arc frame.
        for path in paths:
            with fits.open(path) as hdul:
                if not flatimg:
                    if hdul[0].header['OBJECT'] == 'flat' or hdul[0].header['IMGTYPE'] == 'flat':
                        flatimg = path
                if not arcimg:
                    if hdul[0].header['OBJECT'] == 'arcs' or hdul[0].header['IMGTYPE'] == 'cal':
                        arcimg = path
                if flatimg and arcimg:
                    break
        if not (flatimg and arcimg):
            raise Exception(f"Could not find a flat and an arc frame in the same directory as {root}!")
        files = [arcimg, flatimg]
    else:
        files = [sciimg]
    # Build the PypeIt setup / frame table for the selected files.
    ps = PypeItSetup(files, path="./", spectrograph_name=spectrograph,
        cfg_lines = CFG_LINES)
    ps.build_fitstbl()
    # Assign frame types manually — we already know what each file is.
    bm = framematch.FrameTypeBitMask()
    file_bits = np.zeros(len(files), dtype=bm.minimum_dtype())
    if calib_only:
        file_bits[0] = bm.turn_on(file_bits[0], ['arc', 'tilt'])
        file_bits[1] = bm.turn_on(file_bits[1], ['pixelflat', 'trace', 'illumflat'])
    else:
        file_bits[0] = bm.turn_on(file_bits[0], 'science')
    # Reorder the type bits to match the fitstbl's row order.
    asrt = np.array([ps.fitstbl['filename'].data.tolist().index(os.path.basename(fname)) for fname in files])
    ps.fitstbl.set_frame_types(file_bits[asrt])
    ps.fitstbl.set_combination_groups()
    ps.fitstbl['setup'] = 'A'
    ofiles = ps.fitstbl.write_pypeit(configs='A', cfg_lines=CFG_LINES)
    # Run the reduction (or calibrations only) with cached masters.
    pypeIt = pypeit.PypeIt(ofiles[0], verbosity=0,
        reuse_masters=True, overwrite=True,
        logname='dbsp_ql.log', show=False, calib_only=calib_only)
    if calib_only:
        pypeIt.calib_all()
    else:
        pypeIt.reduce_all()
    pypeIt.build_qa()
    # Collect only the science outputs that actually exist on disk.
    output_spec2ds = list(filter(lambda f: os.path.isfile(os.path.join('Science', f)), [
        pypeIt.spec_output_file(i, True) \
            for i in range(len(pypeIt.fitstbl.table)) \
            if pypeIt.fitstbl.table[i]['frametype'] in ['science']
    ]))
    output_spec1ds = list(filter(lambda f: os.path.isfile(os.path.join('Science', f)), [
        pypeIt.spec_output_file(i) \
            for i in range(len(pypeIt.fitstbl.table)) \
            if pypeIt.fitstbl.table[i]['frametype'] in ['science']
    ]))
    # Flux-calibrate 1D spectra against the archived sensitivity function.
    if output_spec1ds and not calib_only:
        sensfiles = [resource_filename("dbsp_drp", f"data/sens_{arm}_archived.fits")]
        FxCalib = fluxcalibrate.FluxCalibrate.get_instance(output_spec1ds, sensfiles, par=ps.par['fluxcalib'])
    print(f"Time elapsed: {time.perf_counter() - t}s.")
    # Optionally open 2D and per-extension 1D viewers in background processes.
    if not calib_only and not args.no_show:
        p1 = Process(target = show_spec2d_helper, args=(output_spec2ds[0],))
        p1.start()
        if output_spec1ds:
            with fits.open(output_spec1ds[0]) as hdul:
                # First and last HDUs are not spectra; the rest are extensions.
                specs = len(hdul) - 2
            parr = [ None ] * specs
            for i in range(specs):
                parr[i] = Process(target = show_spec1d_helper,
                    args=(str(i), output_spec1ds[0]))
                parr[i].start()
def show_spec2d_helper(file):
    # Process target: open the PypeIt 2D spectrum viewer on `file`.
    return show_2dspec.Show2DSpec.main(show_2dspec.Show2DSpec.parse_args([file]))
def show_spec1d_helper(exten, file):
    # Process target: open the PypeIt 1D spectrum viewer on extension
    # `exten` of `file`, displaying the fluxed boxcar extraction.
    return show_1dspec.Show1DSpec.main(
        show_1dspec.Show1DSpec.parse_args(['--extract', 'BOX', '--exten', exten,
            '--flux', file])
    )
| 35.103896 | 110 | 0.621532 | import argparse
import os
import time
from typing import List, Optional
import glob
from multiprocessing import Process
import numpy as np
from astropy.io import fits
from pkg_resources import resource_filename
from pypeit.pypeitsetup import PypeItSetup
from pypeit.core import framematch
from pypeit import pypeit
from pypeit import fluxcalibrate
from pypeit.scripts import show_2dspec, show_1dspec
def get_cfg_lines(spectrograph: str) -> List[str]:
cfg_lines = [
"[rdx]",
f"spectrograph = {spectrograph}",
"[calibrations]",
f"master_dir = Master_{spectrograph.split('_')[-1]}",
"raise_chk_error = False",
"[scienceframe]",
"[[process]]",
"mask_cr = False",
"[baseprocess]",
"use_biasimage = False",
"[reduce]",
"[[extraction]]",
"skip_optimal = True",
"[[findobj]]",
"skip_second_find = True"
]
return cfg_lines
def parse(options: Optional[List[str]] = None) -> argparse.Namespace:
argparser = argparse.ArgumentParser(description="Quicklook for P200 DBSP",
formatter_class=argparse.RawTextHelpFormatter)
argparser.add_argument("fname", type=str, help="file to take a quick look at, or else red/blue\n"
"to just perform rough calibrations")
argparser.add_argument("--no-show", default=False, action="store_true",
help="Set this flag to suppress opening of plots")
return argparser.parse_args() if options is None else argparser.parse_args(options)
def main(args: argparse.Namespace):
t = time.perf_counter()
root = args.fname.rstrip('0123456789.fits')
paths = glob.glob(f'{root}*.fits')
spectrograph = 'p200_dbsp_red' if 'red' in os.path.basename(args.fname) else 'p200_dbsp_blue'
arm = spectrograph.split('_')[-1]
CFG_LINES = get_cfg_lines(spectrograph)
flatimg = ""
arcimg = ""
sciimg = args.fname
calib_only = not os.path.isfile(sciimg)
if calib_only:
for path in paths:
with fits.open(path) as hdul:
if not flatimg:
if hdul[0].header['OBJECT'] == 'flat' or hdul[0].header['IMGTYPE'] == 'flat':
flatimg = path
if not arcimg:
if hdul[0].header['OBJECT'] == 'arcs' or hdul[0].header['IMGTYPE'] == 'cal':
arcimg = path
if flatimg and arcimg:
break
if not (flatimg and arcimg):
raise Exception(f"Could not find a flat and an arc frame in the same directory as {root}!")
files = [arcimg, flatimg]
else:
files = [sciimg]
ps = PypeItSetup(files, path="./", spectrograph_name=spectrograph,
cfg_lines = CFG_LINES)
ps.build_fitstbl()
bm = framematch.FrameTypeBitMask()
file_bits = np.zeros(len(files), dtype=bm.minimum_dtype())
if calib_only:
file_bits[0] = bm.turn_on(file_bits[0], ['arc', 'tilt'])
file_bits[1] = bm.turn_on(file_bits[1], ['pixelflat', 'trace', 'illumflat'])
else:
file_bits[0] = bm.turn_on(file_bits[0], 'science')
asrt = np.array([ps.fitstbl['filename'].data.tolist().index(os.path.basename(fname)) for fname in files])
ps.fitstbl.set_frame_types(file_bits[asrt])
ps.fitstbl.set_combination_groups()
ps.fitstbl['setup'] = 'A'
ofiles = ps.fitstbl.write_pypeit(configs='A', cfg_lines=CFG_LINES)
pypeIt = pypeit.PypeIt(ofiles[0], verbosity=0,
reuse_masters=True, overwrite=True,
logname='dbsp_ql.log', show=False, calib_only=calib_only)
if calib_only:
pypeIt.calib_all()
else:
pypeIt.reduce_all()
pypeIt.build_qa()
output_spec2ds = list(filter(lambda f: os.path.isfile(os.path.join('Science', f)), [
pypeIt.spec_output_file(i, True) \
for i in range(len(pypeIt.fitstbl.table)) \
if pypeIt.fitstbl.table[i]['frametype'] in ['science']
]))
output_spec1ds = list(filter(lambda f: os.path.isfile(os.path.join('Science', f)), [
pypeIt.spec_output_file(i) \
for i in range(len(pypeIt.fitstbl.table)) \
if pypeIt.fitstbl.table[i]['frametype'] in ['science']
]))
if output_spec1ds and not calib_only:
sensfiles = [resource_filename("dbsp_drp", f"data/sens_{arm}_archived.fits")]
FxCalib = fluxcalibrate.FluxCalibrate.get_instance(output_spec1ds, sensfiles, par=ps.par['fluxcalib'])
print(f"Time elapsed: {time.perf_counter() - t}s.")
if not calib_only and not args.no_show:
p1 = Process(target = show_spec2d_helper, args=(output_spec2ds[0],))
p1.start()
if output_spec1ds:
with fits.open(output_spec1ds[0]) as hdul:
specs = len(hdul) - 2
parr = [ None ] * specs
for i in range(specs):
parr[i] = Process(target = show_spec1d_helper,
args=(str(i), output_spec1ds[0]))
parr[i].start()
def show_spec2d_helper(file):
return show_2dspec.Show2DSpec.main(show_2dspec.Show2DSpec.parse_args([file]))
def show_spec1d_helper(exten, file):
return show_1dspec.Show1DSpec.main(
show_1dspec.Show1DSpec.parse_args(['--extract', 'BOX', '--exten', exten,
'--flux', file])
)
| true | true |
f73a6b1b0e4076f175322cf2155371734efb6c83 | 10,310 | py | Python | pybind/slxos/v16r_1_00b/interface/ethernet/ipv6/ipv6_config/address/link_local_config/__init__.py | shivharis/pybind | 4e1c6d54b9fd722ccec25546ba2413d79ce337e6 | [
"Apache-2.0"
] | null | null | null | pybind/slxos/v16r_1_00b/interface/ethernet/ipv6/ipv6_config/address/link_local_config/__init__.py | shivharis/pybind | 4e1c6d54b9fd722ccec25546ba2413d79ce337e6 | [
"Apache-2.0"
] | null | null | null | pybind/slxos/v16r_1_00b/interface/ethernet/ipv6/ipv6_config/address/link_local_config/__init__.py | shivharis/pybind | 4e1c6d54b9fd722ccec25546ba2413d79ce337e6 | [
"Apache-2.0"
] | 1 | 2021-11-05T22:15:42.000Z | 2021-11-05T22:15:42.000Z |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class link_local_config(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-interface - based on the path /interface/ethernet/ipv6/ipv6-config/address/link-local-config. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__link_local_address','__link_local',)
_yang_name = 'link-local-config'
_rest_name = ''
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__link_local = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="link-local", rest_name="link-local", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Configure ipv6 address to override automatically computed link-local address'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-config', defining_module='brocade-ipv6-config', yang_type='empty', is_config=True)
self.__link_local_address = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="link-local-address", rest_name="link-local-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-config', defining_module='brocade-ipv6-config', yang_type='inet:ipv6-address', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'interface', u'ethernet', u'ipv6', u'ipv6-config', u'address', u'link-local-config']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'interface', u'Ethernet', u'ipv6', u'address']
def _get_link_local_address(self):
"""
Getter method for link_local_address, mapped from YANG variable /interface/ethernet/ipv6/ipv6_config/address/link_local_config/link_local_address (inet:ipv6-address)
"""
return self.__link_local_address
def _set_link_local_address(self, v, load=False):
"""
Setter method for link_local_address, mapped from YANG variable /interface/ethernet/ipv6/ipv6_config/address/link_local_config/link_local_address (inet:ipv6-address)
If this variable is read-only (config: false) in the
source YANG file, then _set_link_local_address is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_link_local_address() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="link-local-address", rest_name="link-local-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-config', defining_module='brocade-ipv6-config', yang_type='inet:ipv6-address', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """link_local_address must be of a type compatible with inet:ipv6-address""",
'defined-type': "inet:ipv6-address",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="link-local-address", rest_name="link-local-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-config', defining_module='brocade-ipv6-config', yang_type='inet:ipv6-address', is_config=True)""",
})
self.__link_local_address = t
if hasattr(self, '_set'):
self._set()
def _unset_link_local_address(self):
self.__link_local_address = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="link-local-address", rest_name="link-local-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-config', defining_module='brocade-ipv6-config', yang_type='inet:ipv6-address', is_config=True)
def _get_link_local(self):
"""
Getter method for link_local, mapped from YANG variable /interface/ethernet/ipv6/ipv6_config/address/link_local_config/link_local (empty)
"""
return self.__link_local
def _set_link_local(self, v, load=False):
"""
Setter method for link_local, mapped from YANG variable /interface/ethernet/ipv6/ipv6_config/address/link_local_config/link_local (empty)
If this variable is read-only (config: false) in the
source YANG file, then _set_link_local is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_link_local() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="link-local", rest_name="link-local", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Configure ipv6 address to override automatically computed link-local address'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-config', defining_module='brocade-ipv6-config', yang_type='empty', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """link_local must be of a type compatible with empty""",
'defined-type': "empty",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="link-local", rest_name="link-local", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Configure ipv6 address to override automatically computed link-local address'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-config', defining_module='brocade-ipv6-config', yang_type='empty', is_config=True)""",
})
self.__link_local = t
if hasattr(self, '_set'):
self._set()
def _unset_link_local(self):
self.__link_local = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="link-local", rest_name="link-local", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Configure ipv6 address to override automatically computed link-local address'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-config', defining_module='brocade-ipv6-config', yang_type='empty', is_config=True)
link_local_address = __builtin__.property(_get_link_local_address, _set_link_local_address)
link_local = __builtin__.property(_get_link_local, _set_link_local)
_pyangbind_elements = {'link_local_address': link_local_address, 'link_local': link_local, }
| 65.253165 | 730 | 0.705141 |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class link_local_config(PybindBase):
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__link_local_address','__link_local',)
_yang_name = 'link-local-config'
_rest_name = ''
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__link_local = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="link-local", rest_name="link-local", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Configure ipv6 address to override automatically computed link-local address'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-config', defining_module='brocade-ipv6-config', yang_type='empty', is_config=True)
self.__link_local_address = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="link-local-address", rest_name="link-local-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-config', defining_module='brocade-ipv6-config', yang_type='inet:ipv6-address', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'interface', u'ethernet', u'ipv6', u'ipv6-config', u'address', u'link-local-config']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'interface', u'Ethernet', u'ipv6', u'address']
def _get_link_local_address(self):
return self.__link_local_address
def _set_link_local_address(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="link-local-address", rest_name="link-local-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-config', defining_module='brocade-ipv6-config', yang_type='inet:ipv6-address', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """link_local_address must be of a type compatible with inet:ipv6-address""",
'defined-type': "inet:ipv6-address",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="link-local-address", rest_name="link-local-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-config', defining_module='brocade-ipv6-config', yang_type='inet:ipv6-address', is_config=True)""",
})
self.__link_local_address = t
if hasattr(self, '_set'):
self._set()
def _unset_link_local_address(self):
self.__link_local_address = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="link-local-address", rest_name="link-local-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-config', defining_module='brocade-ipv6-config', yang_type='inet:ipv6-address', is_config=True)
def _get_link_local(self):
return self.__link_local
def _set_link_local(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="link-local", rest_name="link-local", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Configure ipv6 address to override automatically computed link-local address'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-config', defining_module='brocade-ipv6-config', yang_type='empty', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """link_local must be of a type compatible with empty""",
'defined-type': "empty",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="link-local", rest_name="link-local", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Configure ipv6 address to override automatically computed link-local address'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-config', defining_module='brocade-ipv6-config', yang_type='empty', is_config=True)""",
})
self.__link_local = t
if hasattr(self, '_set'):
self._set()
def _unset_link_local(self):
self.__link_local = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="link-local", rest_name="link-local", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Configure ipv6 address to override automatically computed link-local address'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-config', defining_module='brocade-ipv6-config', yang_type='empty', is_config=True)
link_local_address = __builtin__.property(_get_link_local_address, _set_link_local_address)
link_local = __builtin__.property(_get_link_local, _set_link_local)
_pyangbind_elements = {'link_local_address': link_local_address, 'link_local': link_local, }
| true | true |
f73a6c138d64413a8b39866ab2b3401201615657 | 477 | py | Python | recipes/migrations/0004_recipe_difficulty.py | sergeant-savage/my-recipe-app | cb1b5c05928689aed2c1637d8b4cf1ab08daf4b6 | [
"MIT"
] | 1 | 2021-08-11T11:43:06.000Z | 2021-08-11T11:43:06.000Z | recipes/migrations/0004_recipe_difficulty.py | sergeant-savage/my-recipe-app | cb1b5c05928689aed2c1637d8b4cf1ab08daf4b6 | [
"MIT"
] | 8 | 2021-08-11T00:55:32.000Z | 2021-08-15T20:48:59.000Z | recipes/migrations/0004_recipe_difficulty.py | sergeant-savage/my-recipe-app | cb1b5c05928689aed2c1637d8b4cf1ab08daf4b6 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.4 on 2021-07-01 01:53
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('recipes', '0003_alter_recipe_description'),
]
operations = [
migrations.AddField(
model_name='recipe',
name='difficulty',
field=models.CharField(choices=[('easy', 'Easy'), ('medium', 'Medium'), ('hard', 'Hard')], default='easy', max_length=6),
),
]
| 25.105263 | 133 | 0.595388 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('recipes', '0003_alter_recipe_description'),
]
operations = [
migrations.AddField(
model_name='recipe',
name='difficulty',
field=models.CharField(choices=[('easy', 'Easy'), ('medium', 'Medium'), ('hard', 'Hard')], default='easy', max_length=6),
),
]
| true | true |
f73a6f7edb17988bfb903fbaeda2bd39ccfc91ab | 6,877 | py | Python | tasks/dogstatsd.py | confluentinc/datadog-agent | ea7982569bdf0c62c8adf00e2c5b855cc266f71c | [
"Apache-2.0"
] | 1 | 2021-02-18T21:23:52.000Z | 2021-02-18T21:23:52.000Z | tasks/dogstatsd.py | confluentinc/datadog-agent | ea7982569bdf0c62c8adf00e2c5b855cc266f71c | [
"Apache-2.0"
] | null | null | null | tasks/dogstatsd.py | confluentinc/datadog-agent | ea7982569bdf0c62c8adf00e2c5b855cc266f71c | [
"Apache-2.0"
] | null | null | null | """
Dogstatsd tasks
"""
from __future__ import print_function, absolute_import
import os
import shutil
from distutils.dir_util import copy_tree
import invoke
from invoke import task
from invoke.exceptions import Exit
from .build_tags import get_build_tags, get_default_build_tags
from .utils import get_build_flags, bin_name, get_root
from .utils import REPO_PATH
from .go import deps
# constants
DOGSTATSD_BIN_PATH = os.path.join(".", "bin", "dogstatsd")
STATIC_BIN_PATH = os.path.join(".", "bin", "static")
MAX_BINARY_SIZE = 15 * 1024
DOGSTATSD_TAG = "datadog/dogstatsd:master"
DEFAULT_BUILD_TAGS = [
"zlib",
"docker",
"kubelet",
]
@task
def build(ctx, rebuild=False, race=False, static=False, build_include=None,
build_exclude=None, use_embedded_libs=False):
"""
Build Dogstatsd
"""
build_include = DEFAULT_BUILD_TAGS if build_include is None else build_include.split(",")
build_exclude = [] if build_exclude is None else build_exclude.split(",")
build_tags = get_build_tags(build_include, build_exclude)
ldflags, gcflags, env = get_build_flags(ctx, static=static, use_embedded_libs=use_embedded_libs)
bin_path = DOGSTATSD_BIN_PATH
if static:
bin_path = STATIC_BIN_PATH
cmd = "go build {race_opt} {build_type} -tags '{build_tags}' -o {bin_name} "
cmd += "-gcflags=\"{gcflags}\" -ldflags=\"{ldflags}\" {REPO_PATH}/cmd/dogstatsd/"
args = {
"race_opt": "-race" if race else "",
"build_type": "-a" if rebuild else "",
"build_tags": " ".join(build_tags),
"bin_name": os.path.join(bin_path, bin_name("dogstatsd")),
"gcflags": gcflags,
"ldflags": ldflags,
"REPO_PATH": REPO_PATH,
}
ctx.run(cmd.format(**args), env=env)
# Render the configuration file template
#
# We need to remove cross compiling bits if any because go generate must
# build and execute in the native platform
env = {
"GOOS": "",
"GOARCH": "",
}
cmd = "go generate {}/cmd/dogstatsd"
ctx.run(cmd.format(REPO_PATH), env=env)
refresh_assets(ctx)
@task
def refresh_assets(ctx):
"""
Clean up and refresh Collector's assets and config files
"""
# ensure DOGSTATSD_BIN_PATH exists
if not os.path.exists(DOGSTATSD_BIN_PATH):
os.mkdir(DOGSTATSD_BIN_PATH)
dist_folder = os.path.join(DOGSTATSD_BIN_PATH, "dist")
if os.path.exists(dist_folder):
shutil.rmtree(dist_folder)
copy_tree("./cmd/dogstatsd/dist/", dist_folder)
@task
def run(ctx, rebuild=False, race=False, build_include=None, build_exclude=None,
skip_build=False):
"""
Run Dogstatsd binary. Build the binary before executing, unless
--skip-build was passed.
"""
if not skip_build:
print("Building dogstatsd...")
build(ctx, rebuild=rebuild, race=race, build_include=build_include,
build_exclude=build_exclude)
target = os.path.join(DOGSTATSD_BIN_PATH, bin_name("dogstatsd"))
ctx.run("{} start".format(target))
@task
def system_tests(ctx, skip_build=False):
"""
Run the system testsuite.
"""
if not skip_build:
print("Building dogstatsd...")
build(ctx)
env = {
"DOGSTATSD_BIN": os.path.join(get_root(), DOGSTATSD_BIN_PATH, bin_name("dogstatsd")),
}
cmd = "go test -tags '{build_tags}' -v {REPO_PATH}/test/system/dogstatsd/"
args = {
"build_tags": " ".join(get_default_build_tags()),
"REPO_PATH": REPO_PATH,
}
ctx.run(cmd.format(**args), env=env)
@task
def size_test(ctx, skip_build=False):
"""
Run the size test for the static binary
"""
if not skip_build:
print("Building dogstatsd...")
build(ctx, static=True)
bin_path = os.path.join(STATIC_BIN_PATH, bin_name("dogstatsd"))
stat_info = os.stat(bin_path)
size = stat_info.st_size / 1024
if size > MAX_BINARY_SIZE:
print("DogStatsD static build size too big: {} kB".format(size))
print("This means your PR added big classes or dependencies in the packages dogstatsd uses")
raise Exit(1)
print("DogStatsD static build size OK: {} kB".format(size))
@task
def omnibus_build(ctx, log_level="info", base_dir=None, gem_path=None,
skip_deps=False):
"""
Build the Dogstatsd packages with Omnibus Installer.
"""
if not skip_deps:
deps(ctx)
# omnibus config overrides
overrides = []
# base dir (can be overridden through env vars, command line takes precedence)
base_dir = base_dir or os.environ.get("DSD_OMNIBUS_BASE_DIR")
if base_dir:
overrides.append("base_dir:{}".format(base_dir))
overrides_cmd = ""
if overrides:
overrides_cmd = "--override=" + " ".join(overrides)
with ctx.cd("omnibus"):
cmd = "bundle install"
if gem_path:
cmd += " --path {}".format(gem_path)
ctx.run(cmd)
omnibus = "bundle exec omnibus.bat" if invoke.platform.WINDOWS else "bundle exec omnibus"
cmd = "{omnibus} build dogstatsd --log-level={log_level} {overrides}"
args = {
"omnibus": omnibus,
"log_level": log_level,
"overrides": overrides_cmd
}
ctx.run(cmd.format(**args))
@task
def integration_tests(ctx, install_deps=False, race=False, remote_docker=False):
"""
Run integration tests for dogstatsd
"""
if install_deps:
deps(ctx)
test_args = {
"go_build_tags": " ".join(get_default_build_tags()),
"race_opt": "-race" if race else "",
"exec_opts": "",
}
if remote_docker:
test_args["exec_opts"] = "-exec \"inv docker.dockerize-test\""
go_cmd = 'go test {race_opt} -tags "{go_build_tags}" {exec_opts}'.format(**test_args)
prefixes = [
"./test/integration/dogstatsd/...",
]
for prefix in prefixes:
ctx.run("{} {}".format(go_cmd, prefix))
@task
def image_build(ctx, skip_build=False):
"""
Build the docker image
"""
import docker
client = docker.from_env()
src = os.path.join(STATIC_BIN_PATH, bin_name("dogstatsd"))
dst = os.path.join("Dockerfiles", "dogstatsd", "alpine", "static")
if not skip_build:
build(ctx, rebuild=True, static=True)
if not os.path.exists(src):
raise Exit(1)
if not os.path.exists(dst):
os.makedirs(dst)
shutil.copy(src, dst)
client.images.build(path="Dockerfiles/dogstatsd/alpine/", rm=True, tag=DOGSTATSD_TAG)
ctx.run("rm -rf Dockerfiles/dogstatsd/alpine/static")
@task
def clean(ctx):
"""
Remove temporary objects and binary artifacts
"""
# go clean
print("Executing go clean")
ctx.run("go clean")
# remove the bin/dogstatsd folder
print("Remove agent binary folder")
ctx.run("rm -rf ./bin/dogstatsd")
| 28.184426 | 100 | 0.644031 | from __future__ import print_function, absolute_import
import os
import shutil
from distutils.dir_util import copy_tree
import invoke
from invoke import task
from invoke.exceptions import Exit
from .build_tags import get_build_tags, get_default_build_tags
from .utils import get_build_flags, bin_name, get_root
from .utils import REPO_PATH
from .go import deps
DOGSTATSD_BIN_PATH = os.path.join(".", "bin", "dogstatsd")
STATIC_BIN_PATH = os.path.join(".", "bin", "static")
MAX_BINARY_SIZE = 15 * 1024
DOGSTATSD_TAG = "datadog/dogstatsd:master"
DEFAULT_BUILD_TAGS = [
"zlib",
"docker",
"kubelet",
]
@task
def build(ctx, rebuild=False, race=False, static=False, build_include=None,
build_exclude=None, use_embedded_libs=False):
build_include = DEFAULT_BUILD_TAGS if build_include is None else build_include.split(",")
build_exclude = [] if build_exclude is None else build_exclude.split(",")
build_tags = get_build_tags(build_include, build_exclude)
ldflags, gcflags, env = get_build_flags(ctx, static=static, use_embedded_libs=use_embedded_libs)
bin_path = DOGSTATSD_BIN_PATH
if static:
bin_path = STATIC_BIN_PATH
cmd = "go build {race_opt} {build_type} -tags '{build_tags}' -o {bin_name} "
cmd += "-gcflags=\"{gcflags}\" -ldflags=\"{ldflags}\" {REPO_PATH}/cmd/dogstatsd/"
args = {
"race_opt": "-race" if race else "",
"build_type": "-a" if rebuild else "",
"build_tags": " ".join(build_tags),
"bin_name": os.path.join(bin_path, bin_name("dogstatsd")),
"gcflags": gcflags,
"ldflags": ldflags,
"REPO_PATH": REPO_PATH,
}
ctx.run(cmd.format(**args), env=env)
env = {
"GOOS": "",
"GOARCH": "",
}
cmd = "go generate {}/cmd/dogstatsd"
ctx.run(cmd.format(REPO_PATH), env=env)
refresh_assets(ctx)
@task
def refresh_assets(ctx):
if not os.path.exists(DOGSTATSD_BIN_PATH):
os.mkdir(DOGSTATSD_BIN_PATH)
dist_folder = os.path.join(DOGSTATSD_BIN_PATH, "dist")
if os.path.exists(dist_folder):
shutil.rmtree(dist_folder)
copy_tree("./cmd/dogstatsd/dist/", dist_folder)
@task
def run(ctx, rebuild=False, race=False, build_include=None, build_exclude=None,
skip_build=False):
if not skip_build:
print("Building dogstatsd...")
build(ctx, rebuild=rebuild, race=race, build_include=build_include,
build_exclude=build_exclude)
target = os.path.join(DOGSTATSD_BIN_PATH, bin_name("dogstatsd"))
ctx.run("{} start".format(target))
@task
def system_tests(ctx, skip_build=False):
if not skip_build:
print("Building dogstatsd...")
build(ctx)
env = {
"DOGSTATSD_BIN": os.path.join(get_root(), DOGSTATSD_BIN_PATH, bin_name("dogstatsd")),
}
cmd = "go test -tags '{build_tags}' -v {REPO_PATH}/test/system/dogstatsd/"
args = {
"build_tags": " ".join(get_default_build_tags()),
"REPO_PATH": REPO_PATH,
}
ctx.run(cmd.format(**args), env=env)
@task
def size_test(ctx, skip_build=False):
if not skip_build:
print("Building dogstatsd...")
build(ctx, static=True)
bin_path = os.path.join(STATIC_BIN_PATH, bin_name("dogstatsd"))
stat_info = os.stat(bin_path)
size = stat_info.st_size / 1024
if size > MAX_BINARY_SIZE:
print("DogStatsD static build size too big: {} kB".format(size))
print("This means your PR added big classes or dependencies in the packages dogstatsd uses")
raise Exit(1)
print("DogStatsD static build size OK: {} kB".format(size))
@task
def omnibus_build(ctx, log_level="info", base_dir=None, gem_path=None,
skip_deps=False):
if not skip_deps:
deps(ctx)
overrides = []
base_dir = base_dir or os.environ.get("DSD_OMNIBUS_BASE_DIR")
if base_dir:
overrides.append("base_dir:{}".format(base_dir))
overrides_cmd = ""
if overrides:
overrides_cmd = "--override=" + " ".join(overrides)
with ctx.cd("omnibus"):
cmd = "bundle install"
if gem_path:
cmd += " --path {}".format(gem_path)
ctx.run(cmd)
omnibus = "bundle exec omnibus.bat" if invoke.platform.WINDOWS else "bundle exec omnibus"
cmd = "{omnibus} build dogstatsd --log-level={log_level} {overrides}"
args = {
"omnibus": omnibus,
"log_level": log_level,
"overrides": overrides_cmd
}
ctx.run(cmd.format(**args))
@task
def integration_tests(ctx, install_deps=False, race=False, remote_docker=False):
if install_deps:
deps(ctx)
test_args = {
"go_build_tags": " ".join(get_default_build_tags()),
"race_opt": "-race" if race else "",
"exec_opts": "",
}
if remote_docker:
test_args["exec_opts"] = "-exec \"inv docker.dockerize-test\""
go_cmd = 'go test {race_opt} -tags "{go_build_tags}" {exec_opts}'.format(**test_args)
prefixes = [
"./test/integration/dogstatsd/...",
]
for prefix in prefixes:
ctx.run("{} {}".format(go_cmd, prefix))
@task
def image_build(ctx, skip_build=False):
import docker
client = docker.from_env()
src = os.path.join(STATIC_BIN_PATH, bin_name("dogstatsd"))
dst = os.path.join("Dockerfiles", "dogstatsd", "alpine", "static")
if not skip_build:
build(ctx, rebuild=True, static=True)
if not os.path.exists(src):
raise Exit(1)
if not os.path.exists(dst):
os.makedirs(dst)
shutil.copy(src, dst)
client.images.build(path="Dockerfiles/dogstatsd/alpine/", rm=True, tag=DOGSTATSD_TAG)
ctx.run("rm -rf Dockerfiles/dogstatsd/alpine/static")
@task
def clean(ctx):
print("Executing go clean")
ctx.run("go clean")
print("Remove agent binary folder")
ctx.run("rm -rf ./bin/dogstatsd")
| true | true |
f73a6fd4dc154de2079875bbe0e3d6b7badbe1c9 | 53,759 | py | Python | sdk/python/pulumi_aws_native/events/_inputs.py | pulumi/pulumi-aws-native | 1ae4a4d9c2256b2a79ca536f8d8497b28d10e4c3 | [
"Apache-2.0"
] | 29 | 2021-09-30T19:32:07.000Z | 2022-03-22T21:06:08.000Z | sdk/python/pulumi_aws_native/events/_inputs.py | pulumi/pulumi-aws-native | 1ae4a4d9c2256b2a79ca536f8d8497b28d10e4c3 | [
"Apache-2.0"
] | 232 | 2021-09-30T19:26:26.000Z | 2022-03-31T23:22:06.000Z | sdk/python/pulumi_aws_native/events/_inputs.py | pulumi/pulumi-aws-native | 1ae4a4d9c2256b2a79ca536f8d8497b28d10e4c3 | [
"Apache-2.0"
] | 4 | 2021-11-10T19:42:01.000Z | 2022-02-05T10:15:49.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from ._enums import *
# Public API of this module: the generated input-argument classes for
# AWS::Events resources (Connection, Endpoint, EventBus, Rule).
__all__ = [
    'ConnectionApiKeyAuthParametersArgs',
    'ConnectionAuthParametersArgs',
    'ConnectionBasicAuthParametersArgs',
    'ConnectionClientParametersArgs',
    'ConnectionHttpParametersArgs',
    'ConnectionOAuthParametersArgs',
    'ConnectionParameterArgs',
    'EndpointEventBusArgs',
    'EndpointFailoverConfigArgs',
    'EndpointPrimaryArgs',
    'EndpointReplicationConfigArgs',
    'EndpointRoutingConfigArgs',
    'EndpointSecondaryArgs',
    'EventBusPolicyConditionArgs',
    'EventBusTagEntryArgs',
    'RuleAwsVpcConfigurationArgs',
    'RuleBatchArrayPropertiesArgs',
    'RuleBatchParametersArgs',
    'RuleBatchRetryStrategyArgs',
    'RuleCapacityProviderStrategyItemArgs',
    'RuleDeadLetterConfigArgs',
    'RuleEcsParametersArgs',
    'RuleHttpParametersArgs',
    'RuleInputTransformerArgs',
    'RuleKinesisParametersArgs',
    'RuleNetworkConfigurationArgs',
    'RulePlacementConstraintArgs',
    'RulePlacementStrategyArgs',
    'RuleRedshiftDataParametersArgs',
    'RuleRetryPolicyArgs',
    'RuleRunCommandParametersArgs',
    'RuleRunCommandTargetArgs',
    'RuleSageMakerPipelineParametersArgs',
    'RuleSageMakerPipelineParameterArgs',
    'RuleSqsParametersArgs',
    'RuleTagArgs',
    'RuleTargetArgs',
]
@pulumi.input_type
class ConnectionApiKeyAuthParametersArgs:
    def __init__(__self__, *,
                 api_key_name: pulumi.Input[str],
                 api_key_value: pulumi.Input[str]):
        """
        API-key authorization parameters for an ``AWS::Events::Connection``.

        Both fields are required keyword-only inputs; they are stored via
        ``pulumi.set`` under the snake_case keys read back by the getters below.

        :param pulumi.Input[str] api_key_name: The API key's name.
        :param pulumi.Input[str] api_key_value: The API key's value.
        """
        pulumi.set(__self__, "api_key_name", api_key_name)
        pulumi.set(__self__, "api_key_value", api_key_value)

    @property
    @pulumi.getter(name="apiKeyName")
    def api_key_name(self) -> pulumi.Input[str]:
        """The API key's name (maps to the CloudFormation property ``apiKeyName``)."""
        return pulumi.get(self, "api_key_name")

    @api_key_name.setter
    def api_key_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "api_key_name", value)

    @property
    @pulumi.getter(name="apiKeyValue")
    def api_key_value(self) -> pulumi.Input[str]:
        """The API key's value (maps to the CloudFormation property ``apiKeyValue``)."""
        return pulumi.get(self, "api_key_value")

    @api_key_value.setter
    def api_key_value(self, value: pulumi.Input[str]):
        pulumi.set(self, "api_key_value", value)
@pulumi.input_type
class ConnectionAuthParametersArgs:
    def __init__(__self__, *,
                 api_key_auth_parameters: Optional[pulumi.Input['ConnectionApiKeyAuthParametersArgs']] = None,
                 basic_auth_parameters: Optional[pulumi.Input['ConnectionBasicAuthParametersArgs']] = None,
                 invocation_http_parameters: Optional[pulumi.Input['ConnectionHttpParametersArgs']] = None,
                 o_auth_parameters: Optional[pulumi.Input['ConnectionOAuthParametersArgs']] = None):
        """
        Authorization parameters for an ``AWS::Events::Connection``.

        Every field is optional; only the ones that are not ``None`` are stored,
        so an attribute left unset is simply absent from the args object.
        (Which combinations of auth methods are valid is enforced by the
        service, not by this class — see the AWS::Events::Connection docs.)

        :param pulumi.Input['ConnectionApiKeyAuthParametersArgs'] api_key_auth_parameters: API-key auth settings.
        :param pulumi.Input['ConnectionBasicAuthParametersArgs'] basic_auth_parameters: HTTP basic-auth settings.
        :param pulumi.Input['ConnectionHttpParametersArgs'] invocation_http_parameters: Extra HTTP parameters added to invocations.
        :param pulumi.Input['ConnectionOAuthParametersArgs'] o_auth_parameters: OAuth auth settings.
        """
        if api_key_auth_parameters is not None:
            pulumi.set(__self__, "api_key_auth_parameters", api_key_auth_parameters)
        if basic_auth_parameters is not None:
            pulumi.set(__self__, "basic_auth_parameters", basic_auth_parameters)
        if invocation_http_parameters is not None:
            pulumi.set(__self__, "invocation_http_parameters", invocation_http_parameters)
        if o_auth_parameters is not None:
            pulumi.set(__self__, "o_auth_parameters", o_auth_parameters)

    @property
    @pulumi.getter(name="apiKeyAuthParameters")
    def api_key_auth_parameters(self) -> Optional[pulumi.Input['ConnectionApiKeyAuthParametersArgs']]:
        """API-key auth settings (CloudFormation property ``apiKeyAuthParameters``)."""
        return pulumi.get(self, "api_key_auth_parameters")

    @api_key_auth_parameters.setter
    def api_key_auth_parameters(self, value: Optional[pulumi.Input['ConnectionApiKeyAuthParametersArgs']]):
        pulumi.set(self, "api_key_auth_parameters", value)

    @property
    @pulumi.getter(name="basicAuthParameters")
    def basic_auth_parameters(self) -> Optional[pulumi.Input['ConnectionBasicAuthParametersArgs']]:
        """HTTP basic-auth settings (CloudFormation property ``basicAuthParameters``)."""
        return pulumi.get(self, "basic_auth_parameters")

    @basic_auth_parameters.setter
    def basic_auth_parameters(self, value: Optional[pulumi.Input['ConnectionBasicAuthParametersArgs']]):
        pulumi.set(self, "basic_auth_parameters", value)

    @property
    @pulumi.getter(name="invocationHttpParameters")
    def invocation_http_parameters(self) -> Optional[pulumi.Input['ConnectionHttpParametersArgs']]:
        """Extra HTTP parameters for invocations (CloudFormation property ``invocationHttpParameters``)."""
        return pulumi.get(self, "invocation_http_parameters")

    @invocation_http_parameters.setter
    def invocation_http_parameters(self, value: Optional[pulumi.Input['ConnectionHttpParametersArgs']]):
        pulumi.set(self, "invocation_http_parameters", value)

    @property
    @pulumi.getter(name="oAuthParameters")
    def o_auth_parameters(self) -> Optional[pulumi.Input['ConnectionOAuthParametersArgs']]:
        """OAuth auth settings (CloudFormation property ``oAuthParameters``)."""
        return pulumi.get(self, "o_auth_parameters")

    @o_auth_parameters.setter
    def o_auth_parameters(self, value: Optional[pulumi.Input['ConnectionOAuthParametersArgs']]):
        pulumi.set(self, "o_auth_parameters", value)
@pulumi.input_type
class ConnectionBasicAuthParametersArgs:
    """Required username/password pair used for a connection's basic authorization."""
    def __init__(__self__, *,
                 password: pulumi.Input[str],
                 username: pulumi.Input[str]):
        pulumi.set(__self__, "password", password)
        pulumi.set(__self__, "username", username)
    @property
    @pulumi.getter
    def password(self) -> pulumi.Input[str]:
        return pulumi.get(self, "password")
    @password.setter
    def password(self, value: pulumi.Input[str]):
        pulumi.set(self, "password", value)
    @property
    @pulumi.getter
    def username(self) -> pulumi.Input[str]:
        return pulumi.get(self, "username")
    @username.setter
    def username(self, value: pulumi.Input[str]):
        pulumi.set(self, "username", value)
@pulumi.input_type
class ConnectionClientParametersArgs:
    """Required client ID/secret pair; consumed by ConnectionOAuthParametersArgs as its client_parameters."""
    def __init__(__self__, *,
                 client_id: pulumi.Input[str],
                 client_secret: pulumi.Input[str]):
        pulumi.set(__self__, "client_id", client_id)
        pulumi.set(__self__, "client_secret", client_secret)
    @property
    @pulumi.getter(name="clientID")
    def client_id(self) -> pulumi.Input[str]:
        return pulumi.get(self, "client_id")
    @client_id.setter
    def client_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "client_id", value)
    @property
    @pulumi.getter(name="clientSecret")
    def client_secret(self) -> pulumi.Input[str]:
        return pulumi.get(self, "client_secret")
    @client_secret.setter
    def client_secret(self, value: pulumi.Input[str]):
        pulumi.set(self, "client_secret", value)
@pulumi.input_type
class ConnectionHttpParametersArgs:
    """Optional lists of body, header, and query-string ConnectionParameterArgs attached to a connection's HTTP requests."""
    def __init__(__self__, *,
                 body_parameters: Optional[pulumi.Input[Sequence[pulumi.Input['ConnectionParameterArgs']]]] = None,
                 header_parameters: Optional[pulumi.Input[Sequence[pulumi.Input['ConnectionParameterArgs']]]] = None,
                 query_string_parameters: Optional[pulumi.Input[Sequence[pulumi.Input['ConnectionParameterArgs']]]] = None):
        if body_parameters is not None:
            pulumi.set(__self__, "body_parameters", body_parameters)
        if header_parameters is not None:
            pulumi.set(__self__, "header_parameters", header_parameters)
        if query_string_parameters is not None:
            pulumi.set(__self__, "query_string_parameters", query_string_parameters)
    @property
    @pulumi.getter(name="bodyParameters")
    def body_parameters(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ConnectionParameterArgs']]]]:
        return pulumi.get(self, "body_parameters")
    @body_parameters.setter
    def body_parameters(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ConnectionParameterArgs']]]]):
        pulumi.set(self, "body_parameters", value)
    @property
    @pulumi.getter(name="headerParameters")
    def header_parameters(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ConnectionParameterArgs']]]]:
        return pulumi.get(self, "header_parameters")
    @header_parameters.setter
    def header_parameters(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ConnectionParameterArgs']]]]):
        pulumi.set(self, "header_parameters", value)
    @property
    @pulumi.getter(name="queryStringParameters")
    def query_string_parameters(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ConnectionParameterArgs']]]]:
        return pulumi.get(self, "query_string_parameters")
    @query_string_parameters.setter
    def query_string_parameters(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ConnectionParameterArgs']]]]):
        pulumi.set(self, "query_string_parameters", value)
@pulumi.input_type
class ConnectionOAuthParametersArgs:
    """OAuth configuration for a connection: required authorization endpoint, client parameters, and HTTP method, plus optional extra HTTP parameters."""
    def __init__(__self__, *,
                 authorization_endpoint: pulumi.Input[str],
                 client_parameters: pulumi.Input['ConnectionClientParametersArgs'],
                 http_method: pulumi.Input['ConnectionOAuthParametersHttpMethod'],
                 o_auth_http_parameters: Optional[pulumi.Input['ConnectionHttpParametersArgs']] = None):
        pulumi.set(__self__, "authorization_endpoint", authorization_endpoint)
        pulumi.set(__self__, "client_parameters", client_parameters)
        pulumi.set(__self__, "http_method", http_method)
        if o_auth_http_parameters is not None:
            pulumi.set(__self__, "o_auth_http_parameters", o_auth_http_parameters)
    @property
    @pulumi.getter(name="authorizationEndpoint")
    def authorization_endpoint(self) -> pulumi.Input[str]:
        return pulumi.get(self, "authorization_endpoint")
    @authorization_endpoint.setter
    def authorization_endpoint(self, value: pulumi.Input[str]):
        pulumi.set(self, "authorization_endpoint", value)
    @property
    @pulumi.getter(name="clientParameters")
    def client_parameters(self) -> pulumi.Input['ConnectionClientParametersArgs']:
        return pulumi.get(self, "client_parameters")
    @client_parameters.setter
    def client_parameters(self, value: pulumi.Input['ConnectionClientParametersArgs']):
        pulumi.set(self, "client_parameters", value)
    @property
    @pulumi.getter(name="httpMethod")
    def http_method(self) -> pulumi.Input['ConnectionOAuthParametersHttpMethod']:
        return pulumi.get(self, "http_method")
    @http_method.setter
    def http_method(self, value: pulumi.Input['ConnectionOAuthParametersHttpMethod']):
        pulumi.set(self, "http_method", value)
    @property
    @pulumi.getter(name="oAuthHttpParameters")
    def o_auth_http_parameters(self) -> Optional[pulumi.Input['ConnectionHttpParametersArgs']]:
        return pulumi.get(self, "o_auth_http_parameters")
    @o_auth_http_parameters.setter
    def o_auth_http_parameters(self, value: Optional[pulumi.Input['ConnectionHttpParametersArgs']]):
        pulumi.set(self, "o_auth_http_parameters", value)
@pulumi.input_type
class ConnectionParameterArgs:
    """A single key/value connection parameter; is_value_secret presumably marks the value as sensitive — confirm against provider docs."""
    def __init__(__self__, *,
                 key: pulumi.Input[str],
                 value: pulumi.Input[str],
                 is_value_secret: Optional[pulumi.Input[bool]] = None):
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "value", value)
        if is_value_secret is not None:
            pulumi.set(__self__, "is_value_secret", is_value_secret)
    @property
    @pulumi.getter
    def key(self) -> pulumi.Input[str]:
        return pulumi.get(self, "key")
    @key.setter
    def key(self, value: pulumi.Input[str]):
        pulumi.set(self, "key", value)
    @property
    @pulumi.getter
    def value(self) -> pulumi.Input[str]:
        return pulumi.get(self, "value")
    @value.setter
    def value(self, value: pulumi.Input[str]):
        pulumi.set(self, "value", value)
    @property
    @pulumi.getter(name="isValueSecret")
    def is_value_secret(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "is_value_secret")
    @is_value_secret.setter
    def is_value_secret(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_value_secret", value)
@pulumi.input_type
class EndpointEventBusArgs:
    """Identifies an event bus for an endpoint by its ARN (required)."""
    def __init__(__self__, *,
                 event_bus_arn: pulumi.Input[str]):
        pulumi.set(__self__, "event_bus_arn", event_bus_arn)
    @property
    @pulumi.getter(name="eventBusArn")
    def event_bus_arn(self) -> pulumi.Input[str]:
        return pulumi.get(self, "event_bus_arn")
    @event_bus_arn.setter
    def event_bus_arn(self, value: pulumi.Input[str]):
        pulumi.set(self, "event_bus_arn", value)
@pulumi.input_type
class EndpointFailoverConfigArgs:
    """Endpoint failover configuration pairing a required primary (health check) with a required secondary (route)."""
    def __init__(__self__, *,
                 primary: pulumi.Input['EndpointPrimaryArgs'],
                 secondary: pulumi.Input['EndpointSecondaryArgs']):
        pulumi.set(__self__, "primary", primary)
        pulumi.set(__self__, "secondary", secondary)
    @property
    @pulumi.getter
    def primary(self) -> pulumi.Input['EndpointPrimaryArgs']:
        return pulumi.get(self, "primary")
    @primary.setter
    def primary(self, value: pulumi.Input['EndpointPrimaryArgs']):
        pulumi.set(self, "primary", value)
    @property
    @pulumi.getter
    def secondary(self) -> pulumi.Input['EndpointSecondaryArgs']:
        return pulumi.get(self, "secondary")
    @secondary.setter
    def secondary(self, value: pulumi.Input['EndpointSecondaryArgs']):
        pulumi.set(self, "secondary", value)
@pulumi.input_type
class EndpointPrimaryArgs:
    """Primary failover target for an endpoint, identified by a required health-check string."""
    def __init__(__self__, *,
                 health_check: pulumi.Input[str]):
        pulumi.set(__self__, "health_check", health_check)
    @property
    @pulumi.getter(name="healthCheck")
    def health_check(self) -> pulumi.Input[str]:
        return pulumi.get(self, "health_check")
    @health_check.setter
    def health_check(self, value: pulumi.Input[str]):
        pulumi.set(self, "health_check", value)
@pulumi.input_type
class EndpointReplicationConfigArgs:
    """Replication configuration for an endpoint: a required EndpointReplicationState value."""
    def __init__(__self__, *,
                 state: pulumi.Input['EndpointReplicationState']):
        pulumi.set(__self__, "state", state)
    @property
    @pulumi.getter
    def state(self) -> pulumi.Input['EndpointReplicationState']:
        return pulumi.get(self, "state")
    @state.setter
    def state(self, value: pulumi.Input['EndpointReplicationState']):
        pulumi.set(self, "state", value)
@pulumi.input_type
class EndpointRoutingConfigArgs:
    """Routing configuration for an endpoint; wraps a required EndpointFailoverConfigArgs."""
    def __init__(__self__, *,
                 failover_config: pulumi.Input['EndpointFailoverConfigArgs']):
        pulumi.set(__self__, "failover_config", failover_config)
    @property
    @pulumi.getter(name="failoverConfig")
    def failover_config(self) -> pulumi.Input['EndpointFailoverConfigArgs']:
        return pulumi.get(self, "failover_config")
    @failover_config.setter
    def failover_config(self, value: pulumi.Input['EndpointFailoverConfigArgs']):
        pulumi.set(self, "failover_config", value)
@pulumi.input_type
class EndpointSecondaryArgs:
    """Secondary failover target for an endpoint, identified by a required route string."""
    def __init__(__self__, *,
                 route: pulumi.Input[str]):
        pulumi.set(__self__, "route", route)
    @property
    @pulumi.getter
    def route(self) -> pulumi.Input[str]:
        return pulumi.get(self, "route")
    @route.setter
    def route(self, value: pulumi.Input[str]):
        pulumi.set(self, "route", value)
@pulumi.input_type
class EventBusPolicyConditionArgs:
    """Optional key/type/value condition attached to an event bus policy; each field is set only when provided."""
    def __init__(__self__, *,
                 key: Optional[pulumi.Input[str]] = None,
                 type: Optional[pulumi.Input[str]] = None,
                 value: Optional[pulumi.Input[str]] = None):
        if key is not None:
            pulumi.set(__self__, "key", key)
        if type is not None:
            pulumi.set(__self__, "type", type)
        if value is not None:
            pulumi.set(__self__, "value", value)
    @property
    @pulumi.getter
    def key(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "key")
    @key.setter
    def key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "key", value)
    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "type")
    @type.setter
    def type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "type", value)
    @property
    @pulumi.getter
    def value(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "value")
    @value.setter
    def value(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "value", value)
@pulumi.input_type
class EventBusTagEntryArgs:
    """Required key/value tag entry applied to an event bus."""
    def __init__(__self__, *,
                 key: pulumi.Input[str],
                 value: pulumi.Input[str]):
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "value", value)
    @property
    @pulumi.getter
    def key(self) -> pulumi.Input[str]:
        return pulumi.get(self, "key")
    @key.setter
    def key(self, value: pulumi.Input[str]):
        pulumi.set(self, "key", value)
    @property
    @pulumi.getter
    def value(self) -> pulumi.Input[str]:
        return pulumi.get(self, "value")
    @value.setter
    def value(self, value: pulumi.Input[str]):
        pulumi.set(self, "value", value)
@pulumi.input_type
class RuleAwsVpcConfigurationArgs:
    """VPC settings for a rule's ECS target: required subnets, plus optional public-IP assignment and security groups."""
    def __init__(__self__, *,
                 subnets: pulumi.Input[Sequence[pulumi.Input[str]]],
                 assign_public_ip: Optional[pulumi.Input[str]] = None,
                 security_groups: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
        pulumi.set(__self__, "subnets", subnets)
        if assign_public_ip is not None:
            pulumi.set(__self__, "assign_public_ip", assign_public_ip)
        if security_groups is not None:
            pulumi.set(__self__, "security_groups", security_groups)
    @property
    @pulumi.getter
    def subnets(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
        return pulumi.get(self, "subnets")
    @subnets.setter
    def subnets(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
        pulumi.set(self, "subnets", value)
    @property
    @pulumi.getter(name="assignPublicIp")
    def assign_public_ip(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "assign_public_ip")
    @assign_public_ip.setter
    def assign_public_ip(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "assign_public_ip", value)
    @property
    @pulumi.getter(name="securityGroups")
    def security_groups(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        return pulumi.get(self, "security_groups")
    @security_groups.setter
    def security_groups(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "security_groups", value)
@pulumi.input_type
class RuleBatchArrayPropertiesArgs:
    """Optional array size for a rule's Batch job target (see RuleBatchParametersArgs.array_properties)."""
    def __init__(__self__, *,
                 size: Optional[pulumi.Input[int]] = None):
        if size is not None:
            pulumi.set(__self__, "size", size)
    @property
    @pulumi.getter
    def size(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "size")
    @size.setter
    def size(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "size", value)
@pulumi.input_type
class RuleBatchParametersArgs:
    """Batch job target for a rule: required job definition and job name, with optional array properties and retry strategy."""
    def __init__(__self__, *,
                 job_definition: pulumi.Input[str],
                 job_name: pulumi.Input[str],
                 array_properties: Optional[pulumi.Input['RuleBatchArrayPropertiesArgs']] = None,
                 retry_strategy: Optional[pulumi.Input['RuleBatchRetryStrategyArgs']] = None):
        pulumi.set(__self__, "job_definition", job_definition)
        pulumi.set(__self__, "job_name", job_name)
        if array_properties is not None:
            pulumi.set(__self__, "array_properties", array_properties)
        if retry_strategy is not None:
            pulumi.set(__self__, "retry_strategy", retry_strategy)
    @property
    @pulumi.getter(name="jobDefinition")
    def job_definition(self) -> pulumi.Input[str]:
        return pulumi.get(self, "job_definition")
    @job_definition.setter
    def job_definition(self, value: pulumi.Input[str]):
        pulumi.set(self, "job_definition", value)
    @property
    @pulumi.getter(name="jobName")
    def job_name(self) -> pulumi.Input[str]:
        return pulumi.get(self, "job_name")
    @job_name.setter
    def job_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "job_name", value)
    @property
    @pulumi.getter(name="arrayProperties")
    def array_properties(self) -> Optional[pulumi.Input['RuleBatchArrayPropertiesArgs']]:
        return pulumi.get(self, "array_properties")
    @array_properties.setter
    def array_properties(self, value: Optional[pulumi.Input['RuleBatchArrayPropertiesArgs']]):
        pulumi.set(self, "array_properties", value)
    @property
    @pulumi.getter(name="retryStrategy")
    def retry_strategy(self) -> Optional[pulumi.Input['RuleBatchRetryStrategyArgs']]:
        return pulumi.get(self, "retry_strategy")
    @retry_strategy.setter
    def retry_strategy(self, value: Optional[pulumi.Input['RuleBatchRetryStrategyArgs']]):
        pulumi.set(self, "retry_strategy", value)
@pulumi.input_type
class RuleBatchRetryStrategyArgs:
    """Optional retry attempt count for a rule's Batch job target."""
    def __init__(__self__, *,
                 attempts: Optional[pulumi.Input[int]] = None):
        if attempts is not None:
            pulumi.set(__self__, "attempts", attempts)
    @property
    @pulumi.getter
    def attempts(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "attempts")
    @attempts.setter
    def attempts(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "attempts", value)
@pulumi.input_type
class RuleCapacityProviderStrategyItemArgs:
    """One capacity-provider strategy entry for an ECS target: required provider name, optional base and weight."""
    def __init__(__self__, *,
                 capacity_provider: pulumi.Input[str],
                 base: Optional[pulumi.Input[int]] = None,
                 weight: Optional[pulumi.Input[int]] = None):
        pulumi.set(__self__, "capacity_provider", capacity_provider)
        if base is not None:
            pulumi.set(__self__, "base", base)
        if weight is not None:
            pulumi.set(__self__, "weight", weight)
    @property
    @pulumi.getter(name="capacityProvider")
    def capacity_provider(self) -> pulumi.Input[str]:
        return pulumi.get(self, "capacity_provider")
    @capacity_provider.setter
    def capacity_provider(self, value: pulumi.Input[str]):
        pulumi.set(self, "capacity_provider", value)
    @property
    @pulumi.getter
    def base(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "base")
    @base.setter
    def base(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "base", value)
    @property
    @pulumi.getter
    def weight(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "weight")
    @weight.setter
    def weight(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "weight", value)
@pulumi.input_type
class RuleDeadLetterConfigArgs:
    """Optional ARN of the dead-letter destination for a rule target."""
    def __init__(__self__, *,
                 arn: Optional[pulumi.Input[str]] = None):
        if arn is not None:
            pulumi.set(__self__, "arn", arn)
    @property
    @pulumi.getter
    def arn(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "arn")
    @arn.setter
    def arn(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "arn", value)
@pulumi.input_type
class RuleEcsParametersArgs:
    """ECS task target settings for a rule; only task_definition_arn is required, every other field is set only when provided."""
    def __init__(__self__, *,
                 task_definition_arn: pulumi.Input[str],
                 capacity_provider_strategy: Optional[pulumi.Input[Sequence[pulumi.Input['RuleCapacityProviderStrategyItemArgs']]]] = None,
                 enable_ecs_managed_tags: Optional[pulumi.Input[bool]] = None,
                 enable_execute_command: Optional[pulumi.Input[bool]] = None,
                 group: Optional[pulumi.Input[str]] = None,
                 launch_type: Optional[pulumi.Input[str]] = None,
                 network_configuration: Optional[pulumi.Input['RuleNetworkConfigurationArgs']] = None,
                 placement_constraints: Optional[pulumi.Input[Sequence[pulumi.Input['RulePlacementConstraintArgs']]]] = None,
                 placement_strategies: Optional[pulumi.Input[Sequence[pulumi.Input['RulePlacementStrategyArgs']]]] = None,
                 platform_version: Optional[pulumi.Input[str]] = None,
                 propagate_tags: Optional[pulumi.Input[str]] = None,
                 reference_id: Optional[pulumi.Input[str]] = None,
                 tag_list: Optional[pulumi.Input[Sequence[pulumi.Input['RuleTagArgs']]]] = None,
                 task_count: Optional[pulumi.Input[int]] = None):
        pulumi.set(__self__, "task_definition_arn", task_definition_arn)
        if capacity_provider_strategy is not None:
            pulumi.set(__self__, "capacity_provider_strategy", capacity_provider_strategy)
        if enable_ecs_managed_tags is not None:
            pulumi.set(__self__, "enable_ecs_managed_tags", enable_ecs_managed_tags)
        if enable_execute_command is not None:
            pulumi.set(__self__, "enable_execute_command", enable_execute_command)
        if group is not None:
            pulumi.set(__self__, "group", group)
        if launch_type is not None:
            pulumi.set(__self__, "launch_type", launch_type)
        if network_configuration is not None:
            pulumi.set(__self__, "network_configuration", network_configuration)
        if placement_constraints is not None:
            pulumi.set(__self__, "placement_constraints", placement_constraints)
        if placement_strategies is not None:
            pulumi.set(__self__, "placement_strategies", placement_strategies)
        if platform_version is not None:
            pulumi.set(__self__, "platform_version", platform_version)
        if propagate_tags is not None:
            pulumi.set(__self__, "propagate_tags", propagate_tags)
        if reference_id is not None:
            pulumi.set(__self__, "reference_id", reference_id)
        if tag_list is not None:
            pulumi.set(__self__, "tag_list", tag_list)
        if task_count is not None:
            pulumi.set(__self__, "task_count", task_count)
    @property
    @pulumi.getter(name="taskDefinitionArn")
    def task_definition_arn(self) -> pulumi.Input[str]:
        return pulumi.get(self, "task_definition_arn")
    @task_definition_arn.setter
    def task_definition_arn(self, value: pulumi.Input[str]):
        pulumi.set(self, "task_definition_arn", value)
    @property
    @pulumi.getter(name="capacityProviderStrategy")
    def capacity_provider_strategy(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RuleCapacityProviderStrategyItemArgs']]]]:
        return pulumi.get(self, "capacity_provider_strategy")
    @capacity_provider_strategy.setter
    def capacity_provider_strategy(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['RuleCapacityProviderStrategyItemArgs']]]]):
        pulumi.set(self, "capacity_provider_strategy", value)
    @property
    @pulumi.getter(name="enableECSManagedTags")
    def enable_ecs_managed_tags(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "enable_ecs_managed_tags")
    @enable_ecs_managed_tags.setter
    def enable_ecs_managed_tags(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enable_ecs_managed_tags", value)
    @property
    @pulumi.getter(name="enableExecuteCommand")
    def enable_execute_command(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "enable_execute_command")
    @enable_execute_command.setter
    def enable_execute_command(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enable_execute_command", value)
    @property
    @pulumi.getter
    def group(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "group")
    @group.setter
    def group(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "group", value)
    @property
    @pulumi.getter(name="launchType")
    def launch_type(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "launch_type")
    @launch_type.setter
    def launch_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "launch_type", value)
    @property
    @pulumi.getter(name="networkConfiguration")
    def network_configuration(self) -> Optional[pulumi.Input['RuleNetworkConfigurationArgs']]:
        return pulumi.get(self, "network_configuration")
    @network_configuration.setter
    def network_configuration(self, value: Optional[pulumi.Input['RuleNetworkConfigurationArgs']]):
        pulumi.set(self, "network_configuration", value)
    @property
    @pulumi.getter(name="placementConstraints")
    def placement_constraints(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RulePlacementConstraintArgs']]]]:
        return pulumi.get(self, "placement_constraints")
    @placement_constraints.setter
    def placement_constraints(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['RulePlacementConstraintArgs']]]]):
        pulumi.set(self, "placement_constraints", value)
    @property
    @pulumi.getter(name="placementStrategies")
    def placement_strategies(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RulePlacementStrategyArgs']]]]:
        return pulumi.get(self, "placement_strategies")
    @placement_strategies.setter
    def placement_strategies(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['RulePlacementStrategyArgs']]]]):
        pulumi.set(self, "placement_strategies", value)
    @property
    @pulumi.getter(name="platformVersion")
    def platform_version(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "platform_version")
    @platform_version.setter
    def platform_version(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "platform_version", value)
    @property
    @pulumi.getter(name="propagateTags")
    def propagate_tags(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "propagate_tags")
    @propagate_tags.setter
    def propagate_tags(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "propagate_tags", value)
    @property
    @pulumi.getter(name="referenceId")
    def reference_id(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "reference_id")
    @reference_id.setter
    def reference_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "reference_id", value)
    @property
    @pulumi.getter(name="tagList")
    def tag_list(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RuleTagArgs']]]]:
        return pulumi.get(self, "tag_list")
    @tag_list.setter
    def tag_list(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['RuleTagArgs']]]]):
        pulumi.set(self, "tag_list", value)
    @property
    @pulumi.getter(name="taskCount")
    def task_count(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "task_count")
    @task_count.setter
    def task_count(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "task_count", value)
@pulumi.input_type
class RuleHttpParametersArgs:
    """Optional header map, path parameter values, and query-string map for a rule's HTTP target; header/query maps are untyped (Any)."""
    def __init__(__self__, *,
                 header_parameters: Optional[Any] = None,
                 path_parameter_values: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 query_string_parameters: Optional[Any] = None):
        if header_parameters is not None:
            pulumi.set(__self__, "header_parameters", header_parameters)
        if path_parameter_values is not None:
            pulumi.set(__self__, "path_parameter_values", path_parameter_values)
        if query_string_parameters is not None:
            pulumi.set(__self__, "query_string_parameters", query_string_parameters)
    @property
    @pulumi.getter(name="headerParameters")
    def header_parameters(self) -> Optional[Any]:
        return pulumi.get(self, "header_parameters")
    @header_parameters.setter
    def header_parameters(self, value: Optional[Any]):
        pulumi.set(self, "header_parameters", value)
    @property
    @pulumi.getter(name="pathParameterValues")
    def path_parameter_values(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        return pulumi.get(self, "path_parameter_values")
    @path_parameter_values.setter
    def path_parameter_values(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "path_parameter_values", value)
    @property
    @pulumi.getter(name="queryStringParameters")
    def query_string_parameters(self) -> Optional[Any]:
        return pulumi.get(self, "query_string_parameters")
    @query_string_parameters.setter
    def query_string_parameters(self, value: Optional[Any]):
        pulumi.set(self, "query_string_parameters", value)
@pulumi.input_type
class RuleInputTransformerArgs:
    """Input transformer for a rule target: required template string plus an optional, untyped map of input paths."""
    def __init__(__self__, *,
                 input_template: pulumi.Input[str],
                 input_paths_map: Optional[Any] = None):
        pulumi.set(__self__, "input_template", input_template)
        if input_paths_map is not None:
            pulumi.set(__self__, "input_paths_map", input_paths_map)
    @property
    @pulumi.getter(name="inputTemplate")
    def input_template(self) -> pulumi.Input[str]:
        return pulumi.get(self, "input_template")
    @input_template.setter
    def input_template(self, value: pulumi.Input[str]):
        pulumi.set(self, "input_template", value)
    @property
    @pulumi.getter(name="inputPathsMap")
    def input_paths_map(self) -> Optional[Any]:
        return pulumi.get(self, "input_paths_map")
    @input_paths_map.setter
    def input_paths_map(self, value: Optional[Any]):
        pulumi.set(self, "input_paths_map", value)
@pulumi.input_type
class RuleKinesisParametersArgs:
    """Kinesis target settings for a rule: the required partition key path."""
    def __init__(__self__, *,
                 partition_key_path: pulumi.Input[str]):
        pulumi.set(__self__, "partition_key_path", partition_key_path)
    @property
    @pulumi.getter(name="partitionKeyPath")
    def partition_key_path(self) -> pulumi.Input[str]:
        return pulumi.get(self, "partition_key_path")
    @partition_key_path.setter
    def partition_key_path(self, value: pulumi.Input[str]):
        pulumi.set(self, "partition_key_path", value)
@pulumi.input_type
class RuleNetworkConfigurationArgs:
    """Network configuration wrapper for an ECS target; holds optional awsvpc settings."""
    def __init__(__self__, *,
                 aws_vpc_configuration: Optional[pulumi.Input['RuleAwsVpcConfigurationArgs']] = None):
        if aws_vpc_configuration is not None:
            pulumi.set(__self__, "aws_vpc_configuration", aws_vpc_configuration)
    @property
    @pulumi.getter(name="awsVpcConfiguration")
    def aws_vpc_configuration(self) -> Optional[pulumi.Input['RuleAwsVpcConfigurationArgs']]:
        return pulumi.get(self, "aws_vpc_configuration")
    @aws_vpc_configuration.setter
    def aws_vpc_configuration(self, value: Optional[pulumi.Input['RuleAwsVpcConfigurationArgs']]):
        pulumi.set(self, "aws_vpc_configuration", value)
@pulumi.input_type
class RulePlacementConstraintArgs:
    """Optional expression/type pair describing one ECS task placement constraint (see RuleEcsParametersArgs.placement_constraints)."""
    def __init__(__self__, *,
                 expression: Optional[pulumi.Input[str]] = None,
                 type: Optional[pulumi.Input[str]] = None):
        if expression is not None:
            pulumi.set(__self__, "expression", expression)
        if type is not None:
            pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter
    def expression(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "expression")
    @expression.setter
    def expression(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "expression", value)
    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "type")
    @type.setter
    def type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "type", value)
@pulumi.input_type
class RulePlacementStrategyArgs:
    """Optional field/type pair describing one ECS task placement strategy (see RuleEcsParametersArgs.placement_strategies)."""
    def __init__(__self__, *,
                 field: Optional[pulumi.Input[str]] = None,
                 type: Optional[pulumi.Input[str]] = None):
        if field is not None:
            pulumi.set(__self__, "field", field)
        if type is not None:
            pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter
    def field(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "field")
    @field.setter
    def field(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "field", value)
    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "type")
    @type.setter
    def type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "type", value)
@pulumi.input_type
class RuleRedshiftDataParametersArgs:
    """Redshift Data statement for a rule target: database and sql required; db user, secret manager ARN, statement name, and with-event flag optional."""
    def __init__(__self__, *,
                 database: pulumi.Input[str],
                 sql: pulumi.Input[str],
                 db_user: Optional[pulumi.Input[str]] = None,
                 secret_manager_arn: Optional[pulumi.Input[str]] = None,
                 statement_name: Optional[pulumi.Input[str]] = None,
                 with_event: Optional[pulumi.Input[bool]] = None):
        pulumi.set(__self__, "database", database)
        pulumi.set(__self__, "sql", sql)
        if db_user is not None:
            pulumi.set(__self__, "db_user", db_user)
        if secret_manager_arn is not None:
            pulumi.set(__self__, "secret_manager_arn", secret_manager_arn)
        if statement_name is not None:
            pulumi.set(__self__, "statement_name", statement_name)
        if with_event is not None:
            pulumi.set(__self__, "with_event", with_event)
    @property
    @pulumi.getter
    def database(self) -> pulumi.Input[str]:
        return pulumi.get(self, "database")
    @database.setter
    def database(self, value: pulumi.Input[str]):
        pulumi.set(self, "database", value)
    @property
    @pulumi.getter
    def sql(self) -> pulumi.Input[str]:
        return pulumi.get(self, "sql")
    @sql.setter
    def sql(self, value: pulumi.Input[str]):
        pulumi.set(self, "sql", value)
    @property
    @pulumi.getter(name="dbUser")
    def db_user(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "db_user")
    @db_user.setter
    def db_user(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "db_user", value)
    @property
    @pulumi.getter(name="secretManagerArn")
    def secret_manager_arn(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "secret_manager_arn")
    @secret_manager_arn.setter
    def secret_manager_arn(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "secret_manager_arn", value)
    @property
    @pulumi.getter(name="statementName")
    def statement_name(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "statement_name")
    @statement_name.setter
    def statement_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "statement_name", value)
    @property
    @pulumi.getter(name="withEvent")
    def with_event(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "with_event")
    @with_event.setter
    def with_event(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "with_event", value)
@pulumi.input_type
class RuleRetryPolicyArgs:
    """Optional retry policy for a rule target: maximum event age in seconds and maximum retry attempts."""
    def __init__(__self__, *,
                 maximum_event_age_in_seconds: Optional[pulumi.Input[int]] = None,
                 maximum_retry_attempts: Optional[pulumi.Input[int]] = None):
        if maximum_event_age_in_seconds is not None:
            pulumi.set(__self__, "maximum_event_age_in_seconds", maximum_event_age_in_seconds)
        if maximum_retry_attempts is not None:
            pulumi.set(__self__, "maximum_retry_attempts", maximum_retry_attempts)
    @property
    @pulumi.getter(name="maximumEventAgeInSeconds")
    def maximum_event_age_in_seconds(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "maximum_event_age_in_seconds")
    @maximum_event_age_in_seconds.setter
    def maximum_event_age_in_seconds(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "maximum_event_age_in_seconds", value)
    @property
    @pulumi.getter(name="maximumRetryAttempts")
    def maximum_retry_attempts(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "maximum_retry_attempts")
    @maximum_retry_attempts.setter
    def maximum_retry_attempts(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "maximum_retry_attempts", value)
@pulumi.input_type
class RuleRunCommandParametersArgs:
    """Run Command parameters for a rule target: the required list of RuleRunCommandTargetArgs."""
    def __init__(__self__, *,
                 run_command_targets: pulumi.Input[Sequence[pulumi.Input['RuleRunCommandTargetArgs']]]):
        pulumi.set(__self__, "run_command_targets", run_command_targets)
    @property
    @pulumi.getter(name="runCommandTargets")
    def run_command_targets(self) -> pulumi.Input[Sequence[pulumi.Input['RuleRunCommandTargetArgs']]]:
        return pulumi.get(self, "run_command_targets")
    @run_command_targets.setter
    def run_command_targets(self, value: pulumi.Input[Sequence[pulumi.Input['RuleRunCommandTargetArgs']]]):
        pulumi.set(self, "run_command_targets", value)
@pulumi.input_type
class RuleRunCommandTargetArgs:
    """One Run Command target: a required key with its required list of values."""
    def __init__(__self__, *,
                 key: pulumi.Input[str],
                 values: pulumi.Input[Sequence[pulumi.Input[str]]]):
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "values", values)
    @property
    @pulumi.getter
    def key(self) -> pulumi.Input[str]:
        return pulumi.get(self, "key")
    @key.setter
    def key(self, value: pulumi.Input[str]):
        pulumi.set(self, "key", value)
    @property
    @pulumi.getter
    def values(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
        return pulumi.get(self, "values")
    @values.setter
    def values(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
        pulumi.set(self, "values", value)
@pulumi.input_type
class RuleSageMakerPipelineParametersArgs:
    """SageMaker pipeline parameters for a Rule target: an optional list of name/value pairs."""
    def __init__(__self__, *,
                 pipeline_parameter_list: Optional[pulumi.Input[Sequence[pulumi.Input['RuleSageMakerPipelineParameterArgs']]]] = None):
        # Skip storage entirely when the optional list was not supplied.
        if pipeline_parameter_list is not None:
            pulumi.set(__self__, "pipeline_parameter_list", pipeline_parameter_list)
    @property
    @pulumi.getter(name="pipelineParameterList")
    def pipeline_parameter_list(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RuleSageMakerPipelineParameterArgs']]]]:
        return pulumi.get(self, "pipeline_parameter_list")
    @pipeline_parameter_list.setter
    def pipeline_parameter_list(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['RuleSageMakerPipelineParameterArgs']]]]):
        pulumi.set(self, "pipeline_parameter_list", value)
@pulumi.input_type
class RuleSageMakerPipelineParameterArgs:
    """A single SageMaker pipeline parameter: a required name and value."""
    def __init__(__self__, *,
                 name: pulumi.Input[str],
                 value: pulumi.Input[str]):
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "value", value)
    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter
    def value(self) -> pulumi.Input[str]:
        return pulumi.get(self, "value")
    @value.setter
    def value(self, value: pulumi.Input[str]):
        pulumi.set(self, "value", value)
@pulumi.input_type
class RuleSqsParametersArgs:
    """SQS parameters for a Rule target: the required message group id (FIFO queues)."""
    def __init__(__self__, *,
                 message_group_id: pulumi.Input[str]):
        pulumi.set(__self__, "message_group_id", message_group_id)
    @property
    @pulumi.getter(name="messageGroupId")
    def message_group_id(self) -> pulumi.Input[str]:
        return pulumi.get(self, "message_group_id")
    @message_group_id.setter
    def message_group_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "message_group_id", value)
@pulumi.input_type
class RuleTagArgs:
    """A tag entry on a Rule: optional key and optional value."""
    def __init__(__self__, *,
                 key: Optional[pulumi.Input[str]] = None,
                 value: Optional[pulumi.Input[str]] = None):
        # Only record inputs that were explicitly supplied.
        if key is not None:
            pulumi.set(__self__, "key", key)
        if value is not None:
            pulumi.set(__self__, "value", value)
    @property
    @pulumi.getter
    def key(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "key")
    @key.setter
    def key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "key", value)
    @property
    @pulumi.getter
    def value(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "value")
    @value.setter
    def value(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "value", value)
@pulumi.input_type
class RuleTargetArgs:
    """A target invoked when a Rule matches.

    Requires the target's `arn` and an `id`; all remaining fields are optional
    per-service parameter bundles (batch, ECS, HTTP, Kinesis, Redshift Data,
    Run Command, SageMaker pipeline, SQS), input shaping (`input`,
    `input_path`, `input_transformer`), delivery controls (`dead_letter_config`,
    `retry_policy`), and an optional `role_arn`.
    """
    def __init__(__self__, *,
                 arn: pulumi.Input[str],
                 id: pulumi.Input[str],
                 batch_parameters: Optional[pulumi.Input['RuleBatchParametersArgs']] = None,
                 dead_letter_config: Optional[pulumi.Input['RuleDeadLetterConfigArgs']] = None,
                 ecs_parameters: Optional[pulumi.Input['RuleEcsParametersArgs']] = None,
                 http_parameters: Optional[pulumi.Input['RuleHttpParametersArgs']] = None,
                 input: Optional[pulumi.Input[str]] = None,
                 input_path: Optional[pulumi.Input[str]] = None,
                 input_transformer: Optional[pulumi.Input['RuleInputTransformerArgs']] = None,
                 kinesis_parameters: Optional[pulumi.Input['RuleKinesisParametersArgs']] = None,
                 redshift_data_parameters: Optional[pulumi.Input['RuleRedshiftDataParametersArgs']] = None,
                 retry_policy: Optional[pulumi.Input['RuleRetryPolicyArgs']] = None,
                 role_arn: Optional[pulumi.Input[str]] = None,
                 run_command_parameters: Optional[pulumi.Input['RuleRunCommandParametersArgs']] = None,
                 sage_maker_pipeline_parameters: Optional[pulumi.Input['RuleSageMakerPipelineParametersArgs']] = None,
                 sqs_parameters: Optional[pulumi.Input['RuleSqsParametersArgs']] = None):
        # Required fields are always stored; optional fields are stored only
        # when explicitly supplied, so unset inputs stay absent.
        pulumi.set(__self__, "arn", arn)
        pulumi.set(__self__, "id", id)
        if batch_parameters is not None:
            pulumi.set(__self__, "batch_parameters", batch_parameters)
        if dead_letter_config is not None:
            pulumi.set(__self__, "dead_letter_config", dead_letter_config)
        if ecs_parameters is not None:
            pulumi.set(__self__, "ecs_parameters", ecs_parameters)
        if http_parameters is not None:
            pulumi.set(__self__, "http_parameters", http_parameters)
        if input is not None:
            pulumi.set(__self__, "input", input)
        if input_path is not None:
            pulumi.set(__self__, "input_path", input_path)
        if input_transformer is not None:
            pulumi.set(__self__, "input_transformer", input_transformer)
        if kinesis_parameters is not None:
            pulumi.set(__self__, "kinesis_parameters", kinesis_parameters)
        if redshift_data_parameters is not None:
            pulumi.set(__self__, "redshift_data_parameters", redshift_data_parameters)
        if retry_policy is not None:
            pulumi.set(__self__, "retry_policy", retry_policy)
        if role_arn is not None:
            pulumi.set(__self__, "role_arn", role_arn)
        if run_command_parameters is not None:
            pulumi.set(__self__, "run_command_parameters", run_command_parameters)
        if sage_maker_pipeline_parameters is not None:
            pulumi.set(__self__, "sage_maker_pipeline_parameters", sage_maker_pipeline_parameters)
        if sqs_parameters is not None:
            pulumi.set(__self__, "sqs_parameters", sqs_parameters)
    @property
    @pulumi.getter
    def arn(self) -> pulumi.Input[str]:
        return pulumi.get(self, "arn")
    @arn.setter
    def arn(self, value: pulumi.Input[str]):
        pulumi.set(self, "arn", value)
    @property
    @pulumi.getter
    def id(self) -> pulumi.Input[str]:
        return pulumi.get(self, "id")
    @id.setter
    def id(self, value: pulumi.Input[str]):
        pulumi.set(self, "id", value)
    @property
    @pulumi.getter(name="batchParameters")
    def batch_parameters(self) -> Optional[pulumi.Input['RuleBatchParametersArgs']]:
        return pulumi.get(self, "batch_parameters")
    @batch_parameters.setter
    def batch_parameters(self, value: Optional[pulumi.Input['RuleBatchParametersArgs']]):
        pulumi.set(self, "batch_parameters", value)
    @property
    @pulumi.getter(name="deadLetterConfig")
    def dead_letter_config(self) -> Optional[pulumi.Input['RuleDeadLetterConfigArgs']]:
        return pulumi.get(self, "dead_letter_config")
    @dead_letter_config.setter
    def dead_letter_config(self, value: Optional[pulumi.Input['RuleDeadLetterConfigArgs']]):
        pulumi.set(self, "dead_letter_config", value)
    @property
    @pulumi.getter(name="ecsParameters")
    def ecs_parameters(self) -> Optional[pulumi.Input['RuleEcsParametersArgs']]:
        return pulumi.get(self, "ecs_parameters")
    @ecs_parameters.setter
    def ecs_parameters(self, value: Optional[pulumi.Input['RuleEcsParametersArgs']]):
        pulumi.set(self, "ecs_parameters", value)
    @property
    @pulumi.getter(name="httpParameters")
    def http_parameters(self) -> Optional[pulumi.Input['RuleHttpParametersArgs']]:
        return pulumi.get(self, "http_parameters")
    @http_parameters.setter
    def http_parameters(self, value: Optional[pulumi.Input['RuleHttpParametersArgs']]):
        pulumi.set(self, "http_parameters", value)
    @property
    @pulumi.getter
    def input(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "input")
    @input.setter
    def input(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "input", value)
    @property
    @pulumi.getter(name="inputPath")
    def input_path(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "input_path")
    @input_path.setter
    def input_path(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "input_path", value)
    @property
    @pulumi.getter(name="inputTransformer")
    def input_transformer(self) -> Optional[pulumi.Input['RuleInputTransformerArgs']]:
        return pulumi.get(self, "input_transformer")
    @input_transformer.setter
    def input_transformer(self, value: Optional[pulumi.Input['RuleInputTransformerArgs']]):
        pulumi.set(self, "input_transformer", value)
    @property
    @pulumi.getter(name="kinesisParameters")
    def kinesis_parameters(self) -> Optional[pulumi.Input['RuleKinesisParametersArgs']]:
        return pulumi.get(self, "kinesis_parameters")
    @kinesis_parameters.setter
    def kinesis_parameters(self, value: Optional[pulumi.Input['RuleKinesisParametersArgs']]):
        pulumi.set(self, "kinesis_parameters", value)
    @property
    @pulumi.getter(name="redshiftDataParameters")
    def redshift_data_parameters(self) -> Optional[pulumi.Input['RuleRedshiftDataParametersArgs']]:
        return pulumi.get(self, "redshift_data_parameters")
    @redshift_data_parameters.setter
    def redshift_data_parameters(self, value: Optional[pulumi.Input['RuleRedshiftDataParametersArgs']]):
        pulumi.set(self, "redshift_data_parameters", value)
    @property
    @pulumi.getter(name="retryPolicy")
    def retry_policy(self) -> Optional[pulumi.Input['RuleRetryPolicyArgs']]:
        return pulumi.get(self, "retry_policy")
    @retry_policy.setter
    def retry_policy(self, value: Optional[pulumi.Input['RuleRetryPolicyArgs']]):
        pulumi.set(self, "retry_policy", value)
    @property
    @pulumi.getter(name="roleArn")
    def role_arn(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "role_arn")
    @role_arn.setter
    def role_arn(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "role_arn", value)
    @property
    @pulumi.getter(name="runCommandParameters")
    def run_command_parameters(self) -> Optional[pulumi.Input['RuleRunCommandParametersArgs']]:
        return pulumi.get(self, "run_command_parameters")
    @run_command_parameters.setter
    def run_command_parameters(self, value: Optional[pulumi.Input['RuleRunCommandParametersArgs']]):
        pulumi.set(self, "run_command_parameters", value)
    @property
    @pulumi.getter(name="sageMakerPipelineParameters")
    def sage_maker_pipeline_parameters(self) -> Optional[pulumi.Input['RuleSageMakerPipelineParametersArgs']]:
        return pulumi.get(self, "sage_maker_pipeline_parameters")
    @sage_maker_pipeline_parameters.setter
    def sage_maker_pipeline_parameters(self, value: Optional[pulumi.Input['RuleSageMakerPipelineParametersArgs']]):
        pulumi.set(self, "sage_maker_pipeline_parameters", value)
    @property
    @pulumi.getter(name="sqsParameters")
    def sqs_parameters(self) -> Optional[pulumi.Input['RuleSqsParametersArgs']]:
        return pulumi.get(self, "sqs_parameters")
    @sqs_parameters.setter
    def sqs_parameters(self, value: Optional[pulumi.Input['RuleSqsParametersArgs']]):
        pulumi.set(self, "sqs_parameters", value)
# NOTE(review): removed a stray dataset-export artifact ("| 37.43663 | 139 | 0.681449 |")
# that was not Python and broke the module's syntax.
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from ._enums import *
# Public input types exported by this module; keep in sync with the class
# definitions below (names must match exactly for `from ... import *`).
__all__ = [
    'ConnectionApiKeyAuthParametersArgs',
    'ConnectionAuthParametersArgs',
    'ConnectionBasicAuthParametersArgs',
    'ConnectionClientParametersArgs',
    'ConnectionHttpParametersArgs',
    'ConnectionOAuthParametersArgs',
    'ConnectionParameterArgs',
    'EndpointEventBusArgs',
    'EndpointFailoverConfigArgs',
    'EndpointPrimaryArgs',
    'EndpointReplicationConfigArgs',
    'EndpointRoutingConfigArgs',
    'EndpointSecondaryArgs',
    'EventBusPolicyConditionArgs',
    'EventBusTagEntryArgs',
    'RuleAwsVpcConfigurationArgs',
    'RuleBatchArrayPropertiesArgs',
    'RuleBatchParametersArgs',
    'RuleBatchRetryStrategyArgs',
    'RuleCapacityProviderStrategyItemArgs',
    'RuleDeadLetterConfigArgs',
    'RuleEcsParametersArgs',
    'RuleHttpParametersArgs',
    'RuleInputTransformerArgs',
    'RuleKinesisParametersArgs',
    'RuleNetworkConfigurationArgs',
    'RulePlacementConstraintArgs',
    'RulePlacementStrategyArgs',
    'RuleRedshiftDataParametersArgs',
    'RuleRetryPolicyArgs',
    'RuleRunCommandParametersArgs',
    'RuleRunCommandTargetArgs',
    'RuleSageMakerPipelineParametersArgs',
    'RuleSageMakerPipelineParameterArgs',
    'RuleSqsParametersArgs',
    'RuleTagArgs',
    'RuleTargetArgs',
]
@pulumi.input_type
class ConnectionApiKeyAuthParametersArgs:
    """API-key auth for a Connection: a required header/key name and its value."""
    def __init__(__self__, *,
                 api_key_name: pulumi.Input[str],
                 api_key_value: pulumi.Input[str]):
        pulumi.set(__self__, "api_key_name", api_key_name)
        pulumi.set(__self__, "api_key_value", api_key_value)
    @property
    @pulumi.getter(name="apiKeyName")
    def api_key_name(self) -> pulumi.Input[str]:
        return pulumi.get(self, "api_key_name")
    @api_key_name.setter
    def api_key_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "api_key_name", value)
    @property
    @pulumi.getter(name="apiKeyValue")
    def api_key_value(self) -> pulumi.Input[str]:
        return pulumi.get(self, "api_key_value")
    @api_key_value.setter
    def api_key_value(self, value: pulumi.Input[str]):
        pulumi.set(self, "api_key_value", value)
@pulumi.input_type
class ConnectionAuthParametersArgs:
    """Authorization settings for a Connection.

    All fields are optional and mutually alternative bundles: API-key auth,
    basic auth, OAuth, plus HTTP parameters applied to every invocation.
    """
    def __init__(__self__, *,
                 api_key_auth_parameters: Optional[pulumi.Input['ConnectionApiKeyAuthParametersArgs']] = None,
                 basic_auth_parameters: Optional[pulumi.Input['ConnectionBasicAuthParametersArgs']] = None,
                 invocation_http_parameters: Optional[pulumi.Input['ConnectionHttpParametersArgs']] = None,
                 o_auth_parameters: Optional[pulumi.Input['ConnectionOAuthParametersArgs']] = None):
        # Only record inputs that were explicitly supplied.
        if api_key_auth_parameters is not None:
            pulumi.set(__self__, "api_key_auth_parameters", api_key_auth_parameters)
        if basic_auth_parameters is not None:
            pulumi.set(__self__, "basic_auth_parameters", basic_auth_parameters)
        if invocation_http_parameters is not None:
            pulumi.set(__self__, "invocation_http_parameters", invocation_http_parameters)
        if o_auth_parameters is not None:
            pulumi.set(__self__, "o_auth_parameters", o_auth_parameters)
    @property
    @pulumi.getter(name="apiKeyAuthParameters")
    def api_key_auth_parameters(self) -> Optional[pulumi.Input['ConnectionApiKeyAuthParametersArgs']]:
        return pulumi.get(self, "api_key_auth_parameters")
    @api_key_auth_parameters.setter
    def api_key_auth_parameters(self, value: Optional[pulumi.Input['ConnectionApiKeyAuthParametersArgs']]):
        pulumi.set(self, "api_key_auth_parameters", value)
    @property
    @pulumi.getter(name="basicAuthParameters")
    def basic_auth_parameters(self) -> Optional[pulumi.Input['ConnectionBasicAuthParametersArgs']]:
        return pulumi.get(self, "basic_auth_parameters")
    @basic_auth_parameters.setter
    def basic_auth_parameters(self, value: Optional[pulumi.Input['ConnectionBasicAuthParametersArgs']]):
        pulumi.set(self, "basic_auth_parameters", value)
    @property
    @pulumi.getter(name="invocationHttpParameters")
    def invocation_http_parameters(self) -> Optional[pulumi.Input['ConnectionHttpParametersArgs']]:
        return pulumi.get(self, "invocation_http_parameters")
    @invocation_http_parameters.setter
    def invocation_http_parameters(self, value: Optional[pulumi.Input['ConnectionHttpParametersArgs']]):
        pulumi.set(self, "invocation_http_parameters", value)
    @property
    @pulumi.getter(name="oAuthParameters")
    def o_auth_parameters(self) -> Optional[pulumi.Input['ConnectionOAuthParametersArgs']]:
        return pulumi.get(self, "o_auth_parameters")
    @o_auth_parameters.setter
    def o_auth_parameters(self, value: Optional[pulumi.Input['ConnectionOAuthParametersArgs']]):
        pulumi.set(self, "o_auth_parameters", value)
@pulumi.input_type
class ConnectionBasicAuthParametersArgs:
    """Basic auth for a Connection: required username and password."""
    def __init__(__self__, *,
                 password: pulumi.Input[str],
                 username: pulumi.Input[str]):
        pulumi.set(__self__, "password", password)
        pulumi.set(__self__, "username", username)
    @property
    @pulumi.getter
    def password(self) -> pulumi.Input[str]:
        return pulumi.get(self, "password")
    @password.setter
    def password(self, value: pulumi.Input[str]):
        pulumi.set(self, "password", value)
    @property
    @pulumi.getter
    def username(self) -> pulumi.Input[str]:
        return pulumi.get(self, "username")
    @username.setter
    def username(self, value: pulumi.Input[str]):
        pulumi.set(self, "username", value)
@pulumi.input_type
class ConnectionClientParametersArgs:
    """OAuth client credentials for a Connection: required client id and secret.

    Note the wire name for ``client_id`` is ``clientID`` (capital D), per the
    generated getter below.
    """
    def __init__(__self__, *,
                 client_id: pulumi.Input[str],
                 client_secret: pulumi.Input[str]):
        pulumi.set(__self__, "client_id", client_id)
        pulumi.set(__self__, "client_secret", client_secret)
    @property
    @pulumi.getter(name="clientID")
    def client_id(self) -> pulumi.Input[str]:
        return pulumi.get(self, "client_id")
    @client_id.setter
    def client_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "client_id", value)
    @property
    @pulumi.getter(name="clientSecret")
    def client_secret(self) -> pulumi.Input[str]:
        return pulumi.get(self, "client_secret")
    @client_secret.setter
    def client_secret(self, value: pulumi.Input[str]):
        pulumi.set(self, "client_secret", value)
@pulumi.input_type
class ConnectionHttpParametersArgs:
    """Extra HTTP parameters for a Connection: optional body, header, and query-string parameter lists."""
    def __init__(__self__, *,
                 body_parameters: Optional[pulumi.Input[Sequence[pulumi.Input['ConnectionParameterArgs']]]] = None,
                 header_parameters: Optional[pulumi.Input[Sequence[pulumi.Input['ConnectionParameterArgs']]]] = None,
                 query_string_parameters: Optional[pulumi.Input[Sequence[pulumi.Input['ConnectionParameterArgs']]]] = None):
        # Only record inputs that were explicitly supplied.
        if body_parameters is not None:
            pulumi.set(__self__, "body_parameters", body_parameters)
        if header_parameters is not None:
            pulumi.set(__self__, "header_parameters", header_parameters)
        if query_string_parameters is not None:
            pulumi.set(__self__, "query_string_parameters", query_string_parameters)
    @property
    @pulumi.getter(name="bodyParameters")
    def body_parameters(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ConnectionParameterArgs']]]]:
        return pulumi.get(self, "body_parameters")
    @body_parameters.setter
    def body_parameters(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ConnectionParameterArgs']]]]):
        pulumi.set(self, "body_parameters", value)
    @property
    @pulumi.getter(name="headerParameters")
    def header_parameters(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ConnectionParameterArgs']]]]:
        return pulumi.get(self, "header_parameters")
    @header_parameters.setter
    def header_parameters(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ConnectionParameterArgs']]]]):
        pulumi.set(self, "header_parameters", value)
    @property
    @pulumi.getter(name="queryStringParameters")
    def query_string_parameters(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ConnectionParameterArgs']]]]:
        return pulumi.get(self, "query_string_parameters")
    @query_string_parameters.setter
    def query_string_parameters(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ConnectionParameterArgs']]]]):
        pulumi.set(self, "query_string_parameters", value)
@pulumi.input_type
class ConnectionOAuthParametersArgs:
    """OAuth settings for a Connection.

    Requires the authorization endpoint, client parameters, and HTTP method;
    optional extra HTTP parameters are sent with the OAuth request.
    """
    def __init__(__self__, *,
                 authorization_endpoint: pulumi.Input[str],
                 client_parameters: pulumi.Input['ConnectionClientParametersArgs'],
                 http_method: pulumi.Input['ConnectionOAuthParametersHttpMethod'],
                 o_auth_http_parameters: Optional[pulumi.Input['ConnectionHttpParametersArgs']] = None):
        pulumi.set(__self__, "authorization_endpoint", authorization_endpoint)
        pulumi.set(__self__, "client_parameters", client_parameters)
        pulumi.set(__self__, "http_method", http_method)
        # Optional field: stored only when explicitly supplied.
        if o_auth_http_parameters is not None:
            pulumi.set(__self__, "o_auth_http_parameters", o_auth_http_parameters)
    @property
    @pulumi.getter(name="authorizationEndpoint")
    def authorization_endpoint(self) -> pulumi.Input[str]:
        return pulumi.get(self, "authorization_endpoint")
    @authorization_endpoint.setter
    def authorization_endpoint(self, value: pulumi.Input[str]):
        pulumi.set(self, "authorization_endpoint", value)
    @property
    @pulumi.getter(name="clientParameters")
    def client_parameters(self) -> pulumi.Input['ConnectionClientParametersArgs']:
        return pulumi.get(self, "client_parameters")
    @client_parameters.setter
    def client_parameters(self, value: pulumi.Input['ConnectionClientParametersArgs']):
        pulumi.set(self, "client_parameters", value)
    @property
    @pulumi.getter(name="httpMethod")
    def http_method(self) -> pulumi.Input['ConnectionOAuthParametersHttpMethod']:
        return pulumi.get(self, "http_method")
    @http_method.setter
    def http_method(self, value: pulumi.Input['ConnectionOAuthParametersHttpMethod']):
        pulumi.set(self, "http_method", value)
    @property
    @pulumi.getter(name="oAuthHttpParameters")
    def o_auth_http_parameters(self) -> Optional[pulumi.Input['ConnectionHttpParametersArgs']]:
        return pulumi.get(self, "o_auth_http_parameters")
    @o_auth_http_parameters.setter
    def o_auth_http_parameters(self, value: Optional[pulumi.Input['ConnectionHttpParametersArgs']]):
        pulumi.set(self, "o_auth_http_parameters", value)
@pulumi.input_type
class ConnectionParameterArgs:
    """A single HTTP parameter for a Connection: required key/value plus an optional secrecy flag."""
    def __init__(__self__, *,
                 key: pulumi.Input[str],
                 value: pulumi.Input[str],
                 is_value_secret: Optional[pulumi.Input[bool]] = None):
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "value", value)
        # Optional flag: stored only when explicitly supplied.
        if is_value_secret is not None:
            pulumi.set(__self__, "is_value_secret", is_value_secret)
    @property
    @pulumi.getter
    def key(self) -> pulumi.Input[str]:
        return pulumi.get(self, "key")
    @key.setter
    def key(self, value: pulumi.Input[str]):
        pulumi.set(self, "key", value)
    @property
    @pulumi.getter
    def value(self) -> pulumi.Input[str]:
        return pulumi.get(self, "value")
    @value.setter
    def value(self, value: pulumi.Input[str]):
        pulumi.set(self, "value", value)
    @property
    @pulumi.getter(name="isValueSecret")
    def is_value_secret(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "is_value_secret")
    @is_value_secret.setter
    def is_value_secret(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_value_secret", value)
@pulumi.input_type
class EndpointEventBusArgs:
    """An event bus referenced by an Endpoint: the required event-bus ARN."""
    def __init__(__self__, *,
                 event_bus_arn: pulumi.Input[str]):
        pulumi.set(__self__, "event_bus_arn", event_bus_arn)
    @property
    @pulumi.getter(name="eventBusArn")
    def event_bus_arn(self) -> pulumi.Input[str]:
        return pulumi.get(self, "event_bus_arn")
    @event_bus_arn.setter
    def event_bus_arn(self, value: pulumi.Input[str]):
        pulumi.set(self, "event_bus_arn", value)
@pulumi.input_type
class EndpointFailoverConfigArgs:
    """Failover configuration for an Endpoint: required primary and secondary settings."""
    def __init__(__self__, *,
                 primary: pulumi.Input['EndpointPrimaryArgs'],
                 secondary: pulumi.Input['EndpointSecondaryArgs']):
        pulumi.set(__self__, "primary", primary)
        pulumi.set(__self__, "secondary", secondary)
    @property
    @pulumi.getter
    def primary(self) -> pulumi.Input['EndpointPrimaryArgs']:
        return pulumi.get(self, "primary")
    @primary.setter
    def primary(self, value: pulumi.Input['EndpointPrimaryArgs']):
        pulumi.set(self, "primary", value)
    @property
    @pulumi.getter
    def secondary(self) -> pulumi.Input['EndpointSecondaryArgs']:
        return pulumi.get(self, "secondary")
    @secondary.setter
    def secondary(self, value: pulumi.Input['EndpointSecondaryArgs']):
        pulumi.set(self, "secondary", value)
@pulumi.input_type
class EndpointPrimaryArgs:
    """Primary failover target for an Endpoint: the required health-check identifier."""
    def __init__(__self__, *,
                 health_check: pulumi.Input[str]):
        pulumi.set(__self__, "health_check", health_check)
    @property
    @pulumi.getter(name="healthCheck")
    def health_check(self) -> pulumi.Input[str]:
        return pulumi.get(self, "health_check")
    @health_check.setter
    def health_check(self, value: pulumi.Input[str]):
        pulumi.set(self, "health_check", value)
@pulumi.input_type
class EndpointReplicationConfigArgs:
    """Replication configuration for an Endpoint: the required replication state enum."""
    def __init__(__self__, *,
                 state: pulumi.Input['EndpointReplicationState']):
        pulumi.set(__self__, "state", state)
    @property
    @pulumi.getter
    def state(self) -> pulumi.Input['EndpointReplicationState']:
        return pulumi.get(self, "state")
    @state.setter
    def state(self, value: pulumi.Input['EndpointReplicationState']):
        pulumi.set(self, "state", value)
@pulumi.input_type
class EndpointRoutingConfigArgs:
    """Routing configuration for an Endpoint: the required failover configuration."""
    def __init__(__self__, *,
                 failover_config: pulumi.Input['EndpointFailoverConfigArgs']):
        pulumi.set(__self__, "failover_config", failover_config)
    @property
    @pulumi.getter(name="failoverConfig")
    def failover_config(self) -> pulumi.Input['EndpointFailoverConfigArgs']:
        return pulumi.get(self, "failover_config")
    @failover_config.setter
    def failover_config(self, value: pulumi.Input['EndpointFailoverConfigArgs']):
        pulumi.set(self, "failover_config", value)
@pulumi.input_type
class EndpointSecondaryArgs:
    """Secondary failover target for an Endpoint: the required route identifier."""
    def __init__(__self__, *,
                 route: pulumi.Input[str]):
        pulumi.set(__self__, "route", route)
    @property
    @pulumi.getter
    def route(self) -> pulumi.Input[str]:
        return pulumi.get(self, "route")
    @route.setter
    def route(self, value: pulumi.Input[str]):
        pulumi.set(self, "route", value)
@pulumi.input_type
class EventBusPolicyConditionArgs:
    """A condition on an EventBus policy: optional key, type, and value."""
    def __init__(__self__, *,
                 key: Optional[pulumi.Input[str]] = None,
                 type: Optional[pulumi.Input[str]] = None,
                 value: Optional[pulumi.Input[str]] = None):
        # Only record inputs that were explicitly supplied.
        if key is not None:
            pulumi.set(__self__, "key", key)
        if type is not None:
            pulumi.set(__self__, "type", type)
        if value is not None:
            pulumi.set(__self__, "value", value)
    @property
    @pulumi.getter
    def key(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "key")
    @key.setter
    def key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "key", value)
    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "type")
    @type.setter
    def type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "type", value)
    @property
    @pulumi.getter
    def value(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "value")
    @value.setter
    def value(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "value", value)
@pulumi.input_type
class EventBusTagEntryArgs:
    """A tag entry on an EventBus: required key and value."""
    def __init__(__self__, *,
                 key: pulumi.Input[str],
                 value: pulumi.Input[str]):
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "value", value)
    @property
    @pulumi.getter
    def key(self) -> pulumi.Input[str]:
        return pulumi.get(self, "key")
    @key.setter
    def key(self, value: pulumi.Input[str]):
        pulumi.set(self, "key", value)
    @property
    @pulumi.getter
    def value(self) -> pulumi.Input[str]:
        return pulumi.get(self, "value")
    @value.setter
    def value(self, value: pulumi.Input[str]):
        pulumi.set(self, "value", value)
@pulumi.input_type
class RuleAwsVpcConfigurationArgs:
    """VPC network configuration for a Rule's ECS target: required subnets, optional public-IP assignment and security groups."""
    def __init__(__self__, *,
                 subnets: pulumi.Input[Sequence[pulumi.Input[str]]],
                 assign_public_ip: Optional[pulumi.Input[str]] = None,
                 security_groups: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
        pulumi.set(__self__, "subnets", subnets)
        # Optional fields: stored only when explicitly supplied.
        if assign_public_ip is not None:
            pulumi.set(__self__, "assign_public_ip", assign_public_ip)
        if security_groups is not None:
            pulumi.set(__self__, "security_groups", security_groups)
    @property
    @pulumi.getter
    def subnets(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
        return pulumi.get(self, "subnets")
    @subnets.setter
    def subnets(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
        pulumi.set(self, "subnets", value)
    @property
    @pulumi.getter(name="assignPublicIp")
    def assign_public_ip(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "assign_public_ip")
    @assign_public_ip.setter
    def assign_public_ip(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "assign_public_ip", value)
    @property
    @pulumi.getter(name="securityGroups")
    def security_groups(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        return pulumi.get(self, "security_groups")
    @security_groups.setter
    def security_groups(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "security_groups", value)
@pulumi.input_type
class RuleBatchArrayPropertiesArgs:
    """Array properties for a Rule's Batch target: an optional array size."""
    def __init__(__self__, *,
                 size: Optional[pulumi.Input[int]] = None):
        # Optional field: stored only when explicitly supplied.
        if size is not None:
            pulumi.set(__self__, "size", size)
    @property
    @pulumi.getter
    def size(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "size")
    @size.setter
    def size(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "size", value)
@pulumi.input_type
class RuleBatchParametersArgs:
    """Batch parameters for a Rule target.

    Requires a job definition and job name; array properties and a retry
    strategy are optional.
    """
    def __init__(__self__, *,
                 job_definition: pulumi.Input[str],
                 job_name: pulumi.Input[str],
                 array_properties: Optional[pulumi.Input['RuleBatchArrayPropertiesArgs']] = None,
                 retry_strategy: Optional[pulumi.Input['RuleBatchRetryStrategyArgs']] = None):
        pulumi.set(__self__, "job_definition", job_definition)
        pulumi.set(__self__, "job_name", job_name)
        # Optional fields: stored only when explicitly supplied.
        if array_properties is not None:
            pulumi.set(__self__, "array_properties", array_properties)
        if retry_strategy is not None:
            pulumi.set(__self__, "retry_strategy", retry_strategy)
    @property
    @pulumi.getter(name="jobDefinition")
    def job_definition(self) -> pulumi.Input[str]:
        return pulumi.get(self, "job_definition")
    @job_definition.setter
    def job_definition(self, value: pulumi.Input[str]):
        pulumi.set(self, "job_definition", value)
    @property
    @pulumi.getter(name="jobName")
    def job_name(self) -> pulumi.Input[str]:
        return pulumi.get(self, "job_name")
    @job_name.setter
    def job_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "job_name", value)
    @property
    @pulumi.getter(name="arrayProperties")
    def array_properties(self) -> Optional[pulumi.Input['RuleBatchArrayPropertiesArgs']]:
        return pulumi.get(self, "array_properties")
    @array_properties.setter
    def array_properties(self, value: Optional[pulumi.Input['RuleBatchArrayPropertiesArgs']]):
        pulumi.set(self, "array_properties", value)
    @property
    @pulumi.getter(name="retryStrategy")
    def retry_strategy(self) -> Optional[pulumi.Input['RuleBatchRetryStrategyArgs']]:
        return pulumi.get(self, "retry_strategy")
    @retry_strategy.setter
    def retry_strategy(self, value: Optional[pulumi.Input['RuleBatchRetryStrategyArgs']]):
        pulumi.set(self, "retry_strategy", value)
@pulumi.input_type
class RuleBatchRetryStrategyArgs:
    """Retry strategy for a Rule's Batch target: an optional attempt count."""
    def __init__(__self__, *,
                 attempts: Optional[pulumi.Input[int]] = None):
        # Optional field: stored only when explicitly supplied.
        if attempts is not None:
            pulumi.set(__self__, "attempts", attempts)
    @property
    @pulumi.getter
    def attempts(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "attempts")
    @attempts.setter
    def attempts(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "attempts", value)
@pulumi.input_type
class RuleCapacityProviderStrategyItemArgs:
    """A capacity-provider strategy item for an ECS target: required provider name, optional base and weight."""
    def __init__(__self__, *,
                 capacity_provider: pulumi.Input[str],
                 base: Optional[pulumi.Input[int]] = None,
                 weight: Optional[pulumi.Input[int]] = None):
        pulumi.set(__self__, "capacity_provider", capacity_provider)
        # Optional fields: stored only when explicitly supplied.
        if base is not None:
            pulumi.set(__self__, "base", base)
        if weight is not None:
            pulumi.set(__self__, "weight", weight)
    @property
    @pulumi.getter(name="capacityProvider")
    def capacity_provider(self) -> pulumi.Input[str]:
        return pulumi.get(self, "capacity_provider")
    @capacity_provider.setter
    def capacity_provider(self, value: pulumi.Input[str]):
        pulumi.set(self, "capacity_provider", value)
    @property
    @pulumi.getter
    def base(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "base")
    @base.setter
    def base(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "base", value)
    @property
    @pulumi.getter
    def weight(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "weight")
    @weight.setter
    def weight(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "weight", value)
@pulumi.input_type
class RuleDeadLetterConfigArgs:
    """Dead-letter configuration for a Rule target: an optional queue/resource ARN."""
    def __init__(__self__, *,
                 arn: Optional[pulumi.Input[str]] = None):
        # Optional field: stored only when explicitly supplied.
        if arn is not None:
            pulumi.set(__self__, "arn", arn)
    @property
    @pulumi.getter
    def arn(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "arn")
    @arn.setter
    def arn(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "arn", value)
@pulumi.input_type
class RuleEcsParametersArgs:
def __init__(__self__, *,
task_definition_arn: pulumi.Input[str],
capacity_provider_strategy: Optional[pulumi.Input[Sequence[pulumi.Input['RuleCapacityProviderStrategyItemArgs']]]] = None,
enable_ecs_managed_tags: Optional[pulumi.Input[bool]] = None,
enable_execute_command: Optional[pulumi.Input[bool]] = None,
group: Optional[pulumi.Input[str]] = None,
launch_type: Optional[pulumi.Input[str]] = None,
network_configuration: Optional[pulumi.Input['RuleNetworkConfigurationArgs']] = None,
placement_constraints: Optional[pulumi.Input[Sequence[pulumi.Input['RulePlacementConstraintArgs']]]] = None,
placement_strategies: Optional[pulumi.Input[Sequence[pulumi.Input['RulePlacementStrategyArgs']]]] = None,
platform_version: Optional[pulumi.Input[str]] = None,
propagate_tags: Optional[pulumi.Input[str]] = None,
reference_id: Optional[pulumi.Input[str]] = None,
tag_list: Optional[pulumi.Input[Sequence[pulumi.Input['RuleTagArgs']]]] = None,
task_count: Optional[pulumi.Input[int]] = None):
pulumi.set(__self__, "task_definition_arn", task_definition_arn)
if capacity_provider_strategy is not None:
pulumi.set(__self__, "capacity_provider_strategy", capacity_provider_strategy)
if enable_ecs_managed_tags is not None:
pulumi.set(__self__, "enable_ecs_managed_tags", enable_ecs_managed_tags)
if enable_execute_command is not None:
pulumi.set(__self__, "enable_execute_command", enable_execute_command)
if group is not None:
pulumi.set(__self__, "group", group)
if launch_type is not None:
pulumi.set(__self__, "launch_type", launch_type)
if network_configuration is not None:
pulumi.set(__self__, "network_configuration", network_configuration)
if placement_constraints is not None:
pulumi.set(__self__, "placement_constraints", placement_constraints)
if placement_strategies is not None:
pulumi.set(__self__, "placement_strategies", placement_strategies)
if platform_version is not None:
pulumi.set(__self__, "platform_version", platform_version)
if propagate_tags is not None:
pulumi.set(__self__, "propagate_tags", propagate_tags)
if reference_id is not None:
pulumi.set(__self__, "reference_id", reference_id)
if tag_list is not None:
pulumi.set(__self__, "tag_list", tag_list)
if task_count is not None:
pulumi.set(__self__, "task_count", task_count)
@property
@pulumi.getter(name="taskDefinitionArn")
def task_definition_arn(self) -> pulumi.Input[str]:
return pulumi.get(self, "task_definition_arn")
@task_definition_arn.setter
def task_definition_arn(self, value: pulumi.Input[str]):
pulumi.set(self, "task_definition_arn", value)
@property
@pulumi.getter(name="capacityProviderStrategy")
def capacity_provider_strategy(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RuleCapacityProviderStrategyItemArgs']]]]:
return pulumi.get(self, "capacity_provider_strategy")
@capacity_provider_strategy.setter
def capacity_provider_strategy(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['RuleCapacityProviderStrategyItemArgs']]]]):
pulumi.set(self, "capacity_provider_strategy", value)
@property
@pulumi.getter(name="enableECSManagedTags")
def enable_ecs_managed_tags(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "enable_ecs_managed_tags")
@enable_ecs_managed_tags.setter
def enable_ecs_managed_tags(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_ecs_managed_tags", value)
@property
@pulumi.getter(name="enableExecuteCommand")
def enable_execute_command(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "enable_execute_command")
@enable_execute_command.setter
def enable_execute_command(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_execute_command", value)
@property
@pulumi.getter
def group(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "group")
@group.setter
def group(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "group", value)
@property
@pulumi.getter(name="launchType")
def launch_type(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "launch_type")
@launch_type.setter
def launch_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "launch_type", value)
@property
@pulumi.getter(name="networkConfiguration")
def network_configuration(self) -> Optional[pulumi.Input['RuleNetworkConfigurationArgs']]:
return pulumi.get(self, "network_configuration")
@network_configuration.setter
def network_configuration(self, value: Optional[pulumi.Input['RuleNetworkConfigurationArgs']]):
pulumi.set(self, "network_configuration", value)
@property
@pulumi.getter(name="placementConstraints")
def placement_constraints(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RulePlacementConstraintArgs']]]]:
return pulumi.get(self, "placement_constraints")
@placement_constraints.setter
def placement_constraints(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['RulePlacementConstraintArgs']]]]):
pulumi.set(self, "placement_constraints", value)
@property
@pulumi.getter(name="placementStrategies")
def placement_strategies(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RulePlacementStrategyArgs']]]]:
return pulumi.get(self, "placement_strategies")
@placement_strategies.setter
def placement_strategies(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['RulePlacementStrategyArgs']]]]):
pulumi.set(self, "placement_strategies", value)
@property
@pulumi.getter(name="platformVersion")
def platform_version(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "platform_version")
@platform_version.setter
def platform_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "platform_version", value)
@property
@pulumi.getter(name="propagateTags")
def propagate_tags(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "propagate_tags")
@propagate_tags.setter
def propagate_tags(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "propagate_tags", value)
@property
@pulumi.getter(name="referenceId")
def reference_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "reference_id")
@reference_id.setter
def reference_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "reference_id", value)
@property
@pulumi.getter(name="tagList")
def tag_list(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RuleTagArgs']]]]:
return pulumi.get(self, "tag_list")
@tag_list.setter
def tag_list(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['RuleTagArgs']]]]):
pulumi.set(self, "tag_list", value)
@property
@pulumi.getter(name="taskCount")
def task_count(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "task_count")
@task_count.setter
def task_count(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "task_count", value)
@pulumi.input_type
class RuleHttpParametersArgs:
def __init__(__self__, *,
header_parameters: Optional[Any] = None,
path_parameter_values: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
query_string_parameters: Optional[Any] = None):
if header_parameters is not None:
pulumi.set(__self__, "header_parameters", header_parameters)
if path_parameter_values is not None:
pulumi.set(__self__, "path_parameter_values", path_parameter_values)
if query_string_parameters is not None:
pulumi.set(__self__, "query_string_parameters", query_string_parameters)
@property
@pulumi.getter(name="headerParameters")
def header_parameters(self) -> Optional[Any]:
return pulumi.get(self, "header_parameters")
@header_parameters.setter
def header_parameters(self, value: Optional[Any]):
pulumi.set(self, "header_parameters", value)
@property
@pulumi.getter(name="pathParameterValues")
def path_parameter_values(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "path_parameter_values")
@path_parameter_values.setter
def path_parameter_values(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "path_parameter_values", value)
@property
@pulumi.getter(name="queryStringParameters")
def query_string_parameters(self) -> Optional[Any]:
return pulumi.get(self, "query_string_parameters")
@query_string_parameters.setter
def query_string_parameters(self, value: Optional[Any]):
pulumi.set(self, "query_string_parameters", value)
@pulumi.input_type
class RuleInputTransformerArgs:
def __init__(__self__, *,
input_template: pulumi.Input[str],
input_paths_map: Optional[Any] = None):
pulumi.set(__self__, "input_template", input_template)
if input_paths_map is not None:
pulumi.set(__self__, "input_paths_map", input_paths_map)
@property
@pulumi.getter(name="inputTemplate")
def input_template(self) -> pulumi.Input[str]:
return pulumi.get(self, "input_template")
@input_template.setter
def input_template(self, value: pulumi.Input[str]):
pulumi.set(self, "input_template", value)
@property
@pulumi.getter(name="inputPathsMap")
def input_paths_map(self) -> Optional[Any]:
return pulumi.get(self, "input_paths_map")
@input_paths_map.setter
def input_paths_map(self, value: Optional[Any]):
pulumi.set(self, "input_paths_map", value)
@pulumi.input_type
class RuleKinesisParametersArgs:
def __init__(__self__, *,
partition_key_path: pulumi.Input[str]):
pulumi.set(__self__, "partition_key_path", partition_key_path)
@property
@pulumi.getter(name="partitionKeyPath")
def partition_key_path(self) -> pulumi.Input[str]:
return pulumi.get(self, "partition_key_path")
@partition_key_path.setter
def partition_key_path(self, value: pulumi.Input[str]):
pulumi.set(self, "partition_key_path", value)
@pulumi.input_type
class RuleNetworkConfigurationArgs:
def __init__(__self__, *,
aws_vpc_configuration: Optional[pulumi.Input['RuleAwsVpcConfigurationArgs']] = None):
if aws_vpc_configuration is not None:
pulumi.set(__self__, "aws_vpc_configuration", aws_vpc_configuration)
@property
@pulumi.getter(name="awsVpcConfiguration")
def aws_vpc_configuration(self) -> Optional[pulumi.Input['RuleAwsVpcConfigurationArgs']]:
return pulumi.get(self, "aws_vpc_configuration")
@aws_vpc_configuration.setter
def aws_vpc_configuration(self, value: Optional[pulumi.Input['RuleAwsVpcConfigurationArgs']]):
pulumi.set(self, "aws_vpc_configuration", value)
@pulumi.input_type
class RulePlacementConstraintArgs:
def __init__(__self__, *,
expression: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None):
if expression is not None:
pulumi.set(__self__, "expression", expression)
if type is not None:
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def expression(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "expression")
@expression.setter
def expression(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "expression", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "type", value)
@pulumi.input_type
class RulePlacementStrategyArgs:
def __init__(__self__, *,
field: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None):
if field is not None:
pulumi.set(__self__, "field", field)
if type is not None:
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def field(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "field")
@field.setter
def field(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "field", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "type", value)
@pulumi.input_type
class RuleRedshiftDataParametersArgs:
def __init__(__self__, *,
database: pulumi.Input[str],
sql: pulumi.Input[str],
db_user: Optional[pulumi.Input[str]] = None,
secret_manager_arn: Optional[pulumi.Input[str]] = None,
statement_name: Optional[pulumi.Input[str]] = None,
with_event: Optional[pulumi.Input[bool]] = None):
pulumi.set(__self__, "database", database)
pulumi.set(__self__, "sql", sql)
if db_user is not None:
pulumi.set(__self__, "db_user", db_user)
if secret_manager_arn is not None:
pulumi.set(__self__, "secret_manager_arn", secret_manager_arn)
if statement_name is not None:
pulumi.set(__self__, "statement_name", statement_name)
if with_event is not None:
pulumi.set(__self__, "with_event", with_event)
@property
@pulumi.getter
def database(self) -> pulumi.Input[str]:
return pulumi.get(self, "database")
@database.setter
def database(self, value: pulumi.Input[str]):
pulumi.set(self, "database", value)
@property
@pulumi.getter
def sql(self) -> pulumi.Input[str]:
return pulumi.get(self, "sql")
@sql.setter
def sql(self, value: pulumi.Input[str]):
pulumi.set(self, "sql", value)
@property
@pulumi.getter(name="dbUser")
def db_user(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "db_user")
@db_user.setter
def db_user(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "db_user", value)
@property
@pulumi.getter(name="secretManagerArn")
def secret_manager_arn(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "secret_manager_arn")
@secret_manager_arn.setter
def secret_manager_arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "secret_manager_arn", value)
@property
@pulumi.getter(name="statementName")
def statement_name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "statement_name")
@statement_name.setter
def statement_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "statement_name", value)
@property
@pulumi.getter(name="withEvent")
def with_event(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "with_event")
@with_event.setter
def with_event(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "with_event", value)
@pulumi.input_type
class RuleRetryPolicyArgs:
def __init__(__self__, *,
maximum_event_age_in_seconds: Optional[pulumi.Input[int]] = None,
maximum_retry_attempts: Optional[pulumi.Input[int]] = None):
if maximum_event_age_in_seconds is not None:
pulumi.set(__self__, "maximum_event_age_in_seconds", maximum_event_age_in_seconds)
if maximum_retry_attempts is not None:
pulumi.set(__self__, "maximum_retry_attempts", maximum_retry_attempts)
@property
@pulumi.getter(name="maximumEventAgeInSeconds")
def maximum_event_age_in_seconds(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "maximum_event_age_in_seconds")
@maximum_event_age_in_seconds.setter
def maximum_event_age_in_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "maximum_event_age_in_seconds", value)
@property
@pulumi.getter(name="maximumRetryAttempts")
def maximum_retry_attempts(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "maximum_retry_attempts")
@maximum_retry_attempts.setter
def maximum_retry_attempts(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "maximum_retry_attempts", value)
@pulumi.input_type
class RuleRunCommandParametersArgs:
def __init__(__self__, *,
run_command_targets: pulumi.Input[Sequence[pulumi.Input['RuleRunCommandTargetArgs']]]):
pulumi.set(__self__, "run_command_targets", run_command_targets)
@property
@pulumi.getter(name="runCommandTargets")
def run_command_targets(self) -> pulumi.Input[Sequence[pulumi.Input['RuleRunCommandTargetArgs']]]:
return pulumi.get(self, "run_command_targets")
@run_command_targets.setter
def run_command_targets(self, value: pulumi.Input[Sequence[pulumi.Input['RuleRunCommandTargetArgs']]]):
pulumi.set(self, "run_command_targets", value)
@pulumi.input_type
class RuleRunCommandTargetArgs:
def __init__(__self__, *,
key: pulumi.Input[str],
values: pulumi.Input[Sequence[pulumi.Input[str]]]):
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "values", values)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def values(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
return pulumi.get(self, "values")
@values.setter
def values(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
pulumi.set(self, "values", value)
@pulumi.input_type
class RuleSageMakerPipelineParametersArgs:
def __init__(__self__, *,
pipeline_parameter_list: Optional[pulumi.Input[Sequence[pulumi.Input['RuleSageMakerPipelineParameterArgs']]]] = None):
if pipeline_parameter_list is not None:
pulumi.set(__self__, "pipeline_parameter_list", pipeline_parameter_list)
@property
@pulumi.getter(name="pipelineParameterList")
def pipeline_parameter_list(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RuleSageMakerPipelineParameterArgs']]]]:
return pulumi.get(self, "pipeline_parameter_list")
@pipeline_parameter_list.setter
def pipeline_parameter_list(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['RuleSageMakerPipelineParameterArgs']]]]):
pulumi.set(self, "pipeline_parameter_list", value)
@pulumi.input_type
class RuleSageMakerPipelineParameterArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
value: pulumi.Input[str]):
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[str]:
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[str]):
pulumi.set(self, "value", value)
@pulumi.input_type
class RuleSqsParametersArgs:
def __init__(__self__, *,
message_group_id: pulumi.Input[str]):
pulumi.set(__self__, "message_group_id", message_group_id)
@property
@pulumi.getter(name="messageGroupId")
def message_group_id(self) -> pulumi.Input[str]:
return pulumi.get(self, "message_group_id")
@message_group_id.setter
def message_group_id(self, value: pulumi.Input[str]):
pulumi.set(self, "message_group_id", value)
@pulumi.input_type
class RuleTagArgs:
def __init__(__self__, *,
key: Optional[pulumi.Input[str]] = None,
value: Optional[pulumi.Input[str]] = None):
if key is not None:
pulumi.set(__self__, "key", key)
if value is not None:
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def key(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "key")
@key.setter
def key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def value(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "value")
@value.setter
def value(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "value", value)
@pulumi.input_type
class RuleTargetArgs:
def __init__(__self__, *,
arn: pulumi.Input[str],
id: pulumi.Input[str],
batch_parameters: Optional[pulumi.Input['RuleBatchParametersArgs']] = None,
dead_letter_config: Optional[pulumi.Input['RuleDeadLetterConfigArgs']] = None,
ecs_parameters: Optional[pulumi.Input['RuleEcsParametersArgs']] = None,
http_parameters: Optional[pulumi.Input['RuleHttpParametersArgs']] = None,
input: Optional[pulumi.Input[str]] = None,
input_path: Optional[pulumi.Input[str]] = None,
input_transformer: Optional[pulumi.Input['RuleInputTransformerArgs']] = None,
kinesis_parameters: Optional[pulumi.Input['RuleKinesisParametersArgs']] = None,
redshift_data_parameters: Optional[pulumi.Input['RuleRedshiftDataParametersArgs']] = None,
retry_policy: Optional[pulumi.Input['RuleRetryPolicyArgs']] = None,
role_arn: Optional[pulumi.Input[str]] = None,
run_command_parameters: Optional[pulumi.Input['RuleRunCommandParametersArgs']] = None,
sage_maker_pipeline_parameters: Optional[pulumi.Input['RuleSageMakerPipelineParametersArgs']] = None,
sqs_parameters: Optional[pulumi.Input['RuleSqsParametersArgs']] = None):
pulumi.set(__self__, "arn", arn)
pulumi.set(__self__, "id", id)
if batch_parameters is not None:
pulumi.set(__self__, "batch_parameters", batch_parameters)
if dead_letter_config is not None:
pulumi.set(__self__, "dead_letter_config", dead_letter_config)
if ecs_parameters is not None:
pulumi.set(__self__, "ecs_parameters", ecs_parameters)
if http_parameters is not None:
pulumi.set(__self__, "http_parameters", http_parameters)
if input is not None:
pulumi.set(__self__, "input", input)
if input_path is not None:
pulumi.set(__self__, "input_path", input_path)
if input_transformer is not None:
pulumi.set(__self__, "input_transformer", input_transformer)
if kinesis_parameters is not None:
pulumi.set(__self__, "kinesis_parameters", kinesis_parameters)
if redshift_data_parameters is not None:
pulumi.set(__self__, "redshift_data_parameters", redshift_data_parameters)
if retry_policy is not None:
pulumi.set(__self__, "retry_policy", retry_policy)
if role_arn is not None:
pulumi.set(__self__, "role_arn", role_arn)
if run_command_parameters is not None:
pulumi.set(__self__, "run_command_parameters", run_command_parameters)
if sage_maker_pipeline_parameters is not None:
pulumi.set(__self__, "sage_maker_pipeline_parameters", sage_maker_pipeline_parameters)
if sqs_parameters is not None:
pulumi.set(__self__, "sqs_parameters", sqs_parameters)
@property
@pulumi.getter
def arn(self) -> pulumi.Input[str]:
return pulumi.get(self, "arn")
@arn.setter
def arn(self, value: pulumi.Input[str]):
pulumi.set(self, "arn", value)
@property
@pulumi.getter
def id(self) -> pulumi.Input[str]:
return pulumi.get(self, "id")
@id.setter
def id(self, value: pulumi.Input[str]):
pulumi.set(self, "id", value)
@property
@pulumi.getter(name="batchParameters")
def batch_parameters(self) -> Optional[pulumi.Input['RuleBatchParametersArgs']]:
return pulumi.get(self, "batch_parameters")
@batch_parameters.setter
def batch_parameters(self, value: Optional[pulumi.Input['RuleBatchParametersArgs']]):
pulumi.set(self, "batch_parameters", value)
@property
@pulumi.getter(name="deadLetterConfig")
def dead_letter_config(self) -> Optional[pulumi.Input['RuleDeadLetterConfigArgs']]:
return pulumi.get(self, "dead_letter_config")
@dead_letter_config.setter
def dead_letter_config(self, value: Optional[pulumi.Input['RuleDeadLetterConfigArgs']]):
pulumi.set(self, "dead_letter_config", value)
@property
@pulumi.getter(name="ecsParameters")
def ecs_parameters(self) -> Optional[pulumi.Input['RuleEcsParametersArgs']]:
return pulumi.get(self, "ecs_parameters")
@ecs_parameters.setter
def ecs_parameters(self, value: Optional[pulumi.Input['RuleEcsParametersArgs']]):
pulumi.set(self, "ecs_parameters", value)
@property
@pulumi.getter(name="httpParameters")
def http_parameters(self) -> Optional[pulumi.Input['RuleHttpParametersArgs']]:
return pulumi.get(self, "http_parameters")
@http_parameters.setter
def http_parameters(self, value: Optional[pulumi.Input['RuleHttpParametersArgs']]):
pulumi.set(self, "http_parameters", value)
@property
@pulumi.getter
def input(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "input")
@input.setter
def input(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "input", value)
@property
@pulumi.getter(name="inputPath")
def input_path(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "input_path")
@input_path.setter
def input_path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "input_path", value)
@property
@pulumi.getter(name="inputTransformer")
def input_transformer(self) -> Optional[pulumi.Input['RuleInputTransformerArgs']]:
return pulumi.get(self, "input_transformer")
@input_transformer.setter
def input_transformer(self, value: Optional[pulumi.Input['RuleInputTransformerArgs']]):
pulumi.set(self, "input_transformer", value)
@property
@pulumi.getter(name="kinesisParameters")
def kinesis_parameters(self) -> Optional[pulumi.Input['RuleKinesisParametersArgs']]:
return pulumi.get(self, "kinesis_parameters")
@kinesis_parameters.setter
def kinesis_parameters(self, value: Optional[pulumi.Input['RuleKinesisParametersArgs']]):
pulumi.set(self, "kinesis_parameters", value)
@property
@pulumi.getter(name="redshiftDataParameters")
def redshift_data_parameters(self) -> Optional[pulumi.Input['RuleRedshiftDataParametersArgs']]:
return pulumi.get(self, "redshift_data_parameters")
@redshift_data_parameters.setter
def redshift_data_parameters(self, value: Optional[pulumi.Input['RuleRedshiftDataParametersArgs']]):
pulumi.set(self, "redshift_data_parameters", value)
@property
@pulumi.getter(name="retryPolicy")
def retry_policy(self) -> Optional[pulumi.Input['RuleRetryPolicyArgs']]:
return pulumi.get(self, "retry_policy")
@retry_policy.setter
def retry_policy(self, value: Optional[pulumi.Input['RuleRetryPolicyArgs']]):
pulumi.set(self, "retry_policy", value)
@property
@pulumi.getter(name="roleArn")
def role_arn(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "role_arn")
@role_arn.setter
def role_arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "role_arn", value)
@property
@pulumi.getter(name="runCommandParameters")
def run_command_parameters(self) -> Optional[pulumi.Input['RuleRunCommandParametersArgs']]:
return pulumi.get(self, "run_command_parameters")
@run_command_parameters.setter
def run_command_parameters(self, value: Optional[pulumi.Input['RuleRunCommandParametersArgs']]):
pulumi.set(self, "run_command_parameters", value)
@property
@pulumi.getter(name="sageMakerPipelineParameters")
def sage_maker_pipeline_parameters(self) -> Optional[pulumi.Input['RuleSageMakerPipelineParametersArgs']]:
return pulumi.get(self, "sage_maker_pipeline_parameters")
@sage_maker_pipeline_parameters.setter
def sage_maker_pipeline_parameters(self, value: Optional[pulumi.Input['RuleSageMakerPipelineParametersArgs']]):
pulumi.set(self, "sage_maker_pipeline_parameters", value)
@property
@pulumi.getter(name="sqsParameters")
def sqs_parameters(self) -> Optional[pulumi.Input['RuleSqsParametersArgs']]:
return pulumi.get(self, "sqs_parameters")
@sqs_parameters.setter
def sqs_parameters(self, value: Optional[pulumi.Input['RuleSqsParametersArgs']]):
pulumi.set(self, "sqs_parameters", value)
| true | true |
f73a702be5b625fdf39c3c96f7eaf1abd7603d8f | 20,723 | py | Python | rhapsody/lib/RhapsodyPixels.py | anarchiae/rhapsody5 | bd4c4f008d9d3df17ef400ce8895d3da913f6b39 | [
"MIT"
] | null | null | null | rhapsody/lib/RhapsodyPixels.py | anarchiae/rhapsody5 | bd4c4f008d9d3df17ef400ce8895d3da913f6b39 | [
"MIT"
] | null | null | null | rhapsody/lib/RhapsodyPixels.py | anarchiae/rhapsody5 | bd4c4f008d9d3df17ef400ce8895d3da913f6b39 | [
"MIT"
] | null | null | null | import time
import random
import RPi.GPIO as GPIO
import Adafruit_WS2801
import Adafruit_GPIO.SPI as SPI
class RhapsodyPixels:
numpixels = 0
myPixels = None
pixel_groups = None
def __init__(self, numpixels):
self.numpixels = numpixels
self.pixel_groups = dict()
self.myPixels = Adafruit_WS2801.WS2801Pixels(numpixels, spi=SPI.SpiDev(0, 0), gpio=GPIO)
# TOOLS METHODS
# These methods are used by other functions of the class and cannot be used outside
@staticmethod
def __rgb_to_color(r, g, b):
"""Convert three 8-bit red, green, blue component values to a single 24-bit
color value.
"""
return ((r & 0xFF) << 16) | ((g & 0xFF) << 8) | (b & 0xFF)
def __set_pixel(self, pixel, color):
"""Assign a color to the pixel given in params"""
self.myPixels.set_pixel(pixel, self.__rgb_to_color(color[0], color[1], color[2]))
def __set_all_pixels(self, color):
"""Assign one color to all the pixels"""
for p in range(0, self.numpixels):
self.myPixels.set_pixel(p, self.__rgb_to_color(color[0], color[1], color[2]))
self.__show()
def __show(self):
"""Apply the modifications made on the pixels"""
self.myPixels.show()
def __wheel(self, pos):
if pos < 85:
return_value = self.__rgb_to_color(pos * 3, 255 - pos * 3, 0)
elif pos < 170:
pos -= 85
return_value = self.__rgb_to_color(255 - pos * 3, 0, pos * 3)
else:
pos -= 170
return_value = self.__rgb_to_color(0, pos * 3, 255 - pos * 3)
return return_value
@staticmethod
def __get_current_time_millis():
return time.time() * 1000
# DATA ORGANISATION METHODS
def create_group(self, group_name):
"""Add a new group to the list of pixels"""
self.pixel_groups[group_name] = None
def add_pixels_to_group(self, pixels, group_name):
"""Add a pixel to a designated group"""
try:
if type(pixels) is list:
self.pixel_groups[group_name] = pixels
elif type(pixels) is int:
self.pixel_groups[group_name] = [pixels]
except KeyError:
print("The specified group does not exist")
@staticmethod
def __create_disposable_group(start, end):
"""Create a disposable group that will be destroyed once the
calling method as been executed. This method is private and
should be used only if there is no alternative"""
group = []
for i in range(start, end):
group.append(i)
return group
def print_groups(self):
"""Print a visual representation of the different
groups created"""
print(self.pixel_groups)
# ANIMATIONS
def brightness_decrease(self, pixels=None, interval=0.01, step=1):
"""Turn off all the selected pixels with a fade effect"""
# Check the type of the pixels argument and works
# to make it a list if it's not
if pixels is not None:
if type(pixels) is int:
pixels = [pixels]
elif type(pixels) is str:
pixels = self.pixel_groups[pixels]
else:
pixels = self.__create_disposable_group(0, self.numpixels)
for j in range(int(256 // step)):
for i in range(len(pixels)):
r, g, b = self.myPixels.get_pixel_rgb(pixels[i])
r = int(max(0, r - step))
g = int(max(0, g - step))
b = int(max(0, b - step))
self.__set_pixel(pixels[i], [r, g, b])
self.__show()
if interval > 0:
time.sleep(interval)
def off(self, pixels=None):
"""Turn off the pixels given in args"""
# Check the type of the pixels argument and works
# to make it a list if it's not
if pixels is not None:
if type(pixels) is int:
pixels = [pixels]
elif type(pixels) is str:
pixels = self.pixel_groups[pixels]
else:
pixels = self.__create_disposable_group(0, self.numpixels)
# Turning off the pixels
for p in range(len(pixels)):
color = [0, 0, 0]
self.__set_pixel(p, color)
self.__show()
def static(self, colors, pixels=None, random_assign=False):
"""Apply a color to selected pixels"""
# Check if there is one or multiple colors
# If there is only one color, then the first item in
# the list, must be an int and not a list
if type(colors[0]) is int:
colors = [colors]
# Check the type of the pixels argument and works
# to make it a list if it's not
if pixels is not None:
if type(pixels) is int:
pixels = [pixels]
elif type(pixels) is str:
pixels = self.pixel_groups[pixels]
else:
pixels = self.__create_disposable_group(0, self.numpixels)
# Apply the effect
if random_assign:
for p in range(len(pixels)):
color = colors[random.randint(0, len(colors)-1)]
self.__set_pixel(pixels[p], color)
self.__show()
else:
color_index = 0
for p in range(len(pixels)):
color = colors[color_index]
self.__set_pixel(pixels[p], color)
color_index = color_index + 1
if color_index > len(colors)-1:
color_index = 0
self.__show()
def sparkle(self, colors, interval, duration, pixels=None, number_of_flashes=3,
random_assign=False, keep_old=False):
"""Sparkle effect on selected pixels"""
# Time
animation_end_time = self.__get_current_time_millis() + (duration * 1000)
# Selected pixels. They will be set as flashes
selected_pixels = []
return_to_normal_colors = []
# Check if there is one or multiple colors
# If there is only one color, then the first item in
# the list, must be an int and not a list
if type(colors[0]) is int:
colors = [colors]
# Check the type of the pixels argument and works
# to make it a list if it's not
if pixels is not None:
if type(pixels) is int:
pixels = [pixels]
elif type(pixels) is str:
pixels = self.pixel_groups[pixels]
else:
pixels = self.__create_disposable_group(0, self.numpixels)
# Animation start
while self.__get_current_time_millis() < animation_end_time:
# Select the pixels that will be used as flashes
for i in range(number_of_flashes):
selected_pixels.append(pixels[random.randint(0, len(pixels) - 1)])
# Retrieves the color that will be applied once the selected pixels
# will return to there "non-flash" status. If the argument keep_old is
# False, then the pixel will be turned off (color values red : 0,
# green : 0, blue : 0
if keep_old:
for i in range(number_of_flashes):
return_to_normal_colors.append(self.myPixels.get_pixel_rgb(selected_pixels[i]))
else:
for i in range(number_of_flashes):
return_to_normal_colors.append([0, 0, 0])
# Apply the colors to the pixels
if random_assign:
for p in range(number_of_flashes):
color = colors[random.randint(0, len(colors) - 1)]
self.__set_pixel(selected_pixels[p], color)
self.__show()
else:
color_index = 0
for p in range(number_of_flashes):
color = colors[color_index]
self.__set_pixel(selected_pixels[p], color)
color_index = color_index + 1
if color_index > len(colors) - 1:
color_index = 0
self.__show()
# Waits for the time of the interval
time.sleep(interval)
# Reset all pixels to normal status
for p in range(number_of_flashes):
self.__set_pixel(selected_pixels[p], return_to_normal_colors[p])
self.__show()
# Empty lists
selected_pixels = []
return_to_normal_colors = []
def altern(self, colors, interval, duration, pixels=None):
# Time
animation_end_time = self.__get_current_time_millis() + (duration * 1000)
# Check if there is more than one color in the colors list
# given in args. If there is only one color, then the other
# color will be black [0, 0, 0] .If there is more than two
# colors, then, only the first two colors will be used.
if len(colors) < 1:
colors = [colors, [0, 0, 0]]
# Check the type of the pixels argument and works
# to make it a list if it's not
if pixels is not None:
if type(pixels) is int:
pixels = [pixels]
elif type(pixels) is str:
pixels = self.pixel_groups[pixels]
else:
pixels = self.__create_disposable_group(0, self.numpixels)
while self.__get_current_time_millis() < animation_end_time:
for p in range(len(pixels)):
if p % 2 == 0:
self.__set_pixel(pixels[p], colors[0])
else:
self.__set_pixel(pixels[p], colors[1])
self.__show()
time.sleep(interval)
for p in range(len(pixels)):
if p % 2 == 0:
self.__set_pixel(pixels[p], colors[1])
else:
self.__set_pixel(pixels[p], colors[0])
self.__show()
time.sleep(interval)
def wipe(self, colors, interval, pixels=None, direction="forward", random_assign=False):
"""Apply a color to LEDS one after another"""
# Check if there is one or multiple colors
# If there is only one color, then the first item in
# the list, must be an int and not a list
if type(colors[0]) is int:
colors = [colors]
# Check the type of the pixels argument and works
# to make it a list if it's not
if pixels is not None:
if type(pixels) is int:
pixels = [pixels]
elif type(pixels) is str:
pixels = self.pixel_groups[pixels]
else:
pixels = self.__create_disposable_group(0, self.numpixels)
# Apply the effect
if random_assign:
if direction == "forward":
for p in range(len(pixels)):
color = colors[random.randint(0, len(colors) - 1)]
self.__set_pixel(pixels[p], color)
self.__show()
time.sleep(interval)
else:
for p in range((len(pixels)-1), 0, -1):
color = colors[random.randint(0, len(colors) - 1)]
self.__set_pixel(pixels[p], color)
self.__show()
time.sleep(interval)
else:
if direction == "forward":
color_index = 0
for p in range(len(pixels)):
color = colors[color_index]
self.__set_pixel(pixels[p], color)
color_index = color_index + 1
if color_index > len(colors) - 1:
color_index = 0
self.__show()
else:
color_index = 0
for p in range((len(pixels)-1), 0, -1):
color = colors[color_index]
self.__set_pixel(pixels[p], color)
color_index = color_index + 1
if color_index > len(colors) - 1:
color_index = 0
self.__show()
def group_by_group(self, colors, interval, direction="up", random_assign=False, assign_type="group",
keep_old=False, remain=False, groups=None):
back_to_normal_values = []
# Checks if group names are specified in
# str
if groups is not None:
for group in groups:
if type(group) is not str:
raise TypeError("Groups names must be of type 'str'")
# Check if there is one or multiple colors
# If there is only one color, then the first item in
# the list, must be an int and not a list
if type(colors[0]) is int:
colors = [colors]
if random_assign:
if direction == "up":
for group in groups:
color = colors[random.randint(0, len(colors) - 1)]
for p in range(0, len(self.pixel_groups[group])):
# Retrieve the current values of the pixel
back_to_normal_values.append(self.myPixels.get_pixel_rgb(self.pixel_groups[group][p]))
# If the assign_type is set to "pixel" in the parameters
# then, each pixel will have a different color. Else, the
# color applied will be the one selected at the beginning
# of the groups loop.
if assign_type == "pixel":
color = colors[random.randint(0, len(colors) - 1)]
# Apply the new color
self.__set_pixel(self.pixel_groups[group][p], color)
self.__show()
time.sleep(interval)
# Return to normal
if keep_old:
for p in range(0, len(self.pixel_groups[group])):
color = back_to_normal_values[p]
self.__set_pixel(self.pixel_groups[group][p], color)
elif not remain:
for p in range(0, len(self.pixel_groups[group])):
self.__set_pixel(self.pixel_groups[group][p], [0, 0, 0])
self.__show()
back_to_normal_values = list()
else: # Direction = down
for g in range(len(self.pixel_groups) - 1, -1, -1):
color = colors[random.randint(0, len(colors) - 1)]
for p in range(0, len(self.pixel_groups[groups[g]])):
# Retrieve the current values of the pixel
back_to_normal_values.append(self.myPixels.get_pixel_rgb(self.pixel_groups[groups[g]][p]))
# If the assign_type is set to "pixel" in the parameters
# then, each pixel will have a different color. Else, the
# color applied will be the one selected at the beginning
# of the groups loop.
if assign_type == "pixel":
color = colors[random.randint(0, len(colors) - 1)]
# Apply the new color
self.__set_pixel(self.pixel_groups[groups[g]][p], color)
self.__show()
time.sleep(interval)
# Return to normal
if keep_old:
for p in range(0, len(self.pixel_groups[groups[g]])):
color = back_to_normal_values[p]
self.__set_pixel(self.pixel_groups[groups[g]][p], color)
self.__show()
elif not remain:
for p in range(0, len(self.pixel_groups[groups[g]])):
self.__set_pixel(self.pixel_groups[groups[g]][p], [0, 0, 0])
self.__show()
else: # Not random_assign
if direction == "up":
color_index = 0
for group in groups:
for p in range(len(self.pixel_groups[group])):
back_to_normal_values.append(self.myPixels.get_pixel_rgb(self.pixel_groups[group][p]))
# If the assign_type is set to "pixel" in the parameters
# then, each pixel will have a different color. Else, the
# color applied will be the one selected at the beginning
# of the groups loop.
if assign_type == "pixel":
color_index = color_index + 1
if color_index > len(colors) - 1:
color_index = 0
self.__set_pixel(self.pixel_groups[group][p], colors[color_index])
self.__show()
time.sleep(interval)
# Return to normal
if keep_old:
for p in range(0, len(self.pixel_groups[group])):
color = back_to_normal_values[p]
self.__set_pixel(self.pixel_groups[group][p], color)
elif not remain:
for p in range(0, len(self.pixel_groups[group])):
self.__set_pixel(self.pixel_groups[group][p], [0, 0, 0])
self.__show()
# Increment color_index
color_index = color_index + 1
if color_index > len(colors) - 1:
color_index = 0
back_to_normal_values = list()
else: # direction = down
color_index = 0
for g in range(len(self.pixel_groups)-1, -1, -1):
for p in range(len(self.pixel_groups[groups[g]])):
back_to_normal_values.append(self.myPixels.get_pixel_rgb(self.pixel_groups[groups[g]][p]))
# If the assign_type is set to "pixel" in the parameters
# then, each pixel will have a different color. Else, the
# color applied will be the one selected at the beginning
# of the groups loop.
if assign_type == "pixel":
color_index = color_index + 1
if color_index > len(colors) - 1:
color_index = 0
color = colors[color_index]
self.__set_pixel(self.pixel_groups[groups[g]][p], color)
self.__show()
time.sleep(interval)
# Return to normal
if keep_old:
for p in range(0, len(self.pixel_groups[groups[g]])):
color = back_to_normal_values[p]
self.__set_pixel(self.pixel_groups[groups[g]][p], color)
self.__show()
elif not remain:
for p in range(0, len(self.pixel_groups[groups[g]])):
self.__set_pixel(self.pixel_groups[groups[g]][p], [0, 0, 0])
self.__show()
# Increment color_index
color_index = color_index + 1
if color_index > len(colors) - 1:
color_index = 0
back_to_normal_values = list()
def rainbow(self, interval, duration, pixels=None):
animation_end_time = self.__get_current_time_millis() + (duration * 1000)
# Check the type of the pixels argument and works
# to make it a list if it's not
if pixels is not None:
if type(pixels) is int:
pixels = [pixels]
elif type(pixels) is str:
pixels = self.pixel_groups[pixels]
else:
pixels = self.__create_disposable_group(0, self.numpixels)
while self.__get_current_time_millis() < animation_end_time:
for j in range(256): # one cycle of all 256 colors in the wheel
for p in range(len(pixels) - 1):
self.myPixels.set_pixel(pixels[p], self.__wheel(((p * 256 // self.myPixels.count()) + j) % 256))
self.__show()
time.sleep(interval)
| 39.775432 | 116 | 0.520822 | import time
import random
import RPi.GPIO as GPIO
import Adafruit_WS2801
import Adafruit_GPIO.SPI as SPI
class RhapsodyPixels:
numpixels = 0
myPixels = None
pixel_groups = None
def __init__(self, numpixels):
self.numpixels = numpixels
self.pixel_groups = dict()
self.myPixels = Adafruit_WS2801.WS2801Pixels(numpixels, spi=SPI.SpiDev(0, 0), gpio=GPIO)
@staticmethod
def __rgb_to_color(r, g, b):
return ((r & 0xFF) << 16) | ((g & 0xFF) << 8) | (b & 0xFF)
def __set_pixel(self, pixel, color):
self.myPixels.set_pixel(pixel, self.__rgb_to_color(color[0], color[1], color[2]))
def __set_all_pixels(self, color):
for p in range(0, self.numpixels):
self.myPixels.set_pixel(p, self.__rgb_to_color(color[0], color[1], color[2]))
self.__show()
def __show(self):
self.myPixels.show()
def __wheel(self, pos):
if pos < 85:
return_value = self.__rgb_to_color(pos * 3, 255 - pos * 3, 0)
elif pos < 170:
pos -= 85
return_value = self.__rgb_to_color(255 - pos * 3, 0, pos * 3)
else:
pos -= 170
return_value = self.__rgb_to_color(0, pos * 3, 255 - pos * 3)
return return_value
@staticmethod
def __get_current_time_millis():
return time.time() * 1000
def create_group(self, group_name):
self.pixel_groups[group_name] = None
def add_pixels_to_group(self, pixels, group_name):
try:
if type(pixels) is list:
self.pixel_groups[group_name] = pixels
elif type(pixels) is int:
self.pixel_groups[group_name] = [pixels]
except KeyError:
print("The specified group does not exist")
@staticmethod
def __create_disposable_group(start, end):
group = []
for i in range(start, end):
group.append(i)
return group
def print_groups(self):
print(self.pixel_groups)
def brightness_decrease(self, pixels=None, interval=0.01, step=1):
if pixels is not None:
if type(pixels) is int:
pixels = [pixels]
elif type(pixels) is str:
pixels = self.pixel_groups[pixels]
else:
pixels = self.__create_disposable_group(0, self.numpixels)
for j in range(int(256 // step)):
for i in range(len(pixels)):
r, g, b = self.myPixels.get_pixel_rgb(pixels[i])
r = int(max(0, r - step))
g = int(max(0, g - step))
b = int(max(0, b - step))
self.__set_pixel(pixels[i], [r, g, b])
self.__show()
if interval > 0:
time.sleep(interval)
def off(self, pixels=None):
# Check the type of the pixels argument and works
# to make it a list if it's not
if pixels is not None:
if type(pixels) is int:
pixels = [pixels]
elif type(pixels) is str:
pixels = self.pixel_groups[pixels]
else:
pixels = self.__create_disposable_group(0, self.numpixels)
for p in range(len(pixels)):
color = [0, 0, 0]
self.__set_pixel(p, color)
self.__show()
def static(self, colors, pixels=None, random_assign=False):
if type(colors[0]) is int:
colors = [colors]
if pixels is not None:
if type(pixels) is int:
pixels = [pixels]
elif type(pixels) is str:
pixels = self.pixel_groups[pixels]
else:
pixels = self.__create_disposable_group(0, self.numpixels)
# Apply the effect
if random_assign:
for p in range(len(pixels)):
color = colors[random.randint(0, len(colors)-1)]
self.__set_pixel(pixels[p], color)
self.__show()
else:
color_index = 0
for p in range(len(pixels)):
color = colors[color_index]
self.__set_pixel(pixels[p], color)
color_index = color_index + 1
if color_index > len(colors)-1:
color_index = 0
self.__show()
def sparkle(self, colors, interval, duration, pixels=None, number_of_flashes=3,
random_assign=False, keep_old=False):
# Time
animation_end_time = self.__get_current_time_millis() + (duration * 1000)
# Selected pixels. They will be set as flashes
selected_pixels = []
return_to_normal_colors = []
# Check if there is one or multiple colors
# If there is only one color, then the first item in
# the list, must be an int and not a list
if type(colors[0]) is int:
colors = [colors]
# Check the type of the pixels argument and works
# to make it a list if it's not
if pixels is not None:
if type(pixels) is int:
pixels = [pixels]
elif type(pixels) is str:
pixels = self.pixel_groups[pixels]
else:
pixels = self.__create_disposable_group(0, self.numpixels)
while self.__get_current_time_millis() < animation_end_time:
for i in range(number_of_flashes):
selected_pixels.append(pixels[random.randint(0, len(pixels) - 1)])
if keep_old:
for i in range(number_of_flashes):
return_to_normal_colors.append(self.myPixels.get_pixel_rgb(selected_pixels[i]))
else:
for i in range(number_of_flashes):
return_to_normal_colors.append([0, 0, 0])
if random_assign:
for p in range(number_of_flashes):
color = colors[random.randint(0, len(colors) - 1)]
self.__set_pixel(selected_pixels[p], color)
self.__show()
else:
color_index = 0
for p in range(number_of_flashes):
color = colors[color_index]
self.__set_pixel(selected_pixels[p], color)
color_index = color_index + 1
if color_index > len(colors) - 1:
color_index = 0
self.__show()
time.sleep(interval)
for p in range(number_of_flashes):
self.__set_pixel(selected_pixels[p], return_to_normal_colors[p])
self.__show()
selected_pixels = []
return_to_normal_colors = []
def altern(self, colors, interval, duration, pixels=None):
animation_end_time = self.__get_current_time_millis() + (duration * 1000)
if len(colors) < 1:
colors = [colors, [0, 0, 0]]
if pixels is not None:
if type(pixels) is int:
pixels = [pixels]
elif type(pixels) is str:
pixels = self.pixel_groups[pixels]
else:
pixels = self.__create_disposable_group(0, self.numpixels)
while self.__get_current_time_millis() < animation_end_time:
for p in range(len(pixels)):
if p % 2 == 0:
self.__set_pixel(pixels[p], colors[0])
else:
self.__set_pixel(pixels[p], colors[1])
self.__show()
time.sleep(interval)
for p in range(len(pixels)):
if p % 2 == 0:
self.__set_pixel(pixels[p], colors[1])
else:
self.__set_pixel(pixels[p], colors[0])
self.__show()
time.sleep(interval)
def wipe(self, colors, interval, pixels=None, direction="forward", random_assign=False):
# Check if there is one or multiple colors
# If there is only one color, then the first item in
# the list, must be an int and not a list
if type(colors[0]) is int:
colors = [colors]
# Check the type of the pixels argument and works
# to make it a list if it's not
if pixels is not None:
if type(pixels) is int:
pixels = [pixels]
elif type(pixels) is str:
pixels = self.pixel_groups[pixels]
else:
pixels = self.__create_disposable_group(0, self.numpixels)
if random_assign:
if direction == "forward":
for p in range(len(pixels)):
color = colors[random.randint(0, len(colors) - 1)]
self.__set_pixel(pixels[p], color)
self.__show()
time.sleep(interval)
else:
for p in range((len(pixels)-1), 0, -1):
color = colors[random.randint(0, len(colors) - 1)]
self.__set_pixel(pixels[p], color)
self.__show()
time.sleep(interval)
else:
if direction == "forward":
color_index = 0
for p in range(len(pixels)):
color = colors[color_index]
self.__set_pixel(pixels[p], color)
color_index = color_index + 1
if color_index > len(colors) - 1:
color_index = 0
self.__show()
else:
color_index = 0
for p in range((len(pixels)-1), 0, -1):
color = colors[color_index]
self.__set_pixel(pixels[p], color)
color_index = color_index + 1
if color_index > len(colors) - 1:
color_index = 0
self.__show()
def group_by_group(self, colors, interval, direction="up", random_assign=False, assign_type="group",
keep_old=False, remain=False, groups=None):
back_to_normal_values = []
if groups is not None:
for group in groups:
if type(group) is not str:
raise TypeError("Groups names must be of type 'str'")
if type(colors[0]) is int:
colors = [colors]
if random_assign:
if direction == "up":
for group in groups:
color = colors[random.randint(0, len(colors) - 1)]
for p in range(0, len(self.pixel_groups[group])):
back_to_normal_values.append(self.myPixels.get_pixel_rgb(self.pixel_groups[group][p]))
if assign_type == "pixel":
color = colors[random.randint(0, len(colors) - 1)]
self.__set_pixel(self.pixel_groups[group][p], color)
self.__show()
time.sleep(interval)
if keep_old:
for p in range(0, len(self.pixel_groups[group])):
color = back_to_normal_values[p]
self.__set_pixel(self.pixel_groups[group][p], color)
elif not remain:
for p in range(0, len(self.pixel_groups[group])):
self.__set_pixel(self.pixel_groups[group][p], [0, 0, 0])
self.__show()
back_to_normal_values = list()
else:
for g in range(len(self.pixel_groups) - 1, -1, -1):
color = colors[random.randint(0, len(colors) - 1)]
for p in range(0, len(self.pixel_groups[groups[g]])):
back_to_normal_values.append(self.myPixels.get_pixel_rgb(self.pixel_groups[groups[g]][p]))
if assign_type == "pixel":
color = colors[random.randint(0, len(colors) - 1)]
self.__set_pixel(self.pixel_groups[groups[g]][p], color)
self.__show()
time.sleep(interval)
if keep_old:
for p in range(0, len(self.pixel_groups[groups[g]])):
color = back_to_normal_values[p]
self.__set_pixel(self.pixel_groups[groups[g]][p], color)
self.__show()
elif not remain:
for p in range(0, len(self.pixel_groups[groups[g]])):
self.__set_pixel(self.pixel_groups[groups[g]][p], [0, 0, 0])
self.__show()
else:
if direction == "up":
color_index = 0
for group in groups:
for p in range(len(self.pixel_groups[group])):
back_to_normal_values.append(self.myPixels.get_pixel_rgb(self.pixel_groups[group][p]))
if assign_type == "pixel":
color_index = color_index + 1
if color_index > len(colors) - 1:
color_index = 0
self.__set_pixel(self.pixel_groups[group][p], colors[color_index])
self.__show()
time.sleep(interval)
if keep_old:
for p in range(0, len(self.pixel_groups[group])):
color = back_to_normal_values[p]
self.__set_pixel(self.pixel_groups[group][p], color)
elif not remain:
for p in range(0, len(self.pixel_groups[group])):
self.__set_pixel(self.pixel_groups[group][p], [0, 0, 0])
self.__show()
color_index = color_index + 1
if color_index > len(colors) - 1:
color_index = 0
back_to_normal_values = list()
else:
color_index = 0
for g in range(len(self.pixel_groups)-1, -1, -1):
for p in range(len(self.pixel_groups[groups[g]])):
back_to_normal_values.append(self.myPixels.get_pixel_rgb(self.pixel_groups[groups[g]][p]))
if assign_type == "pixel":
color_index = color_index + 1
if color_index > len(colors) - 1:
color_index = 0
color = colors[color_index]
self.__set_pixel(self.pixel_groups[groups[g]][p], color)
self.__show()
time.sleep(interval)
if keep_old:
for p in range(0, len(self.pixel_groups[groups[g]])):
color = back_to_normal_values[p]
self.__set_pixel(self.pixel_groups[groups[g]][p], color)
self.__show()
elif not remain:
for p in range(0, len(self.pixel_groups[groups[g]])):
self.__set_pixel(self.pixel_groups[groups[g]][p], [0, 0, 0])
self.__show()
color_index = color_index + 1
if color_index > len(colors) - 1:
color_index = 0
back_to_normal_values = list()
def rainbow(self, interval, duration, pixels=None):
animation_end_time = self.__get_current_time_millis() + (duration * 1000)
if pixels is not None:
if type(pixels) is int:
pixels = [pixels]
elif type(pixels) is str:
pixels = self.pixel_groups[pixels]
else:
pixels = self.__create_disposable_group(0, self.numpixels)
while self.__get_current_time_millis() < animation_end_time:
for j in range(256): # one cycle of all 256 colors in the wheel
for p in range(len(pixels) - 1):
self.myPixels.set_pixel(pixels[p], self.__wheel(((p * 256 // self.myPixels.count()) + j) % 256))
self.__show()
time.sleep(interval)
| true | true |
f73a7075c60d39285ebd033cd6dd058aa7b72cc3 | 434 | py | Python | 1-DiveIntoPython/week5/lecturesdemos/AsychnchronousProgramming/asyncio4.py | mamoudmatook/PythonSpecializaionInRussian | 3340780b2f8a876a0f59e22036147fd98909d545 | [
"MIT"
] | null | null | null | 1-DiveIntoPython/week5/lecturesdemos/AsychnchronousProgramming/asyncio4.py | mamoudmatook/PythonSpecializaionInRussian | 3340780b2f8a876a0f59e22036147fd98909d545 | [
"MIT"
] | null | null | null | 1-DiveIntoPython/week5/lecturesdemos/AsychnchronousProgramming/asyncio4.py | mamoudmatook/PythonSpecializaionInRussian | 3340780b2f8a876a0f59e22036147fd98909d545 | [
"MIT"
] | null | null | null | import asyncio
async def sleep_task(num):
for i in range(5):
print(f'process task: {num} iter: {i}')
await asyncio.sleep(1)
return num
loop = asyncio.get_event_loop()
task_list = [loop.create_task(sleep_task(i)) for i in range(2)]
loop.run_until_complete(asyncio.wait(task_list))
loop.run_until_complete(loop.create_task(sleep_task(3)))
loop.run_until_complete(asyncio.gather(sleep_task(10), sleep_task(20))) | 33.384615 | 71 | 0.735023 | import asyncio
async def sleep_task(num):
for i in range(5):
print(f'process task: {num} iter: {i}')
await asyncio.sleep(1)
return num
loop = asyncio.get_event_loop()
task_list = [loop.create_task(sleep_task(i)) for i in range(2)]
loop.run_until_complete(asyncio.wait(task_list))
loop.run_until_complete(loop.create_task(sleep_task(3)))
loop.run_until_complete(asyncio.gather(sleep_task(10), sleep_task(20))) | true | true |
f73a717b8ef83204b9688dc413b4540f99317b3e | 730 | py | Python | Mundo-3/desafio-091.py | LeonardoARGR/Desafios-Python-Curso-em-Video | 3fb1b0615fce88f968b5ba6e4bac43fcb0e72d98 | [
"MIT"
] | 2 | 2020-04-18T21:56:35.000Z | 2020-04-23T00:00:08.000Z | Mundo-3/desafio-091.py | LeonardoARGR/Desafios-Python-Curso-em-Video | 3fb1b0615fce88f968b5ba6e4bac43fcb0e72d98 | [
"MIT"
] | null | null | null | Mundo-3/desafio-091.py | LeonardoARGR/Desafios-Python-Curso-em-Video | 3fb1b0615fce88f968b5ba6e4bac43fcb0e72d98 | [
"MIT"
] | null | null | null | # Importando funções.
from random import randint
from time import sleep
from operator import itemgetter
# Declarando os dicionários.
jogadores = dict()
colocacao = list()
# Colocando os jogadores e seus valores no dicionário, e mostrando eles na tela.
for r in range(1, 5):
jogadores[f'jogador{r}'] = randint(1, 6)
print(f'O jogador{r} tirou {jogadores[f"jogador{r}"]} no dado.')
sleep(0.5)
# Criando uma linha para organizar o programa.
print('=' * 25)
print(f'{"Ranking dos Jogadores":^25}')
print('=' * 25)
# Criando e mostrando na tela a colocação em ordem.
colocacao = sorted(jogadores.items(), key=itemgetter(1), reverse=True)
for i, v in enumerate(colocacao):
print(f'{i+1}° lugar - {v[0]} com {v[1]}')
| 29.2 | 80 | 0.69589 |
from random import randint
from time import sleep
from operator import itemgetter
jogadores = dict()
colocacao = list()
for r in range(1, 5):
jogadores[f'jogador{r}'] = randint(1, 6)
print(f'O jogador{r} tirou {jogadores[f"jogador{r}"]} no dado.')
sleep(0.5)
print('=' * 25)
print(f'{"Ranking dos Jogadores":^25}')
print('=' * 25)
colocacao = sorted(jogadores.items(), key=itemgetter(1), reverse=True)
for i, v in enumerate(colocacao):
print(f'{i+1}° lugar - {v[0]} com {v[1]}')
| true | true |
f73a718b1681f71998772b1d8bd361ba44fd6a7d | 1,835 | py | Python | mtp_api/apps/prison/utils.py | ministryofjustice/mtp-api | b1c34c29e4aa9f48598cb060abe1368ae7686e0b | [
"MIT"
] | 5 | 2016-01-05T12:21:35.000Z | 2020-10-28T17:06:02.000Z | mtp_api/apps/prison/utils.py | ministryofjustice/mtp-api | b1c34c29e4aa9f48598cb060abe1368ae7686e0b | [
"MIT"
] | 209 | 2015-06-12T09:39:41.000Z | 2022-03-21T16:01:19.000Z | mtp_api/apps/prison/utils.py | ministryofjustice/mtp-api | b1c34c29e4aa9f48598cb060abe1368ae7686e0b | [
"MIT"
] | 1 | 2021-04-11T06:19:23.000Z | 2021-04-11T06:19:23.000Z | import logging
from typing import Optional
import requests
from mtp_common import nomis
from prison.models import Prison, PrisonerLocation
logger = logging.getLogger('mtp')
def fetch_prisoner_location_from_nomis(prisoner_location: PrisonerLocation) -> Optional[PrisonerLocation]:
new_location = None
try:
new_location = nomis.get_location(prisoner_location.prisoner_number)
if not new_location:
logger.error(
'Malformed response from NOMIS when looking up prisoner location for %(prisoner_number)s',
{'prisoner_number': prisoner_location.prisoner_number}
)
return None
new_prison = Prison.objects.get(nomis_id=new_location['nomis_id'])
except requests.RequestException:
logger.error(
'Cannot look up prisoner location for %(prisoner_number)s in NOMIS',
{'prisoner_number': prisoner_location.prisoner_number}
)
return None
except Prison.DoesNotExist:
logger.error(
'Cannot find %(nomis_id)s in Prison table',
{'nomis_id': new_location['nomis_id']}
)
return None
else:
logger.info(
'Location fetched from nomis of %(prisoner_number)s is %(prison_nomis_id)s',
{
'prisoner_number': prisoner_location.prisoner_number,
'prison_nomis_id': new_prison.nomis_id,
}
)
# This update will only persist in python space. It is NOT committed to the database
# This is because we should be calling credit_prisons_need_updating on any update to PrisonerLocation and that
# takes too long to do synchronously off the back of a user-triggered API Request
prisoner_location.prison = new_prison
return prisoner_location
| 38.229167 | 118 | 0.667575 | import logging
from typing import Optional
import requests
from mtp_common import nomis
from prison.models import Prison, PrisonerLocation
logger = logging.getLogger('mtp')
def fetch_prisoner_location_from_nomis(prisoner_location: PrisonerLocation) -> Optional[PrisonerLocation]:
new_location = None
try:
new_location = nomis.get_location(prisoner_location.prisoner_number)
if not new_location:
logger.error(
'Malformed response from NOMIS when looking up prisoner location for %(prisoner_number)s',
{'prisoner_number': prisoner_location.prisoner_number}
)
return None
new_prison = Prison.objects.get(nomis_id=new_location['nomis_id'])
except requests.RequestException:
logger.error(
'Cannot look up prisoner location for %(prisoner_number)s in NOMIS',
{'prisoner_number': prisoner_location.prisoner_number}
)
return None
except Prison.DoesNotExist:
logger.error(
'Cannot find %(nomis_id)s in Prison table',
{'nomis_id': new_location['nomis_id']}
)
return None
else:
logger.info(
'Location fetched from nomis of %(prisoner_number)s is %(prison_nomis_id)s',
{
'prisoner_number': prisoner_location.prisoner_number,
'prison_nomis_id': new_prison.nomis_id,
}
)
prisoner_location.prison = new_prison
return prisoner_location
| true | true |
f73a72404a4472fc57a4f25d62a2062da253395f | 630 | py | Python | app/grandchallenge/evaluation/migrations/0009_auto_20210723_0911.py | nlessmann/grand-challenge.org | 36abf6ccb40e2fc3fd3ff00e81deabd76f7e1ef8 | [
"Apache-2.0"
] | 101 | 2018-04-11T14:48:04.000Z | 2022-03-28T00:29:48.000Z | app/grandchallenge/evaluation/migrations/0009_auto_20210723_0911.py | nlessmann/grand-challenge.org | 36abf6ccb40e2fc3fd3ff00e81deabd76f7e1ef8 | [
"Apache-2.0"
] | 1,733 | 2018-03-21T11:56:16.000Z | 2022-03-31T14:58:30.000Z | app/grandchallenge/evaluation/migrations/0009_auto_20210723_0911.py | nlessmann/grand-challenge.org | 36abf6ccb40e2fc3fd3ff00e81deabd76f7e1ef8 | [
"Apache-2.0"
] | 42 | 2018-06-08T05:49:07.000Z | 2022-03-29T08:43:01.000Z | # Generated by Django 3.1.11 on 2021-07-23 09:11
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("evaluation", "0008_evaluation_input_prefixes"),
]
operations = [
migrations.RemoveField(model_name="submission", name="creators_ip",),
migrations.RemoveField(
model_name="submission", name="creators_user_agent",
),
migrations.AddField(
model_name="submission",
name="staged_predictions_file_uuid",
field=models.UUIDField(blank=True, editable=False, null=True),
),
]
| 28.636364 | 77 | 0.64127 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("evaluation", "0008_evaluation_input_prefixes"),
]
operations = [
migrations.RemoveField(model_name="submission", name="creators_ip",),
migrations.RemoveField(
model_name="submission", name="creators_user_agent",
),
migrations.AddField(
model_name="submission",
name="staged_predictions_file_uuid",
field=models.UUIDField(blank=True, editable=False, null=True),
),
]
| true | true |
f73a7323151468c16dd2139217f839b9d42f3a79 | 624 | py | Python | peekingduck/pipeline/nodes/heuristic/utils/__init__.py | leeping-ng/PeekingDuck | 16784b4c35f30c463fcc0c7caccdda6141797a6b | [
"Apache-2.0"
] | null | null | null | peekingduck/pipeline/nodes/heuristic/utils/__init__.py | leeping-ng/PeekingDuck | 16784b4c35f30c463fcc0c7caccdda6141797a6b | [
"Apache-2.0"
] | null | null | null | peekingduck/pipeline/nodes/heuristic/utils/__init__.py | leeping-ng/PeekingDuck | 16784b4c35f30c463fcc0c7caccdda6141797a6b | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 AI Singapore
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utility functions for heuristic nodes
"""
| 34.666667 | 74 | 0.759615 | true | true | |
f73a74d09d20b64f141e8b191e4eb76a7ddc11e3 | 791 | py | Python | ondewo/qa/core/services_container.py | foldvaridominic/ondewo-nlu-client-python | a4e766252fc2fdd2372860755082480b4234609a | [
"Apache-2.0"
] | null | null | null | ondewo/qa/core/services_container.py | foldvaridominic/ondewo-nlu-client-python | a4e766252fc2fdd2372860755082480b4234609a | [
"Apache-2.0"
] | 5 | 2021-11-23T09:43:28.000Z | 2021-12-17T15:09:06.000Z | ondewo/qa/core/services_container.py | foldvaridominic/ondewo-nlu-client-python | a4e766252fc2fdd2372860755082480b4234609a | [
"Apache-2.0"
] | 1 | 2022-02-22T08:54:57.000Z | 2022-02-22T08:54:57.000Z | # Copyright 2021 ONDEWO GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from ondewo.qa.services.qa import QA
from ondewo.utils.base_service_container import BaseServicesContainer
@dataclass
class ServicesContainer(BaseServicesContainer):
qa: QA
| 31.64 | 74 | 0.782554 |
from dataclasses import dataclass
from ondewo.qa.services.qa import QA
from ondewo.utils.base_service_container import BaseServicesContainer
@dataclass
class ServicesContainer(BaseServicesContainer):
qa: QA
| true | true |
f73a766cb9d5eec77567047d3db3f49699b74b1b | 13,030 | py | Python | cryptoapis/model/list_confirmed_transactions_by_address_r_data.py | Crypto-APIs/Crypto_APIs_2.0_SDK_Python | c59ebd914850622b2c6500c4c30af31fb9cecf0e | [
"MIT"
] | 5 | 2021-05-17T04:45:03.000Z | 2022-03-23T12:51:46.000Z | cryptoapis/model/list_confirmed_transactions_by_address_r_data.py | Crypto-APIs/Crypto_APIs_2.0_SDK_Python | c59ebd914850622b2c6500c4c30af31fb9cecf0e | [
"MIT"
] | null | null | null | cryptoapis/model/list_confirmed_transactions_by_address_r_data.py | Crypto-APIs/Crypto_APIs_2.0_SDK_Python | c59ebd914850622b2c6500c4c30af31fb9cecf0e | [
"MIT"
] | 2 | 2021-06-02T07:32:26.000Z | 2022-02-12T02:36:23.000Z | """
CryptoAPIs
Crypto APIs 2.0 is a complex and innovative infrastructure layer that radically simplifies the development of any Blockchain and Crypto related applications. Organized around REST, Crypto APIs 2.0 can assist both novice Bitcoin/Ethereum enthusiasts and crypto experts with the development of their blockchain applications. Crypto APIs 2.0 provides unified endpoints and data, raw data, automatic tokens and coins forwardings, callback functionalities, and much more. # noqa: E501
The version of the OpenAPI document: 2.0.0
Contact: developers@cryptoapis.io
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from cryptoapis.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from cryptoapis.exceptions import ApiAttributeError
def lazy_import():
from cryptoapis.model.list_confirmed_transactions_by_address_ri import ListConfirmedTransactionsByAddressRI
globals()['ListConfirmedTransactionsByAddressRI'] = ListConfirmedTransactionsByAddressRI
class ListConfirmedTransactionsByAddressRData(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """

    # No enum-constrained attributes on this model.
    allowed_values = {
    }

    # No length/range/regex validations on this model.
    validations = {
    }

    @cached_property
    def additional_properties_type():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        """
        lazy_import()
        # Unknown keys may carry any JSON-representable value (or null).
        return (bool, date, datetime, dict, float, int, list, str, none_type,)  # noqa: E501

    # The object itself may not be JSON null.
    _nullable = False

    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded

        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        lazy_import()
        return {
            'offset': (int,),  # noqa: E501
            'limit': (int,),  # noqa: E501
            'total': (int,),  # noqa: E501
            'items': ([ListConfirmedTransactionsByAddressRI],),  # noqa: E501
        }

    @cached_property
    def discriminator():
        # No polymorphic discriminator is defined for this schema.
        return None

    # Python attribute name -> JSON key; identical for every field here.
    attribute_map = {
        'offset': 'offset',  # noqa: E501
        'limit': 'limit',  # noqa: E501
        'total': 'total',  # noqa: E501
        'items': 'items',  # noqa: E501
    }

    # No read-only properties on this model.
    read_only_vars = {
    }

    # Not a composed (allOf/oneOf/anyOf) schema.
    _composed_schemas = {}

    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, offset, limit, total, items, *args, **kwargs):  # noqa: E501
        """ListConfirmedTransactionsByAddressRData - a model defined in OpenAPI

        Args:
            offset (int): The starting index of the response items, i.e. where the response should start listing the returned items.
            limit (int): Defines how many items should be returned in the response per page basis.
            total (int): Defines the total number of items returned in the response.
            items ([ListConfirmedTransactionsByAddressRI]):

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
        """
        # Pop framework-internal options first so the remaining kwargs can be
        # treated purely as model properties.
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        # Allocate via object.__new__ (skipping OpenApiModel.__new__) so this
        # alternate constructor controls initialization itself.
        self = super(OpenApiModel, cls).__new__(cls)

        if args:
            # Extra positional arguments are never valid for generated models.
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        # Record this class so a composed-schema discriminator is not
        # traversed a second time.
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        self.offset = offset
        self.limit = limit
        self.total = total
        self.items = items
        for var_name, var_value in kwargs.items():
            # Optionally drop unknown keys when the configuration asks for it
            # and no additional-properties type is declared.
            if var_name not in self.attribute_map and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
        return self

    # Internal bookkeeping attributes assigned in __init__ /
    # _from_openapi_data above; never serialized as schema properties.
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])

    @convert_js_args_to_python_args
    def __init__(self, offset, limit, total, items, *args, **kwargs):  # noqa: E501
        """ListConfirmedTransactionsByAddressRData - a model defined in OpenAPI

        Args:
            offset (int): The starting index of the response items, i.e. where the response should start listing the returned items.
            limit (int): Defines how many items should be returned in the response per page basis.
            total (int): Defines the total number of items returned in the response.
            items ([ListConfirmedTransactionsByAddressRI]):

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
        """
        # Pop framework-internal options first so the remaining kwargs can be
        # treated purely as model properties.
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        if args:
            # Extra positional arguments are never valid for generated models.
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        # Record this class so a composed-schema discriminator is not
        # traversed a second time.
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        self.offset = offset
        self.limit = limit
        self.total = total
        self.items = items
        for var_name, var_value in kwargs.items():
            # Optionally drop unknown keys when the configuration asks for it
            # and no additional-properties type is declared.
            if var_name not in self.attribute_map and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
            # Read-only attributes may only be set through the
            # _from_openapi_data constructor (per the error below).
            if var_name in self.read_only_vars:
                raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
                                        f"class with read only attributes.")
| 45.400697 | 484 | 0.589793 |
import re
import sys
from cryptoapis.model_utils import (
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from cryptoapis.exceptions import ApiAttributeError
def lazy_import():
from cryptoapis.model.list_confirmed_transactions_by_address_ri import ListConfirmedTransactionsByAddressRI
globals()['ListConfirmedTransactionsByAddressRI'] = ListConfirmedTransactionsByAddressRI
class ListConfirmedTransactionsByAddressRData(ModelNormal):
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,)
_nullable = False
@cached_property
def openapi_types():
lazy_import()
return {
'offset': (int,),
'limit': (int,),
'total': (int,),
'items': ([ListConfirmedTransactionsByAddressRI],),
}
@cached_property
def discriminator():
return None
attribute_map = {
'offset': 'offset',
'limit': 'limit',
'total': 'total',
'items': 'items',
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, offset, limit, total, items, *args, **kwargs):
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.offset = offset
self.limit = limit
self.total = total
self.items = items
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, offset, limit, total, items, *args, **kwargs):
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.offset = offset
self.limit = limit
self.total = total
self.items = items
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
| true | true |
f73a781985dc979e642260615b0f59b3ed2aac0c | 8,949 | py | Python | rl/envs/mujoco/ant.py | luanagbmartins/cavia | 91f093af9d6f463ee651db533f6c2acc637c7e9f | [
"MIT"
] | 122 | 2019-05-24T01:47:19.000Z | 2022-03-30T13:16:17.000Z | rl/envs/mujoco/ant.py | luanagbmartins/cavia | 91f093af9d6f463ee651db533f6c2acc637c7e9f | [
"MIT"
] | 5 | 2019-06-12T15:55:38.000Z | 2021-07-02T01:22:47.000Z | rl/envs/mujoco/ant.py | luanagbmartins/cavia | 91f093af9d6f463ee651db533f6c2acc637c7e9f | [
"MIT"
] | 36 | 2019-06-19T11:36:58.000Z | 2022-03-30T13:16:18.000Z | import numpy as np
from gym.envs.mujoco import AntEnv as AntEnv_
class AntEnv(AntEnv_):
@property
def action_scaling(self):
if (not hasattr(self, 'action_space')) or (self.action_space is None):
return 1.0
if self._action_scaling is None:
lb, ub = self.action_space.low, self.action_space.high
self._action_scaling = 0.5 * (ub - lb)
return self._action_scaling
def _get_obs(self):
return np.concatenate([
self.sim.data.qpos.flat,
self.sim.data.qvel.flat,
np.clip(self.sim.data.cfrc_ext, -1, 1).flat,
self.sim.data.get_body_xmat("torso").flat,
self.get_body_com("torso").flat,
]).astype(np.float32).flatten()
def viewer_setup(self):
camera_id = self.model.camera_name2id('track')
self.viewer.cam.type = 2
self.viewer.cam.fixedcamid = camera_id
self.viewer.cam.distance = self.model.stat.extent * 0.35
# Hide the overlay
self.viewer._hide_overlay = True
def render(self, mode='human'):
if mode == 'rgb_array':
self._get_viewer().render()
# window size used for old mujoco-py:
width, height = 500, 500
data = self._get_viewer().read_pixels(width, height, depth=False)
return data
elif mode == 'human':
self._get_viewer().render()
class AntVelEnv(AntEnv):
"""Ant environment with target velocity, as described in [1]. The
code is adapted from
https://github.com/cbfinn/maml_rl/blob/9c8e2ebd741cb0c7b8bf2d040c4caeeb8e06cc95/rllab/envs/mujoco/ant_env_rand.py
The ant follows the dynamics from MuJoCo [2], and receives at each
time step a reward composed of a control cost, a contact cost, a survival
reward, and a penalty equal to the difference between its current velocity
and the target velocity. The tasks are generated by sampling the target
velocities from the uniform distribution on [0, 3].
[1] Chelsea Finn, Pieter Abbeel, Sergey Levine, "Model-Agnostic
Meta-Learning for Fast Adaptation of Deep Networks", 2017
(https://arxiv.org/abs/1703.03400)
[2] Emanuel Todorov, Tom Erez, Yuval Tassa, "MuJoCo: A physics engine for
model-based control", 2012
(https://homes.cs.washington.edu/~todorov/papers/TodorovIROS12.pdf)
"""
def __init__(self, task={}):
self._task = task
self._goal_vel = task.get('velocity', 0.0)
self._action_scaling = None
super(AntVelEnv, self).__init__()
def step(self, action):
xposbefore = self.get_body_com("torso")[0]
self.do_simulation(action, self.frame_skip)
xposafter = self.get_body_com("torso")[0]
forward_vel = (xposafter - xposbefore) / self.dt
forward_reward = -1.0 * np.abs(forward_vel - self._goal_vel) + 1.0
survive_reward = 0.05
ctrl_cost = 0.5 * 1e-2 * np.sum(np.square(action / self.action_scaling))
contact_cost = 0.5 * 1e-3 * np.sum(
np.square(np.clip(self.sim.data.cfrc_ext, -1, 1)))
observation = self._get_obs()
reward = forward_reward - ctrl_cost - contact_cost + survive_reward
state = self.state_vector()
notdone = np.isfinite(state).all() \
and state[2] >= 0.2 and state[2] <= 1.0
done = not notdone
infos = dict(reward_forward=forward_reward, reward_ctrl=-ctrl_cost,
reward_contact=-contact_cost, reward_survive=survive_reward,
task=self._task)
return (observation, reward, done, infos)
def sample_tasks(self, num_tasks):
velocities = self.np_random.uniform(0.0, 3.0, size=(num_tasks,))
tasks = [{'velocity': velocity} for velocity in velocities]
return tasks
def reset_task(self, task):
self._task = task
self._goal_vel = task['velocity']
class AntDirEnv(AntEnv):
"""Ant environment with target direction, as described in [1]. The
code is adapted from
https://github.com/cbfinn/maml_rl/blob/9c8e2ebd741cb0c7b8bf2d040c4caeeb8e06cc95/rllab/envs/mujoco/ant_env_rand_direc.py
The ant follows the dynamics from MuJoCo [2], and receives at each
time step a reward composed of a control cost, a contact cost, a survival
reward, and a reward equal to its velocity in the target direction. The
tasks are generated by sampling the target directions from a Bernoulli
distribution on {-1, 1} with parameter 0.5 (-1: backward, +1: forward).
[1] Chelsea Finn, Pieter Abbeel, Sergey Levine, "Model-Agnostic
Meta-Learning for Fast Adaptation of Deep Networks", 2017
(https://arxiv.org/abs/1703.03400)
[2] Emanuel Todorov, Tom Erez, Yuval Tassa, "MuJoCo: A physics engine for
model-based control", 2012
(https://homes.cs.washington.edu/~todorov/papers/TodorovIROS12.pdf)
"""
def __init__(self, task={}):
self._task = task
self._goal_dir = task.get('direction', 1)
self._action_scaling = None
super(AntDirEnv, self).__init__()
def step(self, action):
xposbefore = self.get_body_com("torso")[0]
self.do_simulation(action, self.frame_skip)
xposafter = self.get_body_com("torso")[0]
forward_vel = (xposafter - xposbefore) / self.dt
forward_reward = self._goal_dir * forward_vel
survive_reward = 0.05
ctrl_cost = 0.5 * 1e-2 * np.sum(np.square(action / self.action_scaling))
contact_cost = 0.5 * 1e-3 * np.sum(
np.square(np.clip(self.sim.data.cfrc_ext, -1, 1)))
observation = self._get_obs()
reward = forward_reward - ctrl_cost - contact_cost + survive_reward
state = self.state_vector()
notdone = np.isfinite(state).all() \
and state[2] >= 0.2 and state[2] <= 1.0
done = not notdone
infos = dict(reward_forward=forward_reward, reward_ctrl=-ctrl_cost,
reward_contact=-contact_cost, reward_survive=survive_reward,
task=self._task)
return (observation, reward, done, infos)
def sample_tasks(self, num_tasks):
directions = 2 * self.np_random.binomial(1, p=0.5, size=(num_tasks,)) - 1
tasks = [{'direction': direction} for direction in directions]
return tasks
def reset_task(self, task):
self._task = task
self._goal_dir = task['direction']
class AntPosEnv(AntEnv):
"""Ant environment with target position. The code is adapted from
https://github.com/cbfinn/maml_rl/blob/9c8e2ebd741cb0c7b8bf2d040c4caeeb8e06cc95/rllab/envs/mujoco/ant_env_rand_goal.py
The ant follows the dynamics from MuJoCo [1], and receives at each
time step a reward composed of a control cost, a contact cost, a survival
reward, and a penalty equal to its L1 distance to the target position. The
tasks are generated by sampling the target positions from the uniform
distribution on [-3, 3]^2.
[1] Emanuel Todorov, Tom Erez, Yuval Tassa, "MuJoCo: A physics engine for
model-based control", 2012
(https://homes.cs.washington.edu/~todorov/papers/TodorovIROS12.pdf)
"""
def __init__(self, task={}):
self._task = task
self._goal_pos = task.get('position', np.zeros((2,), dtype=np.float32))
self._action_scaling = None
super(AntPosEnv, self).__init__()
def step(self, action):
self.do_simulation(action, self.frame_skip)
xyposafter = self.get_body_com("torso")[:2]
goal_reward = -np.sum(np.abs(xyposafter - self._goal_pos)) + 4.0
survive_reward = 0.05
ctrl_cost = 0.5 * 1e-2 * np.sum(np.square(action / self.action_scaling))
contact_cost = 0.5 * 1e-3 * np.sum(
np.square(np.clip(self.sim.data.cfrc_ext, -1, 1)))
observation = self._get_obs()
reward = goal_reward - ctrl_cost - contact_cost + survive_reward
state = self.state_vector()
notdone = np.isfinite(state).all() \
and state[2] >= 0.2 and state[2] <= 1.0
done = not notdone
infos = dict(reward_goal=goal_reward, reward_ctrl=-ctrl_cost,
reward_contact=-contact_cost, reward_survive=survive_reward,
task=self._task)
return (observation, reward, done, infos)
def sample_tasks(self, num_tasks):
positions = self.np_random.uniform(-3.0, 3.0, size=(num_tasks, 2))
tasks = [{'position': position} for position in positions]
return tasks
def reset_task(self, task):
self._task = task
self._goal_pos = task['position']
| 42.212264 | 124 | 0.624986 | import numpy as np
from gym.envs.mujoco import AntEnv as AntEnv_
class AntEnv(AntEnv_):
@property
def action_scaling(self):
if (not hasattr(self, 'action_space')) or (self.action_space is None):
return 1.0
if self._action_scaling is None:
lb, ub = self.action_space.low, self.action_space.high
self._action_scaling = 0.5 * (ub - lb)
return self._action_scaling
def _get_obs(self):
return np.concatenate([
self.sim.data.qpos.flat,
self.sim.data.qvel.flat,
np.clip(self.sim.data.cfrc_ext, -1, 1).flat,
self.sim.data.get_body_xmat("torso").flat,
self.get_body_com("torso").flat,
]).astype(np.float32).flatten()
def viewer_setup(self):
camera_id = self.model.camera_name2id('track')
self.viewer.cam.type = 2
self.viewer.cam.fixedcamid = camera_id
self.viewer.cam.distance = self.model.stat.extent * 0.35
self.viewer._hide_overlay = True
def render(self, mode='human'):
if mode == 'rgb_array':
self._get_viewer().render()
width, height = 500, 500
data = self._get_viewer().read_pixels(width, height, depth=False)
return data
elif mode == 'human':
self._get_viewer().render()
class AntVelEnv(AntEnv):
def __init__(self, task={}):
self._task = task
self._goal_vel = task.get('velocity', 0.0)
self._action_scaling = None
super(AntVelEnv, self).__init__()
def step(self, action):
xposbefore = self.get_body_com("torso")[0]
self.do_simulation(action, self.frame_skip)
xposafter = self.get_body_com("torso")[0]
forward_vel = (xposafter - xposbefore) / self.dt
forward_reward = -1.0 * np.abs(forward_vel - self._goal_vel) + 1.0
survive_reward = 0.05
ctrl_cost = 0.5 * 1e-2 * np.sum(np.square(action / self.action_scaling))
contact_cost = 0.5 * 1e-3 * np.sum(
np.square(np.clip(self.sim.data.cfrc_ext, -1, 1)))
observation = self._get_obs()
reward = forward_reward - ctrl_cost - contact_cost + survive_reward
state = self.state_vector()
notdone = np.isfinite(state).all() \
and state[2] >= 0.2 and state[2] <= 1.0
done = not notdone
infos = dict(reward_forward=forward_reward, reward_ctrl=-ctrl_cost,
reward_contact=-contact_cost, reward_survive=survive_reward,
task=self._task)
return (observation, reward, done, infos)
def sample_tasks(self, num_tasks):
velocities = self.np_random.uniform(0.0, 3.0, size=(num_tasks,))
tasks = [{'velocity': velocity} for velocity in velocities]
return tasks
def reset_task(self, task):
self._task = task
self._goal_vel = task['velocity']
class AntDirEnv(AntEnv):
def __init__(self, task={}):
self._task = task
self._goal_dir = task.get('direction', 1)
self._action_scaling = None
super(AntDirEnv, self).__init__()
def step(self, action):
xposbefore = self.get_body_com("torso")[0]
self.do_simulation(action, self.frame_skip)
xposafter = self.get_body_com("torso")[0]
forward_vel = (xposafter - xposbefore) / self.dt
forward_reward = self._goal_dir * forward_vel
survive_reward = 0.05
ctrl_cost = 0.5 * 1e-2 * np.sum(np.square(action / self.action_scaling))
contact_cost = 0.5 * 1e-3 * np.sum(
np.square(np.clip(self.sim.data.cfrc_ext, -1, 1)))
observation = self._get_obs()
reward = forward_reward - ctrl_cost - contact_cost + survive_reward
state = self.state_vector()
notdone = np.isfinite(state).all() \
and state[2] >= 0.2 and state[2] <= 1.0
done = not notdone
infos = dict(reward_forward=forward_reward, reward_ctrl=-ctrl_cost,
reward_contact=-contact_cost, reward_survive=survive_reward,
task=self._task)
return (observation, reward, done, infos)
def sample_tasks(self, num_tasks):
directions = 2 * self.np_random.binomial(1, p=0.5, size=(num_tasks,)) - 1
tasks = [{'direction': direction} for direction in directions]
return tasks
def reset_task(self, task):
self._task = task
self._goal_dir = task['direction']
class AntPosEnv(AntEnv):
def __init__(self, task={}):
self._task = task
self._goal_pos = task.get('position', np.zeros((2,), dtype=np.float32))
self._action_scaling = None
super(AntPosEnv, self).__init__()
def step(self, action):
self.do_simulation(action, self.frame_skip)
xyposafter = self.get_body_com("torso")[:2]
goal_reward = -np.sum(np.abs(xyposafter - self._goal_pos)) + 4.0
survive_reward = 0.05
ctrl_cost = 0.5 * 1e-2 * np.sum(np.square(action / self.action_scaling))
contact_cost = 0.5 * 1e-3 * np.sum(
np.square(np.clip(self.sim.data.cfrc_ext, -1, 1)))
observation = self._get_obs()
reward = goal_reward - ctrl_cost - contact_cost + survive_reward
state = self.state_vector()
notdone = np.isfinite(state).all() \
and state[2] >= 0.2 and state[2] <= 1.0
done = not notdone
infos = dict(reward_goal=goal_reward, reward_ctrl=-ctrl_cost,
reward_contact=-contact_cost, reward_survive=survive_reward,
task=self._task)
return (observation, reward, done, infos)
def sample_tasks(self, num_tasks):
positions = self.np_random.uniform(-3.0, 3.0, size=(num_tasks, 2))
tasks = [{'position': position} for position in positions]
return tasks
def reset_task(self, task):
self._task = task
self._goal_pos = task['position']
| true | true |
f73a785727b404231c6b1d1649819a2e9efcadf2 | 1,045 | py | Python | python/cudf/cudf/tests/test_decimal.py | BenikaHall/cudf | d3f5add210293a4832dafb85f04cbb73149b9d54 | [
"Apache-2.0"
] | null | null | null | python/cudf/cudf/tests/test_decimal.py | BenikaHall/cudf | d3f5add210293a4832dafb85f04cbb73149b9d54 | [
"Apache-2.0"
] | 1 | 2021-02-23T18:05:36.000Z | 2021-02-23T18:05:36.000Z | python/cudf/cudf/tests/test_decimal.py | BenikaHall/cudf | d3f5add210293a4832dafb85f04cbb73149b9d54 | [
"Apache-2.0"
] | 1 | 2020-11-10T03:19:16.000Z | 2020-11-10T03:19:16.000Z | # Copyright (c) 2021, NVIDIA CORPORATION.
from decimal import Decimal
import pyarrow as pa
import pytest
from cudf.core.column import DecimalColumn
@pytest.mark.parametrize(
"data",
[
[Decimal("1.1"), Decimal("2.2"), Decimal("3.3"), Decimal("4.4")],
[Decimal("-1.1"), Decimal("2.2"), Decimal("3.3"), Decimal("4.4")],
[1],
[-1],
[1, 2, 3, 4],
[42, 1729, 4104],
[1, 2, None, 4],
[None, None, None],
[],
],
)
@pytest.mark.parametrize(
"typ",
[
pa.decimal128(precision=4, scale=2),
pa.decimal128(precision=5, scale=3),
pa.decimal128(precision=6, scale=4),
],
)
def test_round_trip_decimal_column(data, typ):
pa_arr = pa.array(data, type=typ)
col = DecimalColumn.from_arrow(pa_arr)
assert pa_arr.equals(col.to_arrow())
def test_from_arrow_max_precision():
with pytest.raises(ValueError):
DecimalColumn.from_arrow(
pa.array([1, 2, 3], type=pa.decimal128(scale=0, precision=19))
)
| 23.75 | 74 | 0.583732 |
from decimal import Decimal
import pyarrow as pa
import pytest
from cudf.core.column import DecimalColumn
@pytest.mark.parametrize(
"data",
[
[Decimal("1.1"), Decimal("2.2"), Decimal("3.3"), Decimal("4.4")],
[Decimal("-1.1"), Decimal("2.2"), Decimal("3.3"), Decimal("4.4")],
[1],
[-1],
[1, 2, 3, 4],
[42, 1729, 4104],
[1, 2, None, 4],
[None, None, None],
[],
],
)
@pytest.mark.parametrize(
"typ",
[
pa.decimal128(precision=4, scale=2),
pa.decimal128(precision=5, scale=3),
pa.decimal128(precision=6, scale=4),
],
)
def test_round_trip_decimal_column(data, typ):
pa_arr = pa.array(data, type=typ)
col = DecimalColumn.from_arrow(pa_arr)
assert pa_arr.equals(col.to_arrow())
def test_from_arrow_max_precision():
with pytest.raises(ValueError):
DecimalColumn.from_arrow(
pa.array([1, 2, 3], type=pa.decimal128(scale=0, precision=19))
)
| true | true |
f73a78765c11722356d6568c01bb9cec01563be3 | 3,916 | py | Python | displayminion/Action.py | cedarsuite/displayclient | 2ea5786d956edcfe34deda9694f9e6b353446bc8 | [
"MIT"
] | null | null | null | displayminion/Action.py | cedarsuite/displayclient | 2ea5786d956edcfe34deda9694f9e6b353446bc8 | [
"MIT"
] | null | null | null | displayminion/Action.py | cedarsuite/displayclient | 2ea5786d956edcfe34deda9694f9e6b353446bc8 | [
"MIT"
] | null | null | null | fade_old_max_wait = 1 # Wait no more than this many seconds to fade out old action
import kivy
kivy.require('1.9.0')
from kivy.animation import Animation
from kivy.clock import Clock
class Action:
def __init__(self, action, old_action, client):
self.action = action
self.old_action = old_action
self.client = client
self.meteor = self.client.meteor
self.time = self.client.time
self.layer = self.action['layer']
self.settings = self.combine_settings(self.client.defaults, self.action.get('settings'))
self.args = action.get('args', {})
self.fade_length = None
self.ready = False
self.shown = False
self.removed = False
self.anim_widgets = []
self.anims_ended = 0
self.show_schedule_handle = None
def add_anim_widget(self, widget, prop, vin, vout):
self.anim_widgets.append((widget, prop, vin, vout))
def do_in_animation(self, duration):
for widget, prop, vin, vout in self.anim_widgets:
Animation.cancel_all(widget, prop)
kwargs = {'transition': 'out_quad', 'duration': duration}
kwargs[prop] = vin
Animation(**kwargs).start(widget)
def do_out_animation(self, duration):
for widget, prop, vin, vout in self.anim_widgets:
Animation.cancel_all(widget, prop)
kwargs = {'transition': 'in_quad', 'duration': duration}
kwargs[prop] = vout
anim = Animation(**kwargs)
anim.on_complete = self._out_animation_end
anim.start(widget)
def _out_animation_end(self, widget):
self.anims_ended += 1
if self.anims_ended >= len(self.anim_widgets):
self.out_animation_end()
def out_animation_end(self):
pass
def combine_settings(self, *args):
result = {}
for arg in args:
if type(arg) == dict:
for k, v in arg.items():
if not type(v) == type(None):
result[k] = v
return result
def get_current_widget_index(self):
return
def check_ready(self):
return True
def get_fade_duration(self):
if self.fade_length == None:
if self.old_action and self.old_action.fade_length:
return self.old_action.fade_length or 0
else: return 0
else:
return self.fade_length
def remove_old(self):
if self.old_action:
self.old_action.hide(self.get_fade_duration())
self.old_action.remove()
self.old_action = None
def show(self):
self.show_schedule_handle = None
self.ready = self.check_ready()
if self.ready:
self.shown = True
self.remove_old()
self.on_show(self.get_fade_duration())
else:
if self.old_action and self.time.now() - self.action['time'] > fade_old_max_wait:
self.remove_old()
self.show_schedule_handle = Clock.schedule_once(lambda dt: self.show(), 0)
def on_show(self, duration):
pass
def hide(self, duration = None):
if self.show_schedule_handle: self.show_schedule_handle.cancel()
self.remove_old()
if duration == None: duration = self.get_fade_duration()
self.on_hide(duration)
def on_hide(self, duration):
self.shown = False
def remove(self):
if self.shown:
Clock.schedule_once(lambda dt: self.remove(), 0)
else:
self.removed = True
| 29.443609 | 96 | 0.550817 | fade_old_max_wait = 1
import kivy
kivy.require('1.9.0')
from kivy.animation import Animation
from kivy.clock import Clock
class Action:
def __init__(self, action, old_action, client):
self.action = action
self.old_action = old_action
self.client = client
self.meteor = self.client.meteor
self.time = self.client.time
self.layer = self.action['layer']
self.settings = self.combine_settings(self.client.defaults, self.action.get('settings'))
self.args = action.get('args', {})
self.fade_length = None
self.ready = False
self.shown = False
self.removed = False
self.anim_widgets = []
self.anims_ended = 0
self.show_schedule_handle = None
def add_anim_widget(self, widget, prop, vin, vout):
self.anim_widgets.append((widget, prop, vin, vout))
def do_in_animation(self, duration):
for widget, prop, vin, vout in self.anim_widgets:
Animation.cancel_all(widget, prop)
kwargs = {'transition': 'out_quad', 'duration': duration}
kwargs[prop] = vin
Animation(**kwargs).start(widget)
def do_out_animation(self, duration):
for widget, prop, vin, vout in self.anim_widgets:
Animation.cancel_all(widget, prop)
kwargs = {'transition': 'in_quad', 'duration': duration}
kwargs[prop] = vout
anim = Animation(**kwargs)
anim.on_complete = self._out_animation_end
anim.start(widget)
def _out_animation_end(self, widget):
self.anims_ended += 1
if self.anims_ended >= len(self.anim_widgets):
self.out_animation_end()
def out_animation_end(self):
pass
def combine_settings(self, *args):
result = {}
for arg in args:
if type(arg) == dict:
for k, v in arg.items():
if not type(v) == type(None):
result[k] = v
return result
def get_current_widget_index(self):
return
def check_ready(self):
return True
def get_fade_duration(self):
if self.fade_length == None:
if self.old_action and self.old_action.fade_length:
return self.old_action.fade_length or 0
else: return 0
else:
return self.fade_length
def remove_old(self):
if self.old_action:
self.old_action.hide(self.get_fade_duration())
self.old_action.remove()
self.old_action = None
def show(self):
self.show_schedule_handle = None
self.ready = self.check_ready()
if self.ready:
self.shown = True
self.remove_old()
self.on_show(self.get_fade_duration())
else:
if self.old_action and self.time.now() - self.action['time'] > fade_old_max_wait:
self.remove_old()
self.show_schedule_handle = Clock.schedule_once(lambda dt: self.show(), 0)
def on_show(self, duration):
    """Hook called when the action becomes visible; subclasses animate in here."""
def hide(self, duration = None):
    """Hide the action, cancelling any pending show() poll first.

    Args:
        duration: fade-out time in seconds. Defaults to get_fade_duration();
            an explicit 0 is honoured (only None triggers the default).
    """
    if self.show_schedule_handle:
        self.show_schedule_handle.cancel()
    self.remove_old()
    # `is None` so a caller-supplied duration of 0 is not overridden.
    if duration is None:
        duration = self.get_fade_duration()
    self.on_hide(duration)
def on_hide(self, duration):
    """Hook called when the action is hidden; clears the shown flag."""
    self.shown = False
def remove(self):
    """Mark the action removed; if it is still shown, retry on the next Clock tick."""
    if not self.shown:
        self.removed = True
        return
    Clock.schedule_once(lambda dt: self.remove(), 0)
| true | true |
f73a78a965ea8458029ca99d12740408aa1b376a | 2,016 | py | Python | 4.conditionals/challenge2_rouillonh.py | rouillonh/ChallengePython | 7e7d9b69f60394fd1f00a6a4aa32f97de95b1b92 | [
"MIT"
] | null | null | null | 4.conditionals/challenge2_rouillonh.py | rouillonh/ChallengePython | 7e7d9b69f60394fd1f00a6a4aa32f97de95b1b92 | [
"MIT"
] | null | null | null | 4.conditionals/challenge2_rouillonh.py | rouillonh/ChallengePython | 7e7d9b69f60394fd1f00a6a4aa32f97de95b1b92 | [
"MIT"
] | null | null | null | #Importamos la librería random para el flip de la moneda
# random supplies the simulated coin flip.
import random
print("\tWelcome to the Coin Toss App")
# Ask how many times the coin should be flipped.
flip = int(input("How many times would you like me to flip the coin: "))
r = input("Would you like to see the result of each flip (y/n): ").lower()
# Running list of outcomes, used to tally heads and tails.
x = []
head = 0
tail = 0
print("\nFlipping!\n")
# Branch taken when the user does NOT want each individual flip printed.
if r.startswith('n'):
    for i in range(flip):
        tiro = random.randint(0,1)  # 0 = heads, 1 = tails
        if tiro == 0:
            x.append("heads")
        elif tiro ==1:
            x.append("tails")
        # Report every moment the running counts of heads and tails are equal.
        if x.count("heads") == x.count("tails"):
            print("At ",x.count("heads")+x.count("tails")," flips, the number of heads and tails were equal at ",(x.count("heads")+x.count("tails"))/2,"each.")
    print("\nResults Of Flipping A Coin ",flip," Times:\n")
    print("Side\t\tCount\t\tPercentage")
    print("Heads\t\t",x.count("heads"),"/",flip,"\t",round(x.count("heads")/flip *100,2),"%")
    print("Tails\t\t",x.count("tails"),"/",flip,"\t",round(x.count("tails")/flip *100,2),"%")
# Branch taken when the user wants every flip printed as it happens.
if r.startswith('y'):
    for i in range(flip):
        tiro = random.randint(0,1)  # 0 = heads, 1 = tails
        if tiro == 0:
            print("HEADS")
            x.append("heads")
        elif tiro ==1:
            print("TAILS")
            x.append("tails")
        # Report every moment the running counts of heads and tails are equal.
        if x.count("heads") == x.count("tails"):
            print("At ",x.count("heads")+x.count("tails")," flips, the number of heads and tails were equal at ",(x.count("heads")+x.count("tails"))/2,"each.")
    print("\nResults Of Flipping A Coin ",flip," Times:\n")
    print("Side\t\tCount\t\tPercentage")
    print("Heads\t\t",x.count("heads"),"/",flip,"\t",round(x.count("heads")/flip *100,2),"%")
    print("Tails\t\t",x.count("tails"),"/",flip,"\t",round(x.count("tails")/flip *100,2),"%")
| 48 | 159 | 0.600694 |
import random
print("\tWelcome to the Coin Toss App")
flip = int(input("How many times would you like me to flip the coin: "))
r = input("Would you like to see the result of each flip (y/n): ").lower()
x = []
head = 0
tail = 0
print("\nFlipping!\n")
if r.startswith('n'):
for i in range(flip):
tiro = random.randint(0,1)
if tiro == 0:
x.append("heads")
elif tiro ==1:
x.append("tails")
if x.count("heads") == x.count("tails"):
print("At ",x.count("heads")+x.count("tails")," flips, the number of heads and tails were equal at ",(x.count("heads")+x.count("tails"))/2,"each.")
print("\nResults Of Flipping A Coin ",flip," Times:\n")
print("Side\t\tCount\t\tPercentage")
print("Heads\t\t",x.count("heads"),"/",flip,"\t",round(x.count("heads")/flip *100,2),"%")
print("Tails\t\t",x.count("tails"),"/",flip,"\t",round(x.count("tails")/flip *100,2),"%")
if r.startswith('y'):
for i in range(flip):
tiro = random.randint(0,1)
if tiro == 0:
print("HEADS")
x.append("heads")
elif tiro ==1:
print("TAILS")
x.append("tails")
if x.count("heads") == x.count("tails"):
print("At ",x.count("heads")+x.count("tails")," flips, the number of heads and tails were equal at ",(x.count("heads")+x.count("tails"))/2,"each.")
print("\nResults Of Flipping A Coin ",flip," Times:\n")
print("Side\t\tCount\t\tPercentage")
print("Heads\t\t",x.count("heads"),"/",flip,"\t",round(x.count("heads")/flip *100,2),"%")
print("Tails\t\t",x.count("tails"),"/",flip,"\t",round(x.count("tails")/flip *100,2),"%")
| true | true |
f73a7912985d51f91ff457ce2256f6b6cfa396d9 | 775 | py | Python | Classification/Work 2/NaiveBayes/naivebayes_crossvalidation.py | leniel/DataMining | f249f636ede67a29de986b8f34c9cbe75b680f47 | [
"MIT"
] | null | null | null | Classification/Work 2/NaiveBayes/naivebayes_crossvalidation.py | leniel/DataMining | f249f636ede67a29de986b8f34c9cbe75b680f47 | [
"MIT"
] | null | null | null | Classification/Work 2/NaiveBayes/naivebayes_crossvalidation.py | leniel/DataMining | f249f636ede67a29de986b8f34c9cbe75b680f47 | [
"MIT"
] | null | null | null | '''
Created on Sat Nov 05 2016
Copyright (c) 2016 Leniel Macaferi's Consulting
'''
import os
import pandas as pd
from sklearn.naive_bayes import GaussianNB
from sklearn.model_selection import cross_val_score
# Resolve the parent directory so the data path works relative to this script.
path = os.path.realpath('..')
# Loading the data used to train
trainingSet = pd.read_csv(os.path.join(path, '../Data/classification-training.csv'), sep=',', header = None)
classes = trainingSet[trainingSet.columns[22]] # Last column holds the class label
features = trainingSet[trainingSet.columns[1:22]] # Feature columns at indexes 1..21 (column 0 is skipped)
#pd.set_option('display.max_columns', 23)
#print(features)
# Gaussian Naive Bayes evaluated with 5-fold cross-validation.
classifier = GaussianNB()
scores = cross_val_score(classifier, features, classes, cv = 5)
print(scores)
# Mean accuracy with a ~95% interval (two standard deviations).
print("Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
import os
import pandas as pd
from sklearn.naive_bayes import GaussianNB
from sklearn.model_selection import cross_val_score
path = os.path.realpath('..')
trainingSet = pd.read_csv(os.path.join(path, '../Data/classification-training.csv'), sep=',', header = None)
classes = trainingSet[trainingSet.columns[22]]
features = trainingSet[trainingSet.columns[1:22]]
classifier = GaussianNB()
scores = cross_val_score(classifier, features, classes, cv = 5)
print(scores)
print("Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2)) | true | true |
f73a791557d22427892d85d38038b66fb93156e5 | 1,376 | py | Python | python/word-pattern/test_word_pattern.py | gurupratap-matharu/Exercism | a083c8d4bbc10d777524d917329a34e560201c9c | [
"MIT"
] | null | null | null | python/word-pattern/test_word_pattern.py | gurupratap-matharu/Exercism | a083c8d4bbc10d777524d917329a34e560201c9c | [
"MIT"
] | null | null | null | python/word-pattern/test_word_pattern.py | gurupratap-matharu/Exercism | a083c8d4bbc10d777524d917329a34e560201c9c | [
"MIT"
] | null | null | null | """
Testing script to check edge cases for word_pattern.py
"""
import unittest
from word_pattern import Solution
class WordPatternTests(unittest.TestCase):
    """Edge-case tests for Solution.word_pattern."""

    def _matches(self, pattern, text):
        # Helper so each test reads as a single statement.
        return Solution().word_pattern(pattern, text)

    def test_small_valid_word_pattern(self):
        self.assertTrue(self._matches("abb", "dog cat cat"))

    def test_medium_valid_word_pattern(self):
        self.assertTrue(
            self._matches("xxyyzzxx", "hola hola como como estas estas hola hola")
        )

    def test_large_valid_word_pattern(self):
        self.assertTrue(
            self._matches(
                "abcabccbacba", "how are you how are you you are how you are how"
            )
        )

    def test_small_tricky_valid_word_pattern(self):
        self.assertTrue(
            self._matches("abccbbaa", "me you love love you you me me")
        )

    def test_medium_tricky_valid_word_pattern(self):
        self.assertTrue(
            self._matches("xyzzzzzyx", "how are you you you you you are how")
        )

    def test_small_invalid_word_pattern(self):
        self.assertFalse(self._matches("abb", "cat dog cat"))

    def test_medium_invalid_word_pattern(self):
        self.assertFalse(self._matches("abcabca", "cat dog bull cat dog bull dog"))
| 29.276596 | 93 | 0.647529 |
import unittest
from word_pattern import Solution
class WordPatternTests(unittest.TestCase):
def test_small_valid_word_pattern(self):
self.assertTrue(Solution().word_pattern("abb", "dog cat cat"))
def test_medium_valid_word_pattern(self):
self.assertTrue(
Solution().word_pattern(
"xxyyzzxx", "hola hola como como estas estas hola hola"
)
)
def test_large_valid_word_pattern(self):
self.assertTrue(
Solution().word_pattern(
"abcabccbacba", "how are you how are you you are how you are how"
)
)
def test_small_tricky_valid_word_pattern(self):
self.assertTrue(
Solution().word_pattern("abccbbaa", "me you love love you you me me")
)
def test_medium_tricky_valid_word_pattern(self):
self.assertTrue(
Solution().word_pattern("xyzzzzzyx", "how are you you you you you are how")
)
def test_small_invalid_word_pattern(self):
self.assertFalse(Solution().word_pattern("abb", "cat dog cat"))
def test_medium_invalid_word_pattern(self):
self.assertFalse(Solution().word_pattern("abcabca", "cat dog bull cat dog bull dog"))
| true | true |
f73a799ba3ebcc86d0f399b51583b392f194be6c | 9,441 | py | Python | docs_rst/conf-docset.py | anjlip/pymatgen | 62ecae1c7382a41861e3a5d9b9c8dd1207472409 | [
"MIT"
] | 2 | 2017-10-02T03:11:47.000Z | 2018-12-02T12:56:12.000Z | docs_rst/conf-docset.py | darnoceloc/pymatgen | 5cc42912a12a265a603df7e34c856561f76edc1f | [
"MIT"
] | 3 | 2017-07-18T01:13:41.000Z | 2019-04-29T18:17:30.000Z | docs_rst/conf-docset.py | darnoceloc/pymatgen | 5cc42912a12a265a603df7e34c856561f76edc1f | [
"MIT"
] | 2 | 2020-04-30T14:19:12.000Z | 2021-07-30T08:24:48.000Z | # -*- coding: utf-8 -*-
#
# pymatgen documentation build configuration file, created by
# sphinx-quickstart on Tue Nov 15 00:13:52 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.dirname('..'))
sys.path.insert(0, os.path.dirname('../pymatgen'))
sys.path.insert(0, os.path.dirname('../..'))
from pymatgen import __version__, __author__
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.napoleon', 'sphinx.ext.viewcode', "sphinx.ext.mathjax"]
exclude_patterns = ['../**/tests*']
exclude_dirnames = ['../**/tests*']
autoclass_content = 'both'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pymatgen'
copyright = u'2011, ' + __author__
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'basic'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"nosidebar": True
}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["."]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "favicon.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pymatgendoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'pymatgen.tex', u'pymatgen Documentation', __author__, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pymatgen', u'pymatgen Documentation',
[__author__], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'pymatgen', u'pymatgen Documentation',
__author__, 'pymatgen', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'pymatgen'
epub_author = __author__
epub_publisher = u'Shyue Ping Ong, Anubhav Jain, Michael Kocher, Geoffroy Hautier, Dan Gunter, William Davidson Richards'
epub_copyright = copyright
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
| 31.787879 | 121 | 0.716132 |
import sys
import os
sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.dirname('..'))
sys.path.insert(0, os.path.dirname('../pymatgen'))
sys.path.insert(0, os.path.dirname('../..'))
from pymatgen import __version__, __author__
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.napoleon', 'sphinx.ext.viewcode', "sphinx.ext.mathjax"]
exclude_patterns = ['../**/tests*']
exclude_dirnames = ['../**/tests*']
autoclass_content = 'both'
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'pymatgen'
copyright = u'2011, ' + __author__
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'basic'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"nosidebar": True
}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["."]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "favicon.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pymatgendoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'pymatgen.tex', u'pymatgen Documentation', __author__, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pymatgen', u'pymatgen Documentation',
[__author__], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'pymatgen', u'pymatgen Documentation',
__author__, 'pymatgen', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'pymatgen'
epub_author = __author__
epub_publisher = u'Shyue Ping Ong, Anubhav Jain, Michael Kocher, Geoffroy Hautier, Dan Gunter, William Davidson Richards'
epub_copyright = copyright
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
| true | true |
f73a79c1205e1ae17b0e4c7f5e1b7c1ba58c9c4b | 12,000 | py | Python | train.py | hieuhoang/parSentExtract | 9e7aa4c0f0f93934d7f6986d655195bf5bd8e03d | [
"MIT"
] | 1 | 2018-12-14T18:49:54.000Z | 2018-12-14T18:49:54.000Z | train.py | hieuhoang/parSentExtract | 9e7aa4c0f0f93934d7f6986d655195bf5bd8e03d | [
"MIT"
] | null | null | null | train.py | hieuhoang/parSentExtract | 9e7aa4c0f0f93934d7f6986d655195bf5bd8e03d | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
from six.moves import xrange
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "1"
import numpy as np
import tensorflow as tf
import utils
from model import Config, BiRNN
# ---- Data paths ----
tf.flags.DEFINE_string("source_train_path", "",
                       "Path to the file containing the source sentences to "
                       "train the model.")
tf.flags.DEFINE_string("target_train_path", "",
                       "Path to the file containing the target sentences to "
                       "train the model.")
tf.flags.DEFINE_string("source_valid_path", "",
                       "Path to the file containing the source sentences to "
                       "evaluate the model.")
tf.flags.DEFINE_string("target_valid_path", "",
                       "Path to the file containing the target sentences to "
                       "evaluate the model.")
tf.flags.DEFINE_string("checkpoint_dir", "./tflogs",
                       "Directory to save checkpoints and summaries of the model.")
# ---- Vocabulary sizes ----
tf.flags.DEFINE_integer("source_vocab_size", 100000,
                        "Number of the most frequent words to keep in the source "
                        "vocabulary.")
tf.flags.DEFINE_integer("target_vocab_size", 100000,
                        "Number of the most frequent words to keep in target "
                        "vocabulary.")
# ---- Optimization ----
tf.flags.DEFINE_float("learning_rate", 2e-4,
                      "Learning rate.")
tf.flags.DEFINE_float("max_gradient_norm", 5.0,
                      "Clip gradient to this norm.")
tf.flags.DEFINE_float("decision_threshold", 0.99,
                      "Decision threshold to predict a positive label.")
# ---- Model architecture ----
tf.flags.DEFINE_integer("embedding_size", 300,
                        "Size of each word embedding.")
tf.flags.DEFINE_integer("state_size", 300,
                        "Size of the recurrent state in the BiRNN encoder.")
tf.flags.DEFINE_integer("hidden_size", 128,
                        "Size of the hidden layer in the feed-forward neural "
                        "network.")
tf.flags.DEFINE_integer("num_layers", 1,
                        "Number of layers in the BiRNN encoder.")
tf.flags.DEFINE_string("source_embeddings_path", None,
                       "Pretrained embeddings to initialize the source embeddings "
                       "matrix.")
tf.flags.DEFINE_string("target_embeddings_path", None,
                       "Pretrained embeddings to initialize the target embeddings "
                       "matrix.")
tf.flags.DEFINE_boolean("fix_pretrained", False,
                        "If true fix pretrained embeddings.")
tf.flags.DEFINE_boolean("use_lstm", False,
                        "If true use LSTM cells. Otherwise use GRU cells.")
tf.flags.DEFINE_boolean("use_mean_pooling", False,
                        "If true use mean pooling for final sentence representation.")
tf.flags.DEFINE_boolean("use_max_pooling", False,
                        "If true use max pooling for final sentence representation.")
# ---- Training schedule ----
tf.flags.DEFINE_integer("batch_size", 128,
                        "Batch size to use during training.")
tf.flags.DEFINE_integer("num_epochs", 15,
                        "Number of epochs to train the model.")
tf.flags.DEFINE_integer("num_negative", 5,
                        "Number of negative examples to sample per pair of "
                        "parallel sentences in training dataset.")
tf.flags.DEFINE_float("keep_prob_input", 0.8,
                      "Keep probability for dropout applied at the embedding layer.")
tf.flags.DEFINE_float("keep_prob_output", 0.7,
                      "Keep probability for dropout applied at the prediction layer.")
tf.flags.DEFINE_integer("steps_per_checkpoint", 200,
                        "Number of steps to save a model checkpoint.")
# Parsed flag values, read throughout this module.
FLAGS = tf.flags.FLAGS
def eval_epoch(sess, model, data_iterator, summary_writer):
    """Evaluate the model on the full evaluation set for one epoch.

    Streams every batch through the model's accuracy/precision/recall metric
    update ops, accumulates the mean loss, periodically writes summaries, and
    prints the epoch-level metrics.

    Args:
        sess: active tf.Session.
        model: BiRNN model exposing placeholders, mean_loss and metric ops.
        data_iterator: EvalIterator yielding (source, target, label) batches.
        summary_writer: tf.summary.FileWriter for evaluation summaries.
    """
    # Reset the streaming metric accumulators (local variables in TF 1.x).
    sess.run(tf.local_variables_initializer())
    num_iter = int(np.ceil(data_iterator.size / FLAGS.batch_size))
    epoch_loss = 0
    # Initialized up front so an empty dataset cannot raise NameError below.
    epoch_accuracy = epoch_precision = epoch_recall = 0.0
    for step in xrange(num_iter):
        source, target, label = data_iterator.next_batch(FLAGS.batch_size)
        source_len = utils.sequence_length(source)
        target_len = utils.sequence_length(target)
        feed_dict = {model.x_source: source,
                     model.x_target: target,
                     model.labels: label,
                     model.source_seq_length: source_len,
                     model.target_seq_length: target_len,
                     model.decision_threshold: FLAGS.decision_threshold}
        # Streaming metrics: each run updates the accumulators, so the values
        # from the final batch already reflect the whole epoch.
        loss_value, epoch_accuracy,\
        epoch_precision, epoch_recall = sess.run([model.mean_loss,
                                                  model.accuracy[1],
                                                  model.precision[1],
                                                  model.recall[1]],
                                                 feed_dict=feed_dict)
        epoch_loss += loss_value
        if step % FLAGS.steps_per_checkpoint == 0:
            summary = sess.run(model.summaries, feed_dict=feed_dict)
            summary_writer.add_summary(summary, global_step=data_iterator.global_step)
    # BUG FIX: the original divided by `step` (the last 0-based loop index),
    # which over-states the mean loss and raises ZeroDivisionError when
    # num_iter == 1. Divide by the actual number of batches instead.
    epoch_loss /= max(num_iter, 1)
    epoch_f1 = utils.f1_score(epoch_precision, epoch_recall)
    print("  Testing:  Loss = {:.6f}, Accuracy = {:.4f}, "
          "Precision = {:.4f}, Recall = {:.4f}, F1 = {:.4f}"
          .format(epoch_loss, epoch_accuracy,
                  epoch_precision, epoch_recall, epoch_f1))
def main(_):
    """Train the BiRNN sentence-pair classifier.

    Builds vocabularies and datasets from the flag-specified paths, trains
    for FLAGS.num_epochs, checkpoints and evaluates on the validation set
    at every epoch boundary, and writes train/valid TensorBoard summaries.
    """
    assert FLAGS.source_train_path, ("--source_train_path is required.")
    assert FLAGS.target_train_path, ("--target_train_path is required.")
    assert FLAGS.source_valid_path, ("--source_valid_path is required.")
    assert FLAGS.target_valid_path, ("--target_valid_path is required.")
    # Create vocabularies next to the training data.
    source_vocab_path = os.path.join(os.path.dirname(FLAGS.source_train_path),
                                     "vocabulary.source")
    target_vocab_path = os.path.join(os.path.dirname(FLAGS.source_train_path),
                                     "vocabulary.target")
    utils.create_vocabulary(source_vocab_path, FLAGS.source_train_path, FLAGS.source_vocab_size)
    utils.create_vocabulary(target_vocab_path, FLAGS.target_train_path, FLAGS.target_vocab_size)
    # Read vocabularies.
    source_vocab, rev_source_vocab = utils.initialize_vocabulary(source_vocab_path)
    target_vocab, rev_target_vocab = utils.initialize_vocabulary(target_vocab_path)
    # Read parallel sentences.
    parallel_data = utils.read_data(FLAGS.source_train_path, FLAGS.target_train_path,
                                    source_vocab, target_vocab)
    print("parallel_data", type(parallel_data), len(parallel_data))
    print("parallel_data[0]", type(parallel_data[0]), len(parallel_data[0]), parallel_data[0])
    # Read validation data set (the asserts above guarantee both paths are set).
    if FLAGS.source_valid_path and FLAGS.target_valid_path:
        valid_data = utils.read_data(FLAGS.source_valid_path, FLAGS.target_valid_path,
                                     source_vocab, target_vocab)
    # Initialize BiRNN.
    config = Config(len(source_vocab),
                    len(target_vocab),
                    FLAGS.embedding_size,
                    FLAGS.state_size,
                    FLAGS.hidden_size,
                    FLAGS.num_layers,
                    FLAGS.learning_rate,
                    FLAGS.max_gradient_norm,
                    FLAGS.use_lstm,
                    FLAGS.use_mean_pooling,
                    FLAGS.use_max_pooling,
                    FLAGS.source_embeddings_path,
                    FLAGS.target_embeddings_path,
                    FLAGS.fix_pretrained)
    model = BiRNN(config)
    # Build graph.
    model.build_graph()
    # Train model.
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        train_iterator = utils.TrainingIterator(parallel_data, FLAGS.num_negative)
        train_summary_writer = tf.summary.FileWriter(os.path.join(FLAGS.checkpoint_dir, "train"), sess.graph)
        if FLAGS.source_valid_path and FLAGS.target_valid_path:
            valid_iterator = utils.EvalIterator(valid_data)
            valid_summary_writer = tf.summary.FileWriter(os.path.join(FLAGS.checkpoint_dir, "valid"), sess.graph)
        epoch_loss = 0
        epoch_completed = 0
        batch_completed = 0
        num_iter = int(np.ceil(train_iterator.size / FLAGS.batch_size * FLAGS.num_epochs))
        start_time = time.time()
        # The format string has a single placeholder; the previous code also
        # passed valid_iterator.size, which str.format silently discarded.
        print("Training model on {} sentence pairs per epoch:".
              format(train_iterator.size))
        for step in xrange(num_iter):
            source, target, label = train_iterator.next_batch(FLAGS.batch_size)
            source_len = utils.sequence_length(source)
            target_len = utils.sequence_length(target)
            feed_dict = {model.x_source: source,
                         model.x_target: target,
                         model.labels: label,
                         model.source_seq_length: source_len,
                         model.target_seq_length: target_len,
                         model.input_dropout: FLAGS.keep_prob_input,
                         model.output_dropout: FLAGS.keep_prob_output,
                         model.decision_threshold: FLAGS.decision_threshold}
            _, loss_value, epoch_accuracy,\
            epoch_precision, epoch_recall = sess.run([model.train_op,
                                                      model.mean_loss,
                                                      model.accuracy[1],
                                                      model.precision[1],
                                                      model.recall[1]],
                                                     feed_dict=feed_dict)
            epoch_loss += loss_value
            batch_completed += 1
            # Write the model's training summaries.
            if step % FLAGS.steps_per_checkpoint == 0:
                summary = sess.run(model.summaries, feed_dict=feed_dict)
                train_summary_writer.add_summary(summary, global_step=step)
            # End of current epoch.
            if train_iterator.epoch_completed > epoch_completed:
                epoch_time = time.time() - start_time
                epoch_loss /= batch_completed
                epoch_f1 = utils.f1_score(epoch_precision, epoch_recall)
                epoch_completed += 1
                print("Epoch {} in {:.0f} sec\n"
                      "  Training: Loss = {:.6f}, Accuracy = {:.4f}, "
                      "Precision = {:.4f}, Recall = {:.4f}, F1 = {:.4f}"
                      .format(epoch_completed, epoch_time,
                              epoch_loss, epoch_accuracy,
                              epoch_precision, epoch_recall, epoch_f1))
                # Save a model checkpoint.
                checkpoint_path = os.path.join(FLAGS.checkpoint_dir, "model.ckpt")
                model.saver.save(sess, checkpoint_path, global_step=step)
                # Evaluate model on the validation set.
                if FLAGS.source_valid_path and FLAGS.target_valid_path:
                    eval_epoch(sess, model, valid_iterator, valid_summary_writer)
                # Initialize local variables for new epoch.
                batch_completed = 0
                epoch_loss = 0
                sess.run(tf.local_variables_initializer())
                start_time = time.time()
        print("Training done with {} steps.".format(num_iter))
        train_summary_writer.close()
        valid_summary_writer.close()
if __name__ == "__main__":
    # tf.app.run() parses command-line flags into FLAGS and then calls main().
    tf.app.run()
| 43.3213 | 113 | 0.593 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
from six.moves import xrange
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "1"
import numpy as np
import tensorflow as tf
import utils
from model import Config, BiRNN
tf.flags.DEFINE_string("source_train_path", "",
"Path to the file containing the source sentences to "
"train the model.")
tf.flags.DEFINE_string("target_train_path", "",
"Path to the file containing the target sentences to "
"train the model.")
tf.flags.DEFINE_string("source_valid_path", "",
"Path to the file containing the source sentences to "
"evaluate the model.")
tf.flags.DEFINE_string("target_valid_path", "",
"Path to the file containing the target sentences to "
"evaluate the model.")
tf.flags.DEFINE_string("checkpoint_dir", "./tflogs",
"Directory to save checkpoints and summaries of the model.")
tf.flags.DEFINE_integer("source_vocab_size", 100000,
"Number of the most frequent words to keep in the source "
"vocabulary.")
tf.flags.DEFINE_integer("target_vocab_size", 100000,
"Number of the most frequent words to keep in target "
"vocabulary.")
tf.flags.DEFINE_float("learning_rate", 2e-4,
"Learning rate.")
tf.flags.DEFINE_float("max_gradient_norm", 5.0,
"Clip gradient to this norm.")
tf.flags.DEFINE_float("decision_threshold", 0.99,
"Decision threshold to predict a positive label.")
tf.flags.DEFINE_integer("embedding_size", 300,
"Size of each word embedding.")
tf.flags.DEFINE_integer("state_size", 300,
"Size of the recurrent state in the BiRNN encoder.")
tf.flags.DEFINE_integer("hidden_size", 128,
"Size of the hidden layer in the feed-forward neural "
"network.")
tf.flags.DEFINE_integer("num_layers", 1,
"Number of layers in the BiRNN encoder.")
tf.flags.DEFINE_string("source_embeddings_path", None,
"Pretrained embeddings to initialize the source embeddings "
"matrix.")
tf.flags.DEFINE_string("target_embeddings_path", None,
"Pretrained embeddings to initialize the target embeddings "
"matrix.")
tf.flags.DEFINE_boolean("fix_pretrained", False,
"If true fix pretrained embeddings.")
tf.flags.DEFINE_boolean("use_lstm", False,
"If true use LSTM cells. Otherwise use GRU cells.")
tf.flags.DEFINE_boolean("use_mean_pooling", False,
"If true use mean pooling for final sentence representation.")
tf.flags.DEFINE_boolean("use_max_pooling", False,
"If true use max pooling for final sentence representation.")
tf.flags.DEFINE_integer("batch_size", 128,
"Batch size to use during training.")
tf.flags.DEFINE_integer("num_epochs", 15,
"Number of epochs to train the model.")
tf.flags.DEFINE_integer("num_negative", 5,
"Number of negative examples to sample per pair of "
"parallel sentences in training dataset.")
tf.flags.DEFINE_float("keep_prob_input", 0.8,
"Keep probability for dropout applied at the embedding layer.")
tf.flags.DEFINE_float("keep_prob_output", 0.7,
"Keep probability for dropout applied at the prediction layer.")
tf.flags.DEFINE_integer("steps_per_checkpoint", 200,
"Number of steps to save a model checkpoint.")
FLAGS = tf.flags.FLAGS
def eval_epoch(sess, model, data_iterator, summary_writer):
sess.run(tf.local_variables_initializer())
num_iter = int(np.ceil(data_iterator.size / FLAGS.batch_size))
epoch_loss = 0
for step in xrange(num_iter):
source, target, label = data_iterator.next_batch(FLAGS.batch_size)
source_len = utils.sequence_length(source)
target_len = utils.sequence_length(target)
feed_dict = {model.x_source: source,
model.x_target: target,
model.labels: label,
model.source_seq_length: source_len,
model.target_seq_length: target_len,
model.decision_threshold: FLAGS.decision_threshold}
loss_value, epoch_accuracy,\
epoch_precision, epoch_recall = sess.run([model.mean_loss,
model.accuracy[1],
model.precision[1],
model.recall[1]],
feed_dict=feed_dict)
epoch_loss += loss_value
if step % FLAGS.steps_per_checkpoint == 0:
summary = sess.run(model.summaries, feed_dict=feed_dict)
summary_writer.add_summary(summary, global_step=data_iterator.global_step)
epoch_loss /= step
epoch_f1 = utils.f1_score(epoch_precision, epoch_recall)
print(" Testing: Loss = {:.6f}, Accuracy = {:.4f}, "
"Precision = {:.4f}, Recall = {:.4f}, F1 = {:.4f}"
.format(epoch_loss, epoch_accuracy,
epoch_precision, epoch_recall, epoch_f1))
def main(_):
assert FLAGS.source_train_path, ("--source_train_path is required.")
assert FLAGS.target_train_path, ("--target_train_path is required.")
assert FLAGS.source_valid_path, ("--source_valid_path is required.")
assert FLAGS.target_valid_path, ("--target_valid_path is required.")
source_vocab_path = os.path.join(os.path.dirname(FLAGS.source_train_path),
"vocabulary.source")
target_vocab_path = os.path.join(os.path.dirname(FLAGS.source_train_path),
"vocabulary.target")
utils.create_vocabulary(source_vocab_path, FLAGS.source_train_path, FLAGS.source_vocab_size)
utils.create_vocabulary(target_vocab_path, FLAGS.target_train_path, FLAGS.target_vocab_size)
source_vocab, rev_source_vocab = utils.initialize_vocabulary(source_vocab_path)
target_vocab, rev_target_vocab = utils.initialize_vocabulary(target_vocab_path)
parallel_data = utils.read_data(FLAGS.source_train_path, FLAGS.target_train_path,
source_vocab, target_vocab)
print("parallel_data", type(parallel_data), len(parallel_data))
print("parallel_data[0]", type(parallel_data[0]), len(parallel_data[0]), parallel_data[0])
if FLAGS.source_valid_path and FLAGS.target_valid_path:
valid_data = utils.read_data(FLAGS.source_valid_path, FLAGS.target_valid_path,
source_vocab, target_vocab)
config = Config(len(source_vocab),
len(target_vocab),
FLAGS.embedding_size,
FLAGS.state_size,
FLAGS.hidden_size,
FLAGS.num_layers,
FLAGS.learning_rate,
FLAGS.max_gradient_norm,
FLAGS.use_lstm,
FLAGS.use_mean_pooling,
FLAGS.use_max_pooling,
FLAGS.source_embeddings_path,
FLAGS.target_embeddings_path,
FLAGS.fix_pretrained)
model = BiRNN(config)
model.build_graph()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
train_iterator = utils.TrainingIterator(parallel_data, FLAGS.num_negative)
train_summary_writer = tf.summary.FileWriter(os.path.join(FLAGS.checkpoint_dir, "train"), sess.graph)
if FLAGS.source_valid_path and FLAGS.target_valid_path:
valid_iterator = utils.EvalIterator(valid_data)
valid_summary_writer = tf.summary.FileWriter(os.path.join(FLAGS.checkpoint_dir, "valid"), sess.graph)
epoch_loss = 0
epoch_completed = 0
batch_completed = 0
num_iter = int(np.ceil(train_iterator.size / FLAGS.batch_size * FLAGS.num_epochs))
start_time = time.time()
print("Training model on {} sentence pairs per epoch:".
format(train_iterator.size, valid_iterator.size))
for step in xrange(num_iter):
source, target, label = train_iterator.next_batch(FLAGS.batch_size)
source_len = utils.sequence_length(source)
target_len = utils.sequence_length(target)
feed_dict = {model.x_source: source,
model.x_target: target,
model.labels: label,
model.source_seq_length: source_len,
model.target_seq_length: target_len,
model.input_dropout: FLAGS.keep_prob_input,
model.output_dropout: FLAGS.keep_prob_output,
model.decision_threshold: FLAGS.decision_threshold}
_, loss_value, epoch_accuracy,\
epoch_precision, epoch_recall = sess.run([model.train_op,
model.mean_loss,
model.accuracy[1],
model.precision[1],
model.recall[1]],
feed_dict=feed_dict)
epoch_loss += loss_value
batch_completed += 1
if step % FLAGS.steps_per_checkpoint == 0:
summary = sess.run(model.summaries, feed_dict=feed_dict)
train_summary_writer.add_summary(summary, global_step=step)
# End of current epoch.
if train_iterator.epoch_completed > epoch_completed:
epoch_time = time.time() - start_time
epoch_loss /= batch_completed
epoch_f1 = utils.f1_score(epoch_precision, epoch_recall)
epoch_completed += 1
print("Epoch {} in {:.0f} sec\n"
" Training: Loss = {:.6f}, Accuracy = {:.4f}, "
"Precision = {:.4f}, Recall = {:.4f}, F1 = {:.4f}"
.format(epoch_completed, epoch_time,
epoch_loss, epoch_accuracy,
epoch_precision, epoch_recall, epoch_f1))
# Save a model checkpoint.
checkpoint_path = os.path.join(FLAGS.checkpoint_dir, "model.ckpt")
model.saver.save(sess, checkpoint_path, global_step=step)
# Evaluate model on the validation set.
if FLAGS.source_valid_path and FLAGS.target_valid_path:
eval_epoch(sess, model, valid_iterator, valid_summary_writer)
# Initialize local variables for new epoch.
batch_completed = 0
epoch_loss = 0
sess.run(tf.local_variables_initializer())
start_time = time.time()
print("Training done with {} steps.".format(num_iter))
train_summary_writer.close()
valid_summary_writer.close()
if __name__ == "__main__":
tf.app.run()
| true | true |
f73a7a4383cb16b6d9ea8d238293baed3c9cfc96 | 13,559 | py | Python | app/backend/src/couchers/tasks.py | foormea/couchers | 4015769e5cdfbb9b9e10460fd979cccc5f203b88 | [
"MIT"
] | 226 | 2020-12-01T23:46:57.000Z | 2022-03-30T20:48:48.000Z | app/backend/src/couchers/tasks.py | foormea/couchers | 4015769e5cdfbb9b9e10460fd979cccc5f203b88 | [
"MIT"
] | 1,713 | 2020-10-06T14:20:02.000Z | 2022-03-31T17:22:49.000Z | app/backend/src/couchers/tasks.py | foormea/couchers | 4015769e5cdfbb9b9e10460fd979cccc5f203b88 | [
"MIT"
] | 80 | 2020-11-19T00:12:55.000Z | 2022-03-27T19:21:26.000Z | import logging
from datetime import timedelta
from sqlalchemy.sql import func, select
from couchers import email, urls
from couchers.config import config
from couchers.constants import EMAIL_TOKEN_VALIDITY
from couchers.crypto import urlsafe_secure_token
from couchers.db import session_scope
from couchers.models import ClusterRole, ClusterSubscription, LoginToken, Node, PasswordResetToken, User
from couchers.sql import couchers_select as select
from couchers.utils import now
logger = logging.getLogger(__name__)
def send_signup_email(flow):
    """Send (or resend) the appropriate signup email for a signup *flow*.

    Chooses between a plain "continue signup" link (email already verified),
    a resend of a still-valid verification link, or a freshly minted
    verification token, then enqueues the matching email template.
    """
    logger.info(f"Sending signup email to {flow.email=}:")
    # whether we've sent an email at all yet
    email_sent_before = flow.email_sent
    if flow.email_verified:
        # we just send a link to continue, not a verification link
        signup_link = urls.signup_link(token=flow.flow_token)
    elif flow.email_token and flow.token_is_valid:
        # if the verification email was sent and still is not expired, just resend the verification email
        signup_link = urls.signup_link(token=flow.email_token)
    else:
        # otherwise send a fresh email with new token
        token = urlsafe_secure_token()
        flow.email_verified = False
        flow.email_token = token
        flow.email_token_expiry = now() + EMAIL_TOKEN_VALIDITY
        signup_link = urls.signup_link(token=flow.email_token)
    flow.email_sent = True
    logger.info(f"Link is: {signup_link}")
    # First email uses the verification template; any later email just asks
    # the user to continue the flow.
    template = "signup_verify" if not email_sent_before else "signup_continue"
    email.enqueue_email_from_template(flow.email, template, template_args={"flow": flow, "signup_link": signup_link})
def send_login_email(session, user):
    """Create a two-hour login token for *user*, persist it, and email a magic-login link.

    Returns the new LoginToken.
    """
    expiry = now() + timedelta(hours=2)
    login_token = LoginToken(token=urlsafe_secure_token(), user=user, expiry=expiry)
    session.add(login_token)
    login_link = urls.login_link(login_token=login_token.token)
    logger.info(f"Sending login email to {user=}:")
    logger.info(f"Email for {user.username=} to {user.email=}")
    logger.info(f"Token: {login_token=} ({login_token.created=}")
    logger.info(f"Link is: {login_link}")
    template_args = {"user": user, "login_link": login_link}
    email.enqueue_email_from_template(user.email, "login", template_args=template_args)
    return login_token
def send_api_key_email(session, user, token, expiry):
    """Email *user* their newly issued API key *token* along with its *expiry*."""
    logger.info(f"Sending API key email to {user=}:")
    template_args = {"user": user, "token": token, "expiry": expiry}
    email.enqueue_email_from_template(user.email, "api_key", template_args=template_args)
def send_password_reset_email(session, user):
    """Create a two-hour password reset token, persist it, and email the reset link.

    Returns the new PasswordResetToken.
    """
    expiry = now() + timedelta(hours=2)
    password_reset_token = PasswordResetToken(token=urlsafe_secure_token(), user=user, expiry=expiry)
    session.add(password_reset_token)
    password_reset_link = urls.password_reset_link(password_reset_token=password_reset_token.token)
    logger.info(f"Sending password reset email to {user=}:")
    logger.info(f"Link is: {password_reset_link}")
    template_args = {"user": user, "password_reset_link": password_reset_link}
    email.enqueue_email_from_template(user.email, "password_reset", template_args=template_args)
    return password_reset_token
def send_content_report_email(content_report):
    """Forward a user-submitted content report to the configured moderation inbox."""
    target_email = config["REPORTS_EMAIL_RECIPIENT"]
    logger.info(f"Sending content report email to {target_email=}")
    template_args = {
        "report": content_report,
        "author_user_user_link": urls.user_link(content_report.author_user.username),
        "reporting_user_user_link": urls.user_link(content_report.reporting_user.username),
    }
    email.enqueue_email_from_template(target_email, "content_report", template_args=template_args)
def send_new_host_request_email(host_request):
    """Notify the host by email that a surfer has sent them a new hosting request."""
    logger.info(f"Sending host request email to {host_request.host=}:")
    logger.info(f"Host request sent by {host_request.surfer}")
    logger.info(f"Email for {host_request.host.username=} sent to {host_request.host.email=}")
    template_args = {
        "host_request": host_request,
        "host_request_link": urls.host_request_link_host(),
    }
    email.enqueue_email_from_template(host_request.host.email, "host_request", template_args=template_args)
def send_host_request_accepted_email_to_guest(host_request):
    """Notify the surfer (guest) by email that their host request was accepted."""
    logger.info(f"Sending host request accepted email to guest: {host_request.surfer=}:")
    logger.info(f"Email for {host_request.surfer.username=} sent to {host_request.surfer.email=}")
    template_args = {
        "host_request": host_request,
        "host_request_link": urls.host_request_link_guest(),
    }
    email.enqueue_email_from_template(host_request.surfer.email, "host_request_accepted_guest", template_args=template_args)
def send_host_request_rejected_email_to_guest(host_request):
    """Notify the surfer (guest) by email that their host request was rejected."""
    logger.info(f"Sending host request rejected email to guest: {host_request.surfer=}:")
    logger.info(f"Email for {host_request.surfer.username=} sent to {host_request.surfer.email=}")
    template_args = {
        "host_request": host_request,
        "host_request_link": urls.host_request_link_guest(),
    }
    email.enqueue_email_from_template(host_request.surfer.email, "host_request_rejected_guest", template_args=template_args)
def send_host_request_confirmed_email_to_host(host_request):
    """Notify the host by email that the surfer confirmed the host request."""
    logger.info(f"Sending host request confirmed email to host: {host_request.host=}:")
    logger.info(f"Email for {host_request.host.username=} sent to {host_request.host.email=}")
    template_args = {
        "host_request": host_request,
        "host_request_link": urls.host_request_link_host(),
    }
    email.enqueue_email_from_template(host_request.host.email, "host_request_confirmed_host", template_args=template_args)
def send_host_request_cancelled_email_to_host(host_request):
    """Notify the host by email that the surfer cancelled the host request."""
    logger.info(f"Sending host request cancelled email to host: {host_request.host=}:")
    logger.info(f"Email for {host_request.host.username=} sent to {host_request.host.email=}")
    template_args = {
        "host_request": host_request,
        "host_request_link": urls.host_request_link_host(),
    }
    email.enqueue_email_from_template(host_request.host.email, "host_request_cancelled_host", template_args=template_args)
def send_friend_request_email(friend_relationship):
    """Notify the recipient of a friend relationship that they have a new friend request."""
    friend_requests_link = urls.friend_requests_link()
    logger.info(f"Sending friend request email to {friend_relationship.to_user=}:")
    logger.info(f"Email for {friend_relationship.to_user.username=} sent to {friend_relationship.to_user.email=}")
    logger.info(f"Friend request sent by {friend_relationship.from_user.username=}")
    template_args = {
        "friend_relationship": friend_relationship,
        "friend_requests_link": friend_requests_link,
    }
    email.enqueue_email_from_template(friend_relationship.to_user.email, "friend_request", template_args=template_args)
def send_friend_request_accepted_email(friend_relationship):
    """Tell the original requester by email that their friend request was accepted."""
    logger.info(f"Sending friend request acceptance email to {friend_relationship.from_user=}:")
    logger.info(f"Email for {friend_relationship.from_user.username=} sent to {friend_relationship.from_user.email=}")
    template_args = {
        "friend_relationship": friend_relationship,
        "to_user_user_link": urls.user_link(friend_relationship.to_user.username),
    }
    email.enqueue_email_from_template(friend_relationship.from_user.email, "friend_request_accepted", template_args=template_args)
def send_host_reference_email(reference, both_written):
    """Email the recipient of a new host-request reference.

    both_written iff both the surfer and hoster wrote a reference
    """
    assert reference.host_request_id
    logger.info(f"Sending host reference email to {reference.to_user=} for {reference.id=}")
    # The recipient surfed iff they are not the one who wrote the reference
    # from the surfer side (i.e. the reference came from the host).
    recipient_surfed = reference.host_request.surfer_user_id != reference.from_user_id
    template_args = {
        "reference": reference,
        "surfed": recipient_surfed,
        "both_written": both_written,
    }
    email.enqueue_email_from_template(reference.to_user.email, "host_reference", template_args=template_args)
def send_friend_reference_email(reference):
    """Email the recipient of a new friend (non-host-request) reference."""
    assert not reference.host_request_id
    logger.info(f"Sending friend reference email to {reference.to_user=} for {reference.id=}")
    template_args = {"reference": reference}
    email.enqueue_email_from_template(reference.to_user.email, "friend_reference", template_args=template_args)
def send_reference_reminder_email(user, other_user, host_request, surfed, time_left_text):
    """Remind *user* that only *time_left_text* remains to leave *other_user* a reference."""
    logger.info(f"Sending host reference email to {user=}, they have {time_left_text} left to write a ref")
    reference_type = "surfed" if surfed else "hosted"
    leave_reference_link = urls.leave_reference_link(reference_type, other_user.id, host_request.conversation_id)
    template_args = {
        "user": user,
        "other_user": other_user,
        "host_request": host_request,
        "leave_reference_link": leave_reference_link,
        "surfed": surfed,
        "time_left_text": time_left_text,
    }
    email.enqueue_email_from_template(user.email, "reference_reminder", template_args=template_args)
def send_password_changed_email(user):
    """Notify *user* by email that their password was just changed."""
    logger.info(f"Sending password changed (notification) email to {user=}")
    template_args = {"user": user}
    email.enqueue_email_from_template(user.email, "password_changed", template_args=template_args)
def send_email_changed_notification_email(user):
    """Notify the user's original address that an email-address change was requested."""
    logger.info(
        f"Sending email changed (notification) email to {user=} (old email: {user.email=}, new email: {user.new_email=})"
    )
    template_args = {"user": user}
    email.enqueue_email_from_template(user.email, "email_changed_notification", template_args=template_args)
def send_email_changed_confirmation_to_old_email(user):
    """Ask the user's current (old) address to confirm the pending email change."""
    logger.info(
        f"Sending email changed (confirmation) email to {user=}'s old email address, (old email: {user.email}, new email: {user.new_email=})"
    )
    confirmation_link = urls.change_email_link(confirmation_token=user.old_email_token)
    template_args = {"user": user, "confirmation_link": confirmation_link}
    email.enqueue_email_from_template(user.email, "email_changed_confirmation_old_email", template_args=template_args)
def send_email_changed_confirmation_to_new_email(user):
    """Ask the user's new address to confirm the pending email change."""
    logger.info(
        f"Sending email changed (confirmation) email to {user=}'s new email address, (old email: {user.email}, new email: {user.new_email=})"
    )
    confirmation_link = urls.change_email_link(confirmation_token=user.new_email_token)
    template_args = {"user": user, "confirmation_link": confirmation_link}
    email.enqueue_email_from_template(user.new_email, "email_changed_confirmation_new_email", template_args=template_args)
def send_onboarding_email(user, email_number):
    """Send email number *email_number* of the onboarding sequence to *user*."""
    template_args = {
        "user": user,
        "app_link": urls.app_link(),
        "profile_link": urls.profile_link(),
        "edit_profile_link": urls.edit_profile_link(),
    }
    # Template names are "onboarding1", "onboarding2", ...
    email.enqueue_email_from_template(user.email, f"onboarding{email_number}", template_args=template_args)
def send_donation_email(user, amount, receipt_url):
    """Thank *user* for donating *amount* and link them to their payment receipt."""
    template_args = {"user": user, "amount": amount, "receipt_url": receipt_url}
    email.enqueue_email_from_template(user.email, "donation_received", template_args=template_args)
def maybe_send_contributor_form_email(form):
    """Forward a contributor form to the configured recipient, but only if the form requests notification."""
    target_email = config["CONTRIBUTOR_FORM_EMAIL_RECIPIENT"]
    if not form.should_notify:
        return
    template_args = {"form": form, "user_link": urls.user_link(form.user.username)}
    email.enqueue_email_from_template(target_email, "contributor_form", template_args=template_args)
def enforce_community_memberships():
    """
    Go through all communities and make sure every user in the polygon is also a member
    """
    with session_scope() as session:
        for node in session.execute(select(Node)).scalars().all():
            # Subquery: ids of users already subscribed to this community's
            # official cluster.
            existing_users = select(ClusterSubscription.user_id).where(
                ClusterSubscription.cluster == node.official_cluster
            )
            # Visible users whose location falls inside the community polygon
            # (PostGIS ST_Contains) and who are not yet subscribed.
            users_needing_adding = (
                session.execute(
                    select(User)
                    .where(User.is_visible)
                    .where(func.ST_Contains(node.geom, User.geom))
                    .where(~User.id.in_(existing_users))
                )
                .scalars()
                .all()
            )
            for user in users_needing_adding:
                node.official_cluster.cluster_subscriptions.append(
                    ClusterSubscription(
                        user=user,
                        role=ClusterRole.member,
                    )
                )
            # Commit after each community so progress is persisted incrementally.
            session.commit()
def enforce_community_memberships_for_user(session, user):
    """Subscribe *user* (as a member) to every community whose polygon contains their location."""
    containing_nodes = session.execute(select(Node).where(func.ST_Contains(Node.geom, user.geom))).scalars().all()
    for node in containing_nodes:
        subscription = ClusterSubscription(user=user, role=ClusterRole.member)
        node.official_cluster.cluster_subscriptions.append(subscription)
    session.commit()
| 36.157333 | 141 | 0.685965 | import logging
from datetime import timedelta
from sqlalchemy.sql import func, select
from couchers import email, urls
from couchers.config import config
from couchers.constants import EMAIL_TOKEN_VALIDITY
from couchers.crypto import urlsafe_secure_token
from couchers.db import session_scope
from couchers.models import ClusterRole, ClusterSubscription, LoginToken, Node, PasswordResetToken, User
from couchers.sql import couchers_select as select
from couchers.utils import now
logger = logging.getLogger(__name__)
def send_signup_email(flow):
logger.info(f"Sending signup email to {flow.email=}:")
email_sent_before = flow.email_sent
if flow.email_verified:
# we just send a link to continue, not a verification link
signup_link = urls.signup_link(token=flow.flow_token)
elif flow.email_token and flow.token_is_valid:
# if the verification email was sent and still is not expired, just resend the verification email
signup_link = urls.signup_link(token=flow.email_token)
else:
# otherwise send a fresh email with new token
token = urlsafe_secure_token()
flow.email_verified = False
flow.email_token = token
flow.email_token_expiry = now() + EMAIL_TOKEN_VALIDITY
signup_link = urls.signup_link(token=flow.email_token)
flow.email_sent = True
logger.info(f"Link is: {signup_link}")
template = "signup_verify" if not email_sent_before else "signup_continue"
email.enqueue_email_from_template(flow.email, template, template_args={"flow": flow, "signup_link": signup_link})
def send_login_email(session, user):
login_token = LoginToken(token=urlsafe_secure_token(), user=user, expiry=now() + timedelta(hours=2))
session.add(login_token)
logger.info(f"Sending login email to {user=}:")
logger.info(f"Email for {user.username=} to {user.email=}")
logger.info(f"Token: {login_token=} ({login_token.created=}")
login_link = urls.login_link(login_token=login_token.token)
logger.info(f"Link is: {login_link}")
email.enqueue_email_from_template(user.email, "login", template_args={"user": user, "login_link": login_link})
return login_token
def send_api_key_email(session, user, token, expiry):
logger.info(f"Sending API key email to {user=}:")
email.enqueue_email_from_template(
user.email, "api_key", template_args={"user": user, "token": token, "expiry": expiry}
)
def send_password_reset_email(session, user):
password_reset_token = PasswordResetToken(
token=urlsafe_secure_token(), user=user, expiry=now() + timedelta(hours=2)
)
session.add(password_reset_token)
logger.info(f"Sending password reset email to {user=}:")
password_reset_link = urls.password_reset_link(password_reset_token=password_reset_token.token)
logger.info(f"Link is: {password_reset_link}")
email.enqueue_email_from_template(
user.email, "password_reset", template_args={"user": user, "password_reset_link": password_reset_link}
)
return password_reset_token
def send_content_report_email(content_report):
target_email = config["REPORTS_EMAIL_RECIPIENT"]
logger.info(f"Sending content report email to {target_email=}")
email.enqueue_email_from_template(
target_email,
"content_report",
template_args={
"report": content_report,
"author_user_user_link": urls.user_link(content_report.author_user.username),
"reporting_user_user_link": urls.user_link(content_report.reporting_user.username),
},
)
def send_new_host_request_email(host_request):
logger.info(f"Sending host request email to {host_request.host=}:")
logger.info(f"Host request sent by {host_request.surfer}")
logger.info(f"Email for {host_request.host.username=} sent to {host_request.host.email=}")
email.enqueue_email_from_template(
host_request.host.email,
"host_request",
template_args={
"host_request": host_request,
"host_request_link": urls.host_request_link_host(),
},
)
def send_host_request_accepted_email_to_guest(host_request):
logger.info(f"Sending host request accepted email to guest: {host_request.surfer=}:")
logger.info(f"Email for {host_request.surfer.username=} sent to {host_request.surfer.email=}")
email.enqueue_email_from_template(
host_request.surfer.email,
"host_request_accepted_guest",
template_args={
"host_request": host_request,
"host_request_link": urls.host_request_link_guest(),
},
)
def send_host_request_rejected_email_to_guest(host_request):
logger.info(f"Sending host request rejected email to guest: {host_request.surfer=}:")
logger.info(f"Email for {host_request.surfer.username=} sent to {host_request.surfer.email=}")
email.enqueue_email_from_template(
host_request.surfer.email,
"host_request_rejected_guest",
template_args={
"host_request": host_request,
"host_request_link": urls.host_request_link_guest(),
},
)
def send_host_request_confirmed_email_to_host(host_request):
logger.info(f"Sending host request confirmed email to host: {host_request.host=}:")
logger.info(f"Email for {host_request.host.username=} sent to {host_request.host.email=}")
email.enqueue_email_from_template(
host_request.host.email,
"host_request_confirmed_host",
template_args={
"host_request": host_request,
"host_request_link": urls.host_request_link_host(),
},
)
def send_host_request_cancelled_email_to_host(host_request):
logger.info(f"Sending host request cancelled email to host: {host_request.host=}:")
logger.info(f"Email for {host_request.host.username=} sent to {host_request.host.email=}")
email.enqueue_email_from_template(
host_request.host.email,
"host_request_cancelled_host",
template_args={
"host_request": host_request,
"host_request_link": urls.host_request_link_host(),
},
)
def send_friend_request_email(friend_relationship):
friend_requests_link = urls.friend_requests_link()
logger.info(f"Sending friend request email to {friend_relationship.to_user=}:")
logger.info(f"Email for {friend_relationship.to_user.username=} sent to {friend_relationship.to_user.email=}")
logger.info(f"Friend request sent by {friend_relationship.from_user.username=}")
email.enqueue_email_from_template(
friend_relationship.to_user.email,
"friend_request",
template_args={
"friend_relationship": friend_relationship,
"friend_requests_link": friend_requests_link,
},
)
def send_friend_request_accepted_email(friend_relationship):
logger.info(f"Sending friend request acceptance email to {friend_relationship.from_user=}:")
logger.info(f"Email for {friend_relationship.from_user.username=} sent to {friend_relationship.from_user.email=}")
email.enqueue_email_from_template(
friend_relationship.from_user.email,
"friend_request_accepted",
template_args={
"friend_relationship": friend_relationship,
"to_user_user_link": urls.user_link(friend_relationship.to_user.username),
},
)
def send_host_reference_email(reference, both_written):
assert reference.host_request_id
logger.info(f"Sending host reference email to {reference.to_user=} for {reference.id=}")
email.enqueue_email_from_template(
reference.to_user.email,
"host_reference",
template_args={
"reference": reference,
# if this reference was written by the surfer, then the recipient hosted
"surfed": reference.host_request.surfer_user_id != reference.from_user_id,
"both_written": both_written,
},
)
def send_friend_reference_email(reference):
assert not reference.host_request_id
logger.info(f"Sending friend reference email to {reference.to_user=} for {reference.id=}")
email.enqueue_email_from_template(
reference.to_user.email,
"friend_reference",
template_args={
"reference": reference,
},
)
def send_reference_reminder_email(user, other_user, host_request, surfed, time_left_text):
    """Remind `user` that they still have time to write a reference for `other_user`.

    Args:
        user: recipient of the reminder.
        other_user: the other party of the host request the reference is about.
        host_request: the host request the pending reference belongs to.
        surfed: True if `user` surfed (was hosted); selects the link variant.
        time_left_text: human-readable time remaining to write the reference.
    """
    # Fixed: this previously logged "Sending host reference email", mislabelling
    # reminder sends as reference sends in the logs.
    logger.info(f"Sending reference reminder email to {user=}, they have {time_left_text} left to write a ref")
    email.enqueue_email_from_template(
        user.email,
        "reference_reminder",
        template_args={
            "user": user,
            "other_user": other_user,
            "host_request": host_request,
            "leave_reference_link": urls.leave_reference_link(
                "surfed" if surfed else "hosted", other_user.id, host_request.conversation_id
            ),
            "surfed": surfed,
            "time_left_text": time_left_text,
        },
    )
def send_password_changed_email(user):
    """Notify `user` by email that their password was changed."""
    logger.info(f"Sending password changed (notification) email to {user=}")
    template_args = {"user": user}
    email.enqueue_email_from_template(user.email, "password_changed", template_args=template_args)
def send_email_changed_notification_email(user):
    """Notify `user`'s current address that an email-address change was requested."""
    logger.info(
        f"Sending email changed (notification) email to {user=} (old email: {user.email=}, new email: {user.new_email=})"
    )
    template_args = {"user": user}
    email.enqueue_email_from_template(user.email, "email_changed_notification", template_args=template_args)
def send_email_changed_confirmation_to_old_email(user):
    """Send a change-confirmation link to `user`'s *old* (current) email address."""
    logger.info(
        f"Sending email changed (confirmation) email to {user=}'s old email address, (old email: {user.email}, new email: {user.new_email=})"
    )
    # The old-address token authorises the change from the current address's side.
    link = urls.change_email_link(confirmation_token=user.old_email_token)
    email.enqueue_email_from_template(
        user.email,
        "email_changed_confirmation_old_email",
        template_args={"user": user, "confirmation_link": link},
    )
def send_email_changed_confirmation_to_new_email(user):
    """Send a change-confirmation link to `user`'s *new* (pending) email address."""
    logger.info(
        f"Sending email changed (confirmation) email to {user=}'s new email address, (old email: {user.email}, new email: {user.new_email=})"
    )
    # The new-address token proves the user controls the address being switched to.
    link = urls.change_email_link(confirmation_token=user.new_email_token)
    email.enqueue_email_from_template(
        user.new_email,
        "email_changed_confirmation_new_email",
        template_args={"user": user, "confirmation_link": link},
    )
def send_onboarding_email(user, email_number):
    """Queue the numbered onboarding email (template "onboarding<N>") for `user`."""
    template_name = f"onboarding{email_number}"
    template_args = {
        "user": user,
        "app_link": urls.app_link(),
        "profile_link": urls.profile_link(),
        "edit_profile_link": urls.edit_profile_link(),
    }
    email.enqueue_email_from_template(user.email, template_name, template_args=template_args)
def send_donation_email(user, amount, receipt_url):
    """Thank `user` for a donation of `amount`, linking their `receipt_url`."""
    template_args = {"user": user, "amount": amount, "receipt_url": receipt_url}
    email.enqueue_email_from_template(user.email, "donation_received", template_args=template_args)
def maybe_send_contributor_form_email(form):
    """Forward a submitted contributor form to the configured recipient, if flagged.

    Does nothing when `form.should_notify` is falsy.
    """
    # Read config unconditionally (as before), so a missing key fails loudly
    # even for forms that would not be forwarded.
    recipient = config["CONTRIBUTOR_FORM_EMAIL_RECIPIENT"]
    if not form.should_notify:
        return
    email.enqueue_email_from_template(
        recipient,
        "contributor_form",
        template_args={"form": form, "user_link": urls.user_link(form.user.username)},
    )
def enforce_community_memberships():
    """Subscribe every visible user located inside each community node to that
    node's official cluster, skipping users who are already subscribed.
    """
    with session_scope() as session:
        all_nodes = session.execute(select(Node)).scalars().all()
        for node in all_nodes:
            # Subquery: user ids already subscribed to this node's official cluster.
            already_subscribed = select(ClusterSubscription.user_id).where(
                ClusterSubscription.cluster == node.official_cluster
            )
            missing_users_query = (
                select(User)
                .where(User.is_visible)
                .where(func.ST_Contains(node.geom, User.geom))
                .where(~User.id.in_(already_subscribed))
            )
            for user in session.execute(missing_users_query).scalars().all():
                subscription = ClusterSubscription(user=user, role=ClusterRole.member)
                node.official_cluster.cluster_subscriptions.append(subscription)
            # Commit after each node, matching the original per-node transaction scope.
            session.commit()
def enforce_community_memberships_for_user(session, user):
    """Add `user` as a member of the official cluster of every node containing them.

    NOTE(review): unlike the bulk variant, this does not skip nodes the user is
    already subscribed to — presumably it is only called for fresh users; confirm
    against callers.
    """
    containing_nodes = select(Node).where(func.ST_Contains(Node.geom, user.geom))
    for node in session.execute(containing_nodes).scalars().all():
        membership = ClusterSubscription(user=user, role=ClusterRole.member)
        node.official_cluster.cluster_subscriptions.append(membership)
    session.commit()
| true | true |
f73a7aae1e6bdd5f3485cb8a370873294816ad6c | 1,642 | py | Python | ivi/tektronix/tektronixMDO3054.py | sacherjj/python-ivi | 6dd1ba93d65dc30a652a3a1b34c66921d94315e8 | [
"MIT"
] | 161 | 2015-01-23T17:43:01.000Z | 2022-03-29T14:42:42.000Z | ivi/tektronix/tektronixMDO3054.py | sacherjj/python-ivi | 6dd1ba93d65dc30a652a3a1b34c66921d94315e8 | [
"MIT"
] | 45 | 2015-01-15T13:35:04.000Z | 2021-06-03T01:58:55.000Z | ivi/tektronix/tektronixMDO3054.py | sacherjj/python-ivi | 6dd1ba93d65dc30a652a3a1b34c66921d94315e8 | [
"MIT"
] | 87 | 2015-01-31T10:55:23.000Z | 2022-03-17T08:18:47.000Z | """
Python Interchangeable Virtual Instrument Library
Copyright (c) 2016-2017 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .tektronixMDO3000 import *
class tektronixMDO3054(tektronixMDO3000):
    """Tektronix MDO3054 IVI oscilloscope driver."""

    def __init__(self, *args, **kwargs):
        # Only set the instrument ID if a subclass has not already provided one.
        self.__dict__.setdefault('_instrument_id', 'MDO3054')
        super(tektronixMDO3054, self).__init__(*args, **kwargs)
        # Model-specific capabilities.
        self._bandwidth = 500e6
        self._analog_channel_count = 4
        self._digital_channel_count = 16
        # AFG option: one arbitrary function generator output.
        self._output_count = 1
        self._init_channels()
        self._init_outputs()
| 35.695652 | 77 | 0.760049 |
from .tektronixMDO3000 import *
class tektronixMDO3054(tektronixMDO3000):
def __init__(self, *args, **kwargs):
self.__dict__.setdefault('_instrument_id', 'MDO3054')
super(tektronixMDO3054, self).__init__(*args, **kwargs)
self._analog_channel_count = 4
self._digital_channel_count = 16
self._bandwidth = 500e6
self._output_count = 1
self._init_channels()
self._init_outputs()
| true | true |
f73a7c3128e5dd708a34c79443e2be9357338263 | 452 | py | Python | test/solution_tests/SUM/test_sum.py | DPNT-Sourcecode/CHK-ncxx01 | ff46ea745f069fed7bd366609ea1e0481e854d3c | [
"Apache-2.0"
] | null | null | null | test/solution_tests/SUM/test_sum.py | DPNT-Sourcecode/CHK-ncxx01 | ff46ea745f069fed7bd366609ea1e0481e854d3c | [
"Apache-2.0"
] | null | null | null | test/solution_tests/SUM/test_sum.py | DPNT-Sourcecode/CHK-ncxx01 | ff46ea745f069fed7bd366609ea1e0481e854d3c | [
"Apache-2.0"
] | null | null | null | from solutions.SUM import sum_solution
class TestSum():
    """Tests for sum_solution.compute."""

    def test_sum(self):
        """Happy path: compute adds its two arguments."""
        assert sum_solution.compute(1, 2) == 3

    def test_check_bounds(self):
        """compute must raise ValueError when an argument is out of bounds.

        Fixed: the original caught the expected ValueError but passed silently
        when no exception was raised at all (and bound an unused `sol`).
        """
        try:
            sum_solution.compute(-1, 2)
        except ValueError as e:
            assert str(e) == "Passed in value out of bounds"
        else:
            assert False, "compute(-1, 2) should have raised ValueError"
class TestSum():
def test_sum(self):
assert sum_solution.compute(1, 2) == 3
def test_check_bounds(self):
try:
sol = sum_solution.compute(-1, 2)
except ValueError as e:
assert str(e) == "Passed in value out of bounds"
| true | true |
f73a7c4ef45e40c07dffff95faa42a3018198639 | 3,740 | py | Python | WitQuake/settings.py | abhishek2x/WitQuake-Namecheap | 834e5003b9d231277e8cc521427f4dec58e992b5 | [
"MIT"
] | null | null | null | WitQuake/settings.py | abhishek2x/WitQuake-Namecheap | 834e5003b9d231277e8cc521427f4dec58e992b5 | [
"MIT"
] | 4 | 2021-09-29T06:57:37.000Z | 2022-03-12T00:42:17.000Z | WitQuake/settings.py | abhishek2x/WitQuake-Namecheap | 834e5003b9d231277e8cc521427f4dec58e992b5 | [
"MIT"
] | null | null | null | import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '9u1))8ol#(lg^cyoxbw4-+50uue^a+p%vdsfv%$^#$^&gbg6346sddfbg^$%dsn+_t#9y)yjl4!%r)bv'
# SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', 'cg#p$g+j9tax!#a3cup@1$8obtm;vdanfn3kt43256Q#$#Re*#@$*@(%(_@#+t+_+_+#)r%+$#@)^_%@#$%)$#2_+&k3q+pmu)5%asj6yjpkag')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
# DEBUG = os.environ.get('DJANGO_DEBUG', '') != 'False'
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
# Default apps
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# My apps
'community',
'learn',
'registration',
# Third Party Libraries
'crispy_forms',
]
CRISPY_TEMPLATE_PACK = 'bootstrap4'
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'WitQuake.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'WitQuake.wsgi.application'
# Database
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.witquakedb'),
}
}
# Password validation
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# INTERNATIONALIZATION
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Kolkata'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# ACCOUNT SETTING
QUOTES_PER_PAGE = 2
ACCOUNT_ACTIVATION_DAYS = 3
# EMAIL_HOST = 'mail.domain.com'
# EMAIL_HOST_USER = 'abc@domain.com'
# EMAIL_HOST_PASSWORD = 'abcdef'
# DEFAULT_FROM_EMAIL = 'abc@domain.com'
# SERVER_EMAIL = 'abc@domain.com'
# EMAIL_PORT = 25
# EMAIL_USE_TLS = False
EMAIL_HOST = 'mail.witquake.co.in'
EMAIL_HOST_USER = 'services@witquake.co.in'
EMAIL_HOST_PASSWORD = 'witquake@123'
DEFAULT_FROM_EMAIL = 'services@witquake.co.in'
SERVER_EMAIL = 'services@witquake.co.in'
EMAIL_PORT = 25
EMAIL_USE_TLS = False
# STATIC FILES (CSS, JavaScript, Images)
LOGIN_REDIRECT_URL = "/forum/question"
STATIC_ROOT = os.path.join(BASE_DIR + '/staticfiles')
STATIC_URL = '/static/'
MEDIA_URL = '/images/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
]
MEDIA_ROOT = os.path.join(BASE_DIR + '/static/images')
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
| 24.129032 | 164 | 0.686096 | import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = '9u1))8ol#(lg^cyoxbw4-+50uue^a+p%vdsfv%$^#$^&gbg6346sddfbg^$%dsn+_t#9y)yjl4!%r)bv'
DEBUG = False
# DEBUG = os.environ.get('DJANGO_DEBUG', '') != 'False'
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
# Default apps
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# My apps
'community',
'learn',
'registration',
# Third Party Libraries
'crispy_forms',
]
CRISPY_TEMPLATE_PACK = 'bootstrap4'
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'WitQuake.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'WitQuake.wsgi.application'
# Database
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.witquakedb'),
}
}
# Password validation
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# INTERNATIONALIZATION
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Kolkata'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# ACCOUNT SETTING
QUOTES_PER_PAGE = 2
ACCOUNT_ACTIVATION_DAYS = 3
# EMAIL_HOST = 'mail.domain.com'
# EMAIL_HOST_USER = 'abc@domain.com'
# EMAIL_HOST_PASSWORD = 'abcdef'
# DEFAULT_FROM_EMAIL = 'abc@domain.com'
# SERVER_EMAIL = 'abc@domain.com'
# EMAIL_PORT = 25
# EMAIL_USE_TLS = False
EMAIL_HOST = 'mail.witquake.co.in'
EMAIL_HOST_USER = 'services@witquake.co.in'
EMAIL_HOST_PASSWORD = 'witquake@123'
DEFAULT_FROM_EMAIL = 'services@witquake.co.in'
SERVER_EMAIL = 'services@witquake.co.in'
EMAIL_PORT = 25
EMAIL_USE_TLS = False
# STATIC FILES (CSS, JavaScript, Images)
LOGIN_REDIRECT_URL = "/forum/question"
STATIC_ROOT = os.path.join(BASE_DIR + '/staticfiles')
STATIC_URL = '/static/'
MEDIA_URL = '/images/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
]
MEDIA_ROOT = os.path.join(BASE_DIR + '/static/images')
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
| true | true |
f73a7d07aba5c6eda2e5833be150bdea23004471 | 3,088 | py | Python | tbot/modules/commands.py | sonjek/python-telegram-bot-warframe | 7c4b2d003aa9963f271cbace1993e79e23009c26 | [
"BSD-3-Clause"
] | 1 | 2020-12-21T23:30:38.000Z | 2020-12-21T23:30:38.000Z | tbot/modules/commands.py | sonjek/warframe_notification_tbot | 7c4b2d003aa9963f271cbace1993e79e23009c26 | [
"BSD-3-Clause"
] | null | null | null | tbot/modules/commands.py | sonjek/warframe_notification_tbot | 7c4b2d003aa9963f271cbace1993e79e23009c26 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
import sys
import traceback
from telegram import ParseMode
from . import menus, keyboards
from ..sources import wf, twitch
from ..utils import utils
from ..utils.logging import logger
from ..utils.loadconfig import config
warframe = wf.Warframe()
tw = twitch.Twitch()
def error(update, context):
trace = ''.join(traceback.format_tb(sys.exc_info()[2]))
text = f'The error <code>{context.error}</code> happened. The full traceback:\n\n<code>{trace}</code>'
context.bot.send_message(config['admin_id'], text, parse_mode=ParseMode.HTML)
logger.error(f'Update: {update}')
raise context.error
def start(update, context):
utils.update_user_data(update.message.from_user, context.user_data)
update.message.reply_text('Please choose:', reply_markup=keyboards.main_menu_keyboard())
def alerts(update, context):
utils.update_user_data(update.message.from_user, context.user_data)
update.message.reply_text(text=wf.get_alerts(), parse_mode=ParseMode.MARKDOWN)
def invasions(update, context):
utils.update_user_data(update.message.from_user, context.user_data)
text = wf.get_invasions(update.message.from_user.id, True, False)
update.message.reply_text(text=text, parse_mode=ParseMode.MARKDOWN)
def void_trader(update, context):
    """Reply with the void trader's current items for the requesting user.

    The handler can be triggered either by a direct message or by an
    inline-keyboard callback; in the latter case ``update.message`` is None.
    """
    if update.message is not None:
        from_user = update.message.from_user
        message = update.message
    else:
        from_user = update.callback_query.from_user
        message = update.callback_query.message
    # Fixed: previously always dereferenced update.message for both the
    # user-data update and the reply, raising AttributeError for callback
    # queries — the very case the branch above was written for.
    utils.update_user_data(from_user, context.user_data)
    text = wf.get_void_trader_items(from_user.id)
    message.reply_text(text=text, parse_mode=ParseMode.MARKDOWN)
def twitch_get_channel_status(update, context):
    """Reply with the Twitch channel status for the requesting user."""
    from_user = update.message.from_user
    utils.update_user_data(from_user, context.user_data)
    # get_twitch_status also returns an info payload that this handler ignores.
    text, _info = tw.get_twitch_status(from_user.id)
    update.message.reply_text(text=text, parse_mode=ParseMode.MARKDOWN)
def admin(update, context):
utils.update_user_data(update.message.from_user, context.user_data)
menus.admin_menu(update, context)
def job_invasions(update):
chat_id = update.job.context
msg = warframe.get_invasions(chat_id, False, True)
if msg and msg != wf.msg_no_invasions and msg != wf.msg_no_invasions_rare_rewards:
update.bot.send_message(chat_id=chat_id, text=msg, parse_mode=ParseMode.MARKDOWN)
def job_alerts(update):
chat_id = update.job.context
msg = warframe.get_alerts(chat_id, True, True)
if msg and msg != wf.msg_no_alerts:
update.bot.send_message(chat_id=chat_id, text=msg, parse_mode=ParseMode.MARKDOWN)
def job_void_trader(update):
chat_id = update.job.context
msg = warframe.get_void_trader_items(chat_id, True)
if msg:
update.bot.send_message(chat_id=chat_id, text=msg, parse_mode=ParseMode.MARKDOWN)
def job_twitch_get_channel_status(update):
chat_id = update.job.context
text, info = tw.get_twitch_status(chat_id, True)
if text and text != twitch.msg_not_active:
update.bot.send_message(chat_id=chat_id, text=text, parse_mode=ParseMode.MARKDOWN)
| 33.934066 | 106 | 0.751943 |
import sys
import traceback
from telegram import ParseMode
from . import menus, keyboards
from ..sources import wf, twitch
from ..utils import utils
from ..utils.logging import logger
from ..utils.loadconfig import config
warframe = wf.Warframe()
tw = twitch.Twitch()
def error(update, context):
trace = ''.join(traceback.format_tb(sys.exc_info()[2]))
text = f'The error <code>{context.error}</code> happened. The full traceback:\n\n<code>{trace}</code>'
context.bot.send_message(config['admin_id'], text, parse_mode=ParseMode.HTML)
logger.error(f'Update: {update}')
raise context.error
def start(update, context):
utils.update_user_data(update.message.from_user, context.user_data)
update.message.reply_text('Please choose:', reply_markup=keyboards.main_menu_keyboard())
def alerts(update, context):
utils.update_user_data(update.message.from_user, context.user_data)
update.message.reply_text(text=wf.get_alerts(), parse_mode=ParseMode.MARKDOWN)
def invasions(update, context):
utils.update_user_data(update.message.from_user, context.user_data)
text = wf.get_invasions(update.message.from_user.id, True, False)
update.message.reply_text(text=text, parse_mode=ParseMode.MARKDOWN)
def void_trader(update, context):
if update.message is not None:
from_user = update.message.from_user
else:
from_user = update.callback_query.from_user
utils.update_user_data(update.message.from_user, context.user_data)
text = wf.get_void_trader_items(from_user.id)
update.message.reply_text(text=text, parse_mode=ParseMode.MARKDOWN)
def twitch_get_channel_status(update, context):
utils.update_user_data(update.message.from_user, context.user_data)
text, info = tw.get_twitch_status(update.message.from_user.id)
update.message.reply_text(text=text, parse_mode=ParseMode.MARKDOWN)
def admin(update, context):
utils.update_user_data(update.message.from_user, context.user_data)
menus.admin_menu(update, context)
def job_invasions(update):
chat_id = update.job.context
msg = warframe.get_invasions(chat_id, False, True)
if msg and msg != wf.msg_no_invasions and msg != wf.msg_no_invasions_rare_rewards:
update.bot.send_message(chat_id=chat_id, text=msg, parse_mode=ParseMode.MARKDOWN)
def job_alerts(update):
chat_id = update.job.context
msg = warframe.get_alerts(chat_id, True, True)
if msg and msg != wf.msg_no_alerts:
update.bot.send_message(chat_id=chat_id, text=msg, parse_mode=ParseMode.MARKDOWN)
def job_void_trader(update):
chat_id = update.job.context
msg = warframe.get_void_trader_items(chat_id, True)
if msg:
update.bot.send_message(chat_id=chat_id, text=msg, parse_mode=ParseMode.MARKDOWN)
def job_twitch_get_channel_status(update):
chat_id = update.job.context
text, info = tw.get_twitch_status(chat_id, True)
if text and text != twitch.msg_not_active:
update.bot.send_message(chat_id=chat_id, text=text, parse_mode=ParseMode.MARKDOWN)
| true | true |
f73a7d27bd71d7ada6b22b9db79fbda63c0f88f8 | 5,927 | py | Python | flexbe_core/test/test_exceptions.py | Jmz919/flexbe_behavior_engine | bdb85de41fafbfea6e4eb74c271b9cee18be4d8b | [
"BSD-3-Clause"
] | 1 | 2022-03-11T04:56:31.000Z | 2022-03-11T04:56:31.000Z | flexbe_core/test/test_exceptions.py | FlexBE/flexbe_behavior_engine | 735a3b68dfbe817db9383e53fef63afd6868219d | [
"BSD-3-Clause"
] | null | null | null | flexbe_core/test/test_exceptions.py | FlexBE/flexbe_behavior_engine | 735a3b68dfbe817db9383e53fef63afd6868219d | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
import unittest
import rclpy
from rclpy.executors import MultiThreadedExecutor, SingleThreadedExecutor
from flexbe_core import EventState, OperatableStateMachine
from flexbe_core.core.exceptions import StateError, StateMachineError, UserDataError
class TestExceptions(unittest.TestCase):
def setUp(self):
self.context = rclpy.context.Context()
rclpy.init(context=self.context)
self.executor = MultiThreadedExecutor(context=self.context)
self.node = rclpy.create_node('TestExceptions', context=self.context)
def tearDown(self):
self.node.destroy_node()
self.executor.shutdown()
rclpy.shutdown(context=self.context)
def test_invalid_outcome(self):
rclpy.spin_once(self.node, executor=self.executor, timeout_sec=1)
OperatableStateMachine.initialize_ros(self.node)
node = self.node
class ReturnInvalidOutcome(EventState):
def __init__(self):
self.initialize_ros(node)
super(ReturnInvalidOutcome, self).__init__(outcomes=['done'])
def execute(self, userdata):
return 'invalid'
sm = OperatableStateMachine(outcomes=['done'])
with sm:
OperatableStateMachine.add('state', ReturnInvalidOutcome(), transitions={'done': 'done'})
outcome = sm.execute(None)
self.assertIsNone(outcome)
self.assertIsInstance(sm._last_exception, StateError)
def test_invalid_transition(self):
rclpy.spin_once(self.node, executor=self.executor, timeout_sec=1)
OperatableStateMachine.initialize_ros(self.node)
node = self.node
class ReturnDone(EventState):
def __init__(self):
ReturnDone.initialize_ros(node)
super(ReturnDone, self).__init__(outcomes=['done'])
def execute(self, userdata):
return 'done'
inner_sm = OperatableStateMachine(outcomes=['done'])
with inner_sm:
OperatableStateMachine.add('state', ReturnDone(), transitions={'done': 'invalid'})
sm = OperatableStateMachine(outcomes=['done'])
with sm:
OperatableStateMachine.add('inner', inner_sm, transitions={'done': 'done'})
outcome = sm.execute(None)
self.assertIsNone(outcome)
self.assertIsInstance(sm._last_exception, StateMachineError)
def test_invalid_userdata_input(self):
rclpy.spin_once(self.node, executor=self.executor, timeout_sec=1)
OperatableStateMachine.initialize_ros(self.node)
node = self.node
class AccessInvalidInput(EventState):
def __init__(self):
AccessInvalidInput.initialize_ros(node)
super(AccessInvalidInput, self).__init__(outcomes=['done'], input_keys=['input'])
def execute(self, userdata):
print(userdata.invalid)
return 'done'
sm = OperatableStateMachine(outcomes=['done'])
with sm:
OperatableStateMachine.add('state', AccessInvalidInput(), transitions={'done': 'done'})
outcome = sm.execute(None)
self.assertIsNone(outcome)
self.assertIsInstance(sm._last_exception, UserDataError)
def test_invalid_userdata_output(self):
rclpy.spin_once(self.node, executor=self.executor, timeout_sec=1)
OperatableStateMachine.initialize_ros(self.node)
node = self.node
class SetInvalidOutput(EventState):
def __init__(self):
SetInvalidOutput.initialize_ros(node)
super(SetInvalidOutput, self).__init__(outcomes=['done'], output_keys=['output'])
def execute(self, userdata):
userdata.invalid = False
return 'done'
sm = OperatableStateMachine(outcomes=['done'])
with sm:
OperatableStateMachine.add('state', SetInvalidOutput(), transitions={'done': 'done'})
outcome = sm.execute(None)
self.assertIsNone(outcome)
self.assertIsInstance(sm._last_exception, UserDataError)
def test_missing_userdata(self):
rclpy.spin_once(self.node, executor=self.executor, timeout_sec=1)
OperatableStateMachine.initialize_ros(self.node)
node = self.node
class AccessValidInput(EventState):
def __init__(self):
AccessValidInput.initialize_ros(node)
super(AccessValidInput, self).__init__(outcomes=['done'], input_keys=['missing'])
def execute(self, userdata):
print(userdata.missing)
return 'done'
sm = OperatableStateMachine(outcomes=['done'])
with sm:
OperatableStateMachine.add('state', AccessValidInput(), transitions={'done': 'done'})
outcome = sm.execute(None)
self.assertIsNone(outcome)
self.assertIsInstance(sm._last_exception, UserDataError)
def test_modify_input_key(self):
rclpy.spin_once(self.node, executor=self.executor, timeout_sec=1)
OperatableStateMachine.initialize_ros(self.node)
node = self.node
class ModifyInputKey(EventState):
def __init__(self):
ModifyInputKey.initialize_ros(node)
super(ModifyInputKey, self).__init__(outcomes=['done'], input_keys=['only_input'])
def execute(self, userdata):
userdata.only_input['new'] = 'not_allowed'
return 'done'
sm = OperatableStateMachine(outcomes=['done'])
sm.userdata.only_input = {'existing': 'is_allowed'}
with sm:
OperatableStateMachine.add('state', ModifyInputKey(), transitions={'done': 'done'})
outcome = sm.execute(None)
self.assertIsNone(outcome)
self.assertIsInstance(sm._last_exception, UserDataError)
if __name__ == '__main__':
unittest.main()
| 37.27673 | 101 | 0.647714 |
import unittest
import rclpy
from rclpy.executors import MultiThreadedExecutor, SingleThreadedExecutor
from flexbe_core import EventState, OperatableStateMachine
from flexbe_core.core.exceptions import StateError, StateMachineError, UserDataError
class TestExceptions(unittest.TestCase):
def setUp(self):
self.context = rclpy.context.Context()
rclpy.init(context=self.context)
self.executor = MultiThreadedExecutor(context=self.context)
self.node = rclpy.create_node('TestExceptions', context=self.context)
def tearDown(self):
self.node.destroy_node()
self.executor.shutdown()
rclpy.shutdown(context=self.context)
def test_invalid_outcome(self):
rclpy.spin_once(self.node, executor=self.executor, timeout_sec=1)
OperatableStateMachine.initialize_ros(self.node)
node = self.node
class ReturnInvalidOutcome(EventState):
def __init__(self):
self.initialize_ros(node)
super(ReturnInvalidOutcome, self).__init__(outcomes=['done'])
def execute(self, userdata):
return 'invalid'
sm = OperatableStateMachine(outcomes=['done'])
with sm:
OperatableStateMachine.add('state', ReturnInvalidOutcome(), transitions={'done': 'done'})
outcome = sm.execute(None)
self.assertIsNone(outcome)
self.assertIsInstance(sm._last_exception, StateError)
def test_invalid_transition(self):
rclpy.spin_once(self.node, executor=self.executor, timeout_sec=1)
OperatableStateMachine.initialize_ros(self.node)
node = self.node
class ReturnDone(EventState):
def __init__(self):
ReturnDone.initialize_ros(node)
super(ReturnDone, self).__init__(outcomes=['done'])
def execute(self, userdata):
return 'done'
inner_sm = OperatableStateMachine(outcomes=['done'])
with inner_sm:
OperatableStateMachine.add('state', ReturnDone(), transitions={'done': 'invalid'})
sm = OperatableStateMachine(outcomes=['done'])
with sm:
OperatableStateMachine.add('inner', inner_sm, transitions={'done': 'done'})
outcome = sm.execute(None)
self.assertIsNone(outcome)
self.assertIsInstance(sm._last_exception, StateMachineError)
def test_invalid_userdata_input(self):
rclpy.spin_once(self.node, executor=self.executor, timeout_sec=1)
OperatableStateMachine.initialize_ros(self.node)
node = self.node
class AccessInvalidInput(EventState):
def __init__(self):
AccessInvalidInput.initialize_ros(node)
super(AccessInvalidInput, self).__init__(outcomes=['done'], input_keys=['input'])
def execute(self, userdata):
print(userdata.invalid)
return 'done'
sm = OperatableStateMachine(outcomes=['done'])
with sm:
OperatableStateMachine.add('state', AccessInvalidInput(), transitions={'done': 'done'})
outcome = sm.execute(None)
self.assertIsNone(outcome)
self.assertIsInstance(sm._last_exception, UserDataError)
def test_invalid_userdata_output(self):
rclpy.spin_once(self.node, executor=self.executor, timeout_sec=1)
OperatableStateMachine.initialize_ros(self.node)
node = self.node
class SetInvalidOutput(EventState):
def __init__(self):
SetInvalidOutput.initialize_ros(node)
super(SetInvalidOutput, self).__init__(outcomes=['done'], output_keys=['output'])
def execute(self, userdata):
userdata.invalid = False
return 'done'
sm = OperatableStateMachine(outcomes=['done'])
with sm:
OperatableStateMachine.add('state', SetInvalidOutput(), transitions={'done': 'done'})
outcome = sm.execute(None)
self.assertIsNone(outcome)
self.assertIsInstance(sm._last_exception, UserDataError)
def test_missing_userdata(self):
rclpy.spin_once(self.node, executor=self.executor, timeout_sec=1)
OperatableStateMachine.initialize_ros(self.node)
node = self.node
class AccessValidInput(EventState):
def __init__(self):
AccessValidInput.initialize_ros(node)
super(AccessValidInput, self).__init__(outcomes=['done'], input_keys=['missing'])
def execute(self, userdata):
print(userdata.missing)
return 'done'
sm = OperatableStateMachine(outcomes=['done'])
with sm:
OperatableStateMachine.add('state', AccessValidInput(), transitions={'done': 'done'})
outcome = sm.execute(None)
self.assertIsNone(outcome)
self.assertIsInstance(sm._last_exception, UserDataError)
def test_modify_input_key(self):
rclpy.spin_once(self.node, executor=self.executor, timeout_sec=1)
OperatableStateMachine.initialize_ros(self.node)
node = self.node
class ModifyInputKey(EventState):
def __init__(self):
ModifyInputKey.initialize_ros(node)
super(ModifyInputKey, self).__init__(outcomes=['done'], input_keys=['only_input'])
def execute(self, userdata):
userdata.only_input['new'] = 'not_allowed'
return 'done'
sm = OperatableStateMachine(outcomes=['done'])
sm.userdata.only_input = {'existing': 'is_allowed'}
with sm:
OperatableStateMachine.add('state', ModifyInputKey(), transitions={'done': 'done'})
outcome = sm.execute(None)
self.assertIsNone(outcome)
self.assertIsInstance(sm._last_exception, UserDataError)
if __name__ == '__main__':
unittest.main()
| true | true |
f73a7f4a34b14a8e898d8d1043a4003cb09b0b68 | 15,981 | py | Python | venv/Lib/site-packages/pandas/tests/arrays/categorical/test_operators.py | arnoyu-hub/COMP0016miemie | 59af664dcf190eab4f93cefb8471908717415fea | [
"MIT"
] | null | null | null | venv/Lib/site-packages/pandas/tests/arrays/categorical/test_operators.py | arnoyu-hub/COMP0016miemie | 59af664dcf190eab4f93cefb8471908717415fea | [
"MIT"
] | null | null | null | venv/Lib/site-packages/pandas/tests/arrays/categorical/test_operators.py | arnoyu-hub/COMP0016miemie | 59af664dcf190eab4f93cefb8471908717415fea | [
"MIT"
] | null | null | null | import operator
import warnings
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Series,
date_range,
)
import pandas._testing as tm
from pandas.tests.arrays.categorical.common import TestCategorical
class TestCategoricalOpsWithFactor(TestCategorical):
    """Comparison-operator tests driven by the ``self.factor`` fixture
    (an ordered Categorical of 'a'/'b'/'c' values from ``TestCategorical``)."""

    def test_categories_none_comparisons(self):
        """Categories inferred from the data match the fixture's categories."""
        factor = Categorical(["a", "b", "b", "a", "a", "c", "c", "c"], ordered=True)
        tm.assert_categorical_equal(factor, self.factor)

    def test_comparisons(self):
        """Element-wise comparisons: scalars, permuted selves, other
        Categoricals; mismatched categories/ordering must raise TypeError."""
        # Each comparison against a scalar must agree with the same comparison
        # done on the plain ndarray of codes-as-values.
        result = self.factor[self.factor == "a"]
        expected = self.factor[np.asarray(self.factor) == "a"]
        tm.assert_categorical_equal(result, expected)

        result = self.factor[self.factor != "a"]
        expected = self.factor[np.asarray(self.factor) != "a"]
        tm.assert_categorical_equal(result, expected)

        result = self.factor[self.factor < "c"]
        expected = self.factor[np.asarray(self.factor) < "c"]
        tm.assert_categorical_equal(result, expected)

        result = self.factor[self.factor > "a"]
        expected = self.factor[np.asarray(self.factor) > "a"]
        tm.assert_categorical_equal(result, expected)

        result = self.factor[self.factor >= "b"]
        expected = self.factor[np.asarray(self.factor) >= "b"]
        tm.assert_categorical_equal(result, expected)

        result = self.factor[self.factor <= "b"]
        expected = self.factor[np.asarray(self.factor) <= "b"]
        tm.assert_categorical_equal(result, expected)

        # Comparing against a shuffled copy of itself.
        n = len(self.factor)
        other = self.factor[np.random.permutation(n)]
        result = self.factor == other
        expected = np.asarray(self.factor) == np.asarray(other)
        tm.assert_numpy_array_equal(result, expected)

        # Equality against a value absent from the categories is all-False.
        result = self.factor == "d"
        expected = np.zeros(len(self.factor), dtype=bool)
        tm.assert_numpy_array_equal(result, expected)

        # comparisons with categoricals
        cat_rev = Categorical(["a", "b", "c"], categories=["c", "b", "a"], ordered=True)
        cat_rev_base = Categorical(
            ["b", "b", "b"], categories=["c", "b", "a"], ordered=True
        )
        cat = Categorical(["a", "b", "c"], ordered=True)
        cat_base = Categorical(["b", "b", "b"], categories=cat.categories, ordered=True)

        # comparisons need to take categories ordering into account
        res_rev = cat_rev > cat_rev_base
        exp_rev = np.array([True, False, False])
        tm.assert_numpy_array_equal(res_rev, exp_rev)

        res_rev = cat_rev < cat_rev_base
        exp_rev = np.array([False, False, True])
        tm.assert_numpy_array_equal(res_rev, exp_rev)

        res = cat > cat_base
        exp = np.array([False, False, True])
        tm.assert_numpy_array_equal(res, exp)

        # Only categories with same categories can be compared
        msg = "Categoricals can only be compared if 'categories' are the same"
        with pytest.raises(TypeError, match=msg):
            cat > cat_rev

        cat_rev_base2 = Categorical(["b", "b", "b"], categories=["c", "b", "a", "d"])

        with pytest.raises(TypeError, match=msg):
            cat_rev > cat_rev_base2

        # Only categories with same ordering information can be compared
        # (local name below carries an upstream typo: "cat_unorderd").
        cat_unorderd = cat.set_ordered(False)
        assert not (cat > cat).any()

        with pytest.raises(TypeError, match=msg):
            cat > cat_unorderd

        # comparison (in both directions) with Series will raise
        s = Series(["b", "b", "b"])
        msg = (
            "Cannot compare a Categorical for op __gt__ with type "
            r"<class 'numpy\.ndarray'>"
        )
        with pytest.raises(TypeError, match=msg):
            cat > s
        with pytest.raises(TypeError, match=msg):
            cat_rev > s
        with pytest.raises(TypeError, match=msg):
            s < cat
        with pytest.raises(TypeError, match=msg):
            s < cat_rev

        # comparison with numpy.array will raise in both direction, but only on
        # newer numpy versions
        a = np.array(["b", "b", "b"])
        with pytest.raises(TypeError, match=msg):
            cat > a
        with pytest.raises(TypeError, match=msg):
            cat_rev > a

        # Make sure that unequal comparison take the categories order in
        # account
        cat_rev = Categorical(list("abc"), categories=list("cba"), ordered=True)
        exp = np.array([True, False, False])
        res = cat_rev > "b"
        tm.assert_numpy_array_equal(res, exp)

        # check that zero-dim array gets unboxed
        res = cat_rev > np.array("b")
        tm.assert_numpy_array_equal(res, exp)
class TestCategoricalOps:
    """Fixture-free comparison and numeric-op tests for ``Categorical``."""

    def test_compare_frame(self):
        """Comparisons with a DataFrame defer to the frame's alignment logic."""
        # GH#24282 check that Categorical.__cmp__(DataFrame) defers to frame
        data = ["a", "b", 2, "a"]
        cat = Categorical(data)
        df = DataFrame(cat)

        result = cat == df.T
        expected = DataFrame([[True, True, True, True]])
        tm.assert_frame_equal(result, expected)

        result = cat[::-1] != df.T
        expected = DataFrame([[False, True, True, False]])
        tm.assert_frame_equal(result, expected)

    def test_compare_frame_raises(self, all_compare_operators):
        """Un-transposed frame comparison raises on length mismatch."""
        # alignment raises unless we transpose
        op = getattr(operator, all_compare_operators)
        cat = Categorical(["a", "b", 2, "a"])
        df = DataFrame(cat)
        msg = "Unable to coerce to Series, length must be 1: given 4"
        with pytest.raises(ValueError, match=msg):
            op(cat, df)

    def test_datetime_categorical_comparison(self):
        """Ordered datetime categoricals compare element-wise with a scalar."""
        dt_cat = Categorical(date_range("2014-01-01", periods=3), ordered=True)
        tm.assert_numpy_array_equal(dt_cat > dt_cat[0], np.array([False, True, True]))
        tm.assert_numpy_array_equal(dt_cat[0] < dt_cat, np.array([False, True, True]))

    def test_reflected_comparison_with_scalars(self):
        """Scalar-on-the-left comparisons are reflected correctly."""
        # GH8658
        cat = Categorical([1, 2, 3], ordered=True)
        tm.assert_numpy_array_equal(cat > cat[0], np.array([False, True, True]))
        tm.assert_numpy_array_equal(cat[0] < cat, np.array([False, True, True]))

    def test_comparison_with_unknown_scalars(self):
        """Unknown scalars: ordering comparisons raise, ==/!= do not."""
        # https://github.com/pandas-dev/pandas/issues/9836#issuecomment-92123057
        # and following comparisons with scalars not in categories should raise
        # for unequal comps, but not for equal/not equal
        cat = Categorical([1, 2, 3], ordered=True)

        msg = "Invalid comparison between dtype=category and int"
        with pytest.raises(TypeError, match=msg):
            cat < 4
        with pytest.raises(TypeError, match=msg):
            cat > 4
        with pytest.raises(TypeError, match=msg):
            4 < cat
        with pytest.raises(TypeError, match=msg):
            4 > cat

        tm.assert_numpy_array_equal(cat == 4, np.array([False, False, False]))
        tm.assert_numpy_array_equal(cat != 4, np.array([True, True, True]))

    def test_comparison_with_tuple(self):
        """Tuple categories compare as whole values, not element-wise."""
        cat = Categorical(np.array(["foo", (0, 1), 3, (0, 1)], dtype=object))

        result = cat == "foo"
        expected = np.array([True, False, False, False], dtype=bool)
        tm.assert_numpy_array_equal(result, expected)

        result = cat == (0, 1)
        expected = np.array([False, True, False, True], dtype=bool)
        tm.assert_numpy_array_equal(result, expected)

        result = cat != (0, 1)
        tm.assert_numpy_array_equal(result, ~expected)

    def test_comparison_of_ordered_categorical_with_nan_to_scalar(
        self, compare_operators_no_eq_ne
    ):
        """NaN positions compare False against a scalar, matching ndarray."""
        # https://github.com/pandas-dev/pandas/issues/26504
        # BUG: fix ordered categorical comparison with missing values (#26504 )
        # and following comparisons with scalars in categories with missing
        # values should be evaluated as False

        cat = Categorical([1, 2, 3, None], categories=[1, 2, 3], ordered=True)
        scalar = 2
        # Comparing an object ndarray containing NaN emits a RuntimeWarning;
        # suppress it so only the result values are compared.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", RuntimeWarning)
            expected = getattr(np.array(cat), compare_operators_no_eq_ne)(scalar)
            actual = getattr(cat, compare_operators_no_eq_ne)(scalar)
        tm.assert_numpy_array_equal(actual, expected)

    def test_comparison_of_ordered_categorical_with_nan_to_listlike(
        self, compare_operators_no_eq_ne
    ):
        """NaN positions compare False against a listlike, matching ndarray."""
        # https://github.com/pandas-dev/pandas/issues/26504
        # and following comparisons of missing values in ordered Categorical
        # with listlike should be evaluated as False

        cat = Categorical([1, 2, 3, None], categories=[1, 2, 3], ordered=True)
        other = Categorical([2, 2, 2, 2], categories=[1, 2, 3], ordered=True)
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", RuntimeWarning)
            expected = getattr(np.array(cat), compare_operators_no_eq_ne)(2)
            actual = getattr(cat, compare_operators_no_eq_ne)(other)
        tm.assert_numpy_array_equal(actual, expected)

    @pytest.mark.parametrize(
        "data,reverse,base",
        [(list("abc"), list("cba"), list("bbb")), ([1, 2, 3], [3, 2, 1], [2, 2, 2])],
    )
    def test_comparisons(self, data, reverse, base):
        """Series-wrapped categorical comparisons honor category ordering and
        reject mixed-type comparisons with Series/ndarray."""
        cat_rev = Series(Categorical(data, categories=reverse, ordered=True))
        cat_rev_base = Series(Categorical(base, categories=reverse, ordered=True))
        cat = Series(Categorical(data, ordered=True))
        cat_base = Series(
            Categorical(base, categories=cat.cat.categories, ordered=True)
        )
        s = Series(base)
        a = np.array(base)

        # comparisons need to take categories ordering into account
        res_rev = cat_rev > cat_rev_base
        exp_rev = Series([True, False, False])
        tm.assert_series_equal(res_rev, exp_rev)

        res_rev = cat_rev < cat_rev_base
        exp_rev = Series([False, False, True])
        tm.assert_series_equal(res_rev, exp_rev)

        res = cat > cat_base
        exp = Series([False, False, True])
        tm.assert_series_equal(res, exp)

        scalar = base[1]
        res = cat > scalar
        exp = Series([False, False, True])
        exp2 = cat.values > scalar
        tm.assert_series_equal(res, exp)
        tm.assert_numpy_array_equal(res.values, exp2)
        res_rev = cat_rev > scalar
        exp_rev = Series([True, False, False])
        exp_rev2 = cat_rev.values > scalar
        tm.assert_series_equal(res_rev, exp_rev)
        tm.assert_numpy_array_equal(res_rev.values, exp_rev2)

        # Only categories with same categories can be compared
        msg = "Categoricals can only be compared if 'categories' are the same"
        with pytest.raises(TypeError, match=msg):
            cat > cat_rev

        # categorical cannot be compared to Series or numpy array, and also
        # not the other way around
        msg = (
            "Cannot compare a Categorical for op __gt__ with type "
            r"<class 'numpy\.ndarray'>"
        )
        with pytest.raises(TypeError, match=msg):
            cat > s
        with pytest.raises(TypeError, match=msg):
            cat_rev > s
        with pytest.raises(TypeError, match=msg):
            cat > a
        with pytest.raises(TypeError, match=msg):
            cat_rev > a

        with pytest.raises(TypeError, match=msg):
            s < cat
        with pytest.raises(TypeError, match=msg):
            s < cat_rev

        with pytest.raises(TypeError, match=msg):
            a < cat
        with pytest.raises(TypeError, match=msg):
            a < cat_rev

    @pytest.mark.parametrize(
        "ctor",
        [
            lambda *args, **kwargs: Categorical(*args, **kwargs),
            lambda *args, **kwargs: Series(Categorical(*args, **kwargs)),
        ],
    )
    def test_unordered_different_order_equal(self, ctor):
        """Unordered categoricals with the same categories in different order
        compare by value, for both raw Categorical and Series wrappers."""
        # https://github.com/pandas-dev/pandas/issues/16014
        c1 = ctor(["a", "b"], categories=["a", "b"], ordered=False)
        c2 = ctor(["a", "b"], categories=["b", "a"], ordered=False)
        assert (c1 == c2).all()

        c1 = ctor(["a", "b"], categories=["a", "b"], ordered=False)
        c2 = ctor(["b", "a"], categories=["b", "a"], ordered=False)
        assert (c1 != c2).all()

        c1 = ctor(["a", "a"], categories=["a", "b"], ordered=False)
        c2 = ctor(["b", "b"], categories=["b", "a"], ordered=False)
        assert (c1 != c2).all()

        c1 = ctor(["a", "a"], categories=["a", "b"], ordered=False)
        c2 = ctor(["a", "b"], categories=["b", "a"], ordered=False)
        result = c1 == c2
        tm.assert_numpy_array_equal(np.array(result), np.array([True, False]))

    def test_unordered_different_categories_raises(self):
        """Different category sets cannot be compared even when unordered."""
        c1 = Categorical(["a", "b"], categories=["a", "b"], ordered=False)
        c2 = Categorical(["a", "c"], categories=["c", "a"], ordered=False)

        with pytest.raises(TypeError, match=("Categoricals can only be compared")):
            c1 == c2

    def test_compare_different_lengths(self):
        """Mismatched category sets raise even for empty categoricals."""
        c1 = Categorical([], categories=["a", "b"])
        c2 = Categorical([], categories=["a"])

        msg = "Categoricals can only be compared if 'categories' are the same."
        with pytest.raises(TypeError, match=msg):
            c1 == c2

    def test_compare_unordered_different_order(self):
        """``equals`` is False for same categories in a different order."""
        # https://github.com/pandas-dev/pandas/issues/16603#issuecomment-
        # 349290078
        a = Categorical(["a"], categories=["a", "b"])
        b = Categorical(["b"], categories=["b", "a"])
        assert not a.equals(b)

    def test_numeric_like_ops(self):
        """Arithmetic, reductions and numeric ufuncs on categorical data all
        raise TypeError (min/max being the documented exceptions)."""
        df = DataFrame({"value": np.random.randint(0, 10000, 100)})
        labels = [f"{i} - {i + 499}" for i in range(0, 10000, 500)]
        cat_labels = Categorical(labels, labels)

        df = df.sort_values(by=["value"], ascending=True)
        df["value_group"] = pd.cut(
            df.value, range(0, 10500, 500), right=False, labels=cat_labels
        )

        # numeric ops should not succeed
        for op, str_rep in [
            ("__add__", r"\+"),
            ("__sub__", "-"),
            ("__mul__", r"\*"),
            ("__truediv__", "/"),
        ]:
            msg = f"Series cannot perform the operation {str_rep}|unsupported operand"
            with pytest.raises(TypeError, match=msg):
                getattr(df, op)(df)

        # reduction ops should not succeed (unless specifically defined, e.g.
        # min/max)
        s = df["value_group"]
        for op in ["kurt", "skew", "var", "std", "mean", "sum", "median"]:
            msg = f"'Categorical' does not implement reduction '{op}'"
            with pytest.raises(TypeError, match=msg):
                getattr(s, op)(numeric_only=False)

        # mad technically works because it takes always the numeric data

        # numpy ops
        s = Series(Categorical([1, 2, 3, 4]))
        with pytest.raises(
            TypeError, match="'Categorical' does not implement reduction 'sum'"
        ):
            np.sum(s)

        # numeric ops on a Series
        for op, str_rep in [
            ("__add__", r"\+"),
            ("__sub__", "-"),
            ("__mul__", r"\*"),
            ("__truediv__", "/"),
        ]:
            msg = f"Series cannot perform the operation {str_rep}|unsupported operand"
            with pytest.raises(TypeError, match=msg):
                getattr(s, op)(2)

        # invalid ufunc
        msg = "Object with dtype category cannot perform the numpy op log"
        with pytest.raises(TypeError, match=msg):
            np.log(s)
| 39.655087 | 89 | 0.586321 | import operator
import warnings
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Series,
date_range,
)
import pandas._testing as tm
from pandas.tests.arrays.categorical.common import TestCategorical
class TestCategoricalOpsWithFactor(TestCategorical):
def test_categories_none_comparisons(self):
factor = Categorical(["a", "b", "b", "a", "a", "c", "c", "c"], ordered=True)
tm.assert_categorical_equal(factor, self.factor)
def test_comparisons(self):
result = self.factor[self.factor == "a"]
expected = self.factor[np.asarray(self.factor) == "a"]
tm.assert_categorical_equal(result, expected)
result = self.factor[self.factor != "a"]
expected = self.factor[np.asarray(self.factor) != "a"]
tm.assert_categorical_equal(result, expected)
result = self.factor[self.factor < "c"]
expected = self.factor[np.asarray(self.factor) < "c"]
tm.assert_categorical_equal(result, expected)
result = self.factor[self.factor > "a"]
expected = self.factor[np.asarray(self.factor) > "a"]
tm.assert_categorical_equal(result, expected)
result = self.factor[self.factor >= "b"]
expected = self.factor[np.asarray(self.factor) >= "b"]
tm.assert_categorical_equal(result, expected)
result = self.factor[self.factor <= "b"]
expected = self.factor[np.asarray(self.factor) <= "b"]
tm.assert_categorical_equal(result, expected)
n = len(self.factor)
other = self.factor[np.random.permutation(n)]
result = self.factor == other
expected = np.asarray(self.factor) == np.asarray(other)
tm.assert_numpy_array_equal(result, expected)
result = self.factor == "d"
expected = np.zeros(len(self.factor), dtype=bool)
tm.assert_numpy_array_equal(result, expected)
cat_rev = Categorical(["a", "b", "c"], categories=["c", "b", "a"], ordered=True)
cat_rev_base = Categorical(
["b", "b", "b"], categories=["c", "b", "a"], ordered=True
)
cat = Categorical(["a", "b", "c"], ordered=True)
cat_base = Categorical(["b", "b", "b"], categories=cat.categories, ordered=True)
res_rev = cat_rev > cat_rev_base
exp_rev = np.array([True, False, False])
tm.assert_numpy_array_equal(res_rev, exp_rev)
res_rev = cat_rev < cat_rev_base
exp_rev = np.array([False, False, True])
tm.assert_numpy_array_equal(res_rev, exp_rev)
res = cat > cat_base
exp = np.array([False, False, True])
tm.assert_numpy_array_equal(res, exp)
msg = "Categoricals can only be compared if 'categories' are the same"
with pytest.raises(TypeError, match=msg):
cat > cat_rev
cat_rev_base2 = Categorical(["b", "b", "b"], categories=["c", "b", "a", "d"])
with pytest.raises(TypeError, match=msg):
cat_rev > cat_rev_base2
cat_unorderd = cat.set_ordered(False)
assert not (cat > cat).any()
with pytest.raises(TypeError, match=msg):
cat > cat_unorderd
s = Series(["b", "b", "b"])
msg = (
"Cannot compare a Categorical for op __gt__ with type "
r"<class 'numpy\.ndarray'>"
)
with pytest.raises(TypeError, match=msg):
cat > s
with pytest.raises(TypeError, match=msg):
cat_rev > s
with pytest.raises(TypeError, match=msg):
s < cat
with pytest.raises(TypeError, match=msg):
s < cat_rev
a = np.array(["b", "b", "b"])
with pytest.raises(TypeError, match=msg):
cat > a
with pytest.raises(TypeError, match=msg):
cat_rev > a
cat_rev = Categorical(list("abc"), categories=list("cba"), ordered=True)
exp = np.array([True, False, False])
res = cat_rev > "b"
tm.assert_numpy_array_equal(res, exp)
res = cat_rev > np.array("b")
tm.assert_numpy_array_equal(res, exp)
class TestCategoricalOps:
def test_compare_frame(self):
a)
df = DataFrame(cat)
result = cat == df.T
expected = DataFrame([[True, True, True, True]])
tm.assert_frame_equal(result, expected)
result = cat[::-1] != df.T
expected = DataFrame([[False, True, True, False]])
tm.assert_frame_equal(result, expected)
def test_compare_frame_raises(self, all_compare_operators):
op = getattr(operator, all_compare_operators)
cat = Categorical(["a", "b", 2, "a"])
df = DataFrame(cat)
msg = "Unable to coerce to Series, length must be 1: given 4"
with pytest.raises(ValueError, match=msg):
op(cat, df)
def test_datetime_categorical_comparison(self):
dt_cat = Categorical(date_range("2014-01-01", periods=3), ordered=True)
tm.assert_numpy_array_equal(dt_cat > dt_cat[0], np.array([False, True, True]))
tm.assert_numpy_array_equal(dt_cat[0] < dt_cat, np.array([False, True, True]))
def test_reflected_comparison_with_scalars(self):
cat = Categorical([1, 2, 3], ordered=True)
tm.assert_numpy_array_equal(cat > cat[0], np.array([False, True, True]))
tm.assert_numpy_array_equal(cat[0] < cat, np.array([False, True, True]))
def test_comparison_with_unknown_scalars(self):
cat = Categorical([1, 2, 3], ordered=True)
msg = "Invalid comparison between dtype=category and int"
with pytest.raises(TypeError, match=msg):
cat < 4
with pytest.raises(TypeError, match=msg):
cat > 4
with pytest.raises(TypeError, match=msg):
4 < cat
with pytest.raises(TypeError, match=msg):
4 > cat
tm.assert_numpy_array_equal(cat == 4, np.array([False, False, False]))
tm.assert_numpy_array_equal(cat != 4, np.array([True, True, True]))
def test_comparison_with_tuple(self):
cat = Categorical(np.array(["foo", (0, 1), 3, (0, 1)], dtype=object))
result = cat == "foo"
expected = np.array([True, False, False, False], dtype=bool)
tm.assert_numpy_array_equal(result, expected)
result = cat == (0, 1)
expected = np.array([False, True, False, True], dtype=bool)
tm.assert_numpy_array_equal(result, expected)
result = cat != (0, 1)
tm.assert_numpy_array_equal(result, ~expected)
def test_comparison_of_ordered_categorical_with_nan_to_scalar(
self, compare_operators_no_eq_ne
):
cat = Categorical([1, 2, 3, None], categories=[1, 2, 3], ordered=True)
scalar = 2
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
expected = getattr(np.array(cat), compare_operators_no_eq_ne)(scalar)
actual = getattr(cat, compare_operators_no_eq_ne)(scalar)
tm.assert_numpy_array_equal(actual, expected)
def test_comparison_of_ordered_categorical_with_nan_to_listlike(
self, compare_operators_no_eq_ne
):
cat = Categorical([1, 2, 3, None], categories=[1, 2, 3], ordered=True)
other = Categorical([2, 2, 2, 2], categories=[1, 2, 3], ordered=True)
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
expected = getattr(np.array(cat), compare_operators_no_eq_ne)(2)
actual = getattr(cat, compare_operators_no_eq_ne)(other)
tm.assert_numpy_array_equal(actual, expected)
@pytest.mark.parametrize(
"data,reverse,base",
[(list("abc"), list("cba"), list("bbb")), ([1, 2, 3], [3, 2, 1], [2, 2, 2])],
)
def test_comparisons(self, data, reverse, base):
cat_rev = Series(Categorical(data, categories=reverse, ordered=True))
cat_rev_base = Series(Categorical(base, categories=reverse, ordered=True))
cat = Series(Categorical(data, ordered=True))
cat_base = Series(
Categorical(base, categories=cat.cat.categories, ordered=True)
)
s = Series(base)
a = np.array(base)
res_rev = cat_rev > cat_rev_base
exp_rev = Series([True, False, False])
tm.assert_series_equal(res_rev, exp_rev)
res_rev = cat_rev < cat_rev_base
exp_rev = Series([False, False, True])
tm.assert_series_equal(res_rev, exp_rev)
res = cat > cat_base
exp = Series([False, False, True])
tm.assert_series_equal(res, exp)
scalar = base[1]
res = cat > scalar
exp = Series([False, False, True])
exp2 = cat.values > scalar
tm.assert_series_equal(res, exp)
tm.assert_numpy_array_equal(res.values, exp2)
res_rev = cat_rev > scalar
exp_rev = Series([True, False, False])
exp_rev2 = cat_rev.values > scalar
tm.assert_series_equal(res_rev, exp_rev)
tm.assert_numpy_array_equal(res_rev.values, exp_rev2)
msg = "Categoricals can only be compared if 'categories' are the same"
with pytest.raises(TypeError, match=msg):
cat > cat_rev
msg = (
"Cannot compare a Categorical for op __gt__ with type "
r"<class 'numpy\.ndarray'>"
)
with pytest.raises(TypeError, match=msg):
cat > s
with pytest.raises(TypeError, match=msg):
cat_rev > s
with pytest.raises(TypeError, match=msg):
cat > a
with pytest.raises(TypeError, match=msg):
cat_rev > a
with pytest.raises(TypeError, match=msg):
s < cat
with pytest.raises(TypeError, match=msg):
s < cat_rev
with pytest.raises(TypeError, match=msg):
a < cat
with pytest.raises(TypeError, match=msg):
a < cat_rev
@pytest.mark.parametrize(
"ctor",
[
lambda *args, **kwargs: Categorical(*args, **kwargs),
lambda *args, **kwargs: Series(Categorical(*args, **kwargs)),
],
)
def test_unordered_different_order_equal(self, ctor):
c1 = ctor(["a", "b"], categories=["a", "b"], ordered=False)
c2 = ctor(["a", "b"], categories=["b", "a"], ordered=False)
assert (c1 == c2).all()
c1 = ctor(["a", "b"], categories=["a", "b"], ordered=False)
c2 = ctor(["b", "a"], categories=["b", "a"], ordered=False)
assert (c1 != c2).all()
c1 = ctor(["a", "a"], categories=["a", "b"], ordered=False)
c2 = ctor(["b", "b"], categories=["b", "a"], ordered=False)
assert (c1 != c2).all()
c1 = ctor(["a", "a"], categories=["a", "b"], ordered=False)
c2 = ctor(["a", "b"], categories=["b", "a"], ordered=False)
result = c1 == c2
tm.assert_numpy_array_equal(np.array(result), np.array([True, False]))
def test_unordered_different_categories_raises(self):
c1 = Categorical(["a", "b"], categories=["a", "b"], ordered=False)
c2 = Categorical(["a", "c"], categories=["c", "a"], ordered=False)
with pytest.raises(TypeError, match=("Categoricals can only be compared")):
c1 == c2
def test_compare_different_lengths(self):
c1 = Categorical([], categories=["a", "b"])
c2 = Categorical([], categories=["a"])
msg = "Categoricals can only be compared if 'categories' are the same."
with pytest.raises(TypeError, match=msg):
c1 == c2
def test_compare_unordered_different_order(self):
a = Categorical(["a"], categories=["a", "b"])
b = Categorical(["b"], categories=["b", "a"])
assert not a.equals(b)
def test_numeric_like_ops(self):
df = DataFrame({"value": np.random.randint(0, 10000, 100)})
labels = [f"{i} - {i + 499}" for i in range(0, 10000, 500)]
cat_labels = Categorical(labels, labels)
df = df.sort_values(by=["value"], ascending=True)
df["value_group"] = pd.cut(
df.value, range(0, 10500, 500), right=False, labels=cat_labels
)
for op, str_rep in [
("__add__", r"\+"),
("__sub__", "-"),
("__mul__", r"\*"),
("__truediv__", "/"),
]:
msg = f"Series cannot perform the operation {str_rep}|unsupported operand"
with pytest.raises(TypeError, match=msg):
getattr(df, op)(df)
s = df["value_group"]
for op in ["kurt", "skew", "var", "std", "mean", "sum", "median"]:
msg = f"'Categorical' does not implement reduction '{op}'"
with pytest.raises(TypeError, match=msg):
getattr(s, op)(numeric_only=False)
s = Series(Categorical([1, 2, 3, 4]))
with pytest.raises(
TypeError, match="'Categorical' does not implement reduction 'sum'"
):
np.sum(s)
for op, str_rep in [
("__add__", r"\+"),
("__sub__", "-"),
("__mul__", r"\*"),
("__truediv__", "/"),
]:
msg = f"Series cannot perform the operation {str_rep}|unsupported operand"
with pytest.raises(TypeError, match=msg):
getattr(s, op)(2)
msg = "Object with dtype category cannot perform the numpy op log"
with pytest.raises(TypeError, match=msg):
np.log(s)
| true | true |
f73a8113777d6c7e57eeb813963b99ce41b53ebc | 1,822 | py | Python | code/utils/general.py | jamesrosstwo/idr-dif | e900af5f440b943a7a46134a5afe7a81dd888a05 | [
"MIT"
] | null | null | null | code/utils/general.py | jamesrosstwo/idr-dif | e900af5f440b943a7a46134a5afe7a81dd888a05 | [
"MIT"
] | null | null | null | code/utils/general.py | jamesrosstwo/idr-dif | e900af5f440b943a7a46134a5afe7a81dd888a05 | [
"MIT"
] | null | null | null | import os
from glob import glob
from pathlib import Path
import torch
# Absolute path of the directory containing this module.
_root_dir = os.path.dirname(os.path.abspath(__file__))
# Repository root: two directory levels above this module — TODO confirm
# this matches the actual project layout if files are moved.
ROOT_PATH = Path(_root_dir).parent.parent
def mkdir_ifnotexists(directory):
    """Create *directory* if it does not already exist (idempotent).

    Uses ``os.makedirs(..., exist_ok=True)`` instead of the original
    check-then-``os.mkdir`` pair, which fixes two defects:

    * TOCTOU race: another process creating the directory between the
      ``exists`` check and ``mkdir`` no longer raises ``FileExistsError``.
    * Missing parents: intermediate directories are now created as well,
      where ``os.mkdir`` would raise ``FileNotFoundError``.

    Raises:
        FileExistsError: if *directory* exists but is a regular file
            (previously this was silently ignored, masking misuse).
    """
    os.makedirs(directory, exist_ok=True)
def get_class(kls):
    """Resolve a dotted path such as ``"pkg.mod.Name"`` to the object it names.

    The left-most components (all but the last) are imported via
    ``__import__``, which returns the top-level package; the remaining
    components are then walked attribute-by-attribute.
    """
    parts = kls.split('.')
    obj = __import__('.'.join(parts[:-1]))
    for attr in parts[1:]:
        obj = getattr(obj, attr)
    return obj
def glob_imgs(path):
    """Return the image files directly under *path*.

    Matches ``*.png``, ``*.jpg``, ``*.JPEG`` and ``*.JPG`` (note: not
    ``*.PNG`` or lowercase ``*.jpeg`` — presumably intentional for this
    dataset's naming; confirm before relying on it). Results are grouped
    by pattern, in the order listed.
    """
    return [
        img
        for pattern in ('*.png', '*.jpg', '*.JPEG', '*.JPG')
        for img in glob(os.path.join(path, pattern))
    ]
def split_input(model_input, total_pixels):
    """Split *model_input* into chunks of at most ``n_pixels`` pixels each,
    so rendering a large resolution fits in CUDA memory.

    Decrease ``n_pixels`` in case of a CUDA out-of-memory error. Every chunk
    is a shallow copy of *model_input* whose ``'uv'`` and ``'object_mask'``
    entries are sliced along dim 1 to that chunk's pixel indices; all other
    entries are shared unchanged.
    """
    n_pixels = 10000
    pixel_ids = torch.arange(total_pixels).cuda()
    chunks = []
    for chunk_ids in torch.split(pixel_ids, n_pixels, dim=0):
        chunk = model_input.copy()
        chunk['uv'] = torch.index_select(model_input['uv'], 1, chunk_ids)
        chunk['object_mask'] = torch.index_select(model_input['object_mask'], 1, chunk_ids)
        chunks.append(chunk)
    return chunks
def merge_output(res, total_pixels, batch_size):
    """Merge per-chunk outputs (from ``split_input``) back into full tensors.

    Entries that are ``None`` in the first chunk are skipped. 1-D entries are
    merged to shape ``(batch_size * total_pixels,)``; higher-rank entries keep
    their trailing dimension, yielding ``(batch_size * total_pixels, last_dim)``.
    """
    merged = {}
    for key in res[0]:
        first = res[0][key]
        if first is None:
            continue
        if len(first.shape) == 1:
            parts = [r[key].reshape(batch_size, -1, 1) for r in res]
            merged[key] = torch.cat(parts, 1).reshape(batch_size * total_pixels)
        else:
            parts = [r[key].reshape(batch_size, -1, r[key].shape[-1]) for r in res]
            merged[key] = torch.cat(parts, 1).reshape(batch_size * total_pixels, -1)
    return merged
from glob import glob
from pathlib import Path
import torch
_root_dir = os.path.dirname(os.path.abspath(__file__))
ROOT_PATH = Path(_root_dir).parent.parent
def mkdir_ifnotexists(directory):
if not os.path.exists(directory):
os.mkdir(directory)
def get_class(kls):
parts = kls.split('.')
module = ".".join(parts[:-1])
m = __import__(module)
for comp in parts[1:]:
m = getattr(m, comp)
return m
def glob_imgs(path):
imgs = []
for ext in ['*.png', '*.jpg', '*.JPEG', '*.JPG']:
imgs.extend(glob(os.path.join(path, ext)))
return imgs
def split_input(model_input, total_pixels):
n_pixels = 10000
split = []
for i, indx in enumerate(torch.split(torch.arange(total_pixels).cuda(), n_pixels, dim=0)):
data = model_input.copy()
data['uv'] = torch.index_select(model_input['uv'], 1, indx)
data['object_mask'] = torch.index_select(model_input['object_mask'], 1, indx)
split.append(data)
return split
def merge_output(res, total_pixels, batch_size):
model_outputs = {}
for entry in res[0]:
if res[0][entry] is None:
continue
if len(res[0][entry].shape) == 1:
model_outputs[entry] = torch.cat([r[entry].reshape(batch_size, -1, 1) for r in res],
1).reshape(batch_size * total_pixels)
else:
model_outputs[entry] = torch.cat([r[entry].reshape(batch_size, -1, r[entry].shape[-1]) for r in res],
1).reshape(batch_size * total_pixels, -1)
return model_outputs | true | true |
f73a81dd873068d4e99238758e3b64799c2c540c | 3,828 | py | Python | recipes/pranav-csv2/all/conanfile.py | dyndrite/conan-center-index | 106b5c2f532d5129e7ca1997e29e4e105bb3018c | [
"MIT"
] | 1 | 2021-11-11T03:07:13.000Z | 2021-11-11T03:07:13.000Z | recipes/pranav-csv2/all/conanfile.py | dyndrite/conan-center-index | 106b5c2f532d5129e7ca1997e29e4e105bb3018c | [
"MIT"
] | null | null | null | recipes/pranav-csv2/all/conanfile.py | dyndrite/conan-center-index | 106b5c2f532d5129e7ca1997e29e4e105bb3018c | [
"MIT"
] | null | null | null | import os
import functools
import textwrap
from conans.errors import ConanInvalidConfiguration
from conans import ConanFile, CMake, tools
# Minimum Conan client version this recipe is known to work with.
required_conan_version = ">=1.33.0"
class PranavCSV2Conan(ConanFile):
    """Conan recipe packaging p-ranav/csv2, a header-only C++11 CSV library."""

    name = "pranav-csv2"
    license = "MIT"
    # NOTE(review): this description looks copy-pasted from an unrelated
    # recipe — csv2 is a CSV reader/writer, not a collection of std-lib
    # replacement headers. String deliberately left untouched here.
    description = "Various header libraries mostly future std lib, replacements for(e.g. visit), or some misc"
    topics = ("csv", "iterator", "header-only", )
    url = "https://github.com/conan-io/conan-center-index"
    homepage = "https://github.com/p-ranav/csv2"
    # Header-only package: settings are only consumed by the C++11 compiler
    # check in validate() and dropped from the package id in package_id().
    settings = "os", "arch", "compiler", "build_type",
    generators = "cmake",
    no_copy_source = True

    # Minimum compiler versions assumed to provide the C++11 support csv2
    # needs; compilers not listed fall through to a warning in validate().
    _compiler_required_cpp11 = {
        "Visual Studio": "16",
        "gcc": "8",
        "clang": "7",
        "apple-clang": "12.0",
    }

    @property
    def _source_subfolder(self):
        # Conventional staging folder for the unpacked upstream sources.
        return "source_subfolder"

    def validate(self):
        """Fail configuration early if the compiler cannot do C++11."""
        if self.settings.get_safe("compiler.cppstd"):
            tools.check_min_cppstd(self, "11")

        minimum_version = self._compiler_required_cpp11.get(str(self.settings.compiler), False)
        if minimum_version:
            if tools.Version(self.settings.compiler.version) < minimum_version:
                raise ConanInvalidConfiguration("{} requires C++11, which your compiler does not support.".format(self.name))
        else:
            # Unknown compiler: warn instead of hard-failing.
            self.output.warn("{0} requires C++11. Your compiler is unknown. Assuming it supports C++11.".format(self.name))

    def source(self):
        """Download and unpack the release tarball into the staging folder."""
        tools.get(**self.conan_data["sources"][self.version], destination=self._source_subfolder, strip_root=True)

    @functools.lru_cache(1)
    def _configure_cmake(self):
        # Cached so repeated calls reuse one configured CMake instance.
        # NOTE(review): lru_cache on a method keeps `self` alive for the
        # cache's lifetime; acceptable for a short-lived recipe object.
        cmake = CMake(self)
        cmake.configure(source_folder=self._source_subfolder)
        return cmake

    @property
    def _module_subfolder(self):
        # Destination folder (inside the package) for the alias-target module.
        return os.path.join("lib", "cmake")

    @property
    def _module_file_rel_path(self):
        # Package-relative path of the generated alias-target CMake script.
        return os.path.join(self._module_subfolder,
                            "conan-official-{}-targets.cmake".format(self.name))

    @staticmethod
    def _create_cmake_module_alias_targets(module_file, targets):
        """Write a CMake script aliasing each upstream target name.

        For every ``alias -> aliased`` pair, emits an INTERFACE IMPORTED
        target so consumers using the upstream target names keep working
        with legacy cmake_find_package generators.
        """
        content = ""
        for alias, aliased in targets.items():
            content += textwrap.dedent("""\
                if(TARGET {aliased} AND NOT TARGET {alias})
                    add_library({alias} INTERFACE IMPORTED)
                    set_property(TARGET {alias} PROPERTY INTERFACE_LINK_LIBRARIES {aliased})
                endif()
            """.format(alias=alias, aliased=aliased))
        tools.save(module_file, content)

    def package(self):
        """Install headers via upstream CMake, then prune build metadata."""
        self.copy("LICENSE*", "licenses", self._source_subfolder)
        cmake = self._configure_cmake()
        cmake.install()
        # Remove upstream-generated config files; Conan generates its own.
        tools.rmdir(os.path.join(self.package_folder, "lib", "cmake"))
        tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig"))
        tools.rmdir(os.path.join(self.package_folder, "share"))
        self._create_cmake_module_alias_targets(
            os.path.join(self.package_folder, self._module_file_rel_path),
            {"csv2": "csv2::csv2"}
        )

    def package_id(self):
        # Header-only: one package id regardless of settings.
        self.info.header_only()

    def package_info(self):
        """Expose the upstream ``csv2::csv2`` target to consumers."""
        self.cpp_info.set_property("cmake_file_name", "csv2")
        self.cpp_info.set_property("cmake_target_name", "csv2::csv2")

        # Legacy generator support (cmake_find_package / _multi).
        self.cpp_info.filenames["cmake_find_package"] = "csv2"
        self.cpp_info.filenames["cmake_find_package_multi"] = "csv2"
        self.cpp_info.names["cmake_find_package"] = "csv2"
        self.cpp_info.names["cmake_find_package_multi"] = "csv2"
        self.cpp_info.builddirs.append(self._module_subfolder)
        self.cpp_info.build_modules["cmake_find_package"] = [self._module_file_rel_path]
        self.cpp_info.build_modules["cmake_find_package_multi"] = [self._module_file_rel_path]
| 37.90099 | 125 | 0.653605 | import os
import functools
import textwrap
from conans.errors import ConanInvalidConfiguration
from conans import ConanFile, CMake, tools
required_conan_version = ">=1.33.0"
class PranavCSV2Conan(ConanFile):
    """Conan recipe packaging the header-only p-ranav/csv2 CSV library.

    Header-only: no build step; ``package_id`` strips settings so one
    package works for every configuration.
    """
    name = "pranav-csv2"
    license = "MIT"
    description = "Various header libraries mostly future std lib, replacements for(e.g. visit), or some misc"
    topics = ("csv", "iterator", "header-only", )
    url = "https://github.com/conan-io/conan-center-index"
    homepage = "https://github.com/p-ranav/csv2"
    settings = "os", "arch", "compiler", "build_type",
    generators = "cmake",
    no_copy_source = True
    # Minimum compiler versions known to provide the required C++11 support.
    _compiler_required_cpp11 = {
        "Visual Studio": "16",
        "gcc": "8",
        "clang": "7",
        "apple-clang": "12.0",
    }
    @property
    def _source_subfolder(self):
        return "source_subfolder"
    def validate(self):
        # Reject configurations whose compiler is known to lack C++11;
        # unknown compilers get a warning and are assumed capable.
        if self.settings.get_safe("compiler.cppstd"):
            tools.check_min_cppstd(self, "11")
        minimum_version = self._compiler_required_cpp11.get(str(self.settings.compiler), False)
        if minimum_version:
            if tools.Version(self.settings.compiler.version) < minimum_version:
                raise ConanInvalidConfiguration("{} requires C++11, which your compiler does not support.".format(self.name))
        else:
            self.output.warn("{0} requires C++11. Your compiler is unknown. Assuming it supports C++11.".format(self.name))
    def source(self):
        tools.get(**self.conan_data["sources"][self.version], destination=self._source_subfolder, strip_root=True)
    @functools.lru_cache(1)
    def _configure_cmake(self):
        # Cached so package() reuses the configured CMake helper.
        cmake = CMake(self)
        cmake.configure(source_folder=self._source_subfolder)
        return cmake
    @property
    def _module_subfolder(self):
        return os.path.join("lib", "cmake")
    @property
    def _module_file_rel_path(self):
        return os.path.join(self._module_subfolder,
                            "conan-official-{}-targets.cmake".format(self.name))
    @staticmethod
    def _create_cmake_module_alias_targets(module_file, targets):
        # Emit a CMake module mapping legacy alias targets to the real ones.
        content = ""
        for alias, aliased in targets.items():
            content += textwrap.dedent("""\
                if(TARGET {aliased} AND NOT TARGET {alias})
                    add_library({alias} INTERFACE IMPORTED)
                    set_property(TARGET {alias} PROPERTY INTERFACE_LINK_LIBRARIES {aliased})
                endif()
            """.format(alias=alias, aliased=aliased))
        tools.save(module_file, content)
    def package(self):
        self.copy("LICENSE*", "licenses", self._source_subfolder)
        cmake = self._configure_cmake()
        cmake.install()
        # Drop upstream CMake/pkg-config files; Conan generates its own.
        tools.rmdir(os.path.join(self.package_folder, "lib", "cmake"))
        tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig"))
        tools.rmdir(os.path.join(self.package_folder, "share"))
        self._create_cmake_module_alias_targets(
            os.path.join(self.package_folder, self._module_file_rel_path),
            {"csv2": "csv2::csv2"}
        )
    def package_id(self):
        self.info.header_only()
    def package_info(self):
        # Names/build-modules for both new-style (set_property) and legacy
        # cmake_find_package generators.
        self.cpp_info.set_property("cmake_file_name", "csv2")
        self.cpp_info.set_property("cmake_target_name", "csv2::csv2")
        self.cpp_info.filenames["cmake_find_package"] = "csv2"
        self.cpp_info.filenames["cmake_find_package_multi"] = "csv2"
        self.cpp_info.names["cmake_find_package"] = "csv2"
        self.cpp_info.names["cmake_find_package_multi"] = "csv2"
        self.cpp_info.builddirs.append(self._module_subfolder)
        self.cpp_info.build_modules["cmake_find_package"] = [self._module_file_rel_path]
        self.cpp_info.build_modules["cmake_find_package_multi"] = [self._module_file_rel_path]
| true | true |
f73a825bdfab72caa0a7eaccf050084272c9d858 | 2,976 | py | Python | Day2/day2.py | lukasHD/adventofcode2019 | 409a2008de43ee70258e02de5a9aad27aa18c67a | [
"MIT"
] | null | null | null | Day2/day2.py | lukasHD/adventofcode2019 | 409a2008de43ee70258e02de5a9aad27aa18c67a | [
"MIT"
] | null | null | null | Day2/day2.py | lukasHD/adventofcode2019 | 409a2008de43ee70258e02de5a9aad27aa18c67a | [
"MIT"
] | null | null | null | # https://adventofcode.com/2019/day/2
#
# --- Day 2: 1202 Program Alarm ---
#
#
#
def readOpCode(op):
    """Print and return the mnemonic for an Intcode opcode.

    Fixes the original's inconsistent bare ``return`` (only the ``add``
    branch returned) and its always-``None`` result.

    Args:
        op: integer opcode (1=add, 2=mul, 99=halt).

    Returns:
        "add", "mul" or "break"; ``None`` for an unknown opcode.
    """
    if op == 1:
        print("add")
        return "add"
    elif op == 2:
        print("mul")
        return "mul"
    elif op == 99:
        print("break")
        return "break"
    return None
# Sample Intcode program from the puzzle text (halts with 3500 at address 0).
intInput = [1,9,10,3,2,3,11,0,99,30,40,50]
def loadintCode(fname='input'):
    """Read a comma-separated Intcode program from *fname* into a list of ints."""
    with open(fname, 'r') as handle:
        raw = handle.read()
    return [int(token) for token in raw.split(',')]
def printIndexValue(L, pos=0):
    """Print the list values, then an index ruler with a caret marking *pos*."""
    width = len(str(max(L))) + 1
    cells = "".join("{:{w}d},".format(value, w=width) for value in L)
    print("[" + cells + "]")
    markers = [str(i) for i in range(len(L))]
    markers[pos] = "^" * width
    ruler = "".join("{:^{w}s},".format(mark, w=width) for mark in markers)
    print("(" + ruler + ")")
def runIntcode(intInput, debug=True):
    """Execute an Intcode program, mutating *intInput* in place.

    Opcode 1 adds, opcode 2 multiplies (each takes three positional
    parameters: two source addresses and a destination address); opcode 99
    halts.  The program counter advances 4 per instruction.

    Replaces the original's fragile ``enumerate`` + ignore-counter walk of a
    list being mutated with an explicit program counter, raises on unknown
    opcodes instead of silently skipping them, and returns the memory even
    when execution falls off the end (the original returned ``None``).

    Args:
        intInput: list of ints; program memory, modified in place.
        debug: when True, trace every instruction to stdout.

    Returns:
        The (mutated) program memory list.

    Raises:
        ValueError: on an opcode other than 1, 2 or 99.
    """
    pc = 0  # program counter
    while pc < len(intInput):
        op = intInput[pc]
        if debug:
            print("")
            printIndexValue(intInput, pc)
        if op == 99:
            if debug: print("break")
            return intInput
        if op not in (1, 2):
            raise ValueError("unknown opcode %d at position %d" % (op, pc))
        src_a, src_b, dst = intInput[pc + 1], intInput[pc + 2], intInput[pc + 3]
        a, b = intInput[src_a], intInput[src_b]
        result = a + b if op == 1 else a * b
        if debug:
            name = "add" if op == 1 else "mul"
            sign = "+" if op == 1 else "*"
            print("{}({}, {}, {})".format(name, src_a, src_b, dst))
            print("L[{}] = {} {} {} = {}".format(dst, a, sign, b, result))
        intInput[dst] = result
        pc += 4
    return intInput
def runPartOne():
    """Part one: run the sample program, then the real input with noun=12, verb=2."""
    sample = [1, 1, 1, 4, 99, 5, 6, 0, 99]
    runIntcode(sample)
    program = loadintCode()
    print(program)
    # Restore the "1202 program alarm" state per the puzzle instructions.
    program[1] = 12
    program[2] = 2
    print(program)
    print("**************************************************")
    runIntcode(program)
    print("result should be:")
    print([30, 1, 1, 4, 2, 5, 6, 0, 99])
def runPartTwo():
    """Part two: brute-force the noun/verb pair yielding 19690720 at address 0.

    Returns ``100 * noun + verb`` for the first match, else ``None``.
    """
    for candidate_noun in range(100):
        for candidate_verb in range(100):
            print("noun: {:3d} verb: {:3d}".format(candidate_noun, candidate_verb), end='')
            program = loadintCode()
            program[1] = candidate_noun
            program[2] = candidate_verb
            memory = runIntcode(program, False)
            print(" {}".format(memory[0]))
            if memory[0] == 19690720:
                return 100 * candidate_noun + candidate_verb
if __name__ == '__main__':
    # Entry point: run the part-two search.  Stray dataset-metadata residue
    # ("| 32.347826 | 184 | ...") fused onto this line has been removed.
    runPartTwo()
def readOpCode(op):
    """Print the mnemonic for an Intcode opcode (1=add, 2=mul, 99=break)."""
    if op == 1:
        print("add")
        return
    elif op == 2:
        print("mul")
    elif op == 99:
        print("break")
# Sample Intcode program from the puzzle text (halts with 3500 at address 0).
intInput = [1,9,10,3,2,3,11,0,99,30,40,50]
def loadintCode(fname='input'):
    """Read a comma-separated Intcode program from *fname* into a list of ints."""
    with open(fname, 'r') as f:
        l = list(f.read().split(','))
        p = [int(x) for x in l]
    return p
def printIndexValue(L, pos=0):
    """Print the list values, then an index ruler with a caret marking *pos*."""
    longest = len(str(max(L)))
    print("[",end='')
    for idx, val in enumerate(L):
        print("{:{width}d},".format(val, width=longest+1),end='')
    print("]")
    indices = list(range(len(L)))
    indices[pos] = "^"*(longest+1)
    print("(",end='')
    for idx in indices:
        print("{:^{width}s},".format(str(idx), width=longest+1),end='')
    print(")")
def runIntcode(intInput, debug=True):
    """Execute an Intcode program in place; return memory on halt (opcode 99).

    Walks the list with enumerate; ``ignore`` skips the 3 parameter cells
    following each add/mul instruction.  Returns None if 99 never appears.
    """
    ignore = 0
    for idx, val in enumerate(intInput):
        if ignore > 0:
            ignore -= 1
            continue
        if debug: print("")
        if debug: printIndexValue(intInput, idx)
        if val == 1:
            # opcode 1: mem[p3] = mem[p1] + mem[p2]
            if debug: print("add({}, {}, {})".format(intInput[idx+1], intInput[idx+2], intInput[idx+3]))
            if debug: print("L[{}] = {} + {} = {}".format(intInput[idx+3], intInput[intInput[idx+1]], intInput[intInput[idx+2]], intInput[intInput[idx+1]] + intInput[intInput[idx+2]]))
            intInput[intInput[idx+3]] = intInput[intInput[idx+1]] + intInput[intInput[idx+2]]
            ignore = 3
        elif val == 2:
            # opcode 2: mem[p3] = mem[p1] * mem[p2]
            if debug: print("mul({}, {}, {})".format(intInput[idx+1], intInput[idx+2], intInput[idx+3]))
            if debug: print("L[{}] = {} * {} = {}".format(intInput[idx+3], intInput[intInput[idx+1]], intInput[intInput[idx+2]], intInput[intInput[idx+1]] * intInput[intInput[idx+2]]))
            intInput[intInput[idx+3]] = intInput[intInput[idx+1]] * intInput[intInput[idx+2]]
            ignore = 3
        elif val == 99:
            if debug: print("break")
            return(intInput)
def runPartOne():
    """Part one: run the sample program, then the real input with noun=12, verb=2."""
    intInput2 = [1,1,1,4,99,5,6,0,99]
    runIntcode(intInput2)
    intCode = loadintCode()
    print(intCode)
    intCode[1] = 12
    intCode[2] = 2
    print(intCode)
    print("**************************************************")
    runIntcode(intCode)
    print("result should be:")
    print([30,1,1,4,2,5,6,0,99])
def runPartTwo():
    """Part two: brute-force the noun/verb pair yielding 19690720 at address 0."""
    for noun in range(100):
        for verb in range(100):
            print("noun: {:3d} verb: {:3d}".format(noun, verb), end='')
            intCode = loadintCode()
            intCode[1] = noun
            intCode[2] = verb
            result = runIntcode(intCode, False)
            print(" {}".format(result[0]))
            if result[0] == 19690720:
                return 100*noun + verb
if __name__ == '__main__':
    # Entry point: run the part-two search.  Stray dataset-metadata residue
    # ("| true | true") fused onto this line has been removed.
    runPartTwo()
f73a8270beebae29f2fda3eb7c30679fa893c102 | 2,612 | py | Python | built-in/ACL_TensorFlow/Official/recommendation/DCN_for_ACL/scripts/eval.py | Ascend/modelzoo | f018cfed33dbb1cc2110b9ea2e233333f71cc509 | [
"Apache-2.0"
] | 12 | 2020-12-13T08:34:24.000Z | 2022-03-20T15:17:17.000Z | built-in/ACL_TensorFlow/Official/recommendation/DCN_for_ACL/scripts/eval.py | Ascend/modelzoo | f018cfed33dbb1cc2110b9ea2e233333f71cc509 | [
"Apache-2.0"
] | 1 | 2022-01-20T03:11:05.000Z | 2022-01-20T06:53:39.000Z | built-in/ACL_TensorFlow/Official/recommendation/DCN_for_ACL/scripts/eval.py | Ascend/modelzoo | f018cfed33dbb1cc2110b9ea2e233333f71cc509 | [
"Apache-2.0"
] | 2 | 2021-07-10T12:40:46.000Z | 2021-12-17T07:55:15.000Z | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from sklearn.metrics import average_precision_score, roc_auc_score
import numpy as np
import sys
def aucPerformance(mse, labels):
    """Compute, print and return ranking metrics for anomaly scores.

    Args:
        mse: per-sample scores (higher means more likely positive).
        labels: ground-truth binary labels aligned with *mse*.

    Returns:
        tuple: (AUC-ROC, AUC-PR).
    """
    auc_roc = roc_auc_score(labels, mse)
    auc_pr = average_precision_score(labels, mse)
    print("AUC-ROC: %.4f, AUC-PR: %.4f" % (auc_roc, auc_pr))
    return auc_roc, auc_pr
def eval_om(label_dir, om_output_dir):
    """Evaluate OM inference outputs against ground-truth label files.

    Args:
        label_dir: directory of ground-truth label bin files.
        om_output_dir: directory of OM inference output bin files.
    """
    ground_truth, predictions = read_directory(label_dir, om_output_dir)
    aucPerformance(predictions, ground_truth)
def read_directory(label_dir, om_output_dir):
    """Load paired label/output bin files from two directories.

    Files are paired by sorted filename order.  Labels are stored as int32,
    model outputs as float32 (as written by the preprocessing/inference
    steps).  The original indexed ``labels[i]`` for ``i`` over the output
    list, which truncated or raised IndexError on a count mismatch; the
    mismatch is now reported explicitly.

    Args:
        label_dir: directory of ground-truth label ``.bin`` files (int32).
        om_output_dir: directory of OM inference output files (float32).

    Returns:
        tuple(list, list): flattened labels and flattened output scores.

    Raises:
        ValueError: if the two directories hold different file counts.
    """
    labels = sorted(os.listdir(label_dir))
    outputs = sorted(os.listdir(om_output_dir))
    if len(labels) != len(outputs):
        raise ValueError("label/output file count mismatch: %d vs %d"
                         % (len(labels), len(outputs)))
    labels_data = []
    outputs_data = []
    for label_name, output_name in zip(labels, outputs):
        labels_data.extend(np.fromfile(os.path.join(label_dir, label_name), dtype=np.int32))
        outputs_data.extend(np.fromfile(os.path.join(om_output_dir, output_name), dtype=np.float32))
    return labels_data, outputs_data
# CLI entry: eval.py <ground_truth_dir> <om_output_dir>
gt_dir = sys.argv[1]
predict_dir = sys.argv[2]
eval_om(gt_dir, predict_dir)
| 31.853659 | 92 | 0.692573 |
import os
from sklearn.metrics import average_precision_score, roc_auc_score
import numpy as np
import sys
def aucPerformance(mse, labels):
    """Compute, print and return (AUC-ROC, AUC-PR) for scores *mse* vs *labels*."""
    roc_auc = roc_auc_score(labels, mse)
    ap = average_precision_score(labels, mse)
    print("AUC-ROC: %.4f, AUC-PR: %.4f" % (roc_auc, ap))
    return roc_auc, ap
def eval_om(label_dir, om_output_dir):
    """Evaluate OM inference outputs in *om_output_dir* against labels in *label_dir*."""
    label, score = read_directory(label_dir, om_output_dir)
    aucPerformance(score, label)
def read_directory(label_dir, om_output_dir):
    """Load label (int32) and output (float32) bin files, paired by sorted filename.

    Returns (labels_data, outputs_data) as flat lists.
    """
    labels = os.listdir(label_dir)
    labels.sort()
    labels_data = list()
    outputs = os.listdir(om_output_dir)
    outputs.sort()
    outputs_data = list()
    for i in range(len(outputs)):
        label_data = np.fromfile(os.path.join(label_dir, labels[i]), dtype=np.int32)
        labels_data.extend(label_data)
        output_data = np.fromfile(os.path.join(om_output_dir, outputs[i]), dtype=np.float32)
        outputs_data.extend(output_data)
    return labels_data, outputs_data
# CLI entry: eval.py <ground_truth_dir> <om_output_dir>
gt_dir = sys.argv[1]
predict_dir = sys.argv[2]
eval_om(gt_dir, predict_dir)
| true | true |
f73a8417d338205aefaaed6325e19e807ee13d7e | 196 | py | Python | cashfree_sdk/payouts/validations/__upi_validation.py | cashfree/cashfree-sdk-python | f59f706a9ef6bc5d34c5933045b526dc9b0eea57 | [
"MIT"
] | 1 | 2020-06-24T20:53:57.000Z | 2020-06-24T20:53:57.000Z | cashfree_sdk/payouts/validations/__upi_validation.py | cashfree/cashfree-sdk-python | f59f706a9ef6bc5d34c5933045b526dc9b0eea57 | [
"MIT"
] | 8 | 2020-01-09T11:22:18.000Z | 2021-05-23T09:39:03.000Z | cashfree_sdk/payouts/validations/__upi_validation.py | cashfree/cashfree-sdk-python | f59f706a9ef6bc5d34c5933045b526dc9b0eea57 | [
"MIT"
] | 3 | 2020-05-11T04:47:06.000Z | 2022-03-31T00:56:10.000Z |
class UPIValidation:
    """Request descriptor for the payout UPI-details validation endpoint."""

    # API endpoint and HTTP verb for UPI VPA validation.
    end_point = "/payout/v1/validation/upiDetails"
    req_type = "GET"

    def __init__(self, **kwargs):
        """Store the beneficiary name and VPA to validate.

        Raises:
            KeyError: if ``name`` or ``vpa`` is missing from kwargs.
        """
        self.name = kwargs["name"]
        # Dataset-metadata residue ("| 24.5 | 50 | ...") that had been fused
        # onto this assignment (a runtime TypeError) has been removed.
        self.vpa = kwargs["vpa"]
class UPIValidation:
    """Request descriptor for the payout UPI-details validation endpoint."""

    # API endpoint and HTTP verb for UPI VPA validation.
    end_point = "/payout/v1/validation/upiDetails"
    req_type = "GET"

    def __init__(self, **kwargs):
        """Store the beneficiary name and VPA to validate.

        Raises:
            KeyError: if ``name`` or ``vpa`` is missing from kwargs.
        """
        self.name = kwargs["name"]
        # Dataset-metadata residue ("| true | true") that had been fused
        # onto this assignment (a runtime NameError) has been removed.
        self.vpa = kwargs["vpa"]
f73a842c4e9e3317787dcc216be5d79bc14512a9 | 3,028 | py | Python | daemon/core/gui/appconfig.py | geraldolsribeiro/core | d07635f23c349bab6b61bd3629d2721344423dd1 | [
"BSD-2-Clause"
] | null | null | null | daemon/core/gui/appconfig.py | geraldolsribeiro/core | d07635f23c349bab6b61bd3629d2721344423dd1 | [
"BSD-2-Clause"
] | null | null | null | daemon/core/gui/appconfig.py | geraldolsribeiro/core | d07635f23c349bab6b61bd3629d2721344423dd1 | [
"BSD-2-Clause"
] | null | null | null | import os
import shutil
from pathlib import Path
import yaml
# gui home paths
from core.gui import themes
HOME_PATH = Path.home().joinpath(".coretk")
BACKGROUNDS_PATH = HOME_PATH.joinpath("backgrounds")
CUSTOM_EMANE_PATH = HOME_PATH.joinpath("custom_emane")
CUSTOM_SERVICE_PATH = HOME_PATH.joinpath("custom_services")
ICONS_PATH = HOME_PATH.joinpath("icons")
MOBILITY_PATH = HOME_PATH.joinpath("mobility")
XMLS_PATH = HOME_PATH.joinpath("xmls")
CONFIG_PATH = HOME_PATH.joinpath("gui.yaml")
LOG_PATH = HOME_PATH.joinpath("gui.log")
# local paths
DATA_PATH = Path(__file__).parent.joinpath("data")
LOCAL_ICONS_PATH = DATA_PATH.joinpath("icons").absolute()
LOCAL_BACKGROUND_PATH = DATA_PATH.joinpath("backgrounds").absolute()
LOCAL_XMLS_PATH = DATA_PATH.joinpath("xmls").absolute()
LOCAL_MOBILITY_PATH = DATA_PATH.joinpath("mobility").absolute()
# configuration data
TERMINALS = [
"$TERM",
"gnome-terminal --window --",
"lxterminal -e",
"konsole -e",
"xterm -e",
"aterm -e",
"eterm -e",
"rxvt -e",
"xfce4-terminal -x",
]
EDITORS = ["$EDITOR", "vim", "emacs", "gedit", "nano", "vi"]
class IndentDumper(yaml.Dumper):
    """yaml.Dumper that always indents block sequences under their parent key.

    PyYAML's default dumper emits list items flush with the parent mapping
    key; forcing ``indentless=False`` yields conventionally indented YAML.
    """
    def increase_indent(self, flow=False, indentless=False):
        # Ignore the caller-supplied ``indentless`` flag.
        return super().increase_indent(flow, False)
def copy_files(current_path, new_path):
    """Copy every file directly under *current_path* into *new_path*.

    Args:
        current_path: source directory (pathlib.Path).
        new_path: destination directory (pathlib.Path); must exist.
    """
    for source in current_path.glob("*"):
        shutil.copy(source, new_path.joinpath(source.name))
def check_directory():
    """Create the ~/.coretk tree on first run and write a default GUI config.

    No-op if HOME_PATH already exists.
    """
    if HOME_PATH.exists():
        return
    HOME_PATH.mkdir()
    BACKGROUNDS_PATH.mkdir()
    CUSTOM_EMANE_PATH.mkdir()
    CUSTOM_SERVICE_PATH.mkdir()
    ICONS_PATH.mkdir()
    MOBILITY_PATH.mkdir()
    XMLS_PATH.mkdir()
    # Seed the user directories with the data files bundled in the package.
    copy_files(LOCAL_ICONS_PATH, ICONS_PATH)
    copy_files(LOCAL_BACKGROUND_PATH, BACKGROUNDS_PATH)
    copy_files(LOCAL_XMLS_PATH, XMLS_PATH)
    copy_files(LOCAL_MOBILITY_PATH, MOBILITY_PATH)
    # Prefer the environment-provided terminal/editor ($TERM/$EDITOR) when
    # set; otherwise fall back to the first concrete entry.
    if "TERM" in os.environ:
        terminal = TERMINALS[0]
    else:
        terminal = TERMINALS[1]
    if "EDITOR" in os.environ:
        editor = EDITORS[0]
    else:
        editor = EDITORS[1]
    # Default configuration written to CONFIG_PATH via save().
    config = {
        "preferences": {
            "theme": themes.THEME_DARK,
            "editor": editor,
            "terminal": terminal,
            "gui3d": "/usr/local/bin/std3d.sh",
            "width": 1000,
            "height": 750,
        },
        "location": {
            "x": 0.0,
            "y": 0.0,
            "z": 0.0,
            "lat": 47.5791667,
            "lon": -122.132322,
            "alt": 2.0,
            "scale": 150.0,
        },
        "servers": [{"name": "example", "address": "127.0.0.1", "port": 50051}],
        "nodes": [],
        "recentfiles": [],
        "observers": [{"name": "hello", "cmd": "echo hello"}],
    }
    save(config)
def read():
    """Return the parsed YAML GUI configuration from CONFIG_PATH."""
    with CONFIG_PATH.open("r") as f:
        return yaml.load(f, Loader=yaml.SafeLoader)
def save(config):
    """Write *config* to CONFIG_PATH as block-style, indented YAML."""
    with CONFIG_PATH.open("w") as f:
        yaml.dump(config, f, Dumper=IndentDumper, default_flow_style=False)
import os
import shutil
from pathlib import Path
import yaml
from core.gui import themes
HOME_PATH = Path.home().joinpath(".coretk")
BACKGROUNDS_PATH = HOME_PATH.joinpath("backgrounds")
CUSTOM_EMANE_PATH = HOME_PATH.joinpath("custom_emane")
CUSTOM_SERVICE_PATH = HOME_PATH.joinpath("custom_services")
ICONS_PATH = HOME_PATH.joinpath("icons")
MOBILITY_PATH = HOME_PATH.joinpath("mobility")
XMLS_PATH = HOME_PATH.joinpath("xmls")
CONFIG_PATH = HOME_PATH.joinpath("gui.yaml")
LOG_PATH = HOME_PATH.joinpath("gui.log")
DATA_PATH = Path(__file__).parent.joinpath("data")
LOCAL_ICONS_PATH = DATA_PATH.joinpath("icons").absolute()
LOCAL_BACKGROUND_PATH = DATA_PATH.joinpath("backgrounds").absolute()
LOCAL_XMLS_PATH = DATA_PATH.joinpath("xmls").absolute()
LOCAL_MOBILITY_PATH = DATA_PATH.joinpath("mobility").absolute()
TERMINALS = [
"$TERM",
"gnome-terminal --window --",
"lxterminal -e",
"konsole -e",
"xterm -e",
"aterm -e",
"eterm -e",
"rxvt -e",
"xfce4-terminal -x",
]
EDITORS = ["$EDITOR", "vim", "emacs", "gedit", "nano", "vi"]
class IndentDumper(yaml.Dumper):
    """yaml.Dumper that always indents block sequences under their parent key."""
    def increase_indent(self, flow=False, indentless=False):
        # Force indentless=False so YAML list items are indented.
        return super().increase_indent(flow, False)
def copy_files(current_path, new_path):
    """Copy every file directly under *current_path* into *new_path*."""
    for current_file in current_path.glob("*"):
        new_file = new_path.joinpath(current_file.name)
        shutil.copy(current_file, new_file)
def check_directory():
    """Create the ~/.coretk tree on first run and write a default GUI config.

    No-op if HOME_PATH already exists.
    """
    if HOME_PATH.exists():
        return
    HOME_PATH.mkdir()
    BACKGROUNDS_PATH.mkdir()
    CUSTOM_EMANE_PATH.mkdir()
    CUSTOM_SERVICE_PATH.mkdir()
    ICONS_PATH.mkdir()
    MOBILITY_PATH.mkdir()
    XMLS_PATH.mkdir()
    # Seed the user directories with the data files bundled in the package.
    copy_files(LOCAL_ICONS_PATH, ICONS_PATH)
    copy_files(LOCAL_BACKGROUND_PATH, BACKGROUNDS_PATH)
    copy_files(LOCAL_XMLS_PATH, XMLS_PATH)
    copy_files(LOCAL_MOBILITY_PATH, MOBILITY_PATH)
    # Prefer $TERM/$EDITOR when present in the environment.
    if "TERM" in os.environ:
        terminal = TERMINALS[0]
    else:
        terminal = TERMINALS[1]
    if "EDITOR" in os.environ:
        editor = EDITORS[0]
    else:
        editor = EDITORS[1]
    config = {
        "preferences": {
            "theme": themes.THEME_DARK,
            "editor": editor,
            "terminal": terminal,
            "gui3d": "/usr/local/bin/std3d.sh",
            "width": 1000,
            "height": 750,
        },
        "location": {
            "x": 0.0,
            "y": 0.0,
            "z": 0.0,
            "lat": 47.5791667,
            "lon": -122.132322,
            "alt": 2.0,
            "scale": 150.0,
        },
        "servers": [{"name": "example", "address": "127.0.0.1", "port": 50051}],
        "nodes": [],
        "recentfiles": [],
        "observers": [{"name": "hello", "cmd": "echo hello"}],
    }
    save(config)
def read():
    """Return the parsed YAML GUI configuration from CONFIG_PATH."""
    with CONFIG_PATH.open("r") as f:
        return yaml.load(f, Loader=yaml.SafeLoader)
def save(config):
    """Write *config* to CONFIG_PATH as block-style, indented YAML."""
    with CONFIG_PATH.open("w") as f:
        yaml.dump(config, f, Dumper=IndentDumper, default_flow_style=False)
| true | true |
f73a8497644d3673fe8705d01605ef1349f13878 | 17,140 | py | Python | shanhe/iaas/constants.py | shanhe-nsccjn/shanhe-sdk-python | efead12f08d93a7ebb986d137da9fbfc7a43ad02 | [
"Apache-2.0"
] | null | null | null | shanhe/iaas/constants.py | shanhe-nsccjn/shanhe-sdk-python | efead12f08d93a7ebb986d137da9fbfc7a43ad02 | [
"Apache-2.0"
] | null | null | null | shanhe/iaas/constants.py | shanhe-nsccjn/shanhe-sdk-python | efead12f08d93a7ebb986d137da9fbfc7a43ad02 | [
"Apache-2.0"
] | null | null | null | # =========================================================================
# Copyright 2021-present ShanHe, Inc.
# -------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
# --------- API Actions ---------
# Access Key
ACTION_DESCRIBE_ACCESS_KEYS = "DescribeAccessKeys"
# User
ACTION_DESCRIBE_SUB_USERS = "DescribeSubUsers"
ACTION_CREATE_SUB_USER = "CreateSubUser"
ACTION_MODIFY_SUB_USER_ATTRIBUTES = "ModifySubUserAttributes"
ACTION_DELETE_SUB_USERS = "DeleteSubUsers"
ACTION_RESTORE_SUB_USERS = "RestoreSubUsers"
# Notification Center
ACTION_DESCRIBE_NOTIFICATION_CENTER_USER_POSTS = "DescribeNotificationCenterUserPosts"
ACTION_CREATE_NOTIFICATION_LIST = "CreateNotificationList"
ACTION_DESCRIBE_NOTIFICATION_LISTS = "DescribeNotificationLists"
ACTION_MODIFY_NOTIFICATION_LIST_ATTRIBUTES = "ModifyNotificationListAttributes"
ACTION_DELETE_NOTIFICATION_LISTS = "DeleteNotificationLists"
ACTION_CREATE_NOTIFICATION_ITEMS = "CreateNotificationItems"
ACTION_DESCRIBE_NOTIFICATION_ITEMS = "DescribeNotificationItems"
ACTION_DELETE_NOTIFICATION_ITEMS = "DeleteNotificationItems"
ACTION_VERIFY_NOTIFICATION_ITEM = "VerifyNotificationItem"
# zones
ACTION_DESCRIBE_ZONES = "DescribeZones"
# jobs
ACTION_DESCRIBE_JOBS = "DescribeJobs"
# images
ACTION_DESCRIBE_IMAGES = "DescribeImages"
ACTION_CAPTURE_INSTANCE = "CaptureInstance"
ACTION_DELETE_IMAGES = "DeleteImages"
ACTION_MODIFY_IMAGE_ATTRIBUTES = "ModifyImageAttributes"
# instances
ACTION_DESCRIBE_INSTANCES = "DescribeInstances"
ACTION_RUN_INSTANCES = "RunInstances"
ACTION_RUN_INSTANCES_BY_CONFIGURATION = "RunInstancesByConfiguration"
ACTION_TERMINATE_INSTANCES = "TerminateInstances"
ACTION_START_INSTANCES = "StartInstances"
ACTION_RESTART_INSTANCES = "RestartInstances"
ACTION_STOP_INSTANCES = "StopInstances"
ACTION_RESIZE_INSTANCES = "ResizeInstances"
ACTION_RESET_INSTANCES = "ResetInstances"
ACTION_MODIFY_INSTANCE_ATTRIBUTES = "ModifyInstanceAttributes"
ACTION_CLONE_INSTANCES = "CloneInstances"
# instance groups
ACTION_CREATE_INSTANCE_GROUPS = "CreateInstanceGroups"
ACTION_DELETE_INSTANCE_GROUPS = "DeleteInstanceGroups"
ACTION_JOIN_INSTANCE_GROUP = "JoinInstanceGroup"
ACTION_LEAVE_INSTANCE_GROUP = "LeaveInstanceGroup"
ACTION_DESCRIBE_INSTANCE_GROUPS = "DescribeInstanceGroups"
# user data
ACTION_UPLOAD_USERDATA_ATTACHMENT = "UploadUserDataAttachment"
# volumes
ACTION_CLONE_VOLUMES = "CloneVolumes"
ACTION_DESCRIBE_VOLUMES = "DescribeVolumes"
ACTION_CREATE_VOLUMES = "CreateVolumes"
ACTION_DELETE_VOLUMES = "DeleteVolumes"
ACTION_ATTACH_VOLUMES = "AttachVolumes"
ACTION_DETACH_VOLUMES = "DetachVolumes"
ACTION_RESIZE_VOLUMES = "ResizeVolumes"
ACTION_MODIFY_VOLUME_ATTRIBUTES = "ModifyVolumeAttributes"
# key pair
ACTION_DESCRIBE_KEY_PAIRS = "DescribeKeyPairs"
ACTION_CREATE_KEY_PAIR = "CreateKeyPair"
ACTION_DELETE_KEY_PAIRS = "DeleteKeyPairs"
ACTION_ATTACH_KEY_PAIRS = "AttachKeyPairs"
ACTION_DETACH_KEY_PAIRS = "DetachKeyPairs"
ACTION_MODIFY_KEYPAIR_ATTRIBUTES = "ModifyKeyPairAttributes"
# security group
ACTION_DESCRIBE_SECURITY_GROUPS = "DescribeSecurityGroups"
ACTION_CREATE_SECURITY_GROUP = "CreateSecurityGroup"
ACTION_MODIFY_SECURITY_GROUP_ATTRIBUTES = "ModifySecurityGroupAttributes"
ACTION_APPLY_SECURITY_GROUP = "ApplySecurityGroup"
ACTION_REMOVE_SECURITY_GROUP = "RemoveSecurityGroup"
ACTION_DELETE_SECURITY_GROUPS = "DeleteSecurityGroups"
ACTION_DESCRIBE_SECURITY_GROUP_RULES = "DescribeSecurityGroupRules"
ACTION_ADD_SECURITY_GROUP_RULES = "AddSecurityGroupRules"
ACTION_DELETE_SECURITY_GROUP_RULES = "DeleteSecurityGroupRules"
ACTION_MODIFY_SECURITY_GROUP_RULE_ATTRIBUTES = "ModifySecurityGroupRuleAttributes"
ACTION_DESCRIBE_SECURITY_GROUP_IPSETS = "DescribeSecurityGroupIPSets"
ACTION_CREATE_SECURITY_GROUP_IPSET = "CreateSecurityGroupIPSet"
ACTION_DELETE_SECURITY_GROUP_IPSETS = "DeleteSecurityGroupIPSets"
ACTION_MODIFY_SECURITY_GROUP_IPSET_ATTRIBUTES = "ModifySecurityGroupIPSetAttributes"
# vxnets
ACTION_DESCRIBE_VXNETS = "DescribeVxnets"
ACTION_CREATE_VXNETS = "CreateVxnets"
ACTION_DELETE_VXNETS = "DeleteVxnets"
ACTION_JOIN_VXNET = "JoinVxnet"
ACTION_LEAVE_VXNET = "LeaveVxnet"
ACTION_MODIFY_VXNET_ATTRIBUTES = "ModifyVxnetAttributes"
ACTION_DESCRIBE_VXNET_INSTANCES = "DescribeVxnetInstances"
# router
ACTION_CREATE_ROUTERS = "CreateRouters"
ACTION_UPDATE_ROUTERS = "UpdateRouters"
ACTION_DELETE_ROUTERS = "DeleteRouters"
ACTION_JOIN_ROUTER = "JoinRouter"
ACTION_LEAVE_ROUTER = "LeaveRouter"
ACTION_POWEROFF_ROUTERS = "PowerOffRouters"
ACTION_POWERON_ROUTERS = "PowerOnRouters"
ACTION_DESCRIBE_ROUTERS = "DescribeRouters"
ACTION_DESCRIBE_ROUTER_VXNETS = "DescribeRouterVxnets"
ACTION_MODIFY_ROUTER_ATTRIBUTES = "ModifyRouterAttributes"
ACTION_MODIFY_ROUTER_STATIC_ATTRIBUTES = "ModifyRouterStaticAttributes"
ACTION_DESCRIBE_ROUTER_STATICS = "DescribeRouterStatics"
ACTION_ADD_ROUTER_STATICS = "AddRouterStatics"
ACTION_DELETE_ROUTER_STATICS = "DeleteRouterStatics"
ACTION_MODIFY_ROUTER_STATIC_ENTRY_ATTRIBUTES = "ModifyRouterStaticEntryAttributes"
ACTION_DESCRIBE_ROUTER_STATIC_ENTRIES = "DescribeRouterStaticEntries"
ACTION_ADD_ROUTER_STATIC_ENTRIES = "AddRouterStaticEntries"
ACTION_DELETE_ROUTER_STATIC_ENTRIES = "DeleteRouterStaticEntries"
# eip
ACTION_ASSOCIATE_EIP = "AssociateEip"
ACTION_DISSOCIATE_EIPS = "DissociateEips"
ACTION_ALLOCATE_EIPS = "AllocateEips"
ACTION_RELEASE_EIPS = "ReleaseEips"
ACTION_DESCRIBE_EIPS = "DescribeEips"
ACTION_MODIFY_EIP_ATTRIBUTES = "ModifyEipAttributes"
ACTION_CHANGE_EIPS_BANDWIDTH = "ChangeEipsBandwidth"
ACTION_CHANGE_EIPS_BILLING_MODE = "ChangeEipsBillingMode"
# dns alias
ACTION_DESCRIBE_DNS_ALIASES = "DescribeDNSAliases"
ACTION_ASSOCIATE_DNS_ALIAS = "AssociateDNSAlias"
ACTION_DISSOCIATE_DNS_ALIASES = "DissociateDNSAliases"
ACTION_GET_DNS_LABEL = "GetDNSLabel"
# lb
ACTION_DESCRIBE_LOADBALANCERS = "DescribeLoadBalancers"
ACTION_CREATE_LOADBALANCER = "CreateLoadBalancer"
ACTION_DELETE_LOADBALANCERS = "DeleteLoadBalancers"
ACTION_ASSOCIATE_EIPS_TO_LOADBALANCER = "AssociateEipsToLoadBalancer"
ACTION_DISSOCIATE_EIPS_FROM_LOADBALANCER = "DissociateEipsFromLoadBalancer"
ACTION_UPDATE_LOADBALANCERS = "UpdateLoadBalancers"
ACTION_STOP_LOADBALANCERS = "StopLoadBalancers"
ACTION_START_LOADBALANCERS = "StartLoadBalancers"
ACTION_MODIFY_LOADBALANCER_ATTRIBUTES = "ModifyLoadBalancerAttributes"
ACTION_DESCRIBE_LOADBALANCER_LISTENERS = "DescribeLoadBalancerListeners"
ACTION_ADD_LOADBALANCER_LISTENERS = "AddLoadBalancerListeners"
ACTION_DELETE_LOADBALANCER_LISTENERS = "DeleteLoadBalancerListeners"
ACTION_MODIFY_LOADBALANCER_LISTENER_ATTRIBUTES = "ModifyLoadBalancerListenerAttributes"
ACTION_ADD_LOADBALANCER_BACKENDS = "AddLoadBalancerBackends"
ACTION_DELETE_LOADBALANCER_BACKENDS = "DeleteLoadBalancerBackends"
ACTION_MODIFY_LOADBALANCER_BACKEND_ATTRIBUTES = "ModifyLoadBalancerBackendAttributes"
ACTION_DESCRIBE_LOADBALANCER_BACKENDS = "DescribeLoadBalancerBackends"
ACTION_CREATE_LOADBALANCER_POLICY = "CreateLoadBalancerPolicy"
ACTION_MODIFY_LOADBALANCER_POLICY_ATTRIBUTES = "ModifyLoadBalancerPolicyAttributes"
ACTION_DESCRIBE_LOADBALANCER_POLICIES = "DescribeLoadBalancerPolicies"
ACTION_DELETE_LOADBALANCER_POLICIES = "DeleteLoadBalancerPolicies"
ACTION_APPLY_LOADBALANCER_POLICY = "ApplyLoadBalancerPolicy"
ACTION_DESCRIBE_LOADBALANCER_POLICY_RULES = "DescribeLoadBalancerPolicyRules"
ACTION_ADD_LOADBALANCER_POLICY_RULES = "AddLoadBalancerPolicyRules"
ACTION_MODIFY_LOADBALANCER_POLICY_RULE_ATTRIBUTES = "ModifyLoadBalancerPolicyRuleAttributes"
ACTION_DELETE_LOADBALANCER_POLICY_RULES = "DeleteLoadBalancerPolicyRules"
ACTION_CREATE_SERVER_CERTIFICATE = "CreateServerCertificate"
ACTION_DESCRIBE_SERVER_CERTIFICATES = "DescribeServerCertificates"
ACTION_MODIFY_SERVER_CERTIFICATE_ATTRIBUTES = "ModifyServerCertificateAttributes"
ACTION_DELETE_SERVER_CERTIFICATES = "DeleteServerCertificates"
# monitor
ACTION_GET_MONITOR = "GetMonitor"
ACTION_GET_LOADBALANCER_MONITOR = "GetLoadBalancerMonitor"
# snapshot
ACTION_CREATE_SNAPSHOTS = "CreateSnapshots"
ACTION_DELETE_SNAPSHOTS = "DeleteSnapshots"
ACTION_APPLY_SNAPSHOTS = "ApplySnapshots"
ACTION_DESCRIBE_SNAPSHOTS = "DescribeSnapshots"
ACTION_MODIFY_SNAPSHOT_ATTRIBUTES = "ModifySnapshotAttributes"
ACTION_CAPTURE_INSTANCE_FROM_SNAPSHOT = "CaptureInstanceFromSnapshot"
ACTION_CREATE_VOLUME_FROM_SNAPSHOT = "CreateVolumeFromSnapshot"
# rdb
ACTION_DESCRIBE_RDBS = "DescribeRDBs"
ACTION_CREATE_RDB = "CreateRDB"
ACTION_RESIZE_RDBS = "ResizeRDBs"
ACTION_START_RDBS = "StartRDBs"
ACTION_STOP_RDBS = "StopRDBs"
# mongo
ACTION_DESCRIBE_MONGOS = "DescribeMongos"
ACTION_RESIZE_MONGOS = "ResizeMongos"
ACTION_START_MONGOS = "StartMongos"
ACTION_STOP_MONGOS = "StopMongos"
# cache
ACTION_DESCRIBE_CACHES = "DescribeCaches"
ACTION_CREATE_CACHE = "CreateCache"
ACTION_RESIZE_CACHES = "ResizeCaches"
ACTION_START_CACHES = "StartCaches"
ACTION_STOP_CACHES = "StopCaches"
# spark
ACTION_DESCRIBE_SPARKS = "DescribeSparks"
ACTION_START_SPARKS = "StartSparks"
ACTION_STOP_SPARKS = "StopSparks"
ACTION_ADD_SPARK_NODES = "AddSparkNodes"
ACTION_DELETE_SPARK_NODES = "DeleteSparkNodes"
ACTION_CREATE_SPARK = "CreateSpark"
ACTION_DELETE_SPARKS = "DeleteSparks"
# hadoop
ACTION_DESCRIBE_HADOOPS = "DescribeHadoops"
ACTION_START_HADOOPS = "StartHadoops"
ACTION_STOP_HADOOPS = "StopHadoops"
ACTION_ADD_HADOOP_NODES = "AddHadoopNodes"
ACTION_DELETE_HADOOP_NODES = "DeleteHadoopNodes"
ACTION_CREATE_HADOOP = "CreateHadoop"
ACTION_DELETE_HADOOPS = "DeleteHadoops"
# zk
ACTION_DESCRIBE_ZOOKEEPERS = "DescribeZookeepers"
ACTION_START_ZOOKEEPERS = "StartZookeepers"
ACTION_STOP_ZOOKEEPERS = "StopZookeepers"
# elasticsearch
ACTION_DESCRIBE_ELASTICSEARCHS = "DescribeElasticsearchs"
ACTION_START_ELASTICSEARCHS = "StartElasticsearchs"
ACTION_STOP_ELASTICSEARCHS = "StopElasticsearchs"
# queue
ACTION_DESCRIBE_QUEUES = "DescribeQueues"
ACTION_START_QUEUES = "StartQueues"
ACTION_STOP_QUEUES = "StopQueues"
# tag
ACTION_DESCRIBE_TAGS = "DescribeTags"
ACTION_CREATE_TAG = "CreateTag"
ACTION_DELETE_TAGS = "DeleteTags"
ACTION_MODIFY_TAG_ATTRIBUTES = "ModifyTagAttributes"
ACTION_ATTACH_TAGS = "AttachTags"
ACTION_DETACH_TAGS = "DetachTags"
# nic
ACTION_DESCRIBE_NICS = "DescribeNics"
ACTION_CREATE_NICS = "CreateNics"
ACTION_ATTACH_NICS = "AttachNics"
ACTION_DETACH_NICS = "DetachNics"
ACTION_MODIFY_NIC_ATTRIBUTES = "ModifyNicAttributes"
ACTION_DELETE_NICS = "DeleteNics"
# S2
ACTION_CREATE_S2_SERVER = "CreateS2Server"
ACTION_DESCRIBE_S2_SERVERS = "DescribeS2Servers"
ACTION_MODIFY_S2_SERVER = "ModifyS2ServerAttributes"
ACTION_RESIZE_S2_SERVERS = "ResizeS2Servers"
ACTION_DELETE_S2_SERVERS = "DeleteS2Servers"
ACTION_POWERON_S2_SERVERS = "PowerOnS2Servers"
ACTION_POWEROFF_S2_SERVERS = "PowerOffS2Servers"
ACTION_UPDATE_S2_SERVERS = "UpdateS2Servers"
ACTION_CHANGE_S2_SERVER_VXNET = "ChangeS2ServerVxnet"
ACTION_CREATE_S2_SHARED_TARGET = "CreateS2SharedTarget"
ACTION_DESCRIBE_S2_SHARED_TARGETS = "DescribeS2SharedTargets"
ACTION_DELETE_S2_SHARED_TARGETS = "DeleteS2SharedTargets"
ACTION_ENABLE_S2_SHARED_TARGETS = "EnableS2SharedTargets"
ACTION_DISABLE_S2_SHARED_TARGETS = "DisableS2SharedTargets"
ACTION_MODIFY_S2_SHARED_TARGET = "ModifyS2SharedTargetAttributes"
ACTION_ATTACH_TO_S2_SHARED_TARGET = "AttachToS2SharedTarget"
ACTION_DETACH_FROM_S2_SHARED_TARGET = "DetachFromS2SharedTarget"
ACTION_DESCRIBE_S2_DEFAULT_PARAMETERS = "DescribeS2DefaultParameters"
ACTION_CREATE_S2_GROUP = "CreateS2Group"
ACTION_DESCRIBE_S2_GROUPS = "DescribeS2Groups"
ACTION_MODIFY_S2_GROUP = "ModifyS2Group"
ACTION_DELETE_S2_GROUPS = "DeleteS2Groups"
ACTION_CREATE_S2_ACCOUNT = "CreateS2Account"
ACTION_DESCRIBE_S2_ACCOUNTS = "DescribeS2Accounts"
ACTION_MODIFY_S2_ACCOUNT = "ModifyS2Account"
ACTION_DELETE_S2_ACCOUNTS = "DeleteS2Accounts"
ACTION_ASSOCIATE_S2_ACCOUNT_GROUP = "AssociateS2AccountGroup"
ACTION_DISSOCIATE_S2_ACCOUNT_GROUP = "DissociateS2AccountGroup"
# Alarm
ACTION_DESCRIBE_ALARM_POLICIES = "DescribeAlarmPolicies"
ACTION_CREATE_ALARM_POLICY = "CreateAlarmPolicy"
ACTION_MODIFY_ALARM_POLICY_ATTRIBUTES = "ModifyAlarmPolicyAttributes"
ACTION_DELETE_ALARM_POLICIES = "DeleteAlarmPolicies"
ACTION_DESCRIBE_ALARM_POLICY_RULES = "DescribeAlarmPolicyRules"
ACTION_ADD_ALARM_POLICY_RULES = "AddAlarmPolicyRules"
ACTION_MODIFY_ALARM_POLICY_RULE_ATTRIBUTES = "ModifyAlarmPolicyRuleAttributes"
ACTION_DELETE_ALARM_POLICY_RULES = "DeleteAlarmPolicyRules"
ACTION_DESCRIBE_ALARM_POLICY_ACTIONS = "DescribeAlarmPolicyActions"
ACTION_ADD_ALARM_POLICY_ACTIONS = "AddAlarmPolicyActions"
ACTION_MODIFY_ALARM_POLICY_ACTION_ATTRIBUTES = "ModifyAlarmPolicyActionAttributes"
ACTION_DELETE_ALARM_POLICY_ACTIONS = "DeleteAlarmPolicyActions"
ACTION_ASSOCIATE_ALARM_POLICY = "AssociateAlarmPolicy"
ACTION_DISSOCIATE_ALARM_POLICY = "DissociateAlarmPolicy"
ACTION_APPLY_ALARM_POLICY = "ApplyAlarmPolicy"
ACTION_DESCRIBE_ALARMS = "DescribeAlarms"
ACTION_DESCRIBE_ALARM_HISTORY = "DescribeAlarmHistory"
# Billing
ACTION_GET_BALANCE = "GetBalance"
ACTION_GET_LEASE_INFO = "GetLeaseInfo"
# Collaboration
ACTION_DESCRIBE_SHARED_RESOURCE_GROUPS = "DescribeSharedResourceGroups"
ACTION_DESCRIBE_RESOURCE_GROUPS = "DescribeResourceGroups"
ACTION_CREATE_RESOURCE_GROUPS = "CreateResourceGroups"
ACTION_MODIFY_RESOURCE_GROUP_ATTRIBUTES = "ModifyResourceGroupAttributes"
ACTION_DELETE_RESOURCE_GROUPS = "DeleteResourceGroups"
ACTION_DESCRIBE_RESOURCE_GROUP_ITEMS = "DescribeResourceGroupItems"
ACTION_ADD_RESOURCE_GROUP_ITEMS = "AddResourceGroupItems"
ACTION_DELETE_RESOURCE_GROUP_ITEMS = "DeleteResourceGroupItems"
ACTION_DESCRIBE_USER_GROUPS = "DescribeUserGroups"
ACTION_CREATE_USER_GROUPS = "CreateUserGroups"
ACTION_MODIFY_USER_GROUP_ATTRIBUTES = "ModifyUserGroupAttributes"
ACTION_DELETE_USER_GROUPS = "DeleteUserGroups"
ACTION_DESCRIBE_USER_GROUP_MEMBERS = "DescribeUserGroupMembers"
ACTION_ADD_USER_GROUP_MEMBERS = "AddUserGroupMembers"
ACTION_MODIFY_USER_GROUP_MEMBER_ATTRIBUTES = "ModifyUserGroupMemberAttributes"
ACTION_DELETE_USER_GROUP_MEMBERS = "DeleteUserGroupMembers"
ACTION_DESCRIBE_GROUP_ROLES = "DescribeGroupRoles"
ACTION_CREATE_GROUP_ROLES = "CreateGroupRoles"
ACTION_MODIFY_GROUP_ROLE_ATTRIBUTES = "ModifyGroupRoleAttributes"
ACTION_DELETE_GROUP_ROLES = "DeleteGroupRoles"
ACTION_DESCRIBE_GROUP_ROLE_RULES = "DescribeGroupRoleRules"
ACTION_ADD_GROUP_ROLE_RULES = "AddGroupRoleRules"
ACTION_MODIFY_GROUP_ROLE_RULE_ATTRIBUTES = "ModifyGroupRoleRuleAttributes"
ACTION_DELETE_GROUP_ROLE_RULES = "DeleteGroupRoleRules"
ACTION_GRANT_RESOURCE_GROUPS_TO_USER_GROUPS = "GrantResourceGroupsToUserGroups"
ACTION_REVOKE_RESOURCE_GROUPS_FROM_USER_GROUPS = "RevokeResourceGroupsFromUserGroups"
ACTION_DESCRIBE_RESOURCE_USER_GROUPS = "DescribeResourceUserGroups"
# sdwan
ACTION_DESCRIBE_WAN_ACCESS = "DescribeWanAccesss"
ACTION_CHANGE_WAN_ACCESS_BANDWIDTH = "ChangeWanAccessBandwidth"
ACTION_UPGRADE_WAN_ACCESS = "UpgradeWanAccess"
ACTION_GET_WAN_MONITOR = "GetWanMonitor"
ACTION_GET_WAN_INFO = "GetWanInfo"
# migrate
ACTION_MIGRATE_RESOURCES = "MigrateResources"
# VPC Border
ACTION_CREATE_VPC_BORDERS = "CreateVpcBorders"
ACTION_DELETE_VPC_BORDERS = "DeleteVpcBorders"
ACTION_DESCRIBE_VPC_BORDERS = "DescribeVpcBorders"
ACTION_JOIN_BORDER = "JoinBorder"
ACTION_LEAVE_BORDER = "LeaveBorder"
ACTION_CONFIG_BORDER = "ConfigBorder"
ACTION_MODIFY_BORDER_ATTRIBUTES = "ModifyBorderAttributes"
ACTION_DESCRIBE_BORDER_VXNETS = "DescribeBorderVxnets"
ACTION_ASSOCIATE_BORDER = "AssociateBorder"
ACTION_DISSOCIATE_BORDER = "DissociateBorder"
ACTION_DESCRIBE_BORDER_STATICS = "DescribeBorderStatics"
ACTION_ADD_BORDER_STATICS = "AddBorderStatics"
ACTION_DELETE_BORDER_STATICS = "DeleteBorderStatics"
ACTION_MODIFY_BORDER_STATIC_ATTRIBUTES = "ModifyBorderStaticAttributes"
ACTION_CANCEL_BORDER_STATIC_CHANGES = "CancelBorderStaticChanges"
# --------- Constants for resource ---------
# sg
DIRECTION_EGRESS = 1
DIRECTION_INGRESS = 0
# vxnet
VXNET_TYPE_MANAGED = 1
VXNET_TYPE_UNMANAGED = 0
# lb
BALANCE_ROUNDROBIN = "roundrobin"
BALANCE_LEASTCONN = "leastconn"
HEADER_X_FORWARD_FOR = 1
HEADER_QC_LBID = 2
HEADER_QC_LBIP = 4
LB_TYPE_MAXCONN_5k = 0
LB_TYPE_MAXCONN_20k = 1
LB_TYPE_MAXCONN_40k = 2
LB_TYPE_MAXCONN_100k = 3
LB_TYPE_MAXCONN_200k = 4
LB_TYPE_MAXCONN_500k = 5
# eip
EIP_BILLING_MODE_BANDWIDTH = "bandwidth"
EIP_BILLING_MODE_TRAFFIC = "traffic"
# cluster
ACTION_START_CLUSTERS = "StartClusters"
ACTION_STOP_CLUSTERS = "StopClusters"
ACTION_RESIZE_CLUSTER = "ResizeCluster"
ACTION_DESCRIBE_CLUSTERS = "DescribeClusters"
ACTION_ADD_CLUSTER_NODES = "AddClusterNodes"
ACTION_DELETE_CLUSTER_NODES = "DeleteClusterNodes"
ACTION_DELETE_CLUSTERS = "DeleteClusters"
ACTION_DEPLOY_APP_VERSION = "DeployAppVersion"
| 41.004785 | 92 | 0.857818 |
ACTION_DESCRIBE_ACCESS_KEYS = "DescribeAccessKeys"
ACTION_DESCRIBE_SUB_USERS = "DescribeSubUsers"
ACTION_CREATE_SUB_USER = "CreateSubUser"
ACTION_MODIFY_SUB_USER_ATTRIBUTES = "ModifySubUserAttributes"
ACTION_DELETE_SUB_USERS = "DeleteSubUsers"
ACTION_RESTORE_SUB_USERS = "RestoreSubUsers"
ACTION_DESCRIBE_NOTIFICATION_CENTER_USER_POSTS = "DescribeNotificationCenterUserPosts"
ACTION_CREATE_NOTIFICATION_LIST = "CreateNotificationList"
ACTION_DESCRIBE_NOTIFICATION_LISTS = "DescribeNotificationLists"
ACTION_MODIFY_NOTIFICATION_LIST_ATTRIBUTES = "ModifyNotificationListAttributes"
ACTION_DELETE_NOTIFICATION_LISTS = "DeleteNotificationLists"
ACTION_CREATE_NOTIFICATION_ITEMS = "CreateNotificationItems"
ACTION_DESCRIBE_NOTIFICATION_ITEMS = "DescribeNotificationItems"
ACTION_DELETE_NOTIFICATION_ITEMS = "DeleteNotificationItems"
ACTION_VERIFY_NOTIFICATION_ITEM = "VerifyNotificationItem"
ACTION_DESCRIBE_ZONES = "DescribeZones"
ACTION_DESCRIBE_JOBS = "DescribeJobs"
ACTION_DESCRIBE_IMAGES = "DescribeImages"
ACTION_CAPTURE_INSTANCE = "CaptureInstance"
ACTION_DELETE_IMAGES = "DeleteImages"
ACTION_MODIFY_IMAGE_ATTRIBUTES = "ModifyImageAttributes"
ACTION_DESCRIBE_INSTANCES = "DescribeInstances"
ACTION_RUN_INSTANCES = "RunInstances"
ACTION_RUN_INSTANCES_BY_CONFIGURATION = "RunInstancesByConfiguration"
ACTION_TERMINATE_INSTANCES = "TerminateInstances"
ACTION_START_INSTANCES = "StartInstances"
ACTION_RESTART_INSTANCES = "RestartInstances"
ACTION_STOP_INSTANCES = "StopInstances"
ACTION_RESIZE_INSTANCES = "ResizeInstances"
ACTION_RESET_INSTANCES = "ResetInstances"
ACTION_MODIFY_INSTANCE_ATTRIBUTES = "ModifyInstanceAttributes"
ACTION_CLONE_INSTANCES = "CloneInstances"
ACTION_CREATE_INSTANCE_GROUPS = "CreateInstanceGroups"
ACTION_DELETE_INSTANCE_GROUPS = "DeleteInstanceGroups"
ACTION_JOIN_INSTANCE_GROUP = "JoinInstanceGroup"
ACTION_LEAVE_INSTANCE_GROUP = "LeaveInstanceGroup"
ACTION_DESCRIBE_INSTANCE_GROUPS = "DescribeInstanceGroups"
ACTION_UPLOAD_USERDATA_ATTACHMENT = "UploadUserDataAttachment"
ACTION_CLONE_VOLUMES = "CloneVolumes"
ACTION_DESCRIBE_VOLUMES = "DescribeVolumes"
ACTION_CREATE_VOLUMES = "CreateVolumes"
ACTION_DELETE_VOLUMES = "DeleteVolumes"
ACTION_ATTACH_VOLUMES = "AttachVolumes"
ACTION_DETACH_VOLUMES = "DetachVolumes"
ACTION_RESIZE_VOLUMES = "ResizeVolumes"
ACTION_MODIFY_VOLUME_ATTRIBUTES = "ModifyVolumeAttributes"
ACTION_DESCRIBE_KEY_PAIRS = "DescribeKeyPairs"
ACTION_CREATE_KEY_PAIR = "CreateKeyPair"
ACTION_DELETE_KEY_PAIRS = "DeleteKeyPairs"
ACTION_ATTACH_KEY_PAIRS = "AttachKeyPairs"
ACTION_DETACH_KEY_PAIRS = "DetachKeyPairs"
ACTION_MODIFY_KEYPAIR_ATTRIBUTES = "ModifyKeyPairAttributes"
ACTION_DESCRIBE_SECURITY_GROUPS = "DescribeSecurityGroups"
ACTION_CREATE_SECURITY_GROUP = "CreateSecurityGroup"
ACTION_MODIFY_SECURITY_GROUP_ATTRIBUTES = "ModifySecurityGroupAttributes"
ACTION_APPLY_SECURITY_GROUP = "ApplySecurityGroup"
ACTION_REMOVE_SECURITY_GROUP = "RemoveSecurityGroup"
ACTION_DELETE_SECURITY_GROUPS = "DeleteSecurityGroups"
ACTION_DESCRIBE_SECURITY_GROUP_RULES = "DescribeSecurityGroupRules"
ACTION_ADD_SECURITY_GROUP_RULES = "AddSecurityGroupRules"
ACTION_DELETE_SECURITY_GROUP_RULES = "DeleteSecurityGroupRules"
ACTION_MODIFY_SECURITY_GROUP_RULE_ATTRIBUTES = "ModifySecurityGroupRuleAttributes"
ACTION_DESCRIBE_SECURITY_GROUP_IPSETS = "DescribeSecurityGroupIPSets"
ACTION_CREATE_SECURITY_GROUP_IPSET = "CreateSecurityGroupIPSet"
ACTION_DELETE_SECURITY_GROUP_IPSETS = "DeleteSecurityGroupIPSets"
ACTION_MODIFY_SECURITY_GROUP_IPSET_ATTRIBUTES = "ModifySecurityGroupIPSetAttributes"
ACTION_DESCRIBE_VXNETS = "DescribeVxnets"
ACTION_CREATE_VXNETS = "CreateVxnets"
ACTION_DELETE_VXNETS = "DeleteVxnets"
ACTION_JOIN_VXNET = "JoinVxnet"
ACTION_LEAVE_VXNET = "LeaveVxnet"
ACTION_MODIFY_VXNET_ATTRIBUTES = "ModifyVxnetAttributes"
ACTION_DESCRIBE_VXNET_INSTANCES = "DescribeVxnetInstances"
ACTION_CREATE_ROUTERS = "CreateRouters"
ACTION_UPDATE_ROUTERS = "UpdateRouters"
ACTION_DELETE_ROUTERS = "DeleteRouters"
ACTION_JOIN_ROUTER = "JoinRouter"
ACTION_LEAVE_ROUTER = "LeaveRouter"
ACTION_POWEROFF_ROUTERS = "PowerOffRouters"
ACTION_POWERON_ROUTERS = "PowerOnRouters"
ACTION_DESCRIBE_ROUTERS = "DescribeRouters"
ACTION_DESCRIBE_ROUTER_VXNETS = "DescribeRouterVxnets"
ACTION_MODIFY_ROUTER_ATTRIBUTES = "ModifyRouterAttributes"
ACTION_MODIFY_ROUTER_STATIC_ATTRIBUTES = "ModifyRouterStaticAttributes"
ACTION_DESCRIBE_ROUTER_STATICS = "DescribeRouterStatics"
ACTION_ADD_ROUTER_STATICS = "AddRouterStatics"
ACTION_DELETE_ROUTER_STATICS = "DeleteRouterStatics"
ACTION_MODIFY_ROUTER_STATIC_ENTRY_ATTRIBUTES = "ModifyRouterStaticEntryAttributes"
ACTION_DESCRIBE_ROUTER_STATIC_ENTRIES = "DescribeRouterStaticEntries"
ACTION_ADD_ROUTER_STATIC_ENTRIES = "AddRouterStaticEntries"
ACTION_DELETE_ROUTER_STATIC_ENTRIES = "DeleteRouterStaticEntries"
ACTION_ASSOCIATE_EIP = "AssociateEip"
ACTION_DISSOCIATE_EIPS = "DissociateEips"
ACTION_ALLOCATE_EIPS = "AllocateEips"
ACTION_RELEASE_EIPS = "ReleaseEips"
ACTION_DESCRIBE_EIPS = "DescribeEips"
ACTION_MODIFY_EIP_ATTRIBUTES = "ModifyEipAttributes"
ACTION_CHANGE_EIPS_BANDWIDTH = "ChangeEipsBandwidth"
ACTION_CHANGE_EIPS_BILLING_MODE = "ChangeEipsBillingMode"
ACTION_DESCRIBE_DNS_ALIASES = "DescribeDNSAliases"
ACTION_ASSOCIATE_DNS_ALIAS = "AssociateDNSAlias"
ACTION_DISSOCIATE_DNS_ALIASES = "DissociateDNSAliases"
ACTION_GET_DNS_LABEL = "GetDNSLabel"
ACTION_DESCRIBE_LOADBALANCERS = "DescribeLoadBalancers"
ACTION_CREATE_LOADBALANCER = "CreateLoadBalancer"
ACTION_DELETE_LOADBALANCERS = "DeleteLoadBalancers"
ACTION_ASSOCIATE_EIPS_TO_LOADBALANCER = "AssociateEipsToLoadBalancer"
ACTION_DISSOCIATE_EIPS_FROM_LOADBALANCER = "DissociateEipsFromLoadBalancer"
ACTION_UPDATE_LOADBALANCERS = "UpdateLoadBalancers"
ACTION_STOP_LOADBALANCERS = "StopLoadBalancers"
ACTION_START_LOADBALANCERS = "StartLoadBalancers"
ACTION_MODIFY_LOADBALANCER_ATTRIBUTES = "ModifyLoadBalancerAttributes"
ACTION_DESCRIBE_LOADBALANCER_LISTENERS = "DescribeLoadBalancerListeners"
ACTION_ADD_LOADBALANCER_LISTENERS = "AddLoadBalancerListeners"
ACTION_DELETE_LOADBALANCER_LISTENERS = "DeleteLoadBalancerListeners"
ACTION_MODIFY_LOADBALANCER_LISTENER_ATTRIBUTES = "ModifyLoadBalancerListenerAttributes"
ACTION_ADD_LOADBALANCER_BACKENDS = "AddLoadBalancerBackends"
ACTION_DELETE_LOADBALANCER_BACKENDS = "DeleteLoadBalancerBackends"
ACTION_MODIFY_LOADBALANCER_BACKEND_ATTRIBUTES = "ModifyLoadBalancerBackendAttributes"
ACTION_DESCRIBE_LOADBALANCER_BACKENDS = "DescribeLoadBalancerBackends"
ACTION_CREATE_LOADBALANCER_POLICY = "CreateLoadBalancerPolicy"
ACTION_MODIFY_LOADBALANCER_POLICY_ATTRIBUTES = "ModifyLoadBalancerPolicyAttributes"
ACTION_DESCRIBE_LOADBALANCER_POLICIES = "DescribeLoadBalancerPolicies"
ACTION_DELETE_LOADBALANCER_POLICIES = "DeleteLoadBalancerPolicies"
ACTION_APPLY_LOADBALANCER_POLICY = "ApplyLoadBalancerPolicy"
ACTION_DESCRIBE_LOADBALANCER_POLICY_RULES = "DescribeLoadBalancerPolicyRules"
ACTION_ADD_LOADBALANCER_POLICY_RULES = "AddLoadBalancerPolicyRules"
ACTION_MODIFY_LOADBALANCER_POLICY_RULE_ATTRIBUTES = "ModifyLoadBalancerPolicyRuleAttributes"
ACTION_DELETE_LOADBALANCER_POLICY_RULES = "DeleteLoadBalancerPolicyRules"
ACTION_CREATE_SERVER_CERTIFICATE = "CreateServerCertificate"
ACTION_DESCRIBE_SERVER_CERTIFICATES = "DescribeServerCertificates"
ACTION_MODIFY_SERVER_CERTIFICATE_ATTRIBUTES = "ModifyServerCertificateAttributes"
ACTION_DELETE_SERVER_CERTIFICATES = "DeleteServerCertificates"
ACTION_GET_MONITOR = "GetMonitor"
ACTION_GET_LOADBALANCER_MONITOR = "GetLoadBalancerMonitor"
ACTION_CREATE_SNAPSHOTS = "CreateSnapshots"
ACTION_DELETE_SNAPSHOTS = "DeleteSnapshots"
ACTION_APPLY_SNAPSHOTS = "ApplySnapshots"
ACTION_DESCRIBE_SNAPSHOTS = "DescribeSnapshots"
ACTION_MODIFY_SNAPSHOT_ATTRIBUTES = "ModifySnapshotAttributes"
ACTION_CAPTURE_INSTANCE_FROM_SNAPSHOT = "CaptureInstanceFromSnapshot"
ACTION_CREATE_VOLUME_FROM_SNAPSHOT = "CreateVolumeFromSnapshot"
ACTION_DESCRIBE_RDBS = "DescribeRDBs"
ACTION_CREATE_RDB = "CreateRDB"
ACTION_RESIZE_RDBS = "ResizeRDBs"
ACTION_START_RDBS = "StartRDBs"
ACTION_STOP_RDBS = "StopRDBs"
ACTION_DESCRIBE_MONGOS = "DescribeMongos"
ACTION_RESIZE_MONGOS = "ResizeMongos"
ACTION_START_MONGOS = "StartMongos"
ACTION_STOP_MONGOS = "StopMongos"
ACTION_DESCRIBE_CACHES = "DescribeCaches"
ACTION_CREATE_CACHE = "CreateCache"
ACTION_RESIZE_CACHES = "ResizeCaches"
ACTION_START_CACHES = "StartCaches"
ACTION_STOP_CACHES = "StopCaches"
ACTION_DESCRIBE_SPARKS = "DescribeSparks"
ACTION_START_SPARKS = "StartSparks"
ACTION_STOP_SPARKS = "StopSparks"
ACTION_ADD_SPARK_NODES = "AddSparkNodes"
ACTION_DELETE_SPARK_NODES = "DeleteSparkNodes"
ACTION_CREATE_SPARK = "CreateSpark"
ACTION_DELETE_SPARKS = "DeleteSparks"
ACTION_DESCRIBE_HADOOPS = "DescribeHadoops"
ACTION_START_HADOOPS = "StartHadoops"
ACTION_STOP_HADOOPS = "StopHadoops"
ACTION_ADD_HADOOP_NODES = "AddHadoopNodes"
ACTION_DELETE_HADOOP_NODES = "DeleteHadoopNodes"
ACTION_CREATE_HADOOP = "CreateHadoop"
ACTION_DELETE_HADOOPS = "DeleteHadoops"
ACTION_DESCRIBE_ZOOKEEPERS = "DescribeZookeepers"
ACTION_START_ZOOKEEPERS = "StartZookeepers"
ACTION_STOP_ZOOKEEPERS = "StopZookeepers"
ACTION_DESCRIBE_ELASTICSEARCHS = "DescribeElasticsearchs"
ACTION_START_ELASTICSEARCHS = "StartElasticsearchs"
ACTION_STOP_ELASTICSEARCHS = "StopElasticsearchs"
ACTION_DESCRIBE_QUEUES = "DescribeQueues"
ACTION_START_QUEUES = "StartQueues"
ACTION_STOP_QUEUES = "StopQueues"
ACTION_DESCRIBE_TAGS = "DescribeTags"
ACTION_CREATE_TAG = "CreateTag"
ACTION_DELETE_TAGS = "DeleteTags"
ACTION_MODIFY_TAG_ATTRIBUTES = "ModifyTagAttributes"
ACTION_ATTACH_TAGS = "AttachTags"
ACTION_DETACH_TAGS = "DetachTags"
ACTION_DESCRIBE_NICS = "DescribeNics"
ACTION_CREATE_NICS = "CreateNics"
ACTION_ATTACH_NICS = "AttachNics"
ACTION_DETACH_NICS = "DetachNics"
ACTION_MODIFY_NIC_ATTRIBUTES = "ModifyNicAttributes"
ACTION_DELETE_NICS = "DeleteNics"
ACTION_CREATE_S2_SERVER = "CreateS2Server"
ACTION_DESCRIBE_S2_SERVERS = "DescribeS2Servers"
ACTION_MODIFY_S2_SERVER = "ModifyS2ServerAttributes"
ACTION_RESIZE_S2_SERVERS = "ResizeS2Servers"
ACTION_DELETE_S2_SERVERS = "DeleteS2Servers"
ACTION_POWERON_S2_SERVERS = "PowerOnS2Servers"
ACTION_POWEROFF_S2_SERVERS = "PowerOffS2Servers"
ACTION_UPDATE_S2_SERVERS = "UpdateS2Servers"
ACTION_CHANGE_S2_SERVER_VXNET = "ChangeS2ServerVxnet"
ACTION_CREATE_S2_SHARED_TARGET = "CreateS2SharedTarget"
ACTION_DESCRIBE_S2_SHARED_TARGETS = "DescribeS2SharedTargets"
ACTION_DELETE_S2_SHARED_TARGETS = "DeleteS2SharedTargets"
ACTION_ENABLE_S2_SHARED_TARGETS = "EnableS2SharedTargets"
ACTION_DISABLE_S2_SHARED_TARGETS = "DisableS2SharedTargets"
ACTION_MODIFY_S2_SHARED_TARGET = "ModifyS2SharedTargetAttributes"
ACTION_ATTACH_TO_S2_SHARED_TARGET = "AttachToS2SharedTarget"
ACTION_DETACH_FROM_S2_SHARED_TARGET = "DetachFromS2SharedTarget"
ACTION_DESCRIBE_S2_DEFAULT_PARAMETERS = "DescribeS2DefaultParameters"
ACTION_CREATE_S2_GROUP = "CreateS2Group"
ACTION_DESCRIBE_S2_GROUPS = "DescribeS2Groups"
ACTION_MODIFY_S2_GROUP = "ModifyS2Group"
ACTION_DELETE_S2_GROUPS = "DeleteS2Groups"
ACTION_CREATE_S2_ACCOUNT = "CreateS2Account"
ACTION_DESCRIBE_S2_ACCOUNTS = "DescribeS2Accounts"
ACTION_MODIFY_S2_ACCOUNT = "ModifyS2Account"
ACTION_DELETE_S2_ACCOUNTS = "DeleteS2Accounts"
ACTION_ASSOCIATE_S2_ACCOUNT_GROUP = "AssociateS2AccountGroup"
ACTION_DISSOCIATE_S2_ACCOUNT_GROUP = "DissociateS2AccountGroup"
ACTION_DESCRIBE_ALARM_POLICIES = "DescribeAlarmPolicies"
ACTION_CREATE_ALARM_POLICY = "CreateAlarmPolicy"
ACTION_MODIFY_ALARM_POLICY_ATTRIBUTES = "ModifyAlarmPolicyAttributes"
ACTION_DELETE_ALARM_POLICIES = "DeleteAlarmPolicies"
ACTION_DESCRIBE_ALARM_POLICY_RULES = "DescribeAlarmPolicyRules"
ACTION_ADD_ALARM_POLICY_RULES = "AddAlarmPolicyRules"
ACTION_MODIFY_ALARM_POLICY_RULE_ATTRIBUTES = "ModifyAlarmPolicyRuleAttributes"
ACTION_DELETE_ALARM_POLICY_RULES = "DeleteAlarmPolicyRules"
ACTION_DESCRIBE_ALARM_POLICY_ACTIONS = "DescribeAlarmPolicyActions"
ACTION_ADD_ALARM_POLICY_ACTIONS = "AddAlarmPolicyActions"
ACTION_MODIFY_ALARM_POLICY_ACTION_ATTRIBUTES = "ModifyAlarmPolicyActionAttributes"
ACTION_DELETE_ALARM_POLICY_ACTIONS = "DeleteAlarmPolicyActions"
ACTION_ASSOCIATE_ALARM_POLICY = "AssociateAlarmPolicy"
ACTION_DISSOCIATE_ALARM_POLICY = "DissociateAlarmPolicy"
ACTION_APPLY_ALARM_POLICY = "ApplyAlarmPolicy"
ACTION_DESCRIBE_ALARMS = "DescribeAlarms"
ACTION_DESCRIBE_ALARM_HISTORY = "DescribeAlarmHistory"
ACTION_GET_BALANCE = "GetBalance"
ACTION_GET_LEASE_INFO = "GetLeaseInfo"
ACTION_DESCRIBE_SHARED_RESOURCE_GROUPS = "DescribeSharedResourceGroups"
ACTION_DESCRIBE_RESOURCE_GROUPS = "DescribeResourceGroups"
ACTION_CREATE_RESOURCE_GROUPS = "CreateResourceGroups"
ACTION_MODIFY_RESOURCE_GROUP_ATTRIBUTES = "ModifyResourceGroupAttributes"
ACTION_DELETE_RESOURCE_GROUPS = "DeleteResourceGroups"
ACTION_DESCRIBE_RESOURCE_GROUP_ITEMS = "DescribeResourceGroupItems"
ACTION_ADD_RESOURCE_GROUP_ITEMS = "AddResourceGroupItems"
ACTION_DELETE_RESOURCE_GROUP_ITEMS = "DeleteResourceGroupItems"
ACTION_DESCRIBE_USER_GROUPS = "DescribeUserGroups"
ACTION_CREATE_USER_GROUPS = "CreateUserGroups"
ACTION_MODIFY_USER_GROUP_ATTRIBUTES = "ModifyUserGroupAttributes"
ACTION_DELETE_USER_GROUPS = "DeleteUserGroups"
ACTION_DESCRIBE_USER_GROUP_MEMBERS = "DescribeUserGroupMembers"
ACTION_ADD_USER_GROUP_MEMBERS = "AddUserGroupMembers"
ACTION_MODIFY_USER_GROUP_MEMBER_ATTRIBUTES = "ModifyUserGroupMemberAttributes"
ACTION_DELETE_USER_GROUP_MEMBERS = "DeleteUserGroupMembers"
ACTION_DESCRIBE_GROUP_ROLES = "DescribeGroupRoles"
ACTION_CREATE_GROUP_ROLES = "CreateGroupRoles"
ACTION_MODIFY_GROUP_ROLE_ATTRIBUTES = "ModifyGroupRoleAttributes"
ACTION_DELETE_GROUP_ROLES = "DeleteGroupRoles"
ACTION_DESCRIBE_GROUP_ROLE_RULES = "DescribeGroupRoleRules"
ACTION_ADD_GROUP_ROLE_RULES = "AddGroupRoleRules"
ACTION_MODIFY_GROUP_ROLE_RULE_ATTRIBUTES = "ModifyGroupRoleRuleAttributes"
ACTION_DELETE_GROUP_ROLE_RULES = "DeleteGroupRoleRules"
ACTION_GRANT_RESOURCE_GROUPS_TO_USER_GROUPS = "GrantResourceGroupsToUserGroups"
ACTION_REVOKE_RESOURCE_GROUPS_FROM_USER_GROUPS = "RevokeResourceGroupsFromUserGroups"
ACTION_DESCRIBE_RESOURCE_USER_GROUPS = "DescribeResourceUserGroups"
ACTION_DESCRIBE_WAN_ACCESS = "DescribeWanAccesss"
ACTION_CHANGE_WAN_ACCESS_BANDWIDTH = "ChangeWanAccessBandwidth"
ACTION_UPGRADE_WAN_ACCESS = "UpgradeWanAccess"
ACTION_GET_WAN_MONITOR = "GetWanMonitor"
ACTION_GET_WAN_INFO = "GetWanInfo"
ACTION_MIGRATE_RESOURCES = "MigrateResources"
ACTION_CREATE_VPC_BORDERS = "CreateVpcBorders"
ACTION_DELETE_VPC_BORDERS = "DeleteVpcBorders"
ACTION_DESCRIBE_VPC_BORDERS = "DescribeVpcBorders"
ACTION_JOIN_BORDER = "JoinBorder"
ACTION_LEAVE_BORDER = "LeaveBorder"
ACTION_CONFIG_BORDER = "ConfigBorder"
ACTION_MODIFY_BORDER_ATTRIBUTES = "ModifyBorderAttributes"
ACTION_DESCRIBE_BORDER_VXNETS = "DescribeBorderVxnets"
ACTION_ASSOCIATE_BORDER = "AssociateBorder"
ACTION_DISSOCIATE_BORDER = "DissociateBorder"
ACTION_DESCRIBE_BORDER_STATICS = "DescribeBorderStatics"
ACTION_ADD_BORDER_STATICS = "AddBorderStatics"
ACTION_DELETE_BORDER_STATICS = "DeleteBorderStatics"
ACTION_MODIFY_BORDER_STATIC_ATTRIBUTES = "ModifyBorderStaticAttributes"
ACTION_CANCEL_BORDER_STATIC_CHANGES = "CancelBorderStaticChanges"
DIRECTION_EGRESS = 1
DIRECTION_INGRESS = 0
VXNET_TYPE_MANAGED = 1
VXNET_TYPE_UNMANAGED = 0
BALANCE_ROUNDROBIN = "roundrobin"
BALANCE_LEASTCONN = "leastconn"
HEADER_X_FORWARD_FOR = 1
HEADER_QC_LBID = 2
HEADER_QC_LBIP = 4
LB_TYPE_MAXCONN_5k = 0
LB_TYPE_MAXCONN_20k = 1
LB_TYPE_MAXCONN_40k = 2
LB_TYPE_MAXCONN_100k = 3
LB_TYPE_MAXCONN_200k = 4
LB_TYPE_MAXCONN_500k = 5
EIP_BILLING_MODE_BANDWIDTH = "bandwidth"
EIP_BILLING_MODE_TRAFFIC = "traffic"
ACTION_START_CLUSTERS = "StartClusters"
ACTION_STOP_CLUSTERS = "StopClusters"
ACTION_RESIZE_CLUSTER = "ResizeCluster"
ACTION_DESCRIBE_CLUSTERS = "DescribeClusters"
ACTION_ADD_CLUSTER_NODES = "AddClusterNodes"
ACTION_DELETE_CLUSTER_NODES = "DeleteClusterNodes"
ACTION_DELETE_CLUSTERS = "DeleteClusters"
ACTION_DEPLOY_APP_VERSION = "DeployAppVersion"
| true | true |
f73a86a2a0fca14124a88912fef6a4e530de6041 | 1,414 | py | Python | 0000 hihoOnce/172 Matrix Sum/main.py | SLAPaper/hihoCoder | 3f64d678c5dd46db36345736eb56880fb2d2c5fe | [
"MIT"
] | null | null | null | 0000 hihoOnce/172 Matrix Sum/main.py | SLAPaper/hihoCoder | 3f64d678c5dd46db36345736eb56880fb2d2c5fe | [
"MIT"
] | null | null | null | 0000 hihoOnce/172 Matrix Sum/main.py | SLAPaper/hihoCoder | 3f64d678c5dd46db36345736eb56880fb2d2c5fe | [
"MIT"
] | null | null | null | from __future__ import print_function
from ctypes import c_int
BASE = int(1e9 + 7)
class TreeMatrix:
def __init__(self, N):
self.mat = (c_int * ((N + 1) * (N + 1)))()
self.N = N
@staticmethod
def lowbit(x):
return x & (-x)
def add(self, x, y, val):
x, y = x + 1, y + 1
i = x
while i <= self.N:
j = y
while j <= self.N:
self.mat[i * (self.N + 1) + j] += val
j += self.lowbit(j)
i += self.lowbit(i)
def _sum(self, x, y):
x, y = x + 1, y + 1
ret = 0
i = x
while i > 0:
j = y
while j > 0:
ret += self.mat[i * (self.N + 1) + j]
j -= self.lowbit(j)
i -= self.lowbit(i)
return ret
def sum(self, x1, y1, x2, y2):
return (self._sum(x2, y2) - self._sum(x1 - 1, y2) -
self._sum(x2, y1 - 1) + self._sum(x1 - 1, y1 - 1)) % BASE
if __name__ == '__main__':
N, M = (int(x) for x in raw_input().split())
tmat = TreeMatrix(N)
for _ in range(M):
op, param = raw_input().split(None, 1)
if op == 'Add':
x, y, val = (int(x) for x in param.split())
tmat.add(x, y, val)
elif op == 'Sum':
x1, y1, x2, y2 = (int(x) for x in param.split())
print(tmat.sum(x1, y1, x2, y2))
| 24.37931 | 73 | 0.429986 | from __future__ import print_function
from ctypes import c_int
BASE = int(1e9 + 7)
class TreeMatrix:
def __init__(self, N):
self.mat = (c_int * ((N + 1) * (N + 1)))()
self.N = N
@staticmethod
def lowbit(x):
return x & (-x)
def add(self, x, y, val):
x, y = x + 1, y + 1
i = x
while i <= self.N:
j = y
while j <= self.N:
self.mat[i * (self.N + 1) + j] += val
j += self.lowbit(j)
i += self.lowbit(i)
def _sum(self, x, y):
x, y = x + 1, y + 1
ret = 0
i = x
while i > 0:
j = y
while j > 0:
ret += self.mat[i * (self.N + 1) + j]
j -= self.lowbit(j)
i -= self.lowbit(i)
return ret
def sum(self, x1, y1, x2, y2):
return (self._sum(x2, y2) - self._sum(x1 - 1, y2) -
self._sum(x2, y1 - 1) + self._sum(x1 - 1, y1 - 1)) % BASE
if __name__ == '__main__':
N, M = (int(x) for x in raw_input().split())
tmat = TreeMatrix(N)
for _ in range(M):
op, param = raw_input().split(None, 1)
if op == 'Add':
x, y, val = (int(x) for x in param.split())
tmat.add(x, y, val)
elif op == 'Sum':
x1, y1, x2, y2 = (int(x) for x in param.split())
print(tmat.sum(x1, y1, x2, y2))
| true | true |
f73a874ef26473f1fa68985645a9b4496e174d0c | 12,017 | py | Python | ansys/dpf/core/examples/downloads.py | pyansys/pydpf-core | 27f1042a316e4f6a523bcd893c2365825464d731 | [
"MIT"
] | 18 | 2021-10-16T10:38:29.000Z | 2022-03-29T11:26:42.000Z | ansys/dpf/core/examples/downloads.py | lynch1972/pydpf-core | 3d560f479c2904866851dc9f2b0f9c490c97d365 | [
"MIT"
] | 79 | 2021-10-11T23:18:54.000Z | 2022-03-29T14:53:14.000Z | ansys/dpf/core/examples/downloads.py | lynch1972/pydpf-core | 3d560f479c2904866851dc9f2b0f9c490c97d365 | [
"MIT"
] | 5 | 2021-11-29T18:35:37.000Z | 2022-03-16T16:49:21.000Z | """Download example datasets from https://github.com/pyansys/example-data"""
import shutil
import os
import urllib.request
EXAMPLE_REPO = "https://github.com/pyansys/example-data/raw/master/result_files/"
def delete_downloads():
"""Delete all downloaded examples to free space or update the files"""
from ansys.dpf.core import LOCAL_DOWNLOADED_EXAMPLES_PATH
shutil.rmtree(LOCAL_DOWNLOADED_EXAMPLES_PATH)
os.makedirs(LOCAL_DOWNLOADED_EXAMPLES_PATH)
def _get_file_url(directory, filename):
return EXAMPLE_REPO + "/".join([directory, filename])
def _retrieve_file(url, filename, directory):
"""Download a file from a url"""
from ansys.dpf.core import LOCAL_DOWNLOADED_EXAMPLES_PATH, path_utilities
# First check if file has already been downloaded
local_path = os.path.join(LOCAL_DOWNLOADED_EXAMPLES_PATH, directory,
os.path.basename(filename))
local_path_no_zip = local_path.replace(".zip", "")
if os.path.isfile(local_path_no_zip) or os.path.isdir(local_path_no_zip):
return path_utilities.to_server_os(local_path_no_zip.replace(
LOCAL_DOWNLOADED_EXAMPLES_PATH,
path_utilities.downloaded_example_path()))
# grab the correct url retriever
urlretrieve = urllib.request.urlretrieve
dirpath = os.path.dirname(local_path)
if not os.path.isdir(dirpath):
os.mkdir(dirpath)
# Perform download
_, resp = urlretrieve(url, local_path)
return path_utilities.to_server_os(local_path.replace(
LOCAL_DOWNLOADED_EXAMPLES_PATH,
path_utilities.downloaded_example_path()))
def _download_file(directory, filename):
url = _get_file_url(directory, filename)
local_path = _retrieve_file(url, filename, directory)
return local_path
###############################################################################
# front-facing functions
def download_transient_result() -> str:
"""Download an example transient result file and return the download path.
Examples files are downloaded to a persistent cache to avoid
re-downloading the same file twice.
Returns
-------
str
Path to the example file.
Examples
--------
Download an example result file and return the path of the file
>>> from ansys.dpf.core import examples
>>> path = examples.transient_result
>>> path
'C:/Users/user/AppData/local/temp/transient.rst'
"""
return _download_file("transient", "transient.rst")
def download_all_kinds_of_complexity() -> str:
"""Download an example static result and return the download path.
Examples files are downloaded to a persistent cache to avoid
re-downloading the same file twice.
Returns
-------
str
Path to the example file.
Examples
--------
Download an example result file and return the path of the file
>>> from ansys.dpf.core import examples
>>> path = examples.download_all_kinds_of_complexity
>>> path
'C:/Users/user/AppData/local/temp/allKindOfComplexity.rst'
"""
return _download_file("testing", "allKindOfComplexity.rst")
def download_all_kinds_of_complexity_modal() -> str:
"""Download an example result file from a static modal analysis and
return the download path.
Examples files are downloaded to a persistent cache to avoid
re-downloading the same file twice.
Returns
-------
str
Path to the example file.
Examples
--------
Download an example result file and return the path of the file
>>> from ansys.dpf.core import examples
>>> path = examples.download_all_kinds_of_complexity_modal()
>>> path
'C:/Users/user/AppData/local/temp/modal_allKindOfComplexity.rst'
"""
return _download_file("testing", "modal_allKindOfComplexity.rst")
def download_pontoon() -> str:
"""Download an example result file from a static modal analsys and
return the download path.
Examples files are downloaded to a persistent cache to avoid
re-downloading the same file twice.
Returns
-------
str
Path to the example file.
Examples
--------
Download an example result file and return the path of the file
>>> from ansys.dpf.core import examples
>>> path = examples.download_pontoon()
>>> path
'C:/Users/user/AppData/local/temp/pontoon.rst'
"""
return _download_file("docs", "pontoon.rst")
def download_multi_harmonic_result() -> str:
"""Download an example multi-harmonic result file and return the
download path.
Examples files are downloaded to a persistent cache to avoid
re-downloading the same file twice.
Returns
-------
str
Path to the example file.
Examples
--------
Download an example result file and return the path of the file
>>> from ansys.dpf.core import examples
>>> path = examples.download_multi_harmonic_result()
>>> path
'C:/Users/user/AppData/local/temp/file_harmonic_5rpms.rst'
"""
return _download_file("harmonic", "file_harmonic_5rpms.rst")
def download_multi_stage_cyclic_result() -> str:
"""Download an example multi stage result file and return the
download path.
Examples files are downloaded to a persistent cache to avoid
re-downloading the same file twice.
Returns
-------
str
Path to the example file.
Examples
--------
Download an example result file and return the path of the file
>>> from ansys.dpf.core import examples
>>> path = examples.download_multi_stage_cyclic_result()
>>> path
'C:/Users/user/AppData/local/temp/multistage.rst'
"""
return _download_file("multistage", "multistage.rst")
def download_sub_file() -> str:
"""Download an example .sub result file containing matrices and return the
download path.
Examples files are downloaded to a persistent cache to avoid
re-downloading the same file twice.
Returns
-------
str
Path to the example file.
Examples
--------
Download an example result file and return the path of the file
>>> from ansys.dpf.core import examples
>>> path = examples.download_sub_file()
>>> path
'C:\\Users\\user\\AppData\\Local\\ansys-dpf-core\\ansys-dpf-core\\examples\\sub\\cp56.sub'
"""
return _download_file("sub", "cp56.sub")
def download_msup_files_to_dict() -> dict:
"""Download all the files necessary for a msup expansion and return the
download paths into a dictionary extension->path.
Examples files are downloaded to a persistent cache to avoid
re-downloading the same file twice.
Returns
-------
dict[str:str]
Path to the example files.
Examples
--------
Download an example result file and return the path of the file
>>> from ansys.dpf.core import examples
>>> paths = examples.download_msup_files_to_dict()
>>> paths
{'rfrq': 'C:\\Users\\user\\AppData\\Local\\ansys-dpf-core\\ansys-dpf-core\\examples\\msup\\file.rfrq',
'mode': 'C:\\Users\\user\\AppData\\Local\\ansys-dpf-core\\ansys-dpf-core\\examples\\msup\\file.mode',
'rst': 'C:\\Users\\user\\AppData\\Local\\ansys-dpf-core\\ansys-dpf-core\\examples\\msup\\file.rst'} # noqa: E501
"""
return {
"rfrq": _download_file("msup", "file.rfrq"),
"mode": _download_file("msup", "file.mode"),
"rst": _download_file("msup", "file.rst"),
}
def download_distributed_files() -> dict:
"""Download distributed rst files and return the
download paths into a dictionary domain id->path.
Examples files are downloaded to a persistent cache to avoid
re-downloading the same file twice.
Returns
-------
dict[int:str]
Path to the example files.
Examples
--------
Download an example result file and return the path of the file
>>> from ansys.dpf.core import examples
>>> paths = examples.download_distributed_files()
>>> paths
{0: 'C:\\Users\\user\\AppData\\Local\\ansys-dpf-core\\ansys-dpf-core\\examples\\distributed\\file0.rst',
1: 'C:\\Users\\user\\AppData\\Local\\ansys-dpf-core\\ansys-dpf-core\\examples\\distributed\\file1.rst'} # noqa: E501
"""
return {
0: _download_file("distributed", "file0.rst"),
1: _download_file("distributed", "file1.rst"),
}
def download_fluent_files() -> dict:
"""Download the cas and dat file of a fluent analysis and return the
download paths into a dictionary extension->path.
Examples files are downloaded to a persistent cache to avoid
re-downloading the same file twice.
Returns
-------
dict[str:str]
Path to the example files.
Examples
--------
Download an example result file and return the path of the file
>>> from ansys.dpf.core import examples
>>> paths = examples.download_fluent_files()
>>> paths
{'cas': 'C:\\Users\\user\\AppData\\Local\\ansys-dpf-core\\ansys-dpf-core\\examples\\fluent\\FFF.cas.h5',
'dat': 'C:\\Users\\user\\AppData\\Local\\ansys-dpf-core\\ansys-dpf-core\\examples\\fluent\\FFF.dat.h5'} # noqa: E501
"""
return {
"cas": _download_file("fluent", "FFF.cas.h5"),
"dat": _download_file("fluent", "FFF.dat.h5"),
}
def download_extrapolation_3d_result() -> dict:
"""Download example static results of reference and integrated points
for extrapolation of 3d-element and return return the dictionary of 2 download paths.
Examples files are downloaded to a persistent cache to avoid
re-downloading the same file twice.
Returns
-------
dict
containing path to the example file of ref and path to the example
file of integrated points.
Examples
--------
Download 2 example result files and return the dictionary containing 2 files
>>> from ansys.dpf.core import examples
>>> dict = examples.download_extrapolation_ref_result
>>> dict
{
'file_ref': 'C:/Users/user/AppData/local/temp/file_ref.rst',
'file_integrated': 'C:/Users/user/AppData/local/temp/file.rst'
}
"""
dict = {
"file_ref": _download_file("extrapolate", "file_ref.rst"),
"file_integrated": _download_file("extrapolate", "file.rst"),
}
return dict
def download_extrapolation_2d_result() -> dict:
"""Download example static results of reference and integrated points
for extrapolation of 2d-element and return the dictionary of 2 download paths.
Examples files are downloaded to a persistent cache to avoid
re-downloading the same file twice.
Returns
-------
dict
Contains path to the example file of ref and path to the example
file of integrated points.
Examples
--------
Download 2 example result files and return the dictionary containing 2 files
>>> from ansys.dpf.core import examples
>>> dict = examples.download_extrapolation_ref_result
>>> dict
{
'file_ref': 'C:/Users/user/AppData/local/temp/extrapolate_2d_ref.rst',
'file_integrated': 'C:/Users/user/AppData/local/temp/extrapolate_2d.rst'
}
"""
dict = {
"file_ref": _download_file("extrapolate", "extrapolate_2d_ref.rst"),
"file_integrated": _download_file("extrapolate", "extrapolate_2d.rst"),
}
return dict
def download_hemisphere() -> str:
"""Download an example result file from a static analysis and
return the download path.
Examples files are downloaded to a persistent cache to avoid
re-downloading the same file twice.
Returns
-------
str
Path to the example file.
Examples
--------
Download an example result file and return the path of the file
>>> from ansys.dpf.core import examples
>>> path = examples.download_hemisphere()
>>> path
'C:/Users/user/AppData/local/temp/hemisphere.rst'
"""
return _download_file("hemisphere", "hemisphere.rst")
| 29.453431 | 121 | 0.669884 | import shutil
import os
import urllib.request
EXAMPLE_REPO = "https://github.com/pyansys/example-data/raw/master/result_files/"
def delete_downloads():
from ansys.dpf.core import LOCAL_DOWNLOADED_EXAMPLES_PATH
shutil.rmtree(LOCAL_DOWNLOADED_EXAMPLES_PATH)
os.makedirs(LOCAL_DOWNLOADED_EXAMPLES_PATH)
def _get_file_url(directory, filename):
return EXAMPLE_REPO + "/".join([directory, filename])
def _retrieve_file(url, filename, directory):
from ansys.dpf.core import LOCAL_DOWNLOADED_EXAMPLES_PATH, path_utilities
local_path = os.path.join(LOCAL_DOWNLOADED_EXAMPLES_PATH, directory,
os.path.basename(filename))
local_path_no_zip = local_path.replace(".zip", "")
if os.path.isfile(local_path_no_zip) or os.path.isdir(local_path_no_zip):
return path_utilities.to_server_os(local_path_no_zip.replace(
LOCAL_DOWNLOADED_EXAMPLES_PATH,
path_utilities.downloaded_example_path()))
urlretrieve = urllib.request.urlretrieve
dirpath = os.path.dirname(local_path)
if not os.path.isdir(dirpath):
os.mkdir(dirpath)
_, resp = urlretrieve(url, local_path)
return path_utilities.to_server_os(local_path.replace(
LOCAL_DOWNLOADED_EXAMPLES_PATH,
path_utilities.downloaded_example_path()))
def _download_file(directory, filename):
url = _get_file_url(directory, filename)
local_path = _retrieve_file(url, filename, directory)
return local_path
| true | true |
f73a879390416a38e44e29ae743c5ffca668ef1c | 3,865 | py | Python | sasegan/datasets/test_dataset.py | usimarit/selfattention-segan | 563a86e825f1e4067ec1fd3bed36e89e11434388 | [
"Apache-2.0"
] | 6 | 2020-12-07T14:58:36.000Z | 2022-01-07T19:58:28.000Z | sasegan/datasets/test_dataset.py | usimarit/TiramisuSE | 563a86e825f1e4067ec1fd3bed36e89e11434388 | [
"Apache-2.0"
] | 2 | 2020-12-04T09:09:13.000Z | 2021-09-26T23:46:43.000Z | sasegan/datasets/test_dataset.py | usimarit/TiramisuSE | 563a86e825f1e4067ec1fd3bed36e89e11434388 | [
"Apache-2.0"
] | 5 | 2021-01-10T14:02:13.000Z | 2021-11-15T08:36:37.000Z | # Copyright 2020 Huy Le Nguyen (@usimarit)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from tensorflow_asr.featurizers.speech_featurizers import read_raw_audio
from .train_dataset import SeganAugTrainDataset, SeganTrainDataset
from ..featurizers.speech_featurizer import SpeechFeaturizer
class SeganAugTestDataset(SeganAugTrainDataset):
def __init__(self,
speech_featurizer: SpeechFeaturizer,
clean_dir: str,
noises_config: dict):
super(SeganAugTestDataset, self).__init__(
stage="test", speech_featurizer=speech_featurizer, clean_dir=clean_dir, noises_config=noises_config)
def parse(self, clean_wav):
noisy_wav = self.noises.augment(clean_wav)
noisy_slices = self.speech_featurizer.extract(noisy_wav)
clean_slices = self.speech_featurizer.extract(clean_wav)
return clean_slices, noisy_slices
def create(self):
def _gen_data():
for clean_wav_path in self.data_paths:
clean_wav = read_raw_audio(clean_wav_path, sample_rate=self.speech_featurizer.sample_rate)
clean_slices, noisy_slices = self.parse(clean_wav)
yield clean_wav_path, clean_slices, noisy_slices
dataset = tf.data.Dataset.from_generator(
_gen_data,
output_types=(tf.string, tf.float32),
output_shapes=(
tf.TensorShape([]),
tf.TensorShape([None, *self.speech_featurizer.shape]),
tf.TensorShape([None, *self.speech_featurizer.shape])
)
)
# Prefetch to improve speed of input length
dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
return dataset
class SeganTestDataset(SeganTrainDataset):
def __init__(self,
speech_featurizer: SpeechFeaturizer,
clean_dir: str,
noisy_dir: str):
super(SeganTestDataset, self).__init__(
stage="test", speech_featurizer=speech_featurizer, clean_dir=clean_dir, noisy_dir=noisy_dir)
def parse(self, clean_wav, noisy_wav):
clean_slices = self.speech_featurizer.extract(clean_wav)
noisy_slices = self.speech_featurizer.extract(noisy_wav)
return clean_slices, noisy_slices
def create(self):
def _gen_data():
for clean_wav_path in self.data_paths:
clean_wav = read_raw_audio(clean_wav_path, sample_rate=self.speech_featurizer.sample_rate)
noisy_wav_path = clean_wav_path.replace(self.clean_dir, self.noisy_dir)
noisy_wav = read_raw_audio(noisy_wav_path, sample_rate=self.speech_featurizer.sample_rate)
clean_slices, noisy_slices = self.parse(clean_wav, noisy_wav)
yield clean_wav_path, clean_slices, noisy_slices
dataset = tf.data.Dataset.from_generator(
_gen_data,
output_types=(tf.string, tf.float32),
output_shapes=(
tf.TensorShape([]),
tf.TensorShape([None, *self.speech_featurizer.shape]),
tf.TensorShape([None, *self.speech_featurizer.shape])
)
)
# Prefetch to improve speed of input length
dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
return dataset
| 42.01087 | 112 | 0.676067 |
import tensorflow as tf
from tensorflow_asr.featurizers.speech_featurizers import read_raw_audio
from .train_dataset import SeganAugTrainDataset, SeganTrainDataset
from ..featurizers.speech_featurizer import SpeechFeaturizer
class SeganAugTestDataset(SeganAugTrainDataset):
def __init__(self,
speech_featurizer: SpeechFeaturizer,
clean_dir: str,
noises_config: dict):
super(SeganAugTestDataset, self).__init__(
stage="test", speech_featurizer=speech_featurizer, clean_dir=clean_dir, noises_config=noises_config)
def parse(self, clean_wav):
noisy_wav = self.noises.augment(clean_wav)
noisy_slices = self.speech_featurizer.extract(noisy_wav)
clean_slices = self.speech_featurizer.extract(clean_wav)
return clean_slices, noisy_slices
def create(self):
def _gen_data():
for clean_wav_path in self.data_paths:
clean_wav = read_raw_audio(clean_wav_path, sample_rate=self.speech_featurizer.sample_rate)
clean_slices, noisy_slices = self.parse(clean_wav)
yield clean_wav_path, clean_slices, noisy_slices
dataset = tf.data.Dataset.from_generator(
_gen_data,
output_types=(tf.string, tf.float32),
output_shapes=(
tf.TensorShape([]),
tf.TensorShape([None, *self.speech_featurizer.shape]),
tf.TensorShape([None, *self.speech_featurizer.shape])
)
)
dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
return dataset
class SeganTestDataset(SeganTrainDataset):
def __init__(self,
speech_featurizer: SpeechFeaturizer,
clean_dir: str,
noisy_dir: str):
super(SeganTestDataset, self).__init__(
stage="test", speech_featurizer=speech_featurizer, clean_dir=clean_dir, noisy_dir=noisy_dir)
def parse(self, clean_wav, noisy_wav):
clean_slices = self.speech_featurizer.extract(clean_wav)
noisy_slices = self.speech_featurizer.extract(noisy_wav)
return clean_slices, noisy_slices
def create(self):
def _gen_data():
for clean_wav_path in self.data_paths:
clean_wav = read_raw_audio(clean_wav_path, sample_rate=self.speech_featurizer.sample_rate)
noisy_wav_path = clean_wav_path.replace(self.clean_dir, self.noisy_dir)
noisy_wav = read_raw_audio(noisy_wav_path, sample_rate=self.speech_featurizer.sample_rate)
clean_slices, noisy_slices = self.parse(clean_wav, noisy_wav)
yield clean_wav_path, clean_slices, noisy_slices
dataset = tf.data.Dataset.from_generator(
_gen_data,
output_types=(tf.string, tf.float32),
output_shapes=(
tf.TensorShape([]),
tf.TensorShape([None, *self.speech_featurizer.shape]),
tf.TensorShape([None, *self.speech_featurizer.shape])
)
)
dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
return dataset
| true | true |
f73a8813e7dd3069c8586a565e9a2e225044ebca | 430 | py | Python | {{cookiecutter.project_slug}}/apps/cms/management/commands/initcontent.py | ukwahlula/django-server-boilerplate | 6bd4b83511ea7e3370349957cf0b6dbff4003ab1 | [
"BSD-3-Clause"
] | 2 | 2020-10-30T09:47:07.000Z | 2020-10-30T09:48:11.000Z | {{cookiecutter.project_slug}}/apps/cms/management/commands/initcontent.py | ukwahlula/django-server-boilerplate | 6bd4b83511ea7e3370349957cf0b6dbff4003ab1 | [
"BSD-3-Clause"
] | null | null | null | {{cookiecutter.project_slug}}/apps/cms/management/commands/initcontent.py | ukwahlula/django-server-boilerplate | 6bd4b83511ea7e3370349957cf0b6dbff4003ab1 | [
"BSD-3-Clause"
] | null | null | null | from django.core.management.base import BaseCommand
from apps.cms.models import Content
from apps.cms.presets import CONTENT_PRESETS
class Command(BaseCommand):
help = "Initialize cms data from hardcoded presets"
def handle(self, *args, **kwargs):
for content_type, value in CONTENT_PRESETS.items():
Content.objects.get_or_create(content_type=content_type, defaults=dict(content=value["content"]))
| 33.076923 | 109 | 0.755814 | from django.core.management.base import BaseCommand
from apps.cms.models import Content
from apps.cms.presets import CONTENT_PRESETS
class Command(BaseCommand):
help = "Initialize cms data from hardcoded presets"
def handle(self, *args, **kwargs):
for content_type, value in CONTENT_PRESETS.items():
Content.objects.get_or_create(content_type=content_type, defaults=dict(content=value["content"]))
| true | true |
f73a888a66e0c135699426bce9cda49f9d94b9ac | 1,940 | py | Python | usr/share/pyshared/ajenti/plugins/smartctl/widget.py | lupyuen/RaspberryPiImage | 664e8a74b4628d710feab5582ef59b344b9ffddd | [
"Apache-2.0"
] | 7 | 2016-03-07T02:07:21.000Z | 2022-01-21T02:22:41.000Z | usr/share/pyshared/ajenti/plugins/smartctl/widget.py | lupyuen/RaspberryPiImage | 664e8a74b4628d710feab5582ef59b344b9ffddd | [
"Apache-2.0"
] | null | null | null | usr/share/pyshared/ajenti/plugins/smartctl/widget.py | lupyuen/RaspberryPiImage | 664e8a74b4628d710feab5582ef59b344b9ffddd | [
"Apache-2.0"
] | 8 | 2016-06-14T06:01:11.000Z | 2020-04-22T09:21:44.000Z | import subprocess
import re
import os
from ajenti.api import plugin
from ajenti.api.sensors import Sensor
from ajenti.plugins.dashboard.api import ConfigurableWidget
@plugin
class SMARTSensor (Sensor):
id = 'smart'
timeout = 5
def get_variants(self):
r = []
for s in os.listdir('/dev'):
if re.match('sd.$|hd.$|scd.$|fd.$|ad.+$', s):
r.append(s)
return sorted(r)
def measure(self, path):
"""
-1 = No SMART
0 = DISK FAILING
1 = PRE-FAIL
2 = Unknown error
3 = Errors in log
4 = DISK OK
"""
if not path:
return -1
r = subprocess.call(['smartctl', '-H', '/dev/' + path])
if r & 2:
return -1
if r & 8:
return 0
if r & 16:
return 1
if r & 64:
return 3
if r == 0:
return 4
return 2
@plugin
class SMARTWidget (ConfigurableWidget):
name = 'S.M.A.R.T.'
icon = 'hdd'
def on_prepare(self):
self.sensor = Sensor.find('smart')
self.append(self.ui.inflate('smartctl:widget'))
def on_start(self):
self.find('device').text = self.config['device']
v = self.sensor.value(self.config['device'])
v = {
-1: _('No data'),
0: _('FAILING'),
1: _('PRE-FAIL'),
2: _('Unknown error'),
3: _('Errors in log'),
4: 'OK'
}[v]
self.find('value').text = v
def create_config(self):
return {'device': ''}
def on_config_start(self):
device_list = self.dialog.find('device')
lst = self.sensor.get_variants()
device_list.labels = lst
device_list.values = lst
device_list.value = self.config['device']
def on_config_save(self):
self.config['device'] = self.dialog.find('device').value
| 23.950617 | 64 | 0.510309 | import subprocess
import re
import os
from ajenti.api import plugin
from ajenti.api.sensors import Sensor
from ajenti.plugins.dashboard.api import ConfigurableWidget
@plugin
class SMARTSensor (Sensor):
id = 'smart'
timeout = 5
def get_variants(self):
r = []
for s in os.listdir('/dev'):
if re.match('sd.$|hd.$|scd.$|fd.$|ad.+$', s):
r.append(s)
return sorted(r)
def measure(self, path):
if not path:
return -1
r = subprocess.call(['smartctl', '-H', '/dev/' + path])
if r & 2:
return -1
if r & 8:
return 0
if r & 16:
return 1
if r & 64:
return 3
if r == 0:
return 4
return 2
@plugin
class SMARTWidget (ConfigurableWidget):
name = 'S.M.A.R.T.'
icon = 'hdd'
def on_prepare(self):
self.sensor = Sensor.find('smart')
self.append(self.ui.inflate('smartctl:widget'))
def on_start(self):
self.find('device').text = self.config['device']
v = self.sensor.value(self.config['device'])
v = {
-1: _('No data'),
0: _('FAILING'),
1: _('PRE-FAIL'),
2: _('Unknown error'),
3: _('Errors in log'),
4: 'OK'
}[v]
self.find('value').text = v
def create_config(self):
return {'device': ''}
def on_config_start(self):
device_list = self.dialog.find('device')
lst = self.sensor.get_variants()
device_list.labels = lst
device_list.values = lst
device_list.value = self.config['device']
def on_config_save(self):
self.config['device'] = self.dialog.find('device').value
| true | true |
f73a8893e53a67203d15a50f22759c8c791fbcb2 | 1,647 | py | Python | tests/system/action/poll/test_delete.py | OpenSlides/openslides-backend | 57f58a4ca0e5ca113ff104efa9db3e2c66e3aeab | [
"MIT"
] | 5 | 2020-01-20T13:57:15.000Z | 2021-03-27T14:14:44.000Z | tests/system/action/poll/test_delete.py | OpenSlides/openslides-backend | 57f58a4ca0e5ca113ff104efa9db3e2c66e3aeab | [
"MIT"
] | 859 | 2020-01-11T22:58:37.000Z | 2022-03-30T14:54:06.000Z | tests/system/action/poll/test_delete.py | OpenSlides/openslides-backend | 57f58a4ca0e5ca113ff104efa9db3e2c66e3aeab | [
"MIT"
] | 16 | 2020-01-04T20:28:57.000Z | 2022-02-10T12:06:54.000Z | from openslides_backend.permissions.permissions import Permissions
from tests.system.action.base import BaseActionTestCase
class PollDeleteTest(BaseActionTestCase):
def test_delete_correct(self) -> None:
self.set_models({"poll/111": {"meeting_id": 1}, "meeting/1": {}})
response = self.request("poll.delete", {"id": 111})
self.assert_status_code(response, 200)
self.assert_model_deleted("poll/111")
def test_delete_wrong_id(self) -> None:
self.set_models({"poll/112": {"meeting_id": 1}, "meeting/1": {}})
response = self.request("poll.delete", {"id": 111})
self.assert_status_code(response, 400)
self.assert_model_exists("poll/112")
def test_delete_correct_cascading(self) -> None:
self.set_models(
{
"poll/111": {
"option_ids": [42],
"meeting_id": 1,
},
"option/42": {"poll_id": 111, "meeting_id": 1},
"meeting/1": {},
}
)
response = self.request("poll.delete", {"id": 111})
self.assert_status_code(response, 200)
self.assert_model_deleted("poll/111")
self.assert_model_deleted("option/42")
def test_delete_no_permissions(self) -> None:
self.base_permission_test(
{"poll/111": {"meeting_id": 1}}, "poll.delete", {"id": 111}
)
def test_delete_permissions(self) -> None:
self.base_permission_test(
{"poll/111": {"meeting_id": 1}},
"poll.delete",
{"id": 111},
Permissions.Poll.CAN_MANAGE,
)
| 35.804348 | 73 | 0.573163 | from openslides_backend.permissions.permissions import Permissions
from tests.system.action.base import BaseActionTestCase
class PollDeleteTest(BaseActionTestCase):
def test_delete_correct(self) -> None:
self.set_models({"poll/111": {"meeting_id": 1}, "meeting/1": {}})
response = self.request("poll.delete", {"id": 111})
self.assert_status_code(response, 200)
self.assert_model_deleted("poll/111")
def test_delete_wrong_id(self) -> None:
self.set_models({"poll/112": {"meeting_id": 1}, "meeting/1": {}})
response = self.request("poll.delete", {"id": 111})
self.assert_status_code(response, 400)
self.assert_model_exists("poll/112")
def test_delete_correct_cascading(self) -> None:
self.set_models(
{
"poll/111": {
"option_ids": [42],
"meeting_id": 1,
},
"option/42": {"poll_id": 111, "meeting_id": 1},
"meeting/1": {},
}
)
response = self.request("poll.delete", {"id": 111})
self.assert_status_code(response, 200)
self.assert_model_deleted("poll/111")
self.assert_model_deleted("option/42")
def test_delete_no_permissions(self) -> None:
self.base_permission_test(
{"poll/111": {"meeting_id": 1}}, "poll.delete", {"id": 111}
)
def test_delete_permissions(self) -> None:
self.base_permission_test(
{"poll/111": {"meeting_id": 1}},
"poll.delete",
{"id": 111},
Permissions.Poll.CAN_MANAGE,
)
| true | true |
f73a8894c85a2e09c6e39f338b1f124d0bad58e0 | 781 | py | Python | test/nn/conv/test_sg_conv.py | mwussow/pytorch_geometric | 01c68f9b58c94d9efd1f6e39b9c85177aae521bb | [
"MIT"
] | 9 | 2020-03-29T08:18:29.000Z | 2022-01-05T02:36:08.000Z | test/nn/conv/test_sg_conv.py | mwussow/pytorch_geometric | 01c68f9b58c94d9efd1f6e39b9c85177aae521bb | [
"MIT"
] | null | null | null | test/nn/conv/test_sg_conv.py | mwussow/pytorch_geometric | 01c68f9b58c94d9efd1f6e39b9c85177aae521bb | [
"MIT"
] | 3 | 2020-03-25T19:36:57.000Z | 2022-03-19T07:24:51.000Z | import torch
from torch_geometric.nn import SGConv
def test_sg_conv():
in_channels, out_channels = (16, 32)
edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]])
num_nodes = edge_index.max().item() + 1
x = torch.randn((num_nodes, in_channels))
conv = SGConv(in_channels, out_channels, K=10, cached=True)
assert conv.__repr__() == 'SGConv(16, 32, K=10)'
assert conv(x, edge_index).size() == (num_nodes, out_channels)
assert conv(x, edge_index).size() == (num_nodes, out_channels)
conv = SGConv(in_channels, out_channels, K=10, cached=False)
assert conv.__repr__() == 'SGConv(16, 32, K=10)'
assert conv(x, edge_index).size() == (num_nodes, out_channels)
assert conv(x, edge_index).size() == (num_nodes, out_channels)
| 39.05 | 71 | 0.660691 | import torch
from torch_geometric.nn import SGConv
def test_sg_conv():
in_channels, out_channels = (16, 32)
edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]])
num_nodes = edge_index.max().item() + 1
x = torch.randn((num_nodes, in_channels))
conv = SGConv(in_channels, out_channels, K=10, cached=True)
assert conv.__repr__() == 'SGConv(16, 32, K=10)'
assert conv(x, edge_index).size() == (num_nodes, out_channels)
assert conv(x, edge_index).size() == (num_nodes, out_channels)
conv = SGConv(in_channels, out_channels, K=10, cached=False)
assert conv.__repr__() == 'SGConv(16, 32, K=10)'
assert conv(x, edge_index).size() == (num_nodes, out_channels)
assert conv(x, edge_index).size() == (num_nodes, out_channels)
| true | true |
f73a89bb93e76edb90c80e3d22dfd2fd98da17c8 | 7,628 | py | Python | test_yolo_2.py | Hung-Jia-Jun/yolo_keras_camera_realtime | d74ea9a95ed625337765f4fea9e6f8881ee0a9cf | [
"MIT"
] | null | null | null | test_yolo_2.py | Hung-Jia-Jun/yolo_keras_camera_realtime | d74ea9a95ed625337765f4fea9e6f8881ee0a9cf | [
"MIT"
] | null | null | null | test_yolo_2.py | Hung-Jia-Jun/yolo_keras_camera_realtime | d74ea9a95ed625337765f4fea9e6f8881ee0a9cf | [
"MIT"
] | null | null | null | #! /usr/bin/env python
"""Run a YOLO_v2 style detection model on test images."""
import argparse
import colorsys
import imghdr
import os
import random
import numpy as np
from keras import backend as K
from keras.models import load_model
from PIL import Image, ImageDraw, ImageFont
from yad2k.models.keras_yolo import yolo_eval, yolo_head
# parser = argparse.ArgumentParser(
# description='Run a YOLO_v2 style detection model on test images..')
# parser.add_argument(
# 'model_path',
# help='path to h5 model file containing body of a YOLO_v2 model',
# default = "model_data\\yolo.h5"
# )
# parser.add_argument(
# '-a',
# '--anchors_path',
# help='path to anchors file, defaults to yolo_anchors.txt',
# default='model_data/yolo_anchors.txt')
# parser.add_argument(
# '-c',
# '--classes_path',
# help='path to classes file, defaults to coco_classes.txt',
# default='model_data/coco_classes.txt')
# parser.add_argument(
# '-t',
# '--test_path',
# help='path to directory of test images, defaults to images/',
# default='images')
# parser.add_argument(
# '-o',
# '--output_path',
# help='path to output test images, defaults to images/out',
# default='images\\out')
# parser.add_argument(
# '-s',
# '--score_threshold',
# type=float,
# help='threshold for bounding box scores, default .3',
# default=.3)
# parser.add_argument(
# '-iou',
# '--iou_threshold',
# type=float,
# help='threshold for non max suppression IOU, default .5',
# default=.5)
def _main():
score_threshold = .3
iou_threshold = .5
model_path = "model_data\\yolo.h5"
# model_path = os.path.expanduser(args.model_path)
assert model_path.endswith('.h5'), 'Keras model must be a .h5 file.'
anchors_path = "model_data/yolo_anchors.txt"
# anchors_path = os.path.expanduser(args.anchors_path)
# classes_path = os.path.expanduser(args.classes_path)
classes_path = 'model_data/coco_classes.txt'
# test_path = os.path.expanduser(args.test_path)
test_path = 'images'
# output_path = os.path.expanduser(args.output_path)
output_path = "images\out"
if not os.path.exists(output_path):
print('Creating output path {}'.format(output_path))
os.mkdir(output_path)
sess = K.get_session() # TODO: Remove dependence on Tensorflow session.
with open(classes_path) as f:
class_names = f.readlines()
class_names = [c.strip() for c in class_names]
with open(anchors_path) as f:
anchors = f.readline()
anchors = [float(x) for x in anchors.split(',')]
anchors = np.array(anchors).reshape(-1, 2)
yolo_model = load_model(model_path)
# Verify model, anchors, and classes are compatible
num_classes = len(class_names)
num_anchors = len(anchors)
# TODO: Assumes dim ordering is channel last
model_output_channels = yolo_model.layers[-1].output_shape[-1]
assert model_output_channels == num_anchors * (num_classes + 5), \
'Mismatch between model and given anchor and class sizes. ' \
'Specify matching anchors and classes with --anchors_path and ' \
'--classes_path flags.'
print('{} model, anchors, and classes loaded.'.format(model_path))
# Check if model is fully convolutional, assuming channel last order.
model_image_size = yolo_model.layers[0].input_shape[1:3]
is_fixed_size = model_image_size != (None, None)
# Generate colors for drawing bounding boxes.
hsv_tuples = [(x / len(class_names), 1., 1.)
for x in range(len(class_names))]
colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
colors = list(
map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
colors))
random.seed(10101) # Fixed seed for consistent colors across runs.
random.shuffle(colors) # Shuffle colors to decorrelate adjacent classes.
random.seed(None) # Reset seed to default.
# Generate output tensor targets for filtered bounding boxes.
# TODO: Wrap these backend operations with Keras layers.
yolo_outputs = yolo_head(yolo_model.output, anchors, len(class_names))
input_image_shape = K.placeholder(shape=(2, ))
boxes, scores, classes = yolo_eval(
yolo_outputs,
input_image_shape,
score_threshold=score_threshold,
iou_threshold=iou_threshold)
for image_file in os.listdir(test_path):
# try:
# image_type = imghdr.what(os.path.join(test_path, image_file))
# if not image_type:
# continue
# except IsADirectoryError:
# continue
image = Image.open(os.path.join(test_path, image_file))
if is_fixed_size: # TODO: When resizing we can use minibatch input.
resized_image = image.resize(
tuple(reversed(model_image_size)), Image.BICUBIC)
image_data = np.array(resized_image, dtype='float32')
else:
# Due to skip connection + max pooling in YOLO_v2, inputs must have
# width and height as multiples of 32.
new_image_size = (image.width - (image.width % 32),
image.height - (image.height % 32))
resized_image = image.resize(new_image_size, Image.BICUBIC)
image_data = np.array(resized_image, dtype='float32')
print(image_data.shape)
image_data /= 255.
image_data = np.expand_dims(image_data, 0) # Add batch dimension.
out_boxes, out_scores, out_classes = sess.run(
[boxes, scores, classes],
feed_dict={
yolo_model.input: image_data,
input_image_shape: [image.size[1], image.size[0]],
K.learning_phase(): 0
})
print('Found {} boxes for {}'.format(len(out_boxes), image_file))
font = ImageFont.truetype(
font='font/FiraMono-Medium.otf',
size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32'))
thickness = (image.size[0] + image.size[1]) // 300
for i, c in reversed(list(enumerate(out_classes))):
predicted_class = class_names[c]
box = out_boxes[i]
score = out_scores[i]
label = '{} {:.2f}'.format(predicted_class, score)
draw = ImageDraw.Draw(image)
label_size = draw.textsize(label, font)
top, left, bottom, right = box
top = max(0, np.floor(top + 0.5).astype('int32'))
left = max(0, np.floor(left + 0.5).astype('int32'))
bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))
right = min(image.size[0], np.floor(right + 0.5).astype('int32'))
print(label, (left, top), (right, bottom))
if top - label_size[1] >= 0:
text_origin = np.array([left, top - label_size[1]])
else:
text_origin = np.array([left, top + 1])
# My kingdom for a good redistributable image drawing library.
for i in range(thickness):
draw.rectangle(
[left + i, top + i, right - i, bottom - i],
outline=colors[c])
draw.rectangle(
[tuple(text_origin), tuple(text_origin + label_size)],
fill=colors[c])
draw.text(text_origin, label, fill=(0, 0, 0), font=font)
del draw
image.save(os.path.join(output_path, image_file), quality=90)
sess.close()
if __name__ == '__main__':
_main()
| 37.392157 | 79 | 0.618773 |
import argparse
import colorsys
import imghdr
import os
import random
import numpy as np
from keras import backend as K
from keras.models import load_model
from PIL import Image, ImageDraw, ImageFont
from yad2k.models.keras_yolo import yolo_eval, yolo_head
def _main():
score_threshold = .3
iou_threshold = .5
model_path = "model_data\\yolo.h5"
assert model_path.endswith('.h5'), 'Keras model must be a .h5 file.'
anchors_path = "model_data/yolo_anchors.txt"
classes_path = 'model_data/coco_classes.txt'
test_path = 'images'
output_path = "images\out"
if not os.path.exists(output_path):
print('Creating output path {}'.format(output_path))
os.mkdir(output_path)
sess = K.get_session()
with open(classes_path) as f:
class_names = f.readlines()
class_names = [c.strip() for c in class_names]
with open(anchors_path) as f:
anchors = f.readline()
anchors = [float(x) for x in anchors.split(',')]
anchors = np.array(anchors).reshape(-1, 2)
yolo_model = load_model(model_path)
num_classes = len(class_names)
num_anchors = len(anchors)
model_output_channels = yolo_model.layers[-1].output_shape[-1]
assert model_output_channels == num_anchors * (num_classes + 5), \
'Mismatch between model and given anchor and class sizes. ' \
'Specify matching anchors and classes with --anchors_path and ' \
'--classes_path flags.'
print('{} model, anchors, and classes loaded.'.format(model_path))
model_image_size = yolo_model.layers[0].input_shape[1:3]
is_fixed_size = model_image_size != (None, None)
hsv_tuples = [(x / len(class_names), 1., 1.)
for x in range(len(class_names))]
colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
colors = list(
map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
colors))
random.seed(10101)
random.shuffle(colors)
random.seed(None)
yolo_outputs = yolo_head(yolo_model.output, anchors, len(class_names))
input_image_shape = K.placeholder(shape=(2, ))
boxes, scores, classes = yolo_eval(
yolo_outputs,
input_image_shape,
score_threshold=score_threshold,
iou_threshold=iou_threshold)
for image_file in os.listdir(test_path):
image = Image.open(os.path.join(test_path, image_file))
if is_fixed_size:
resized_image = image.resize(
tuple(reversed(model_image_size)), Image.BICUBIC)
image_data = np.array(resized_image, dtype='float32')
else:
new_image_size = (image.width - (image.width % 32),
image.height - (image.height % 32))
resized_image = image.resize(new_image_size, Image.BICUBIC)
image_data = np.array(resized_image, dtype='float32')
print(image_data.shape)
image_data /= 255.
image_data = np.expand_dims(image_data, 0)
out_boxes, out_scores, out_classes = sess.run(
[boxes, scores, classes],
feed_dict={
yolo_model.input: image_data,
input_image_shape: [image.size[1], image.size[0]],
K.learning_phase(): 0
})
print('Found {} boxes for {}'.format(len(out_boxes), image_file))
font = ImageFont.truetype(
font='font/FiraMono-Medium.otf',
size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32'))
thickness = (image.size[0] + image.size[1]) // 300
for i, c in reversed(list(enumerate(out_classes))):
predicted_class = class_names[c]
box = out_boxes[i]
score = out_scores[i]
label = '{} {:.2f}'.format(predicted_class, score)
draw = ImageDraw.Draw(image)
label_size = draw.textsize(label, font)
top, left, bottom, right = box
top = max(0, np.floor(top + 0.5).astype('int32'))
left = max(0, np.floor(left + 0.5).astype('int32'))
bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))
right = min(image.size[0], np.floor(right + 0.5).astype('int32'))
print(label, (left, top), (right, bottom))
if top - label_size[1] >= 0:
text_origin = np.array([left, top - label_size[1]])
else:
text_origin = np.array([left, top + 1])
for i in range(thickness):
draw.rectangle(
[left + i, top + i, right - i, bottom - i],
outline=colors[c])
draw.rectangle(
[tuple(text_origin), tuple(text_origin + label_size)],
fill=colors[c])
draw.text(text_origin, label, fill=(0, 0, 0), font=font)
del draw
image.save(os.path.join(output_path, image_file), quality=90)
sess.close()
if __name__ == '__main__':
_main()
| true | true |
f73a8abba459847c58bfe680ee22e895b1bcb0b4 | 921 | py | Python | libs/statsd/set.py | rbellary-vi/netuitive-statsd | bd2c1fd6a05476d432e8a584708d2bcb09350553 | [
"Apache-2.0"
] | null | null | null | libs/statsd/set.py | rbellary-vi/netuitive-statsd | bd2c1fd6a05476d432e8a584708d2bcb09350553 | [
"Apache-2.0"
] | 32 | 2016-05-09T13:05:13.000Z | 2021-02-09T12:44:47.000Z | libs/statsd/set.py | rbellary-vi/netuitive-statsd | bd2c1fd6a05476d432e8a584708d2bcb09350553 | [
"Apache-2.0"
] | 2 | 2018-10-19T03:34:17.000Z | 2021-02-08T11:24:04.000Z | import logging
from .util import get_timestamp
logger = logging.getLogger(__name__)
class Set(object):
def __init__(self, name, sparseDataStrategy='None', unit='', tags=[]):
self.name = name
self.sparseDataStrategy = sparseDataStrategy
self.unit = unit
self.tags = tags
self.values = []
self.timestamp = get_timestamp()
self.metricType = 'GAUGE'
self.orgtype = ['SET']
def add_value(self, value, ts, sign=None):
timestamp = get_timestamp()
if value not in self.values:
self.values.append(value)
self.timestamp = timestamp
def get_values(self, timestamp):
value = float(len(self.values))
ret = {
self.name: {
'timestamp': timestamp,
'value': value
}
}
return(ret)
def clear(self):
self.values = []
| 21.928571 | 74 | 0.559175 | import logging
from .util import get_timestamp
logger = logging.getLogger(__name__)
class Set(object):
def __init__(self, name, sparseDataStrategy='None', unit='', tags=[]):
self.name = name
self.sparseDataStrategy = sparseDataStrategy
self.unit = unit
self.tags = tags
self.values = []
self.timestamp = get_timestamp()
self.metricType = 'GAUGE'
self.orgtype = ['SET']
def add_value(self, value, ts, sign=None):
timestamp = get_timestamp()
if value not in self.values:
self.values.append(value)
self.timestamp = timestamp
def get_values(self, timestamp):
value = float(len(self.values))
ret = {
self.name: {
'timestamp': timestamp,
'value': value
}
}
return(ret)
def clear(self):
self.values = []
| true | true |
f73a8b085a753801ceb7deb600d9c5de355acfa6 | 5,961 | py | Python | gym_kuka_mujoco/controllers/impedance_controller_v2.py | leonmkim/gym-kuka-mujoco | ed45ae74d10e69f4e51439de2d1d0c0811623b6b | [
"MIT"
] | null | null | null | gym_kuka_mujoco/controllers/impedance_controller_v2.py | leonmkim/gym-kuka-mujoco | ed45ae74d10e69f4e51439de2d1d0c0811623b6b | [
"MIT"
] | null | null | null | gym_kuka_mujoco/controllers/impedance_controller_v2.py | leonmkim/gym-kuka-mujoco | ed45ae74d10e69f4e51439de2d1d0c0811623b6b | [
"MIT"
] | null | null | null | import os
import numpy as np
from gym import spaces
import mujoco_py
from gym_kuka_mujoco.envs.assets import kuka_asset_dir
from gym_kuka_mujoco.utils.quaternion import identity_quat, subQuat, quatAdd, mat2Quat
from gym_kuka_mujoco.utils.kinematics import forwardKinSite, forwardKinJacobianSite
from .base_controller import BaseController
from . import register_controller
from gym_kuka_mujoco.utils.mujoco_utils import get_qpos_indices, get_qvel_indices, get_actuator_indices, get_joint_indices
class ImpedanceControllerV2(BaseController):
'''
An inverse dynamics controller that used PD gains to compute a desired acceleration.
'''
def __init__(self,
sim,
pos_scale=1.0,
rot_scale=0.3,
pos_limit=1.0,
rot_limit=1.0,
model_path='full_kuka_no_collision_no_gravity.xml',
site_name='ee_site',
stiffness=None,
damping='auto',
null_space_damping=1.0,
controlled_joints=None,
in_ee_frame=False):
super(ImpedanceControllerV2, self).__init__(sim)
# Create a model for control
print('Controller model imported from: {}'.format(model_path))
model_path = os.path.join(kuka_asset_dir(), model_path)
self.model = mujoco_py.load_model_from_path(model_path)
self.in_ee_frame = in_ee_frame
# Construct the action space.
high_pos = pos_limit*np.ones(3)
low_pos = -high_pos
high_rot = rot_limit*np.ones(3)
low_rot = -high_rot
high = np.concatenate((high_pos, high_rot))
low = np.concatenate((low_pos, low_rot))
self.action_space = spaces.Box(low, high, dtype=np.float32)
# Controller parameters.
self.scale = np.ones(6)
self.scale[:3] *= pos_scale
self.scale[3:6] *= rot_scale
self.site_name = site_name
self.pos_set = np.zeros(3)
self.quat_set = identity_quat.copy()
if stiffness is None:
self.stiffness = np.array([1.0, 1.0, 1.0, 0.3, 0.3, 0.3])
else:
self.stiffness = np.ones(6)*stiffness
if damping=='auto':
self.damping = 2*np.sqrt(self.stiffness)
else:
self.damping = np.ones(6)*damping
self.null_space_damping = null_space_damping
# Get the position, velocity, and actuator indices for the model.
if controlled_joints is not None:
self.sim_qpos_idx = get_qpos_indices(sim.model, controlled_joints)
self.sim_qvel_idx = get_qvel_indices(sim.model, controlled_joints)
self.sim_actuators_idx = get_actuator_indices(sim.model, controlled_joints)
self.sim_joint_idx = get_joint_indices(sim.model, controlled_joints)
self.self_qpos_idx = get_qpos_indices(self.model, controlled_joints)
self.self_qvel_idx = get_qvel_indices(self.model, controlled_joints)
self.self_actuators_idx = get_actuator_indices(self.model, controlled_joints)
else:
assert self.model.nv == self.model.nu, "if the number of degrees of freedom is different than the number of actuators you must specify the controlled_joints"
self.sim_qpos_idx = range(self.model.nq)
self.sim_qvel_idx = range(self.model.nv)
self.sim_actuators_idx = range(self.model.nu)
self.sim_joint_idx = range(self.model.nu)
self.self_qpos_idx = range(self.model.nq)
self.self_qvel_idx = range(self.model.nv)
self.self_actuators_idx = range(self.model.nu)
def set_action(self, action):
'''
Set the setpoint.
'''
action = action * self.scale
dx = action[0:3].astype(np.float64)
dr = action[3:6].astype(np.float64)
# print('dx is: {} and dr is: {}'.format(dx, dr) )
pos, mat = forwardKinSite(self.sim, self.site_name, recompute=False)
quat = mat2Quat(mat)
if self.in_ee_frame:
dx = mat.dot(dx)
self.pos_set = pos + dx
self.quat_set = quatAdd(quat, dr)
# print('pos setpoint updated: {}'.format(self.pos_set))
def get_torque(self):
'''
Update the impedance control setpoint and compute the torque.
'''
# Compute the pose difference.
pos, mat = forwardKinSite(self.sim, self.site_name, recompute=False)
quat = mat2Quat(mat)
dx = self.pos_set - pos
dr = subQuat(self.quat_set, quat) # Original
dframe = np.concatenate((dx,dr))
# Compute generalized forces from a virtual external force.
jpos, jrot = forwardKinJacobianSite(self.sim, self.site_name, recompute=False)
J = np.vstack((jpos[:,self.sim_qvel_idx], jrot[:,self.sim_qvel_idx]))
cartesian_acc_des = self.stiffness*dframe - self.damping*J.dot(self.sim.data.qvel[self.sim_qvel_idx])
impedance_acc_des = J.T.dot(np.linalg.solve(J.dot(J.T) + 1e-6*np.eye(6), cartesian_acc_des))
# Add damping in the null space of the the Jacobian
projection_matrix = J.T.dot(np.linalg.solve(J.dot(J.T), J))
projection_matrix = np.eye(projection_matrix.shape[0]) - projection_matrix
null_space_vel = projection_matrix.dot(self.sim.data.qvel[self.sim_qvel_idx])
impedance_acc_des += -self.null_space_damping*null_space_vel # null space damping
# Cancel other dynamics and add virtual damping using inverse dynamics.
acc_des = np.zeros(self.sim.model.nv)
acc_des[self.sim_qvel_idx] = impedance_acc_des
self.sim.data.qacc[:] = acc_des
mujoco_py.functions.mj_inverse(self.model, self.sim.data)
id_torque = self.sim.data.qfrc_inverse[self.sim_actuators_idx].copy()
return id_torque
register_controller(ImpedanceControllerV2, "ImpedanceControllerV2") | 39.476821 | 169 | 0.648717 | import os
import numpy as np
from gym import spaces
import mujoco_py
from gym_kuka_mujoco.envs.assets import kuka_asset_dir
from gym_kuka_mujoco.utils.quaternion import identity_quat, subQuat, quatAdd, mat2Quat
from gym_kuka_mujoco.utils.kinematics import forwardKinSite, forwardKinJacobianSite
from .base_controller import BaseController
from . import register_controller
from gym_kuka_mujoco.utils.mujoco_utils import get_qpos_indices, get_qvel_indices, get_actuator_indices, get_joint_indices
class ImpedanceControllerV2(BaseController):
def __init__(self,
sim,
pos_scale=1.0,
rot_scale=0.3,
pos_limit=1.0,
rot_limit=1.0,
model_path='full_kuka_no_collision_no_gravity.xml',
site_name='ee_site',
stiffness=None,
damping='auto',
null_space_damping=1.0,
controlled_joints=None,
in_ee_frame=False):
super(ImpedanceControllerV2, self).__init__(sim)
print('Controller model imported from: {}'.format(model_path))
model_path = os.path.join(kuka_asset_dir(), model_path)
self.model = mujoco_py.load_model_from_path(model_path)
self.in_ee_frame = in_ee_frame
high_pos = pos_limit*np.ones(3)
low_pos = -high_pos
high_rot = rot_limit*np.ones(3)
low_rot = -high_rot
high = np.concatenate((high_pos, high_rot))
low = np.concatenate((low_pos, low_rot))
self.action_space = spaces.Box(low, high, dtype=np.float32)
self.scale = np.ones(6)
self.scale[:3] *= pos_scale
self.scale[3:6] *= rot_scale
self.site_name = site_name
self.pos_set = np.zeros(3)
self.quat_set = identity_quat.copy()
if stiffness is None:
self.stiffness = np.array([1.0, 1.0, 1.0, 0.3, 0.3, 0.3])
else:
self.stiffness = np.ones(6)*stiffness
if damping=='auto':
self.damping = 2*np.sqrt(self.stiffness)
else:
self.damping = np.ones(6)*damping
self.null_space_damping = null_space_damping
if controlled_joints is not None:
self.sim_qpos_idx = get_qpos_indices(sim.model, controlled_joints)
self.sim_qvel_idx = get_qvel_indices(sim.model, controlled_joints)
self.sim_actuators_idx = get_actuator_indices(sim.model, controlled_joints)
self.sim_joint_idx = get_joint_indices(sim.model, controlled_joints)
self.self_qpos_idx = get_qpos_indices(self.model, controlled_joints)
self.self_qvel_idx = get_qvel_indices(self.model, controlled_joints)
self.self_actuators_idx = get_actuator_indices(self.model, controlled_joints)
else:
assert self.model.nv == self.model.nu, "if the number of degrees of freedom is different than the number of actuators you must specify the controlled_joints"
self.sim_qpos_idx = range(self.model.nq)
self.sim_qvel_idx = range(self.model.nv)
self.sim_actuators_idx = range(self.model.nu)
self.sim_joint_idx = range(self.model.nu)
self.self_qpos_idx = range(self.model.nq)
self.self_qvel_idx = range(self.model.nv)
self.self_actuators_idx = range(self.model.nu)
def set_action(self, action):
action = action * self.scale
dx = action[0:3].astype(np.float64)
dr = action[3:6].astype(np.float64)
pos, mat = forwardKinSite(self.sim, self.site_name, recompute=False)
quat = mat2Quat(mat)
if self.in_ee_frame:
dx = mat.dot(dx)
self.pos_set = pos + dx
self.quat_set = quatAdd(quat, dr)
def get_torque(self):
pos, mat = forwardKinSite(self.sim, self.site_name, recompute=False)
quat = mat2Quat(mat)
dx = self.pos_set - pos
dr = subQuat(self.quat_set, quat)
dframe = np.concatenate((dx,dr))
jpos, jrot = forwardKinJacobianSite(self.sim, self.site_name, recompute=False)
J = np.vstack((jpos[:,self.sim_qvel_idx], jrot[:,self.sim_qvel_idx]))
cartesian_acc_des = self.stiffness*dframe - self.damping*J.dot(self.sim.data.qvel[self.sim_qvel_idx])
impedance_acc_des = J.T.dot(np.linalg.solve(J.dot(J.T) + 1e-6*np.eye(6), cartesian_acc_des))
projection_matrix = J.T.dot(np.linalg.solve(J.dot(J.T), J))
projection_matrix = np.eye(projection_matrix.shape[0]) - projection_matrix
null_space_vel = projection_matrix.dot(self.sim.data.qvel[self.sim_qvel_idx])
impedance_acc_des += -self.null_space_damping*null_space_vel
acc_des = np.zeros(self.sim.model.nv)
acc_des[self.sim_qvel_idx] = impedance_acc_des
self.sim.data.qacc[:] = acc_des
mujoco_py.functions.mj_inverse(self.model, self.sim.data)
id_torque = self.sim.data.qfrc_inverse[self.sim_actuators_idx].copy()
return id_torque
register_controller(ImpedanceControllerV2, "ImpedanceControllerV2") | true | true |
f73a8d563c65119522e32835547199d5dccce95c | 1,089 | py | Python | dagger/pipeline/ios/athena_io.py | jorgetagle/dagger | dafcfb9df904e512f050aefdacf6581c571bac23 | [
"MIT"
] | 5 | 2020-09-09T11:44:49.000Z | 2021-12-31T14:07:00.000Z | dagger/pipeline/ios/athena_io.py | jorgetagle/dagger | dafcfb9df904e512f050aefdacf6581c571bac23 | [
"MIT"
] | null | null | null | dagger/pipeline/ios/athena_io.py | jorgetagle/dagger | dafcfb9df904e512f050aefdacf6581c571bac23 | [
"MIT"
] | 3 | 2021-08-31T10:14:42.000Z | 2022-02-28T17:03:39.000Z | from dagger.pipeline.io import IO
from dagger.utilities.config_validator import Attribute
class AthenaIO(IO):
ref_name = "athena"
@classmethod
def init_attributes(cls, orig_cls):
cls.add_config_attributes(
[
Attribute(
attribute_name="schema", comment="Leave it empty for system tables"
),
Attribute(attribute_name="table"),
]
)
def __init__(self, io_config, config_location):
super().__init__(io_config, config_location)
self._schema = self.parse_attribute("schema")
self._table = self.parse_attribute("table")
def alias(self):
return "athena://{}/{}".format(self._schema, self._table)
@property
def rendered_name(self):
return "{}.{}".format(self._schema, self._table)
@property
def airflow_name(self):
return "athena-{}-{}".format(self._schema, self._table)
@property
def schema(self):
return self._schema
@property
def table(self):
return self._table
| 25.325581 | 87 | 0.606061 | from dagger.pipeline.io import IO
from dagger.utilities.config_validator import Attribute
class AthenaIO(IO):
ref_name = "athena"
@classmethod
def init_attributes(cls, orig_cls):
cls.add_config_attributes(
[
Attribute(
attribute_name="schema", comment="Leave it empty for system tables"
),
Attribute(attribute_name="table"),
]
)
def __init__(self, io_config, config_location):
super().__init__(io_config, config_location)
self._schema = self.parse_attribute("schema")
self._table = self.parse_attribute("table")
def alias(self):
return "athena://{}/{}".format(self._schema, self._table)
@property
def rendered_name(self):
return "{}.{}".format(self._schema, self._table)
@property
def airflow_name(self):
return "athena-{}-{}".format(self._schema, self._table)
@property
def schema(self):
return self._schema
@property
def table(self):
return self._table
| true | true |
f73a8df692d91ed8b8518678673f255749f13bc4 | 10,221 | py | Python | shop/models/address.py | triggerfast/django-shop | 57f97291dbb389d833fc3e99da54cb888d3b006e | [
"BSD-3-Clause"
] | 1 | 2020-04-20T05:24:24.000Z | 2020-04-20T05:24:24.000Z | shop/models/address.py | triggerfast/django-shop | 57f97291dbb389d833fc3e99da54cb888d3b006e | [
"BSD-3-Clause"
] | null | null | null | shop/models/address.py | triggerfast/django-shop | 57f97291dbb389d833fc3e99da54cb888d3b006e | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
"""
Holds all the information relevant to the client (addresses for instance)
"""
from six import with_metaclass
from django.db import models
from django.template.loader import select_template
from django.utils.translation import ugettext_lazy as _
from shop import deferred
from shop.conf import app_settings
class AddressManager(models.Manager):
def get_max_priority(self, customer):
aggr = self.get_queryset().filter(customer=customer).aggregate(models.Max('priority'))
priority = aggr['priority__max'] or 0
return priority
def get_fallback(self, customer):
"""
Return a fallback address, whenever the customer has not declared one.
"""
return self.get_queryset().filter(customer=customer).order_by('priority').last()
class BaseAddress(models.Model):
customer = deferred.ForeignKey(
'BaseCustomer',
on_delete=models.CASCADE,
)
priority = models.SmallIntegerField(
default=0,
db_index=True,
help_text=_("Priority for using this address"),
)
class Meta:
abstract = True
objects = AddressManager()
def as_text(self):
"""
Return the address as plain text to be used for printing, etc.
"""
template_names = [
'{}/{}-address.txt'.format(app_settings.APP_LABEL, self.address_type),
'{}/address.txt'.format(app_settings.APP_LABEL),
'shop/address.txt',
]
template = select_template(template_names)
return template.render({'address': self})
class BaseShippingAddress(with_metaclass(deferred.ForeignKeyBuilder, BaseAddress)):
address_type = 'shipping'
class Meta:
abstract = True
ShippingAddressModel = deferred.MaterializedModel(BaseShippingAddress)
class BaseBillingAddress(with_metaclass(deferred.ForeignKeyBuilder, BaseAddress)):
address_type = 'billing'
class Meta:
abstract = True
BillingAddressModel = deferred.MaterializedModel(BaseBillingAddress)
ISO_3166_CODES = [
('AF', _("Afghanistan")),
('AX', _("Aland Islands")),
('AL', _("Albania")),
('DZ', _("Algeria")),
('AS', _("American Samoa")),
('AD', _("Andorra")),
('AO', _("Angola")),
('AI', _("Anguilla")),
('AQ', _("Antarctica")),
('AG', _("Antigua And Barbuda")),
('AR', _("Argentina")),
('AM', _("Armenia")),
('AW', _("Aruba")),
('AU', _("Australia")),
('AT', _("Austria")),
('AZ', _("Azerbaijan")),
('BS', _("Bahamas")),
('BH', _("Bahrain")),
('BD', _("Bangladesh")),
('BB', _("Barbados")),
('BY', _("Belarus")),
('BE', _("Belgium")),
('BZ', _("Belize")),
('BJ', _("Benin")),
('BM', _("Bermuda")),
('BT', _("Bhutan")),
('BO', _("Bolivia, Plurinational State Of")),
('BQ', _("Bonaire, Saint Eustatius And Saba")),
('BA', _("Bosnia And Herzegovina")),
('BW', _("Botswana")),
('BV', _("Bouvet Island")),
('BR', _("Brazil")),
('IO', _("British Indian Ocean Territory")),
('BN', _("Brunei Darussalam")),
('BG', _("Bulgaria")),
('BF', _("Burkina Faso")),
('BI', _("Burundi")),
('KH', _("Cambodia")),
('CM', _("Cameroon")),
('CA', _("Canada")),
('CV', _("Cape Verde")),
('KY', _("Cayman Islands")),
('CF', _("Central African Republic")),
('TD', _("Chad")),
('CL', _("Chile")),
('CN', _("China")),
('CX', _("Christmas Island")),
('CC', _("Cocos (Keeling) Islands")),
('CO', _("Colombia")),
('KM', _("Comoros")),
('CG', _("Congo")),
('CD', _("Congo, The Democratic Republic Of The")),
('CK', _("Cook Islands")),
('CR', _("Costa Rica")),
('HR', _("Croatia")),
('CU', _("Cuba")),
('CW', _("Curacao")),
('CY', _("Cyprus")),
('CZ', _("Czech Republic")),
('DK', _("Denmark")),
('DJ', _("Djibouti")),
('DM', _("Dominica")),
('DO', _("Dominican Republic")),
('EC', _("Ecuador")),
('EG', _("Egypt")),
('SV', _("El Salvador")),
('GQ', _("Equatorial Guinea")),
('ER', _("Eritrea")),
('EE', _("Estonia")),
('ET', _("Ethiopia")),
('FK', _("Falkland Islands (Malvinas)")),
('FO', _("Faroe Islands")),
('FJ', _("Fiji")),
('FI', _("Finland")),
('FR', _("France")),
('GF', _("French Guiana")),
('PF', _("French Polynesia")),
('TF', _("French Southern Territories")),
('GA', _("Gabon")),
('GM', _("Gambia")),
('DE', _("Germany")),
('GH', _("Ghana")),
('GI', _("Gibraltar")),
('GR', _("Greece")),
('GL', _("Greenland")),
('GD', _("Grenada")),
('GP', _("Guadeloupe")),
('GU', _("Guam")),
('GT', _("Guatemala")),
('GG', _("Guernsey")),
('GN', _("Guinea")),
('GW', _("Guinea-Bissau")),
('GY', _("Guyana")),
('HT', _("Haiti")),
('HM', _("Heard Island and McDonald Islands")),
('VA', _("Holy See (Vatican City State)")),
('HN', _("Honduras")),
('HK', _("Hong Kong")),
('HU', _("Hungary")),
('IS', _("Iceland")),
('IN', _("India")),
('ID', _("Indonesia")),
('IR', _("Iran, Islamic Republic Of")),
('IQ', _("Iraq")),
('IE', _("Ireland")),
('IL', _("Israel")),
('IT', _("Italy")),
('CI', _("Ivory Coast")),
('JM', _("Jamaica")),
('JP', _("Japan")),
('JE', _("Jersey")),
('JO', _("Jordan")),
('KZ', _("Kazakhstan")),
('KE', _("Kenya")),
('KI', _("Kiribati")),
('KP', _("Korea, Democratic People's Republic Of")),
('KR', _("Korea, Republic Of")),
('KS', _("Kosovo")),
('KW', _("Kuwait")),
('KG', _("Kyrgyzstan")),
('LA', _("Lao People's Democratic Republic")),
('LV', _("Latvia")),
('LB', _("Lebanon")),
('LS', _("Lesotho")),
('LR', _("Liberia")),
('LY', _("Libyan Arab Jamahiriya")),
('LI', _("Liechtenstein")),
('LT', _("Lithuania")),
('LU', _("Luxembourg")),
('MO', _("Macao")),
('MK', _("Macedonia")),
('MG', _("Madagascar")),
('MW', _("Malawi")),
('MY', _("Malaysia")),
('MV', _("Maldives")),
('ML', _("Mali")),
('ML', _("Malta")),
('MH', _("Marshall Islands")),
('MQ', _("Martinique")),
('MR', _("Mauritania")),
('MU', _("Mauritius")),
('YT', _("Mayotte")),
('MX', _("Mexico")),
('FM', _("Micronesia")),
('MD', _("Moldova")),
('MC', _("Monaco")),
('MN', _("Mongolia")),
('ME', _("Montenegro")),
('MS', _("Montserrat")),
('MA', _("Morocco")),
('MZ', _("Mozambique")),
('MM', _("Myanmar")),
('NA', _("Namibia")),
('NR', _("Nauru")),
('NP', _("Nepal")),
('NL', _("Netherlands")),
('AN', _("Netherlands Antilles")),
('NC', _("New Caledonia")),
('NZ', _("New Zealand")),
('NI', _("Nicaragua")),
('NE', _("Niger")),
('NG', _("Nigeria")),
('NU', _("Niue")),
('NF', _("Norfolk Island")),
('MP', _("Northern Mariana Islands")),
('NO', _("Norway")),
('OM', _("Oman")),
('PK', _("Pakistan")),
('PW', _("Palau")),
('PS', _("Palestinian Territory, Occupied")),
('PA', _("Panama")),
('PG', _("Papua New Guinea")),
('PY', _("Paraguay")),
('PE', _("Peru")),
('PH', _("Philippines")),
('PN', _("Pitcairn")),
('PL', _("Poland")),
('PT', _("Portugal")),
('PR', _("Puerto Rico")),
('QA', _("Qatar")),
('RE', _("Reunion")),
('RO', _("Romania")),
('RU', _("Russian Federation")),
('RW', _("Rwanda")),
('BL', _("Saint Barthelemy")),
('SH', _("Saint Helena, Ascension & Tristan Da Cunha")),
('KN', _("Saint Kitts and Nevis")),
('LC', _("Saint Lucia")),
('MF', _("Saint Martin (French Part)")),
('PM', _("Saint Pierre and Miquelon")),
('VC', _("Saint Vincent And The Grenadines")),
('WS', _("Samoa")),
('SM', _("San Marino")),
('ST', _("Sao Tome And Principe")),
('SA', _("Saudi Arabia")),
('SN', _("Senegal")),
('RS', _("Serbia")),
('SC', _("Seychelles")),
('SL', _("Sierra Leone")),
('SG', _("Singapore")),
('SX', _("Sint Maarten (Dutch Part)")),
('SK', _("Slovakia")),
('SI', _("Slovenia")),
('SB', _("Solomon Islands")),
('SO', _("Somalia")),
('ZA', _("South Africa")),
('GS', _("South Georgia And The South Sandwich Islands")),
('ES', _("Spain")),
('LK', _("Sri Lanka")),
('SD', _("Sudan")),
('SR', _("Suriname")),
('SJ', _("Svalbard And Jan Mayen")),
('SZ', _("Swaziland")),
('SE', _("Sweden")),
('CH', _("Switzerland")),
('SY', _("Syrian Arab Republic")),
('TW', _("Taiwan")),
('TJ', _("Tajikistan")),
('TZ', _("Tanzania")),
('TH', _("Thailand")),
('TL', _("Timor-Leste")),
('TG', _("Togo")),
('TK', _("Tokelau")),
('TO', _("Tonga")),
('TT', _("Trinidad and Tobago")),
('TN', _("Tunisia")),
('TR', _("Turkey")),
('TM', _("Turkmenistan")),
('TC', _("Turks And Caicos Islands")),
('TV', _("Tuvalu")),
('UG', _("Uganda")),
('UA', _("Ukraine")),
('AE', _("United Arab Emirates")),
('GB', _("United Kingdom")),
('US', _("United States")),
('UM', _("United States Minor Outlying Islands")),
('UY', _("Uruguay")),
('UZ', _("Uzbekistan")),
('VU', _("Vanuatu")),
('VE', _("Venezuela, Bolivarian Republic Of")),
('VN', _("Viet Nam")),
('VG', _("Virgin Islands, British")),
('VI', _("Virgin Islands, U.S.")),
('WF', _("Wallis and Futuna")),
('EH', _("Western Sahara")),
('YE', _("Yemen")),
('ZM', _("Zambia")),
('ZW', _("Zimbabwe")),
]
class CountryField(models.CharField):
"""
This creates a simple input field to choose a country.
"""
def __init__(self, *args, **kwargs):
defaults = {
'max_length': 3,
'choices': ISO_3166_CODES,
}
defaults.update(kwargs)
super(CountryField, self).__init__(*args, **defaults)
def deconstruct(self):
name, path, args, kwargs = super(CountryField, self).deconstruct()
if kwargs['max_length'] == 3:
kwargs.pop('max_length')
if kwargs['choices'] == ISO_3166_CODES:
kwargs.pop('choices')
return name, path, args, kwargs
| 29.540462 | 94 | 0.503571 |
from __future__ import unicode_literals
from six import with_metaclass
from django.db import models
from django.template.loader import select_template
from django.utils.translation import ugettext_lazy as _
from shop import deferred
from shop.conf import app_settings
class AddressManager(models.Manager):
def get_max_priority(self, customer):
aggr = self.get_queryset().filter(customer=customer).aggregate(models.Max('priority'))
priority = aggr['priority__max'] or 0
return priority
def get_fallback(self, customer):
return self.get_queryset().filter(customer=customer).order_by('priority').last()
class BaseAddress(models.Model):
customer = deferred.ForeignKey(
'BaseCustomer',
on_delete=models.CASCADE,
)
priority = models.SmallIntegerField(
default=0,
db_index=True,
help_text=_("Priority for using this address"),
)
class Meta:
abstract = True
objects = AddressManager()
def as_text(self):
template_names = [
'{}/{}-address.txt'.format(app_settings.APP_LABEL, self.address_type),
'{}/address.txt'.format(app_settings.APP_LABEL),
'shop/address.txt',
]
template = select_template(template_names)
return template.render({'address': self})
class BaseShippingAddress(with_metaclass(deferred.ForeignKeyBuilder, BaseAddress)):
address_type = 'shipping'
class Meta:
abstract = True
ShippingAddressModel = deferred.MaterializedModel(BaseShippingAddress)
class BaseBillingAddress(with_metaclass(deferred.ForeignKeyBuilder, BaseAddress)):
address_type = 'billing'
class Meta:
abstract = True
BillingAddressModel = deferred.MaterializedModel(BaseBillingAddress)
ISO_3166_CODES = [
('AF', _("Afghanistan")),
('AX', _("Aland Islands")),
('AL', _("Albania")),
('DZ', _("Algeria")),
('AS', _("American Samoa")),
('AD', _("Andorra")),
('AO', _("Angola")),
('AI', _("Anguilla")),
('AQ', _("Antarctica")),
('AG', _("Antigua And Barbuda")),
('AR', _("Argentina")),
('AM', _("Armenia")),
('AW', _("Aruba")),
('AU', _("Australia")),
('AT', _("Austria")),
('AZ', _("Azerbaijan")),
('BS', _("Bahamas")),
('BH', _("Bahrain")),
('BD', _("Bangladesh")),
('BB', _("Barbados")),
('BY', _("Belarus")),
('BE', _("Belgium")),
('BZ', _("Belize")),
('BJ', _("Benin")),
('BM', _("Bermuda")),
('BT', _("Bhutan")),
('BO', _("Bolivia, Plurinational State Of")),
('BQ', _("Bonaire, Saint Eustatius And Saba")),
('BA', _("Bosnia And Herzegovina")),
('BW', _("Botswana")),
('BV', _("Bouvet Island")),
('BR', _("Brazil")),
('IO', _("British Indian Ocean Territory")),
('BN', _("Brunei Darussalam")),
('BG', _("Bulgaria")),
('BF', _("Burkina Faso")),
('BI', _("Burundi")),
('KH', _("Cambodia")),
('CM', _("Cameroon")),
('CA', _("Canada")),
('CV', _("Cape Verde")),
('KY', _("Cayman Islands")),
('CF', _("Central African Republic")),
('TD', _("Chad")),
('CL', _("Chile")),
('CN', _("China")),
('CX', _("Christmas Island")),
('CC', _("Cocos (Keeling) Islands")),
('CO', _("Colombia")),
('KM', _("Comoros")),
('CG', _("Congo")),
('CD', _("Congo, The Democratic Republic Of The")),
('CK', _("Cook Islands")),
('CR', _("Costa Rica")),
('HR', _("Croatia")),
('CU', _("Cuba")),
('CW', _("Curacao")),
('CY', _("Cyprus")),
('CZ', _("Czech Republic")),
('DK', _("Denmark")),
('DJ', _("Djibouti")),
('DM', _("Dominica")),
('DO', _("Dominican Republic")),
('EC', _("Ecuador")),
('EG', _("Egypt")),
('SV', _("El Salvador")),
('GQ', _("Equatorial Guinea")),
('ER', _("Eritrea")),
('EE', _("Estonia")),
('ET', _("Ethiopia")),
('FK', _("Falkland Islands (Malvinas)")),
('FO', _("Faroe Islands")),
('FJ', _("Fiji")),
('FI', _("Finland")),
('FR', _("France")),
('GF', _("French Guiana")),
('PF', _("French Polynesia")),
('TF', _("French Southern Territories")),
('GA', _("Gabon")),
('GM', _("Gambia")),
('DE', _("Germany")),
('GH', _("Ghana")),
('GI', _("Gibraltar")),
('GR', _("Greece")),
('GL', _("Greenland")),
('GD', _("Grenada")),
('GP', _("Guadeloupe")),
('GU', _("Guam")),
('GT', _("Guatemala")),
('GG', _("Guernsey")),
('GN', _("Guinea")),
('GW', _("Guinea-Bissau")),
('GY', _("Guyana")),
('HT', _("Haiti")),
('HM', _("Heard Island and McDonald Islands")),
('VA', _("Holy See (Vatican City State)")),
('HN', _("Honduras")),
('HK', _("Hong Kong")),
('HU', _("Hungary")),
('IS', _("Iceland")),
('IN', _("India")),
('ID', _("Indonesia")),
('IR', _("Iran, Islamic Republic Of")),
('IQ', _("Iraq")),
('IE', _("Ireland")),
('IL', _("Israel")),
('IT', _("Italy")),
('CI', _("Ivory Coast")),
('JM', _("Jamaica")),
('JP', _("Japan")),
('JE', _("Jersey")),
('JO', _("Jordan")),
('KZ', _("Kazakhstan")),
('KE', _("Kenya")),
('KI', _("Kiribati")),
('KP', _("Korea, Democratic People's Republic Of")),
('KR', _("Korea, Republic Of")),
('KS', _("Kosovo")),
('KW', _("Kuwait")),
('KG', _("Kyrgyzstan")),
('LA', _("Lao People's Democratic Republic")),
('LV', _("Latvia")),
('LB', _("Lebanon")),
('LS', _("Lesotho")),
('LR', _("Liberia")),
('LY', _("Libyan Arab Jamahiriya")),
('LI', _("Liechtenstein")),
('LT', _("Lithuania")),
('LU', _("Luxembourg")),
('MO', _("Macao")),
('MK', _("Macedonia")),
('MG', _("Madagascar")),
('MW', _("Malawi")),
('MY', _("Malaysia")),
('MV', _("Maldives")),
('ML', _("Mali")),
('ML', _("Malta")),
('MH', _("Marshall Islands")),
('MQ', _("Martinique")),
('MR', _("Mauritania")),
('MU', _("Mauritius")),
('YT', _("Mayotte")),
('MX', _("Mexico")),
('FM', _("Micronesia")),
('MD', _("Moldova")),
('MC', _("Monaco")),
('MN', _("Mongolia")),
('ME', _("Montenegro")),
('MS', _("Montserrat")),
('MA', _("Morocco")),
('MZ', _("Mozambique")),
('MM', _("Myanmar")),
('NA', _("Namibia")),
('NR', _("Nauru")),
('NP', _("Nepal")),
('NL', _("Netherlands")),
('AN', _("Netherlands Antilles")),
('NC', _("New Caledonia")),
('NZ', _("New Zealand")),
('NI', _("Nicaragua")),
('NE', _("Niger")),
('NG', _("Nigeria")),
('NU', _("Niue")),
('NF', _("Norfolk Island")),
('MP', _("Northern Mariana Islands")),
('NO', _("Norway")),
('OM', _("Oman")),
('PK', _("Pakistan")),
('PW', _("Palau")),
('PS', _("Palestinian Territory, Occupied")),
('PA', _("Panama")),
('PG', _("Papua New Guinea")),
('PY', _("Paraguay")),
('PE', _("Peru")),
('PH', _("Philippines")),
('PN', _("Pitcairn")),
('PL', _("Poland")),
('PT', _("Portugal")),
('PR', _("Puerto Rico")),
('QA', _("Qatar")),
('RE', _("Reunion")),
('RO', _("Romania")),
('RU', _("Russian Federation")),
('RW', _("Rwanda")),
('BL', _("Saint Barthelemy")),
('SH', _("Saint Helena, Ascension & Tristan Da Cunha")),
('KN', _("Saint Kitts and Nevis")),
('LC', _("Saint Lucia")),
('MF', _("Saint Martin (French Part)")),
('PM', _("Saint Pierre and Miquelon")),
('VC', _("Saint Vincent And The Grenadines")),
('WS', _("Samoa")),
('SM', _("San Marino")),
('ST', _("Sao Tome And Principe")),
('SA', _("Saudi Arabia")),
('SN', _("Senegal")),
('RS', _("Serbia")),
('SC', _("Seychelles")),
('SL', _("Sierra Leone")),
('SG', _("Singapore")),
('SX', _("Sint Maarten (Dutch Part)")),
('SK', _("Slovakia")),
('SI', _("Slovenia")),
('SB', _("Solomon Islands")),
('SO', _("Somalia")),
('ZA', _("South Africa")),
('GS', _("South Georgia And The South Sandwich Islands")),
('ES', _("Spain")),
('LK', _("Sri Lanka")),
('SD', _("Sudan")),
('SR', _("Suriname")),
('SJ', _("Svalbard And Jan Mayen")),
('SZ', _("Swaziland")),
('SE', _("Sweden")),
('CH', _("Switzerland")),
('SY', _("Syrian Arab Republic")),
('TW', _("Taiwan")),
('TJ', _("Tajikistan")),
('TZ', _("Tanzania")),
('TH', _("Thailand")),
('TL', _("Timor-Leste")),
('TG', _("Togo")),
('TK', _("Tokelau")),
('TO', _("Tonga")),
('TT', _("Trinidad and Tobago")),
('TN', _("Tunisia")),
('TR', _("Turkey")),
('TM', _("Turkmenistan")),
('TC', _("Turks And Caicos Islands")),
('TV', _("Tuvalu")),
('UG', _("Uganda")),
('UA', _("Ukraine")),
('AE', _("United Arab Emirates")),
('GB', _("United Kingdom")),
('US', _("United States")),
('UM', _("United States Minor Outlying Islands")),
('UY', _("Uruguay")),
('UZ', _("Uzbekistan")),
('VU', _("Vanuatu")),
('VE', _("Venezuela, Bolivarian Republic Of")),
('VN', _("Viet Nam")),
('VG', _("Virgin Islands, British")),
('VI', _("Virgin Islands, U.S.")),
('WF', _("Wallis and Futuna")),
('EH', _("Western Sahara")),
('YE', _("Yemen")),
('ZM', _("Zambia")),
('ZW', _("Zimbabwe")),
]
class CountryField(models.CharField):
def __init__(self, *args, **kwargs):
defaults = {
'max_length': 3,
'choices': ISO_3166_CODES,
}
defaults.update(kwargs)
super(CountryField, self).__init__(*args, **defaults)
def deconstruct(self):
name, path, args, kwargs = super(CountryField, self).deconstruct()
if kwargs['max_length'] == 3:
kwargs.pop('max_length')
if kwargs['choices'] == ISO_3166_CODES:
kwargs.pop('choices')
return name, path, args, kwargs
| true | true |
f73a90b94e2c9835ecf7d319e48fbe2a6dee55c1 | 13,960 | py | Python | model-optimizer/mo/utils/class_registration.py | fujunwei/dldt | 09497b7724de4be92629f7799b8538b483d809a2 | [
"Apache-2.0"
] | null | null | null | model-optimizer/mo/utils/class_registration.py | fujunwei/dldt | 09497b7724de4be92629f7799b8538b483d809a2 | [
"Apache-2.0"
] | null | null | null | model-optimizer/mo/utils/class_registration.py | fujunwei/dldt | 09497b7724de4be92629f7799b8538b483d809a2 | [
"Apache-2.0"
] | null | null | null | """
Copyright (C) 2018-2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging as log
import networkx as nx
import os
from enum import Enum
from mo.graph.graph import Graph
from mo.middle.passes.eliminate import shape_inference
from mo.middle.pattern_match import for_graph_and_each_sub_graph_recursively
from mo.utils.error import Error, InternalError, FrameworkError
from mo.utils.logger import progress_bar
from mo.utils.utils import refer_to_faq_msg
_registered_classes_dict = {}
def _check_unique_ids():
"""
Check that idxs is unique for all registered replacements.
"""
unique_idxs = set()
for class_type, classes_set in _registered_classes_dict.items():
for cls in classes_set:
replacers = [c for c in cls.registered_cls if not hasattr(c, 'op')] + \
[c for op, c in cls.registered_ops.items() if c]
for replacer_cls in replacers:
if hasattr(replacer_cls, 'id'):
id_cls = getattr(replacer_cls, 'id')
if id_cls in unique_idxs:
raise Error('Found replacer {} with not unique id!'.format(replacer_cls))
unique_idxs.add(id_cls)
log.debug("All replacers has unique idxs.")
def get_enabled_and_disabled_transforms():
"""
:return: tuple of lists with force enabled and disabled id of transformations.
"""
disabled_transforms = os.environ['MO_DISABLED_TRANSFORMS'] if 'MO_DISABLED_TRANSFORMS' in os.environ else ''
enabled_transforms = os.environ['MO_ENABLED_TRANSFORMS'] if 'MO_ENABLED_TRANSFORMS' in os.environ else ''
assert isinstance(enabled_transforms, str)
assert isinstance(disabled_transforms, str)
disabled_transforms = disabled_transforms.split(',')
enabled_transforms = enabled_transforms.split(',')
return enabled_transforms, disabled_transforms
class ClassType(Enum):
    # Kinds of pluggable classes tracked in _registered_classes_dict; pipeline
    # stages select which registered transformations run by these values.
    EXTRACTOR = 0
    OP = 1
    FRONT_REPLACER = 2
    MIDDLE_REPLACER = 3
    BACK_REPLACER = 4
    IR_READER_EXTENDER = 5
    LOADER = 6
def _update(cls, registered_list: list, registered_dict: dict, key: str, enabled_transforms: list,
            disabled_transforms: list):
    """Register all direct subclasses of ``cls``.

    New subclasses are appended to ``registered_list``; subclasses exposing a
    non-None attribute named by ``key`` (e.g. 'op') are additionally indexed in
    ``registered_dict`` under that attribute's value. Subclasses whose ``id``
    appears in ``enabled_transforms``/``disabled_transforms`` get their
    ``enabled`` flag forced before registration.
    """
    new_keys = {}  # maps a custom name to class
    new_keys_lower = {}  # translates lowered custom name to its original form
    # print('Registering new subclasses for', cls)
    for c in cls.__subclasses__():
        # Force enabling operations
        if hasattr(c, 'id') and c.id in enabled_transforms:
            setattr(c, 'enabled', True)
        # Force disabling operations
        if hasattr(c, 'id') and c.id in disabled_transforms:
            setattr(c, 'enabled', False)
        if c not in registered_list:
            if hasattr(cls, 'excluded_classes') and c in cls.excluded_classes:
                continue
            registered_list.append(c)
            log.info('New subclass: {}'.format(c))
            if hasattr(c, key) and getattr(c, key) is not None:
                k = getattr(c, key)
                # Custom names must be unique case-insensitively.
                # NOTE(review): the Error below receives k and c as extra positional
                # args -- presumably mo.utils.error.Error formats them; confirm.
                if k.lower() in new_keys_lower:
                    raise Error(
                        'Attempt to register of custom name {} for the second time as class {}. ' \
                        'Note that custom names are case-insensitive. ' +
                        refer_to_faq_msg(55), k, c)
                else:
                    new_keys_lower[k.lower()] = k
                    new_keys[k] = c
                    log.info('Registered a new subclass with key: {}'.format(k))
        else:
            log.warning('Skipped {} registration because it was already registered or it was disabled. '.format(c))
    registered_dict.update(new_keys)
def update_registration(classes: list, enabled_transforms: list, disabled_transforms: list):
    """Register subclasses of every class in ``classes`` and index each class by its class type."""
    for registration_cls in classes:
        _update(registration_cls, registration_cls.registered_cls, registration_cls.registered_ops, 'op',
                enabled_transforms, disabled_transforms)
        _registered_classes_dict.setdefault(registration_cls.class_type(), set()).add(registration_cls)
class DependencyGraph(Graph):
    """Directed graph whose nodes are transformation classes.

    Edges encode run_before/run_after constraints; ``determined_sort`` yields a
    deterministic execution order respecting them.
    """

    def __init__(self, data=None, **attr):
        super().__init__(data, **attr)

    def dump_graph_for_graphviz(self, node_attrs: list = None, edge_attrs: list = None, nodes_to_dump: list = None,
                                save_to_svg=False, highlight_nodes: list = None):
        """Serialize the graph in Graphviz dot format.

        :param node_attrs: node attribute names to embed in node labels.
        :param edge_attrs: edge attribute names to embed in edge labels.
        :param nodes_to_dump: nodes to include; all nodes when None.
        :param save_to_svg: when True, also write the dot text to a file and render an SVG.
        :param highlight_nodes: unused; kept for interface compatibility.
        :return: the generated dot-format string.
        """
        # Bug fix: the original used mutable default arguments ([]), which are
        # shared across calls; default to fresh lists instead.
        if node_attrs is None:
            node_attrs = []
        if edge_attrs is None:
            edge_attrs = []
        log.debug("---- GRAPHVIZ OUTPUT STARTS ----")
        if nodes_to_dump is None:
            nodes_to_dump = self.nodes()
        string = '\ndigraph {\n'
        string += 'node [color=lightblue2, style=filled];\n'
        for node in nodes_to_dump:
            attrs = ""
            # Render disabled transformations in gray.
            if hasattr(node, 'enabled') and not node.enabled:
                attrs += "color=gray70,"
            string += '"{}" [{}];\n'.format(node, attrs)
        visited_nodes = set()
        for src_node_name, dst_node_name, attrs in self.edges(data=True):
            visited_nodes.add(src_node_name)
            visited_nodes.add(dst_node_name)
            if src_node_name not in nodes_to_dump or dst_node_name not in nodes_to_dump:
                continue
            src_node = self.node[src_node_name]
            dst_node = self.node[dst_node_name]
            src_node_string = str(src_node_name) + '\\n'.join(
                [str(key) + '=' + str(src_node.get(key, 'None')) for key in node_attrs if key in src_node])
            dst_node_string = str(dst_node_name) + '\\n'.join(
                [str(key) + '=' + str(dst_node.get(key, 'None')) for key in node_attrs if key in dst_node])
            edge_string = ' '.join([str(key) + '=' + str(attrs.get(key, 'None')) for key in edge_attrs if key in attrs])
            string += '"{}" -> "{}" [label = "{}"];\n'.format(src_node_string, dst_node_string, edge_string)
        # Isolated nodes (no edges) still have to appear in the dump.
        for node in nodes_to_dump:
            if node not in visited_nodes:
                string += '"{}";\n'.format(node)
                visited_nodes.add(node)
        string += '}'
        log.debug(string)
        log.debug("---- GRAPHVIZ OUTPUT ENDS ----")
        if save_to_svg:
            try:
                import graphviz
                import os
                # Pick the first file name of the form <name>_<idx>.txt that is free.
                file_name = "{}_{}.txt".format(self.name.replace('/', '_'), 0)
                idx = 1  # renamed from 'id' to avoid shadowing the builtin
                while os.path.exists(file_name):
                    file_name = "{}_{}.txt".format(self.name.replace('/', '_'), idx)
                    idx += 1
                with open(file_name, "w") as f:
                    f.write(string)
                graphviz.render('dot', 'svg', file_name)
                print('Graph was saved to {}.{}'.format(file_name, 'svg'))
            except ImportError:
                raise ImportError('Can\'t import graphviz')
            except Exception as e:
                raise Error('Can\'t save graph to svg') from e
        return string

    def cycle_check(self):
        """Raise Error if the dependency graph contains a cycle (reports one of them)."""
        try:
            list(nx.topological_sort(self))
        except nx.NetworkXUnfeasible as exception:
            cycles = nx.simple_cycles(self)
            raise Error(
                'There is(are) cyclic dependency(ies) between replacers. One of the cycles is the following: {}',
                ' -> '.join([str(node) for node in list(cycles)[0]])) from exception

    def repeated_cls_names_check(self):
        """Raise AssertionError if two registered transform classes share the same name."""
        name_to_class_map = {}
        for transform_class in self.node:
            transform_name = transform_class.__name__
            assert transform_name not in name_to_class_map, \
                'Transform name `{}` is not unique: at least {} and {} exist' \
                ''.format(transform_name, transform_class, name_to_class_map[transform_name])
            name_to_class_map[transform_name] = transform_class

    def sort_util(self, v, visited, stack):
        """DFS helper for determined_sort: children visited in name order, v pushed on top of stack."""
        visited.append(v)
        for i in sorted([child for _, child in self.out_edges(v)], key=lambda x: x.__name__):
            if i not in visited:
                self.sort_util(i, visited, stack)
        stack.insert(0, v)

    def determined_sort(self):
        """Return a deterministic topological ordering of all transformations.

        Roots and children are visited in alphabetical order of class name so the
        result is stable between runs; the produced order is validated against
        networkx's topological sort afterwards.
        """
        self.cycle_check()
        self.repeated_cls_names_check()
        transforms = sorted([cls for cls in self.nodes() if len(self.in_edges(cls)) == 0], key=lambda x: x.__name__)
        order, visited = [], []
        for transform in transforms:
            self.sort_util(transform, visited, order)
        # Sanity check: chaining the produced order must itself be a valid
        # topological order of the graph.
        graph_copy = self.copy()
        for i in range(len(order) - 1):
            graph_copy.add_edge(order[i], order[i + 1])
        try:
            nx_order = list(nx.topological_sort(graph_copy))
        except Exception as e:
            raise InternalError(
                "Internal DependencyGraph determined_sort function behaves unexpectedly: cycle found") from e
        assert nx_order == order, \
            "Internal DependencyGraph determined_sort function behaves unexpectedly: nx_order != order"
        return order
def get_replacers_order(transform_types: list):
    """Collect registered transformations of the given types and order them.

    Gets all transforms that do not have 'op'.
    If two or more classes replaces the same op (both have op class attribute and values match), such
    pattern is not applied (while registration it will warn user that we have a conflict).

    :param transform_types: list of ClassType values selecting which registered groups participate.
    :return: list of transformation classes in deterministic execution order.
    """
    dependency_graph = DependencyGraph(name="UnifiedPipeline" if len(transform_types) != 1 else transform_types[0].name)
    replacers = []
    for class_type, classes_set in _registered_classes_dict.items():
        if class_type in transform_types:
            for cls in classes_set:
                # Transformations without an 'op' attribute plus the explicitly registered ops.
                cur_cls_replacers = [c for c in cls.registered_cls if not hasattr(c, 'op')] + \
                                    [c for op, c in cls.registered_ops.items() if c]
                replacers.extend(
                    [replacer for replacer in cur_cls_replacers if replacer not in cls.excluded_replacers])
    for replacer_cls in replacers:
        dependency_graph.add_node(replacer_cls)
    # Encode run_before/run_after constraints as edges of the dependency graph.
    for i, replacer_cls in enumerate(replacers):
        for cls_after in replacer_cls().run_before():
            dependency_graph.add_edge(replacer_cls, cls_after)
        for cls_before in replacer_cls().run_after():
            dependency_graph.add_edge(cls_before, replacer_cls)
    replacers_order = dependency_graph.determined_sort()
    # Log the final order as a small text table for debugging.
    debug_msg_list = ['| id | enabled | class ']
    for i, replacer_cls in enumerate(replacers_order):
        debug_msg_list.append('|{:5} |{:^9}| {}'.format(i, str(getattr(replacer_cls, 'enabled', None)), replacer_cls))
    log.debug('Replacers execution order: \n{}'.format('\n'.join(debug_msg_list)))
    return replacers_order
@progress_bar
def apply_transform(graph: Graph, replacer_cls, **kwargs):
    """Instantiate and run a single transformation on the graph, then validate the result.

    The transformation is skipped when it is disabled or when any of its
    ``graph_condition`` predicates fails. Exceptions raised by the transformation
    are re-raised with the replacement id and class attached to the message.
    """
    replacer = replacer_cls()
    replacement_id = 'REPLACEMENT_ID'
    if hasattr(replacer, 'replacement_id'):
        replacement_id = replacer.replacement_id
    if hasattr(replacer, 'enabled') and not replacer.enabled:
        log.info("Skip replacer {} (enabled = False)".format(replacer_cls))
        return
    if hasattr(replacer, 'graph_condition') and \
            not all([condition(graph) for condition in replacer.graph_condition]):
        log.info("Skip replacer {} (graph_condition not satisfied)".format(replacer_cls))
        return
    log.debug("Run replacer {}".format(replacer_cls))
    try:
        # Unless the transform opts out, it is applied to the graph and every sub-graph.
        if hasattr(replacer, 'run_not_recursively'):
            replacer.find_and_replace_pattern(graph)
        else:
            for_graph_and_each_sub_graph_recursively(graph, replacer.find_and_replace_pattern)
        if hasattr(replacer, 'force_clean_up') and replacer.force_clean_up:
            for_graph_and_each_sub_graph_recursively(graph, lambda G: G.clean_up())
        if hasattr(replacer, 'force_shape_inference') and replacer.force_shape_inference:
            shape_inference(graph)
        # NOTE(review): both lambdas ignore their subgraph argument and validate the
        # top-level `graph` once per (sub)graph -- confirm this is intentional.
        for_graph_and_each_sub_graph_recursively(graph, lambda _: graph.check_empty_graph(replacer_cls))
        for_graph_and_each_sub_graph_recursively(graph, lambda _: graph.check_shapes_consistency())
    except Error as err:
        raise Error('Exception occurred during running replacer "{}" ({}): {}'.format(
            replacement_id,
            replacer_cls,
            str(err).replace('[REPLACEMENT_ID]', replacement_id),
        )) from err
    except FrameworkError as err:
        # Framework errors are passed through with their original message only.
        raise FrameworkError('{}'.format(str(err))) from err
    except Exception as err:
        raise Exception('Exception occurred during running replacer "{} ({})": {}'.format(
            replacement_id,
            replacer_cls,
            str(err).replace('[REPLACEMENT_ID]', replacement_id),
        )) from err
def apply_replacements_list(graph: Graph, replacers_order: list):
    """Run every transformation from ``replacers_order`` on ``graph``, in order."""
    num_transforms = len(replacers_order)
    for transform_num, transform_cls in enumerate(replacers_order):
        apply_transform(graph=graph,
                        replacer_cls=transform_cls,
                        curr_transform_num=transform_num,
                        num_transforms=num_transforms)
def apply_replacements(graph: Graph, replacements_type: list):
    """Apply all registered transformations of the given types to ``graph``.

    Patterns without an 'op' attribute run first, followed by the patterns from
    registered_ops. When two or more classes replace the same op (both carry an
    'op' class attribute with matching values) the pattern is not applied;
    registration already warned the user about the conflict.
    """
    apply_replacements_list(graph, get_replacers_order(replacements_type))
| 41.671642 | 120 | 0.636963 | import logging as log
import networkx as nx
import os
from enum import Enum
from mo.graph.graph import Graph
from mo.middle.passes.eliminate import shape_inference
from mo.middle.pattern_match import for_graph_and_each_sub_graph_recursively
from mo.utils.error import Error, InternalError, FrameworkError
from mo.utils.logger import progress_bar
from mo.utils.utils import refer_to_faq_msg
_registered_classes_dict = {}
def _check_unique_ids():
unique_idxs = set()
for class_type, classes_set in _registered_classes_dict.items():
for cls in classes_set:
replacers = [c for c in cls.registered_cls if not hasattr(c, 'op')] + \
[c for op, c in cls.registered_ops.items() if c]
for replacer_cls in replacers:
if hasattr(replacer_cls, 'id'):
id_cls = getattr(replacer_cls, 'id')
if id_cls in unique_idxs:
raise Error('Found replacer {} with not unique id!'.format(replacer_cls))
unique_idxs.add(id_cls)
log.debug("All replacers has unique idxs.")
def get_enabled_and_disabled_transforms():
disabled_transforms = os.environ['MO_DISABLED_TRANSFORMS'] if 'MO_DISABLED_TRANSFORMS' in os.environ else ''
enabled_transforms = os.environ['MO_ENABLED_TRANSFORMS'] if 'MO_ENABLED_TRANSFORMS' in os.environ else ''
assert isinstance(enabled_transforms, str)
assert isinstance(disabled_transforms, str)
disabled_transforms = disabled_transforms.split(',')
enabled_transforms = enabled_transforms.split(',')
return enabled_transforms, disabled_transforms
class ClassType(Enum):
EXTRACTOR = 0
OP = 1
FRONT_REPLACER = 2
MIDDLE_REPLACER = 3
BACK_REPLACER = 4
IR_READER_EXTENDER = 5
LOADER = 6
def _update(cls, registered_list: list, registered_dict: dict, key: str, enabled_transforms: list,
disabled_transforms: list):
new_keys = {}
new_keys_lower = {}
for c in cls.__subclasses__():
if hasattr(c, 'id') and c.id in enabled_transforms:
setattr(c, 'enabled', True)
if hasattr(c, 'id') and c.id in disabled_transforms:
setattr(c, 'enabled', False)
if c not in registered_list:
if hasattr(cls, 'excluded_classes') and c in cls.excluded_classes:
continue
registered_list.append(c)
log.info('New subclass: {}'.format(c))
if hasattr(c, key) and getattr(c, key) is not None:
k = getattr(c, key)
if k.lower() in new_keys_lower:
raise Error(
'Attempt to register of custom name {} for the second time as class {}. ' \
'Note that custom names are case-insensitive. ' +
refer_to_faq_msg(55), k, c)
else:
new_keys_lower[k.lower()] = k
new_keys[k] = c
log.info('Registered a new subclass with key: {}'.format(k))
else:
log.warning('Skipped {} registration because it was already registered or it was disabled. '.format(c))
registered_dict.update(new_keys)
def update_registration(classes: list, enabled_transforms: list, disabled_transforms: list):
for cls in classes:
_update(cls, cls.registered_cls, cls.registered_ops, 'op', enabled_transforms, disabled_transforms)
_registered_classes_dict.setdefault(cls.class_type(), set()).add(cls)
class DependencyGraph(Graph):
def __init__(self, data=None, **attr):
super().__init__(data, **attr)
def dump_graph_for_graphviz(self, node_attrs: list = [], edge_attrs: list = [], nodes_to_dump: list = None,
save_to_svg=False, highlight_nodes: list = None):
log.debug("---- GRAPHVIZ OUTPUT STARTS ----")
if nodes_to_dump is None:
nodes_to_dump = self.nodes()
string = '\ndigraph {\n'
string += 'node [color=lightblue2, style=filled];\n'
for node in nodes_to_dump:
attrs = ""
if hasattr(node, 'enabled') and not node.enabled:
attrs += "color=gray70,"
string += '"{}" [{}];\n'.format(node, attrs)
visited_nodes = set()
for src_node_name, dst_node_name, attrs in self.edges(data=True):
visited_nodes.add(src_node_name)
visited_nodes.add(dst_node_name)
if src_node_name not in nodes_to_dump or dst_node_name not in nodes_to_dump:
continue
src_node = self.node[src_node_name]
dst_node = self.node[dst_node_name]
src_node_string = str(src_node_name) + '\\n'.join(
[str(key) + '=' + str(src_node.get(key, 'None')) for key in node_attrs if key in src_node])
dst_node_string = str(dst_node_name) + '\\n'.join(
[str(key) + '=' + str(dst_node.get(key, 'None')) for key in node_attrs if key in dst_node])
edge_string = ' '.join([str(key) + '=' + str(attrs.get(key, 'None')) for key in edge_attrs if key in attrs])
string += '"{}" -> "{}" [label = "{}"];\n'.format(src_node_string, dst_node_string, edge_string)
for node in nodes_to_dump:
if node not in visited_nodes:
string += '"{}";\n'.format(node)
visited_nodes.add(node)
string += '}'
log.debug(string)
log.debug("---- GRAPHVIZ OUTPUT ENDS ----")
if save_to_svg:
try:
import graphviz
import os
file_name = "{}_{}.txt".format(self.name.replace('/', '_'), 0)
id = 1
while os.path.exists(file_name):
file_name = "{}_{}.txt".format(self.name.replace('/', '_'), id)
id += 1
with open(file_name, "w") as f:
f.write(string)
graphviz.render('dot', 'svg', file_name)
print('Graph was saved to {}.{}'.format(file_name, 'svg'))
except ImportError:
raise ImportError('Can\'t import graphviz')
except Exception as e:
raise Error('Can\'t save graph to svg') from e
return string
def cycle_check(self):
try:
list(nx.topological_sort(self))
except nx.NetworkXUnfeasible as exception:
cycles = nx.simple_cycles(self)
raise Error(
'There is(are) cyclic dependency(ies) between replacers. One of the cycles is the following: {}',
' -> '.join([str(node) for node in list(cycles)[0]])) from exception
def repeated_cls_names_check(self):
name_to_class_map = {}
for transform_class in self.node:
transform_name = transform_class.__name__
assert transform_name not in name_to_class_map, \
'Transform name `{}` is not unique: at least {} and {} exist' \
''.format(transform_name, transform_class, name_to_class_map[transform_name])
name_to_class_map[transform_name] = transform_class
def sort_util(self, v, visited, stack):
visited.append(v)
for i in sorted([child for _, child in self.out_edges(v)], key=lambda x: x.__name__):
if i not in visited:
self.sort_util(i, visited, stack)
stack.insert(0, v)
def determined_sort(self):
self.cycle_check()
self.repeated_cls_names_check()
transforms = sorted([cls for cls in self.nodes() if len(self.in_edges(cls)) == 0], key=lambda x: x.__name__)
order, visited = [], []
for transform in transforms:
self.sort_util(transform, visited, order)
graph_copy = self.copy()
for i in range(len(order) - 1):
graph_copy.add_edge(order[i], order[i + 1])
try:
nx_order = list(nx.topological_sort(graph_copy))
except Exception as e:
raise InternalError(
"Internal DependencyGraph determined_sort function behaves unexpectedly: cycle found") from e
assert nx_order == order, \
"Internal DependencyGraph determined_sort function behaves unexpectedly: nx_order != order"
return order
def get_replacers_order(transform_types: list):
dependency_graph = DependencyGraph(name="UnifiedPipeline" if len(transform_types) != 1 else transform_types[0].name)
replacers = []
for class_type, classes_set in _registered_classes_dict.items():
if class_type in transform_types:
for cls in classes_set:
cur_cls_replacers = [c for c in cls.registered_cls if not hasattr(c, 'op')] + \
[c for op, c in cls.registered_ops.items() if c]
replacers.extend(
[replacer for replacer in cur_cls_replacers if replacer not in cls.excluded_replacers])
for replacer_cls in replacers:
dependency_graph.add_node(replacer_cls)
for i, replacer_cls in enumerate(replacers):
for cls_after in replacer_cls().run_before():
dependency_graph.add_edge(replacer_cls, cls_after)
for cls_before in replacer_cls().run_after():
dependency_graph.add_edge(cls_before, replacer_cls)
replacers_order = dependency_graph.determined_sort()
debug_msg_list = ['| id | enabled | class ']
for i, replacer_cls in enumerate(replacers_order):
debug_msg_list.append('|{:5} |{:^9}| {}'.format(i, str(getattr(replacer_cls, 'enabled', None)), replacer_cls))
log.debug('Replacers execution order: \n{}'.format('\n'.join(debug_msg_list)))
return replacers_order
@progress_bar
def apply_transform(graph: Graph, replacer_cls, **kwargs):
replacer = replacer_cls()
replacement_id = 'REPLACEMENT_ID'
if hasattr(replacer, 'replacement_id'):
replacement_id = replacer.replacement_id
if hasattr(replacer, 'enabled') and not replacer.enabled:
log.info("Skip replacer {} (enabled = False)".format(replacer_cls))
return
if hasattr(replacer, 'graph_condition') and \
not all([condition(graph) for condition in replacer.graph_condition]):
log.info("Skip replacer {} (graph_condition not satisfied)".format(replacer_cls))
return
log.debug("Run replacer {}".format(replacer_cls))
try:
if hasattr(replacer, 'run_not_recursively'):
replacer.find_and_replace_pattern(graph)
else:
for_graph_and_each_sub_graph_recursively(graph, replacer.find_and_replace_pattern)
if hasattr(replacer, 'force_clean_up') and replacer.force_clean_up:
for_graph_and_each_sub_graph_recursively(graph, lambda G: G.clean_up())
if hasattr(replacer, 'force_shape_inference') and replacer.force_shape_inference:
shape_inference(graph)
for_graph_and_each_sub_graph_recursively(graph, lambda _: graph.check_empty_graph(replacer_cls))
for_graph_and_each_sub_graph_recursively(graph, lambda _: graph.check_shapes_consistency())
except Error as err:
raise Error('Exception occurred during running replacer "{}" ({}): {}'.format(
replacement_id,
replacer_cls,
str(err).replace('[REPLACEMENT_ID]', replacement_id),
)) from err
except FrameworkError as err:
raise FrameworkError('{}'.format(str(err))) from err
except Exception as err:
raise Exception('Exception occurred during running replacer "{} ({})": {}'.format(
replacement_id,
replacer_cls,
str(err).replace('[REPLACEMENT_ID]', replacement_id),
)) from err
def apply_replacements_list(graph: Graph, replacers_order: list):
for i, replacer_cls in enumerate(replacers_order):
apply_transform(
graph=graph,
replacer_cls=replacer_cls,
curr_transform_num=i,
num_transforms=len(replacers_order))
def apply_replacements(graph: Graph, replacements_type: list):
replacers_order = get_replacers_order(replacements_type)
apply_replacements_list(graph, replacers_order)
| true | true |
f73a9187b5d73b951569d273ef0dcec6ecc321e2 | 106 | py | Python | trinity/enumerator/__init__.py | chyanju/Poe | 7602f413378c0c3a88a5edac8125fb4dcb17765b | [
"MIT"
] | 1 | 2021-10-30T20:30:46.000Z | 2021-10-30T20:30:46.000Z | trinity/enumerator/__init__.py | chyanju/Poe | 7602f413378c0c3a88a5edac8125fb4dcb17765b | [
"MIT"
] | null | null | null | trinity/enumerator/__init__.py | chyanju/Poe | 7602f413378c0c3a88a5edac8125fb4dcb17765b | [
"MIT"
] | null | null | null | from .enumerator import Enumerator
from .line_skeleton import LineSkeletonEnumerator, LineSkeletonIterator | 53 | 71 | 0.896226 | from .enumerator import Enumerator
from .line_skeleton import LineSkeletonEnumerator, LineSkeletonIterator | true | true |
f73a919e4a25a75653a81f6c59be073fe1b891f8 | 10,060 | py | Python | utils/utils.py | surisdi/DPC | ce6fe25938c1bebb7f654d0c8f8479bf92ab4054 | [
"MIT"
] | null | null | null | utils/utils.py | surisdi/DPC | ce6fe25938c1bebb7f654d0c8f8479bf92ab4054 | [
"MIT"
] | null | null | null | utils/utils.py | surisdi/DPC | ce6fe25938c1bebb7f654d0c8f8479bf92ab4054 | [
"MIT"
] | null | null | null | import torch
import numpy as np
import os
from datetime import datetime
import glob
import matplotlib.pyplot as plt
plt.switch_backend('agg')
from collections import deque
from torchvision import transforms
def save_checkpoint(state, is_best=0, gap=1, filename='models/checkpoint.pth.tar', keep_all=False):
    """Save a training checkpoint and optionally track the best model.

    :param state: dict to serialize; must contain an 'epoch' key.
    :param is_best: truthy when this checkpoint is the best so far; the previous
        'model_best_*' files are then replaced by a new one.
    :param gap: epoch distance between consecutive checkpoints, used to locate
        the previous checkpoint file for deletion.
    :param filename: target path for the checkpoint.
    :param keep_all: when True, do not delete the previous epoch's checkpoint.
    """
    torch.save(state, filename)
    last_epoch_path = os.path.join(os.path.dirname(filename),
                                   'epoch%s.pth.tar' % str(state['epoch'] - gap))
    if not keep_all:
        # Best-effort cleanup: the previous checkpoint may legitimately not exist,
        # so only OS-level failures are swallowed (was a bare 'except:').
        try:
            os.remove(last_epoch_path)
        except OSError:
            pass
    if is_best:
        past_best = glob.glob(os.path.join(os.path.dirname(filename), 'model_best_*.pth.tar'))
        for old_best in past_best:
            try:
                os.remove(old_best)
            except OSError:
                pass
        path_best = os.path.join(os.path.dirname(filename), 'model_best_epoch%s.pth.tar' % str(state['epoch']))
        torch.save(state, path_best)
        print(f'Updating best model: {path_best}')
def write_log(content, epoch, filename):
    """Append an epoch-stamped entry to a text log file.

    :param content: message body to record.
    :param epoch: epoch number written in the entry header.
    :param filename: log file path; created on first use.
    """
    # Mode 'a' creates the file when missing, so the original existence check was
    # redundant; the 'with' statement guarantees the handle is closed.
    with open(filename, 'a') as log_file:
        log_file.write('## Epoch %d:\n' % epoch)
        log_file.write('time: %s\n' % str(datetime.now()))
        log_file.write(content + '\n\n')
def calc_topk_accuracy(output, target, topk=(1,)):
    """Given predicted scores and ground-truth labels, compute top-k accuracies.

    Modified from: https://gist.github.com/agermanidis/275b23ad7a10ee89adccf021536bb97e

    :param output: (B, C) tensor of class scores.
    :param target: (B,) tensor of ground-truth class indices.
    :param topk: iterable of k values.
    :return: list of 0-dim tensors, one accuracy per k.
    """
    maxk = max(topk)
    batch_size = target.size(0)
    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    res = []
    for k in topk:
        # Bug fix: 'correct[:k]' is non-contiguous after t(), and .view(-1)
        # raises RuntimeError on non-contiguous tensors; reshape() handles both.
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(1 / batch_size))
    return res
def calc_accuracy(output, target):
    """Fraction of rows of ``output`` (B, N) whose argmax equals ``target`` (B)."""
    target = target.squeeze()
    predictions = torch.argmax(output, dim=1)
    return torch.mean((predictions == target).float())
def calc_accuracy_binary(output, target):
    """Element-wise binary accuracy; ``output`` and ``target`` are (B, N),
    with ``output`` holding pre-sigmoid logits (threshold at 0)."""
    predictions = output > 0
    accuracy = torch.mean((predictions == target.byte()).float())
    del predictions, output, target
    return accuracy
def denorm(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]):
    """Build the inverse of a ``Normalize(mean, std)`` transform (3 channels)."""
    assert len(mean) == len(std) == 3
    inverse_mean = [-m / s for m, s in zip(mean, std)]
    inverse_std = [1 / s for s in std]
    return transforms.Normalize(mean=inverse_mean, std=inverse_std)
class AverageMeter(object):
    """Computes and stores the average and current value.

    Accepts python floats, torch tensors and numpy arrays; tensors/arrays with
    more than one leading element are averaged element-wise into
    ``avg_expanded`` while the scalar statistics track the overall mean.
    """
    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0
        self.local_history = deque([])
        self.local_avg = 0
        self.history = []
        self.dict = {}  # save all data values here
        self.save_dict = {}  # save mean and std here, for summary table
        self.avg_expanded = None

    def update(self, val, n=1, history=0, step=5):
        """Accumulate a new value.

        :param val: float, torch.Tensor or np.ndarray sample value(s).
        :param n: weight (sample count) of this update.
        :param history: when truthy, append the mean of ``val`` to ``self.history``.
        :param step: sliding-window length for ``local_avg`` (0 disables it).
        :raises TypeError: for unsupported ``val`` types.
        """
        is_array = False
        if type(val) == torch.Tensor:
            if len(val.shape) > 0 and val.shape[0] > 1:
                is_array = True
                val = val.view(-1).cpu().data.detach().numpy()
            else:
                val = val.mean().item()
        elif type(val) == np.ndarray:
            if len(val.shape) > 0 and val.shape[0] > 1:
                is_array = True
                val = val.reshape(-1)
        elif type(val) == float:
            pass
        else:
            raise TypeError(f'{type(val)} type not supported in AverageMeter')
        if type(n) == torch.Tensor:
            n = n.float().mean().item()
        self.val = np.mean(val)
        self.sum += val * n
        self.count += n
        if is_array:
            self.avg_expanded = self.sum / self.count
            self.avg = self.avg_expanded.mean()
        else:
            self.avg = self.sum / self.count
            self.avg_expanded = np.array([self.avg])
        if history:
            # Bug fix: np.mean(val) also works when val is a plain float; the
            # original called val.mean(), which raised AttributeError for floats.
            self.history.append(np.mean(val))
        if step > 0:
            self.local_history.append(val)
            if len(self.local_history) > step:
                self.local_history.popleft()
            self.local_avg = np.average(self.local_history)

    def dict_update(self, val, key):
        """Append ``val`` to the list stored under ``key`` in ``self.dict``."""
        if key in self.dict.keys():
            self.dict[key].append(val)
        else:
            self.dict[key] = [val]

    def __len__(self):
        # NOTE(review): count can be a float after weighted updates; len() would
        # then raise -- kept as-is for interface compatibility.
        return self.count
class AccuracyTable(object):
    """Accumulates per-class prediction counts to report accuracy per class."""
    def __init__(self):
        self.dict = {}

    def update(self, pred, tar):
        """Record a batch of predictions against targets (1-D tensors, same length)."""
        pred = torch.squeeze(pred)
        tar = torch.squeeze(tar)
        for prediction, truth in zip(pred, tar):
            prediction = int(prediction)
            truth = int(truth)
            entry = self.dict.setdefault(truth, {'count': 0, 'correct': 0})
            entry['count'] += 1
            if prediction == truth:
                entry['correct'] += 1

    def print_table(self, label):
        """Print accuracy for every class seen so far."""
        for key, entry in self.dict.items():
            acc = entry['correct'] / entry['count']
            print('%s: %2d, accuracy: %3d/%3d = %0.6f' \
                % (label, key, entry['correct'], entry['count'], acc))
def neq_load_customized(args, model, pretrained_dict,
                        parts=['backbone', 'agg', 'network_pred', 'hyperbolic_linear', 'network-class'],
                        size_diff=False):
    '''
    Load a pre-trained state dict in a not-equal way, when the new model has been
    partially modified.

    parts: substrings selecting which parameter groups to load, or the literal
        string 'all' to take every matching key. (The mutable default list is
        never mutated here, so sharing it between calls is harmless.)
    size_diff: some parameters may have the same name but different size. Cannot
        load these, but do not throw an error, and load all the rest.
    Returns the model with the filtered weights loaded (strict=False).
    '''
    model_dict = model.state_dict()
    tmp = {}
    print_r(args, '\n=======Check Weights Loading======')
    print_r(args, ('loading the following parts:', ', '.join(parts)))
    if parts == 'all':
        if size_diff:
            # Keep only keys whose shapes match the current model.
            for k, v in pretrained_dict.items():
                if k in model.state_dict() and model.state_dict()[k].shape == v.shape:
                    tmp[k] = v
                else:
                    print_r(args, f'{k} not loaded')
        else:
            tmp = pretrained_dict
    else:
        for part in parts:
            print_r(args, ('loading:', part))
            print_r(args, '\n=======Check Weights Loading======')
            print_r(args, 'Weights not used from pretrained file:')
            for k, v in pretrained_dict.items():
                if part in k:
                    if k in model_dict:
                        # Skip keys with mismatched shapes when size_diff is set.
                        if not (size_diff and model.state_dict()[k].shape != v.shape):
                            tmp[k] = v
                    else:
                        print_r(args, k)
            print_r(args, '---------------------------')
            print_r(args, 'Weights not loaded into new model:')
            for k, v in model_dict.items():
                if part in k:
                    if k not in pretrained_dict:
                        print_r(args, k)
            print_r(args, '===================================\n')
    del pretrained_dict
    # Special case: a pretrained time_index embedding larger than the model's is
    # truncated to the model's first rows before loading.
    if 'time_index.weight' in tmp and \
            'time_index' in [a[0].split('.')[0] for a in list(model.named_parameters())] and \
            model.time_index.weight.shape[0] < tmp['time_index.weight'].shape[0]:
        tmp['time_index.weight'].data = tmp['time_index.weight'][:model.time_index.weight.shape[0]].data
    model.load_state_dict(tmp, strict=False)
    return model
def print_r(args, text, print_no_verbose=False):
    """Print only on the main process (args.local_rank <= 0) and only when
    args.verbose is set or print_no_verbose forces the output; tuples are
    unpacked into positional print arguments."""
    if args.local_rank > 0:
        return
    if not (args.verbose or print_no_verbose):
        return
    if type(text) == tuple:
        print(*text)
    else:
        print(text)
class ConfusionMeter(object):
    '''Accumulates a confusion matrix (rows = predictions, columns = targets)
    and can print or plot it.'''
    def __init__(self, num_class):
        self.num_class = num_class
        self.mat = np.zeros((num_class, num_class))
        self.precision = []
        self.recall = []

    def update(self, pred, tar):
        """Add a batch of predictions/targets (tensors of matching shape) to the matrix."""
        pred, tar = pred.cpu().numpy(), tar.cpu().numpy()
        pred = np.squeeze(pred)
        tar = np.squeeze(tar)
        for p,t in zip(pred.flat, tar.flat):
            self.mat[p][t] += 1

    def print_mat(self):
        """Print the raw confusion matrix to stdout."""
        print('Confusion Matrix: (target in columns)')
        print(self.mat)

    def plot_mat(self, path, dictionary=None, annotate=False):
        """Render the confusion matrix as an SVG figure saved to ``path``.

        dictionary: optional index -> class-name mapping used for tick labels.
        annotate: when True, write the cell counts inside each cell.
        """
        plt.figure(dpi=600)
        plt.imshow(self.mat,
                   cmap=plt.cm.jet,
                   interpolation=None,
                   extent=(0.5, np.shape(self.mat)[0]+0.5, np.shape(self.mat)[1]+0.5, 0.5))
        width, height = self.mat.shape
        if annotate:
            for x in range(width):
                for y in range(height):
                    plt.annotate(str(int(self.mat[x][y])), xy=(y+1, x+1),
                                 horizontalalignment='center',
                                 verticalalignment='center',
                                 fontsize=8)

        if dictionary is not None:
            plt.xticks([i+1 for i in range(width)],
                       [dictionary[i] for i in range(width)],
                       rotation='vertical')
            plt.yticks([i+1 for i in range(height)],
                       [dictionary[i] for i in range(height)])
        plt.xlabel('Ground Truth')
        plt.ylabel('Prediction')
        plt.colorbar()
        plt.tight_layout()
        plt.savefig(path, format='svg')
        plt.clf()

        # for i in range(width):
        #     if np.sum(self.mat[i,:]) != 0:
        #         self.precision.append(self.mat[i,i] / np.sum(self.mat[i,:]))
        #     if np.sum(self.mat[:,i]) != 0:
        #         self.recall.append(self.mat[i,i] / np.sum(self.mat[:,i]))
        # print('Average Precision: %0.4f' % np.mean(self.precision))
        # print('Average Recall: %0.4f' % np.mean(self.recall))
| 35.052265 | 120 | 0.544334 | import torch
import numpy as np
import os
from datetime import datetime
import glob
import matplotlib.pyplot as plt
plt.switch_backend('agg')
from collections import deque
from torchvision import transforms
def save_checkpoint(state, is_best=0, gap=1, filename='models/checkpoint.pth.tar', keep_all=False):
torch.save(state, filename)
last_epoch_path = os.path.join(os.path.dirname(filename),
'epoch%s.pth.tar' % str(state['epoch']-gap))
if not keep_all:
try: os.remove(last_epoch_path)
except: pass
if is_best:
past_best = glob.glob(os.path.join(os.path.dirname(filename), 'model_best_*.pth.tar'))
for i in past_best:
try: os.remove(i)
except: pass
path_best = os.path.join(os.path.dirname(filename), 'model_best_epoch%s.pth.tar' % str(state['epoch']))
torch.save(state, path_best)
print(f'Updating best model: {path_best}')
def write_log(content, epoch, filename):
if not os.path.exists(filename):
log_file = open(filename, 'w')
else:
log_file = open(filename, 'a')
log_file.write('## Epoch %d:\n' % epoch)
log_file.write('time: %s\n' % str(datetime.now()))
log_file.write(content + '\n\n')
log_file.close()
def calc_topk_accuracy(output, target, topk=(1,)):
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(1 / batch_size))
return res
def calc_accuracy(output, target):
target = target.squeeze()
_, pred = torch.max(output, 1)
return torch.mean((pred == target).float())
def calc_accuracy_binary(output, target):
pred = output > 0
acc = torch.mean((pred == target.byte()).float())
del pred, output, target
return acc
def denorm(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]):
assert len(mean)==len(std)==3
inv_mean = [-mean[i]/std[i] for i in range(3)]
inv_std = [1/i for i in std]
return transforms.Normalize(mean=inv_mean, std=inv_std)
class AverageMeter(object):
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
self.local_history = deque([])
self.local_avg = 0
self.history = []
self.dict = {}
self.save_dict = {}
self.avg_expanded = None
def update(self, val, n=1, history=0, step=5):
is_array = False
if type(val) == torch.Tensor:
if len(val.shape) > 0 and val.shape[0] > 1:
is_array = True
val = val.view(-1).cpu().data.detach().numpy()
else:
val = val.mean().item()
elif type(val) == np.ndarray:
if len(val.shape) > 0 and val.shape[0] > 1:
is_array = True
val = val.reshape(-1)
elif type(val) == float:
pass
else:
raise TypeError(f'{type(val)} type not supported in AverageMeter')
if type(n) == torch.Tensor:
n = n.float().mean().item()
self.val = np.mean(val)
self.sum += val * n
self.count += n
if is_array:
self.avg_expanded = self.sum / self.count
self.avg = self.avg_expanded.mean()
else:
self.avg = self.sum / self.count
self.avg_expanded = np.array([self.avg])
if history:
self.history.append(val.mean())
if step > 0:
self.local_history.append(val)
if len(self.local_history) > step:
self.local_history.popleft()
self.local_avg = np.average(self.local_history)
def dict_update(self, val, key):
if key in self.dict.keys():
self.dict[key].append(val)
else:
self.dict[key] = [val]
def __len__(self):
return self.count
class AccuracyTable(object):
def __init__(self):
self.dict = {}
def update(self, pred, tar):
pred = torch.squeeze(pred)
tar = torch.squeeze(tar)
for i, j in zip(pred, tar):
i = int(i)
j = int(j)
if j not in self.dict.keys():
self.dict[j] = {'count':0,'correct':0}
self.dict[j]['count'] += 1
if i == j:
self.dict[j]['correct'] += 1
def print_table(self, label):
for key in self.dict.keys():
acc = self.dict[key]['correct'] / self.dict[key]['count']
print('%s: %2d, accuracy: %3d/%3d = %0.6f' \
% (label, key, self.dict[key]['correct'], self.dict[key]['count'], acc))
def neq_load_customized(args, model, pretrained_dict,
                        parts=['backbone', 'agg', 'network_pred', 'hyperbolic_linear', 'network-class'],
                        size_diff=False):
    """Partially load a pretrained state dict into ``model``.

    Copies weights whose keys contain one of ``parts`` (or everything when
    ``parts == 'all'``), skipping keys absent from the model. When
    ``size_diff`` is True, keys whose tensor shapes differ between the
    pretrained dict and the model are skipped instead of raising.
    Loads with ``strict=False`` and returns the (mutated) model.

    NOTE(review): progress is reported through ``print_r``, so output only
    appears on rank <= 0 with ``args.verbose`` set.
    """
    model_dict = model.state_dict()
    tmp = {}
    print_r(args, '\n=======Check Weights Loading======')
    print_r(args, ('loading the following parts:', ', '.join(parts)))
    if parts == 'all':
        if size_diff:
            # keep only keys that exist in the model with a matching shape
            for k, v in pretrained_dict.items():
                if k in model.state_dict() and model.state_dict()[k].shape == v.shape:
                    tmp[k] = v
                else:
                    print_r(args, f'{k} not loaded')
        else:
            tmp = pretrained_dict
    else:
        for part in parts:
            print_r(args, ('loading:', part))
            # NOTE(review): this header is printed once per part (inside the
            # loop) — presumably intentional for per-part sections.
            print_r(args, '\n=======Check Weights Loading======')
            print_r(args, 'Weights not used from pretrained file:')
            for k, v in pretrained_dict.items():
                if part in k:
                    if k in model_dict:
                        # shape check only applies when size_diff is requested
                        if not (size_diff and model.state_dict()[k].shape != v.shape):
                            tmp[k] = v
                    else:
                        print_r(args, k)
            print_r(args, '---------------------------')
            print_r(args, 'Weights not loaded into new model:')
            for k, v in model_dict.items():
                if part in k:
                    if k not in pretrained_dict:
                        print_r(args, k)
    print_r(args, '===================================\n')
    del pretrained_dict
    # Special case: if the checkpoint's time_index embedding is longer than
    # the model's, truncate it to the model's length before loading.
    if 'time_index.weight' in tmp and \
        'time_index' in [a[0].split('.')[0] for a in list(model.named_parameters())] and \
            model.time_index.weight.shape[0] < tmp['time_index.weight'].shape[0]:
        tmp['time_index.weight'].data = tmp['time_index.weight'][:model.time_index.weight.shape[0]].data
    model.load_state_dict(tmp, strict=False)
    return model
def print_r(args, text, print_no_verbose=False):
    """Rank-aware print helper.

    Emits only on the main process (``args.local_rank <= 0``) and only when
    ``args.verbose`` is set or ``print_no_verbose`` forces output. A tuple
    ``text`` is unpacked into a single ``print`` call.
    """
    if args.local_rank > 0:
        return
    if not (args.verbose or print_no_verbose):
        return
    if type(text) is tuple:
        print(*text)
    else:
        print(text)
class ConfusionMeter(object):
    """Accumulates a confusion matrix over batches of predictions.

    Layout: ``mat[prediction][target]`` — targets in columns,
    predictions in rows.
    """
    def __init__(self, num_class):
        self.num_class = num_class
        self.mat = np.zeros((num_class, num_class))
        # populated elsewhere if at all; kept for external callers
        self.precision = []
        self.recall = []
    def update(self, pred, tar):
        """Add a batch of (pred, tar) label pairs to the matrix.

        Accepts tensors on any device; labels are used as integer indices,
        so they must lie in [0, num_class).
        """
        pred, tar = pred.cpu().numpy(), tar.cpu().numpy()
        pred = np.squeeze(pred)
        tar = np.squeeze(tar)
        for p,t in zip(pred.flat, tar.flat):
            self.mat[p][t] += 1
    def print_mat(self):
        """Dump the raw confusion matrix to stdout."""
        print('Confusion Matrix: (target in columns)')
        print(self.mat)
    def plot_mat(self, path, dictionary=None, annotate=False):
        """Render the matrix as an SVG heatmap saved to ``path``.

        Args:
            path: output file path (saved with format='svg').
            dictionary: optional index -> class-name mapping used for tick
                labels.
            annotate: if True, write each cell's count into the heatmap.
        """
        plt.figure(dpi=600)
        plt.imshow(self.mat,
                   cmap=plt.cm.jet,
                   interpolation=None,
                   extent=(0.5, np.shape(self.mat)[0]+0.5, np.shape(self.mat)[1]+0.5, 0.5))
        width, height = self.mat.shape
        if annotate:
            for x in range(width):
                for y in range(height):
                    # xy is (column, row), hence the (y, x) swap; ticks are 1-based
                    plt.annotate(str(int(self.mat[x][y])), xy=(y+1, x+1),
                                 horizontalalignment='center',
                                 verticalalignment='center',
                                 fontsize=8)
        if dictionary is not None:
            plt.xticks([i+1 for i in range(width)],
                       [dictionary[i] for i in range(width)],
                       rotation='vertical')
            plt.yticks([i+1 for i in range(height)],
                       [dictionary[i] for i in range(height)])
        plt.xlabel('Ground Truth')
        plt.ylabel('Prediction')
        plt.colorbar()
        plt.tight_layout()
        plt.savefig(path, format='svg')
        plt.clf()
| true | true |
f73a91c04d2f8f458bc742d94fe68e180a2dc473 | 24,060 | py | Python | featuretools/computational_backends/pandas_backend.py | JunweiPan3013/featuretools | b0c8478f9bf8f46217726e3a32de51e083d98351 | [
"BSD-3-Clause"
] | null | null | null | featuretools/computational_backends/pandas_backend.py | JunweiPan3013/featuretools | b0c8478f9bf8f46217726e3a32de51e083d98351 | [
"BSD-3-Clause"
] | 1 | 2019-03-22T00:30:54.000Z | 2019-03-22T00:30:54.000Z | featuretools/computational_backends/pandas_backend.py | JunweiPan3013/featuretools | b0c8478f9bf8f46217726e3a32de51e083d98351 | [
"BSD-3-Clause"
] | 1 | 2019-03-21T04:36:16.000Z | 2019-03-21T04:36:16.000Z | import cProfile
import logging
import os
import pstats
import sys
import warnings
from datetime import datetime
import numpy as np
import pandas as pd
import pandas.api.types as pdtypes
from future import standard_library
from .base_backend import ComputationalBackend
from .feature_tree import FeatureTree
from featuretools import variable_types
from featuretools.entityset.relationship import Relationship
from featuretools.exceptions import UnknownFeature
from featuretools.primitives import (
    AggregationPrimitive,
    DirectFeature,
    IdentityFeature,
    TransformPrimitive
)
from featuretools.utils.gen_utils import make_tqdm_iterator
# py2/py3 compatibility shims (future package)
standard_library.install_aliases()
# silence numpy polyfit RankWarnings and generic RuntimeWarnings emitted by
# feature primitives during calculation
warnings.simplefilter('ignore', np.RankWarning)
warnings.simplefilter("ignore", category=RuntimeWarning)
logger = logging.getLogger('featuretools.computational_backend')
# profiler logs (see PandasBackend.calculate_all_features) go under ~/prof
ROOT_DIR = os.path.expanduser("~")
class PandasBackend(ComputationalBackend):
    """Feature-calculation backend that materializes feature values with
    pandas DataFrames.

    Holds an EntitySet plus a list of features — all defined on the same
    target entity — and computes their values for requested instance ids,
    ordering work across entities via a FeatureTree.
    """
    def __init__(self, entityset, features):
        # All features must share one target entity; it anchors the output
        # index and the entity-frame slicing below.
        assert len(set(f.entity.id for f in features)) == 1, \
            "Features must all be defined on the same entity"
        self.entityset = entityset
        self.target_eid = features[0].entity.id
        self.features = features
        self.feature_tree = FeatureTree(entityset, features)
    def __sizeof__(self):
        # memory footprint is dominated by the underlying EntitySet
        return self.entityset.__sizeof__()
    def calculate_all_features(self, instance_ids, time_last,
                               training_window=None, profile=False,
                               precalculated_features=None, ignored=None,
                               verbose=False):
        """
        Given a list of instance ids and features with a shared time window,
        generate and return a mapping of instance -> feature values.
        Args:
            instance_ids (list): List of instance id for which to build features.
            time_last (pd.Timestamp): Last allowed time. Data from exactly this
                time not allowed.
            training_window (Timedelta, optional): Data older than
                time_last by more than this will be ignored.
            profile (bool): Enable profiler if True.
            verbose (bool): Print output progress if True.
        Returns:
            pd.DataFrame : Pandas DataFrame of calculated feature values.
                Indexed by instance_ids. Columns in same order as features
                passed in.
        """
        assert len(instance_ids) > 0, "0 instance ids provided"
        self.instance_ids = instance_ids
        self.time_last = time_last
        if self.time_last is None:
            self.time_last = datetime.now()
        # For debugging
        if profile:
            pr = cProfile.Profile()
            pr.enable()
        if precalculated_features is None:
            precalculated_features = {}
        # Access the index to get the filtered data we need
        target_entity = self.entityset[self.target_eid]
        if ignored:
            # TODO: Just want to remove entities if don't have any (sub)features defined
            # on them anymore, rather than recreating
            ordered_entities = FeatureTree(self.entityset, self.features, ignored=ignored).ordered_entities
        else:
            ordered_entities = self.feature_tree.ordered_entities
        necessary_columns = self.feature_tree.necessary_columns
        # eframes_by_filter maps filter_entity_id -> {entity_id: DataFrame}
        eframes_by_filter = \
            self.entityset.get_pandas_data_slice(filter_entity_ids=ordered_entities,
                                                 index_eid=self.target_eid,
                                                 instances=instance_ids,
                                                 entity_columns=necessary_columns,
                                                 time_last=time_last,
                                                 training_window=training_window,
                                                 verbose=verbose)
        # features marked uses_full_entity need unfiltered ("large") slices
        # of the data in addition to the instance-filtered ones
        large_eframes_by_filter = None
        if any([f.uses_full_entity for f in self.feature_tree.all_features]):
            large_necessary_columns = self.feature_tree.necessary_columns_for_all_values_features
            large_eframes_by_filter = \
                self.entityset.get_pandas_data_slice(filter_entity_ids=ordered_entities,
                                                     index_eid=self.target_eid,
                                                     instances=None,
                                                     entity_columns=large_necessary_columns,
                                                     time_last=time_last,
                                                     training_window=training_window,
                                                     verbose=verbose)
        # Handle an empty time slice by returning a dataframe with defaults
        if eframes_by_filter is None:
            return self.generate_default_df(instance_ids=instance_ids)
        finished_entity_ids = []
        # Populate entity_frames with precalculated features
        if len(precalculated_features) > 0:
            for entity_id, precalc_feature_values in precalculated_features.items():
                if entity_id in eframes_by_filter:
                    frame = eframes_by_filter[entity_id][entity_id]
                    eframes_by_filter[entity_id][entity_id] = pd.merge(frame,
                                                                       precalc_feature_values,
                                                                       left_index=True,
                                                                       right_index=True)
                else:
                    # Only features we're taking from this entity
                    # are precomputed
                    # Make sure the id variable is a column as well as an index
                    entity_id_var = self.entityset[entity_id].index
                    precalc_feature_values[entity_id_var] = precalc_feature_values.index.values
                    eframes_by_filter[entity_id] = {entity_id: precalc_feature_values}
                    finished_entity_ids.append(entity_id)
        # Iterate over the top-level entities (filter entities) in sorted order
        # and calculate all relevant features under each one.
        if verbose:
            total_groups_to_compute = sum(len(group)
                                          for group in self.feature_tree.ordered_feature_groups.values())
            pbar = make_tqdm_iterator(total=total_groups_to_compute,
                                      desc="Computing features",
                                      unit="feature group")
        if verbose:
            pbar.update(0)
        for filter_eid in ordered_entities:
            entity_frames = eframes_by_filter[filter_eid]
            large_entity_frames = None
            if large_eframes_by_filter is not None:
                large_entity_frames = large_eframes_by_filter[filter_eid]
            # update the current set of entity frames with the computed features
            # from previously finished entities
            for eid in finished_entity_ids:
                # only include this frame if it's not from a descendent entity:
                # descendent entity frames will have to be re-calculated.
                # TODO: this check might not be necessary, depending on our
                # constraints
                if not self.entityset.find_backward_path(start_entity_id=filter_eid,
                                                         goal_entity_id=eid):
                    entity_frames[eid] = eframes_by_filter[eid][eid]
                    # TODO: look this over again
                    # precalculated features will only be placed in entity_frames,
                    # and it's possible that that they are the only features computed
                    # for an entity. In this case, the entity won't be present in
                    # large_eframes_by_filter. The relevant lines that this case passes
                    # through are 136-143
                    if (large_eframes_by_filter is not None and
                            eid in large_eframes_by_filter and eid in large_eframes_by_filter[eid]):
                        large_entity_frames[eid] = large_eframes_by_filter[eid][eid]
            if filter_eid in self.feature_tree.ordered_feature_groups:
                for group in self.feature_tree.ordered_feature_groups[filter_eid]:
                    if verbose:
                        pbar.set_postfix({'running': 0})
                    # all features in a group share entity and handler type,
                    # so the first one is representative
                    test_feature = group[0]
                    entity_id = test_feature.entity.id
                    input_frames_type = self.feature_tree.input_frames_type(test_feature)
                    input_frames = large_entity_frames
                    if input_frames_type == "subset_entity_frames":
                        input_frames = entity_frames
                    handler = self._feature_type_handler(test_feature)
                    result_frame = handler(group, input_frames)
                    output_frames_type = self.feature_tree.output_frames_type(test_feature)
                    if output_frames_type in ['full_and_subset_entity_frames', 'subset_entity_frames']:
                        index = entity_frames[entity_id].index
                        # If result_frame came from a uses_full_entity feature,
                        # and the input was large_entity_frames,
                        # then it's possible it doesn't contain some of the features
                        # in the output entity_frames
                        # We thus need to concatenate the existing frame with the result frame,
                        # making sure not to duplicate any columns
                        _result_frame = result_frame.reindex(index)
                        cols_to_keep = [c for c in _result_frame.columns
                                        if c not in entity_frames[entity_id].columns]
                        entity_frames[entity_id] = pd.concat([entity_frames[entity_id],
                                                              _result_frame[cols_to_keep]],
                                                             axis=1)
                    if output_frames_type in ['full_and_subset_entity_frames', 'full_entity_frames']:
                        index = large_entity_frames[entity_id].index
                        _result_frame = result_frame.reindex(index)
                        cols_to_keep = [c for c in _result_frame.columns
                                        if c not in large_entity_frames[entity_id].columns]
                        large_entity_frames[entity_id] = pd.concat([large_entity_frames[entity_id],
                                                                    _result_frame[cols_to_keep]],
                                                                   axis=1)
                    if verbose:
                        pbar.update(1)
            finished_entity_ids.append(filter_eid)
        if verbose:
            pbar.set_postfix({'running': 0})
            pbar.refresh()
            sys.stdout.flush()
            pbar.close()
        # debugging
        if profile:
            pr.disable()
            prof_folder_path = os.path.join(ROOT_DIR, 'prof')
            if not os.path.exists(prof_folder_path):
                os.mkdir(prof_folder_path)
            with open(os.path.join(prof_folder_path, 'inst-%s.log' %
                                   list(instance_ids)[0]), 'w') as f:
                pstats.Stats(pr, stream=f).strip_dirs().sort_stats("cumulative", "tottime").print_stats()
        df = eframes_by_filter[self.target_eid][self.target_eid]
        # fill in empty rows with default values
        missing_ids = [i for i in instance_ids if i not in
                       df[target_entity.index]]
        if missing_ids:
            default_df = self.generate_default_df(instance_ids=missing_ids,
                                                  extra_columns=df.columns)
            df = df.append(default_df, sort=True)
        df.index.name = self.entityset[self.target_eid].index
        # return columns in the same order the features were passed in
        return df[[feat.get_name() for feat in self.features]]
    def generate_default_df(self, instance_ids, extra_columns=None):
        """Build a frame of each feature's default value for instance_ids.

        Used when a time slice is empty or some instances have no data.
        ``extra_columns`` (if given) are added as NaN so the result can be
        appended to an existing frame.
        """
        index_name = self.features[0].entity.index
        default_row = [f.default_value for f in self.features]
        default_cols = [f.get_name() for f in self.features]
        default_matrix = [default_row] * len(instance_ids)
        default_df = pd.DataFrame(default_matrix,
                                  columns=default_cols,
                                  index=instance_ids)
        default_df.index.name = index_name
        if extra_columns is not None:
            for c in extra_columns:
                if c not in default_df.columns:
                    default_df[c] = [np.nan] * len(instance_ids)
        return default_df
    def _feature_type_handler(self, f):
        """Dispatch: return the _calculate_* method for f's primitive type.

        Raises:
            UnknownFeature: if f matches none of the known primitive bases.
        """
        if isinstance(f, TransformPrimitive):
            return self._calculate_transform_features
        elif isinstance(f, DirectFeature):
            return self._calculate_direct_features
        elif isinstance(f, AggregationPrimitive):
            return self._calculate_agg_features
        elif isinstance(f, IdentityFeature):
            return self._calculate_identity_features
        else:
            raise UnknownFeature(u"{} feature unknown".format(f.__class__))
    def _calculate_identity_features(self, features, entity_frames):
        """Identity features are already columns in the entity frame."""
        entity_id = features[0].entity.id
        assert (entity_id in entity_frames and
                features[0].get_name() in entity_frames[entity_id].columns)
        return entity_frames[entity_id]
    def _calculate_transform_features(self, features, entity_frames):
        """Apply each transform feature's function row-wise and add the
        results as new columns on the entity's frame."""
        entity_id = features[0].entity.id
        assert len(set([f.entity.id for f in features])) == 1, \
            "features must share base entity"
        assert entity_id in entity_frames
        frame = entity_frames[entity_id]
        for f in features:
            # handle when no data
            if frame.shape[0] == 0:
                set_default_column(frame, f)
                continue
            # collect only the variables we need for this transformation
            variable_data = [frame[bf.get_name()].values
                             for bf in f.base_features]
            feature_func = f.get_function()
            # apply the function to the relevant dataframe slice and add the
            # feature row to the results dataframe.
            if f.uses_calc_time:
                values = feature_func(*variable_data, time=self.time_last)
            else:
                values = feature_func(*variable_data)
            if isinstance(values, pd.Series):
                values = values.values
            frame[f.get_name()] = list(values)
        return frame
    def _calculate_direct_features(self, features, entity_frames):
        """Pull parent-entity columns down to the child entity via a
        single-hop forward relationship merge."""
        entity_id = features[0].entity.id
        parent_entity_id = features[0].parent_entity.id
        assert entity_id in entity_frames and parent_entity_id in entity_frames
        path = self.entityset.find_forward_path(entity_id, parent_entity_id)
        assert len(path) == 1, \
            "Error calculating DirectFeatures, len(path) > 1"
        parent_df = entity_frames[parent_entity_id]
        child_df = entity_frames[entity_id]
        merge_var = path[0].child_variable.id
        # generate a mapping of old column names (in the parent entity) to
        # new column names (in the child entity) for the merge
        col_map = {path[0].parent_variable.id: merge_var}
        index_as_feature = None
        for f in features:
            if f.base_features[0].get_name() == path[0].parent_variable.id:
                index_as_feature = f
            # Sometimes entityset._add_multigenerational_links adds link variables
            # that would ordinarily get calculated as direct features,
            # so we make sure not to attempt to calculate again
            if f.get_name() in child_df.columns:
                continue
            col_map[f.base_features[0].get_name()] = f.get_name()
        # merge the identity feature from the parent entity into the child
        merge_df = parent_df[list(col_map.keys())].rename(columns=col_map)
        if index_as_feature is not None:
            merge_df.set_index(index_as_feature.get_name(), inplace=True,
                               drop=False)
        else:
            merge_df.set_index(merge_var, inplace=True)
        new_df = pd.merge(left=child_df, right=merge_df,
                          left_on=merge_var, right_index=True,
                          how='left')
        return new_df
    def _calculate_agg_features(self, features, entity_frames):
        """Group the child entity's frame by its link to the parent and
        aggregate each feature, honoring where-clauses, use_previous
        windows, and per-feature default values."""
        test_feature = features[0]
        entity = test_feature.entity
        child_entity = test_feature.base_features[0].entity
        assert entity.id in entity_frames and child_entity.id in entity_frames
        frame = entity_frames[entity.id]
        base_frame = entity_frames[child_entity.id]
        # Sometimes approximate features get computed in a previous filter frame
        # and put in the current one dynamically,
        # so there may be existing features here
        features = [f for f in features if f.get_name()
                    not in frame.columns]
        if not len(features):
            return frame
        # handle where
        where = test_feature.where
        if where is not None and not base_frame.empty:
            base_frame = base_frame.loc[base_frame[where.get_name()]]
        # when no child data, just add all the features to frame with nan
        if base_frame.empty:
            for f in features:
                frame[f.get_name()] = np.nan
        else:
            relationship_path = self.entityset.find_backward_path(entity.id,
                                                                  child_entity.id)
            groupby_var = Relationship._get_link_variable_name(relationship_path)
            # if the use_previous property exists on this feature, include only the
            # instances from the child entity included in that Timedelta
            use_previous = test_feature.use_previous
            if use_previous and not base_frame.empty:
                # Filter by use_previous values
                time_last = self.time_last
                if use_previous.is_absolute():
                    time_first = time_last - use_previous
                    ti = child_entity.time_index
                    if ti is not None:
                        base_frame = base_frame[base_frame[ti] >= time_first]
                else:
                    # count-based window: keep the last n rows per group
                    n = use_previous.value
                    def last_n(df):
                        return df.iloc[-n:]
                    base_frame = base_frame.groupby(groupby_var, observed=True, sort=False).apply(last_n)
            to_agg = {}
            agg_rename = {}
            to_apply = set()
            # apply multivariable and time-dependent features as we find them, and
            # save aggregable features for later
            for f in features:
                if _can_agg(f):
                    variable_id = f.base_features[0].get_name()
                    if variable_id not in to_agg:
                        to_agg[variable_id] = []
                    func = f.get_function()
                    funcname = func
                    if callable(func):
                        funcname = func.__name__
                    to_agg[variable_id].append(func)
                    # this is used below to rename columns that pandas names for us
                    agg_rename[u"{}-{}".format(variable_id, funcname)] = f.get_name()
                    continue
                to_apply.add(f)
            # Apply the non-aggregable functions generate a new dataframe, and merge
            # it with the existing one
            if len(to_apply):
                wrap = agg_wrapper(to_apply, self.time_last)
                # groupby_var can be both the name of the index and a column,
                # to silence pandas warning about ambiguity we explicitly pass
                # the column (in actuality grouping by both index and group would
                # work)
                to_merge = base_frame.groupby(base_frame[groupby_var], observed=True, sort=False).apply(wrap)
                frame = pd.merge(left=frame, right=to_merge,
                                 left_index=True,
                                 right_index=True, how='left')
            # Apply the aggregate functions to generate a new dataframe, and merge
            # it with the existing one
            if len(to_agg):
                # groupby_var can be both the name of the index and a column,
                # to silence pandas warning about ambiguity we explicitly pass
                # the column (in actuality grouping by both index and group would
                # work)
                to_merge = base_frame.groupby(base_frame[groupby_var],
                                              observed=True, sort=False).agg(to_agg)
                # rename columns to the correct feature names
                to_merge.columns = [agg_rename["-".join(x)] for x in to_merge.columns.ravel()]
                to_merge = to_merge[list(agg_rename.values())]
                # workaround for pandas bug where categories are in the wrong order
                # see: https://github.com/pandas-dev/pandas/issues/22501
                if pdtypes.is_categorical_dtype(frame.index):
                    categories = pdtypes.CategoricalDtype(categories=frame.index.categories)
                    to_merge.index = to_merge.index.astype(object).astype(categories)
                frame = pd.merge(left=frame, right=to_merge,
                                 left_index=True, right_index=True, how='left')
        # Handle default values
        # 1. handle non scalar default values
        iterfeats = [f for f in features
                     if hasattr(f.default_value, '__iter__')]
        for f in iterfeats:
            nulls = pd.isnull(frame[f.get_name()])
            for ni in nulls[nulls].index:
                frame.at[ni, f.get_name()] = f.default_value
        # 2. handle scalars default values
        fillna_dict = {f.get_name(): f.default_value for f in features
                       if f not in iterfeats}
        frame.fillna(fillna_dict, inplace=True)
        # convert boolean dtypes to floats as appropriate
        # pandas behavior: https://github.com/pydata/pandas/issues/3752
        for f in features:
            if (not f.expanding and
                    f.variable_type == variable_types.Numeric and
                    frame[f.get_name()].dtype.name in ['object', 'bool']):
                frame[f.get_name()] = frame[f.get_name()].astype(float)
        return frame
def _can_agg(feature):
    """Return True when ``feature`` can go through pandas' fast groupby-agg
    path: exactly one (non-where) base feature, no calc-time dependency,
    and not an expanding feature."""
    assert isinstance(feature, AggregationPrimitive)
    relevant = feature.base_features
    if feature.where is not None:
        # the where-clause variable doesn't count as an aggregation input
        where_name = feature.where.get_name()
        relevant = [bf.get_name() for bf in relevant
                    if bf.get_name() != where_name]
    if feature.uses_calc_time:
        return False
    if feature.expanding:
        return False
    return len(relevant) == 1
def agg_wrapper(feats, time_last):
    """Build a function suitable for ``groupby(...).apply`` that evaluates
    every feature in ``feats`` on a group's sub-frame and returns the
    results as a pd.Series keyed by feature name. ``time_last`` is passed
    as ``time=`` to features that use calculation time."""
    def wrap(df):
        computed = {}
        for feat in feats:
            func = feat.get_function()
            columns = [df[bf.get_name()] for bf in feat.base_features]
            kwargs = {'time': time_last} if feat.uses_calc_time else {}
            computed[feat.get_name()] = func(*columns, **kwargs)
        return pd.Series(computed)
    return wrap
def set_default_column(frame, f):
    """Add feature ``f``'s column to ``frame`` filled with its default
    value. Iterable defaults are repeated per row (one copy per row)
    rather than broadcast element-wise by pandas."""
    fill = f.default_value
    if hasattr(fill, '__iter__'):
        fill = [f.default_value] * frame.shape[0]
    frame[f.get_name()] = fill
| 45.140713 | 109 | 0.577764 | import cProfile
import logging
import os
import pstats
import sys
import warnings
from datetime import datetime
import numpy as np
import pandas as pd
import pandas.api.types as pdtypes
from future import standard_library
from .base_backend import ComputationalBackend
from .feature_tree import FeatureTree
from featuretools import variable_types
from featuretools.entityset.relationship import Relationship
from featuretools.exceptions import UnknownFeature
from featuretools.primitives import (
AggregationPrimitive,
DirectFeature,
IdentityFeature,
TransformPrimitive
)
from featuretools.utils.gen_utils import make_tqdm_iterator
# py2/py3 compatibility shims (future package)
standard_library.install_aliases()
# silence numpy polyfit RankWarnings and generic RuntimeWarnings emitted by
# feature primitives during calculation
warnings.simplefilter('ignore', np.RankWarning)
warnings.simplefilter("ignore", category=RuntimeWarning)
logger = logging.getLogger('featuretools.computational_backend')
# profiler logs (see PandasBackend.calculate_all_features) go under ~/prof
ROOT_DIR = os.path.expanduser("~")
class PandasBackend(ComputationalBackend):
def __init__(self, entityset, features):
assert len(set(f.entity.id for f in features)) == 1, \
"Features must all be defined on the same entity"
self.entityset = entityset
self.target_eid = features[0].entity.id
self.features = features
self.feature_tree = FeatureTree(entityset, features)
def __sizeof__(self):
return self.entityset.__sizeof__()
def calculate_all_features(self, instance_ids, time_last,
training_window=None, profile=False,
precalculated_features=None, ignored=None,
verbose=False):
assert len(instance_ids) > 0, "0 instance ids provided"
self.instance_ids = instance_ids
self.time_last = time_last
if self.time_last is None:
self.time_last = datetime.now()
if profile:
pr = cProfile.Profile()
pr.enable()
if precalculated_features is None:
precalculated_features = {}
target_entity = self.entityset[self.target_eid]
if ignored:
# on them anymore, rather than recreating
ordered_entities = FeatureTree(self.entityset, self.features, ignored=ignored).ordered_entities
else:
ordered_entities = self.feature_tree.ordered_entities
necessary_columns = self.feature_tree.necessary_columns
eframes_by_filter = \
self.entityset.get_pandas_data_slice(filter_entity_ids=ordered_entities,
index_eid=self.target_eid,
instances=instance_ids,
entity_columns=necessary_columns,
time_last=time_last,
training_window=training_window,
verbose=verbose)
large_eframes_by_filter = None
if any([f.uses_full_entity for f in self.feature_tree.all_features]):
large_necessary_columns = self.feature_tree.necessary_columns_for_all_values_features
large_eframes_by_filter = \
self.entityset.get_pandas_data_slice(filter_entity_ids=ordered_entities,
index_eid=self.target_eid,
instances=None,
entity_columns=large_necessary_columns,
time_last=time_last,
training_window=training_window,
verbose=verbose)
# Handle an empty time slice by returning a dataframe with defaults
if eframes_by_filter is None:
return self.generate_default_df(instance_ids=instance_ids)
finished_entity_ids = []
# Populate entity_frames with precalculated features
if len(precalculated_features) > 0:
for entity_id, precalc_feature_values in precalculated_features.items():
if entity_id in eframes_by_filter:
frame = eframes_by_filter[entity_id][entity_id]
eframes_by_filter[entity_id][entity_id] = pd.merge(frame,
precalc_feature_values,
left_index=True,
right_index=True)
else:
# Only features we're taking from this entity
entity_id_var = self.entityset[entity_id].index
precalc_feature_values[entity_id_var] = precalc_feature_values.index.values
eframes_by_filter[entity_id] = {entity_id: precalc_feature_values}
finished_entity_ids.append(entity_id)
if verbose:
total_groups_to_compute = sum(len(group)
for group in self.feature_tree.ordered_feature_groups.values())
pbar = make_tqdm_iterator(total=total_groups_to_compute,
desc="Computing features",
unit="feature group")
if verbose:
pbar.update(0)
for filter_eid in ordered_entities:
entity_frames = eframes_by_filter[filter_eid]
large_entity_frames = None
if large_eframes_by_filter is not None:
large_entity_frames = large_eframes_by_filter[filter_eid]
for eid in finished_entity_ids:
# descendent entity frames will have to be re-calculated.
# TODO: this check might not be necessary, depending on our
# constraints
if not self.entityset.find_backward_path(start_entity_id=filter_eid,
goal_entity_id=eid):
entity_frames[eid] = eframes_by_filter[eid][eid]
# TODO: look this over again
# precalculated features will only be placed in entity_frames,
# and it's possible that that they are the only features computed
# large_eframes_by_filter. The relevant lines that this case passes
# through are 136-143
if (large_eframes_by_filter is not None and
eid in large_eframes_by_filter and eid in large_eframes_by_filter[eid]):
large_entity_frames[eid] = large_eframes_by_filter[eid][eid]
if filter_eid in self.feature_tree.ordered_feature_groups:
for group in self.feature_tree.ordered_feature_groups[filter_eid]:
if verbose:
pbar.set_postfix({'running': 0})
test_feature = group[0]
entity_id = test_feature.entity.id
input_frames_type = self.feature_tree.input_frames_type(test_feature)
input_frames = large_entity_frames
if input_frames_type == "subset_entity_frames":
input_frames = entity_frames
handler = self._feature_type_handler(test_feature)
result_frame = handler(group, input_frames)
output_frames_type = self.feature_tree.output_frames_type(test_feature)
if output_frames_type in ['full_and_subset_entity_frames', 'subset_entity_frames']:
index = entity_frames[entity_id].index
# If result_frame came from a uses_full_entity feature,
# and the input was large_entity_frames,
# then it's possible it doesn't contain some of the features
# in the output entity_frames
# We thus need to concatenate the existing frame with the result frame,
# making sure not to duplicate any columns
_result_frame = result_frame.reindex(index)
cols_to_keep = [c for c in _result_frame.columns
if c not in entity_frames[entity_id].columns]
entity_frames[entity_id] = pd.concat([entity_frames[entity_id],
_result_frame[cols_to_keep]],
axis=1)
if output_frames_type in ['full_and_subset_entity_frames', 'full_entity_frames']:
index = large_entity_frames[entity_id].index
_result_frame = result_frame.reindex(index)
cols_to_keep = [c for c in _result_frame.columns
if c not in large_entity_frames[entity_id].columns]
large_entity_frames[entity_id] = pd.concat([large_entity_frames[entity_id],
_result_frame[cols_to_keep]],
axis=1)
if verbose:
pbar.update(1)
finished_entity_ids.append(filter_eid)
if verbose:
pbar.set_postfix({'running': 0})
pbar.refresh()
sys.stdout.flush()
pbar.close()
# debugging
if profile:
pr.disable()
prof_folder_path = os.path.join(ROOT_DIR, 'prof')
if not os.path.exists(prof_folder_path):
os.mkdir(prof_folder_path)
with open(os.path.join(prof_folder_path, 'inst-%s.log' %
list(instance_ids)[0]), 'w') as f:
pstats.Stats(pr, stream=f).strip_dirs().sort_stats("cumulative", "tottime").print_stats()
df = eframes_by_filter[self.target_eid][self.target_eid]
# fill in empty rows with default values
missing_ids = [i for i in instance_ids if i not in
df[target_entity.index]]
if missing_ids:
default_df = self.generate_default_df(instance_ids=missing_ids,
extra_columns=df.columns)
df = df.append(default_df, sort=True)
df.index.name = self.entityset[self.target_eid].index
return df[[feat.get_name() for feat in self.features]]
def generate_default_df(self, instance_ids, extra_columns=None):
index_name = self.features[0].entity.index
default_row = [f.default_value for f in self.features]
default_cols = [f.get_name() for f in self.features]
default_matrix = [default_row] * len(instance_ids)
default_df = pd.DataFrame(default_matrix,
columns=default_cols,
index=instance_ids)
default_df.index.name = index_name
if extra_columns is not None:
for c in extra_columns:
if c not in default_df.columns:
default_df[c] = [np.nan] * len(instance_ids)
return default_df
def _feature_type_handler(self, f):
if isinstance(f, TransformPrimitive):
return self._calculate_transform_features
elif isinstance(f, DirectFeature):
return self._calculate_direct_features
elif isinstance(f, AggregationPrimitive):
return self._calculate_agg_features
elif isinstance(f, IdentityFeature):
return self._calculate_identity_features
else:
raise UnknownFeature(u"{} feature unknown".format(f.__class__))
def _calculate_identity_features(self, features, entity_frames):
entity_id = features[0].entity.id
assert (entity_id in entity_frames and
features[0].get_name() in entity_frames[entity_id].columns)
return entity_frames[entity_id]
def _calculate_transform_features(self, features, entity_frames):
entity_id = features[0].entity.id
assert len(set([f.entity.id for f in features])) == 1, \
"features must share base entity"
assert entity_id in entity_frames
frame = entity_frames[entity_id]
for f in features:
# handle when no data
if frame.shape[0] == 0:
set_default_column(frame, f)
continue
# collect only the variables we need for this transformation
variable_data = [frame[bf.get_name()].values
for bf in f.base_features]
feature_func = f.get_function()
# apply the function to the relevant dataframe slice and add the
# feature row to the results dataframe.
if f.uses_calc_time:
values = feature_func(*variable_data, time=self.time_last)
else:
values = feature_func(*variable_data)
if isinstance(values, pd.Series):
values = values.values
frame[f.get_name()] = list(values)
return frame
def _calculate_direct_features(self, features, entity_frames):
entity_id = features[0].entity.id
parent_entity_id = features[0].parent_entity.id
assert entity_id in entity_frames and parent_entity_id in entity_frames
path = self.entityset.find_forward_path(entity_id, parent_entity_id)
assert len(path) == 1, \
"Error calculating DirectFeatures, len(path) > 1"
parent_df = entity_frames[parent_entity_id]
child_df = entity_frames[entity_id]
merge_var = path[0].child_variable.id
# generate a mapping of old column names (in the parent entity) to
# new column names (in the child entity) for the merge
col_map = {path[0].parent_variable.id: merge_var}
index_as_feature = None
for f in features:
if f.base_features[0].get_name() == path[0].parent_variable.id:
index_as_feature = f
# Sometimes entityset._add_multigenerational_links adds link variables
# that would ordinarily get calculated as direct features,
# so we make sure not to attempt to calculate again
if f.get_name() in child_df.columns:
continue
col_map[f.base_features[0].get_name()] = f.get_name()
# merge the identity feature from the parent entity into the child
merge_df = parent_df[list(col_map.keys())].rename(columns=col_map)
if index_as_feature is not None:
merge_df.set_index(index_as_feature.get_name(), inplace=True,
drop=False)
else:
merge_df.set_index(merge_var, inplace=True)
new_df = pd.merge(left=child_df, right=merge_df,
left_on=merge_var, right_index=True,
how='left')
return new_df
def _calculate_agg_features(self, features, entity_frames):
test_feature = features[0]
entity = test_feature.entity
child_entity = test_feature.base_features[0].entity
assert entity.id in entity_frames and child_entity.id in entity_frames
frame = entity_frames[entity.id]
base_frame = entity_frames[child_entity.id]
# Sometimes approximate features get computed in a previous filter frame
# and put in the current one dynamically,
# so there may be existing features here
features = [f for f in features if f.get_name()
not in frame.columns]
if not len(features):
return frame
# handle where
where = test_feature.where
if where is not None and not base_frame.empty:
base_frame = base_frame.loc[base_frame[where.get_name()]]
# when no child data, just add all the features to frame with nan
if base_frame.empty:
for f in features:
frame[f.get_name()] = np.nan
else:
relationship_path = self.entityset.find_backward_path(entity.id,
child_entity.id)
groupby_var = Relationship._get_link_variable_name(relationship_path)
# if the use_previous property exists on this feature, include only the
# instances from the child entity included in that Timedelta
use_previous = test_feature.use_previous
if use_previous and not base_frame.empty:
# Filter by use_previous values
time_last = self.time_last
if use_previous.is_absolute():
time_first = time_last - use_previous
ti = child_entity.time_index
if ti is not None:
base_frame = base_frame[base_frame[ti] >= time_first]
else:
n = use_previous.value
def last_n(df):
return df.iloc[-n:]
base_frame = base_frame.groupby(groupby_var, observed=True, sort=False).apply(last_n)
to_agg = {}
agg_rename = {}
to_apply = set()
# apply multivariable and time-dependent features as we find them, and
# save aggregable features for later
for f in features:
if _can_agg(f):
variable_id = f.base_features[0].get_name()
if variable_id not in to_agg:
to_agg[variable_id] = []
func = f.get_function()
funcname = func
if callable(func):
funcname = func.__name__
to_agg[variable_id].append(func)
# this is used below to rename columns that pandas names for us
agg_rename[u"{}-{}".format(variable_id, funcname)] = f.get_name()
continue
to_apply.add(f)
# Apply the non-aggregable functions generate a new dataframe, and merge
# it with the existing one
if len(to_apply):
wrap = agg_wrapper(to_apply, self.time_last)
# groupby_var can be both the name of the index and a column,
# to silence pandas warning about ambiguity we explicitly pass
# the column (in actuality grouping by both index and group would
# work)
to_merge = base_frame.groupby(base_frame[groupby_var], observed=True, sort=False).apply(wrap)
frame = pd.merge(left=frame, right=to_merge,
left_index=True,
right_index=True, how='left')
# Apply the aggregate functions to generate a new dataframe, and merge
# it with the existing one
if len(to_agg):
# groupby_var can be both the name of the index and a column,
# to silence pandas warning about ambiguity we explicitly pass
# the column (in actuality grouping by both index and group would
# work)
to_merge = base_frame.groupby(base_frame[groupby_var],
observed=True, sort=False).agg(to_agg)
# rename columns to the correct feature names
to_merge.columns = [agg_rename["-".join(x)] for x in to_merge.columns.ravel()]
to_merge = to_merge[list(agg_rename.values())]
# workaround for pandas bug where categories are in the wrong order
# see: https://github.com/pandas-dev/pandas/issues/22501
if pdtypes.is_categorical_dtype(frame.index):
categories = pdtypes.CategoricalDtype(categories=frame.index.categories)
to_merge.index = to_merge.index.astype(object).astype(categories)
frame = pd.merge(left=frame, right=to_merge,
left_index=True, right_index=True, how='left')
# Handle default values
# 1. handle non scalar default values
iterfeats = [f for f in features
if hasattr(f.default_value, '__iter__')]
for f in iterfeats:
nulls = pd.isnull(frame[f.get_name()])
for ni in nulls[nulls].index:
frame.at[ni, f.get_name()] = f.default_value
# 2. handle scalars default values
fillna_dict = {f.get_name(): f.default_value for f in features
if f not in iterfeats}
frame.fillna(fillna_dict, inplace=True)
# convert boolean dtypes to floats as appropriate
# pandas behavior: https://github.com/pydata/pandas/issues/3752
for f in features:
if (not f.expanding and
f.variable_type == variable_types.Numeric and
frame[f.get_name()].dtype.name in ['object', 'bool']):
frame[f.get_name()] = frame[f.get_name()].astype(float)
return frame
def _can_agg(feature):
assert isinstance(feature, AggregationPrimitive)
base_features = feature.base_features
if feature.where is not None:
base_features = [bf.get_name() for bf in base_features
if bf.get_name() != feature.where.get_name()]
if feature.uses_calc_time:
return False
return len(base_features) == 1 and not feature.expanding
def agg_wrapper(feats, time_last):
def wrap(df):
d = {}
for f in feats:
func = f.get_function()
variable_ids = [bf.get_name() for bf in f.base_features]
args = [df[v] for v in variable_ids]
if f.uses_calc_time:
d[f.get_name()] = func(*args, time=time_last)
else:
d[f.get_name()] = func(*args)
return pd.Series(d)
return wrap
def set_default_column(frame, f):
default = f.default_value
if hasattr(default, '__iter__'):
length = frame.shape[0]
default = [f.default_value] * length
frame[f.get_name()] = default
| true | true |
f73a92eabd4b8f52e5bc4fd68822c6777c4d54e1 | 624 | py | Python | LeetCode/1-1000/201-300/226-250/226. Invert Binary Tree/solution-python.py | adubois85/coding_challenge_websites | 7867a05847a216661eff3b24b1cb1480fb7d3030 | [
"Apache-2.0"
] | null | null | null | LeetCode/1-1000/201-300/226-250/226. Invert Binary Tree/solution-python.py | adubois85/coding_challenge_websites | 7867a05847a216661eff3b24b1cb1480fb7d3030 | [
"Apache-2.0"
] | null | null | null | LeetCode/1-1000/201-300/226-250/226. Invert Binary Tree/solution-python.py | adubois85/coding_challenge_websites | 7867a05847a216661eff3b24b1cb1480fb7d3030 | [
"Apache-2.0"
] | null | null | null | from typing import Optional
# Definition for a binary tree node.
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution:
# recursive solution
# Depth-first search going right, then left
def invertTree(self, root: Optional[TreeNode]) -> Optional[TreeNode]:
if root is None:
return root
if root.right:
self.invertTree(root.right)
if root.left:
self.invertTree(root.left)
root.left, root.right = root.right, root.left
return root
| 26 | 73 | 0.61859 | from typing import Optional
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution:
def invertTree(self, root: Optional[TreeNode]) -> Optional[TreeNode]:
if root is None:
return root
if root.right:
self.invertTree(root.right)
if root.left:
self.invertTree(root.left)
root.left, root.right = root.right, root.left
return root
| true | true |
f73a9364534513170eb1a8a9313208ab6c8f5fa6 | 3,727 | py | Python | AcrobatLocalAutosave.py | fmoldenhauer/AcrobatLocalAutosave | e93df3147d766400e1637e905e29c0a49540f1ba | [
"MIT"
] | null | null | null | AcrobatLocalAutosave.py | fmoldenhauer/AcrobatLocalAutosave | e93df3147d766400e1637e905e29c0a49540f1ba | [
"MIT"
] | null | null | null | AcrobatLocalAutosave.py | fmoldenhauer/AcrobatLocalAutosave | e93df3147d766400e1637e905e29c0a49540f1ba | [
"MIT"
] | null | null | null | import os
import signal
import sys
import threading
import time
import traceback
from datetime import datetime
from win32com.client import constants
from win32com.client.gencache import EnsureDispatch
def getdocsfolder():
# Gets local user document folder and appends 'Autosaves'
oshell = EnsureDispatch("Wscript.Shell")
docs = oshell.SpecialFolders("MyDocuments")
directory = os.path.join(docs, "autosaves")
os.makedirs(directory, exist_ok=True)
return directory
def clearoutput():
os.system('cls')
print('Press CTRL-C to exit or change autosave interval')
def savecurrentopen(savedirectory):
# main function to save currently open pdfs
acrobat = EnsureDispatch('AcroExch.App') # access acrobat COM server
num = acrobat.GetNumAVDocs() # Get number of open PDFs
i = 0
now = datetime.now()
timestr = now.strftime("On %m/%d/%Y, at %H:%M:%S")
filelist = []
print(timestr)
while i < num:
doc = acrobat.GetAVDoc(i) # gets acrobats open windows
pd = doc.GetPDDoc() # gets underlying pdfs
name = pd.GetFileName()
if name in filelist: #bruteforce for saving docs with identical file names. need to update.
now = datetime.now()
name = name[:(len(name) - 4)] + now.strftime('%H-%M-%S.pdf')
filelist.append(name)
time.sleep(1)
pd.Save(constants.PDSaveCopy | constants.PDSaveFull, os.path.join(savedirectory, name))
print("Saved " + str(os.path.join(savedirectory, name)))
i += 1
class SignalHandler:
def __init__(self):
self.event = threading.Event()
def sig_handler(self, signal, frame):
self.event.set() #break old loop
response = input('Enter Autosave interval in seconds or press enter to exit: ')
if response.isnumeric():
newinterval = int(response)
print('Changed interval to ' + response)
cleanold()
self.event.clear()
while not s.event.isSet(): #new loop with user input interval
mainloop()
cleanold(age=newinterval*5)
print('')
print(str(int(round(newinterval / 60))) + ' minute(s) until next save', end='\r')
timer = 0
while timer < newinterval:
s.event.wait(1)
timer += 1
if (newinterval - timer) % 60 == 0:
print(str(int((newinterval - timer) / 60)) + ' minute(s) until next save ', end='\r')
def mainloop():
directory = getdocsfolder()
clearoutput()
savecurrentopen(directory)
def cleanold(age=3000):
directory = getdocsfolder()
onlyfiles = [f for f in os.listdir(directory) if os.path.isfile(os.path.join(directory, f))]
for file in onlyfiles:
path = os.path.join(directory, file)
x = os.stat(path)
Result = (time.time() - x.st_mtime)
if Result > age:
os.remove(path)
def show_exception(exc_type, exc_value, tb):
traceback.print_exception(exc_type, exc_value, tb)
input("Press key to exit.")
sys.exit(-1)
if __name__ == '__main__':
s = SignalHandler()
signal.signal(signal.SIGINT, s.sig_handler)
sys.excepthook = show_exception
while not s.event.isSet():
mainloop()
cleanold()
timer = 0
print('')
print('10 minute(s) until next save', end='\r')
while timer < 600:
s.event.wait(1)
timer += 1
if timer % 60 == 0:
print(str(int((600 - timer) / 60)) + ' minute(s) until next save ', end='\r')
os.system('cls')
print("Exiting...")
time.sleep(5)
| 31.058333 | 112 | 0.597532 | import os
import signal
import sys
import threading
import time
import traceback
from datetime import datetime
from win32com.client import constants
from win32com.client.gencache import EnsureDispatch
def getdocsfolder():
oshell = EnsureDispatch("Wscript.Shell")
docs = oshell.SpecialFolders("MyDocuments")
directory = os.path.join(docs, "autosaves")
os.makedirs(directory, exist_ok=True)
return directory
def clearoutput():
os.system('cls')
print('Press CTRL-C to exit or change autosave interval')
def savecurrentopen(savedirectory):
acrobat = EnsureDispatch('AcroExch.App')
num = acrobat.GetNumAVDocs()
i = 0
now = datetime.now()
timestr = now.strftime("On %m/%d/%Y, at %H:%M:%S")
filelist = []
print(timestr)
while i < num:
doc = acrobat.GetAVDoc(i)
pd = doc.GetPDDoc()
name = pd.GetFileName()
if name in filelist:
now = datetime.now()
name = name[:(len(name) - 4)] + now.strftime('%H-%M-%S.pdf')
filelist.append(name)
time.sleep(1)
pd.Save(constants.PDSaveCopy | constants.PDSaveFull, os.path.join(savedirectory, name))
print("Saved " + str(os.path.join(savedirectory, name)))
i += 1
class SignalHandler:
def __init__(self):
self.event = threading.Event()
def sig_handler(self, signal, frame):
self.event.set()
response = input('Enter Autosave interval in seconds or press enter to exit: ')
if response.isnumeric():
newinterval = int(response)
print('Changed interval to ' + response)
cleanold()
self.event.clear()
while not s.event.isSet():
mainloop()
cleanold(age=newinterval*5)
print('')
print(str(int(round(newinterval / 60))) + ' minute(s) until next save', end='\r')
timer = 0
while timer < newinterval:
s.event.wait(1)
timer += 1
if (newinterval - timer) % 60 == 0:
print(str(int((newinterval - timer) / 60)) + ' minute(s) until next save ', end='\r')
def mainloop():
directory = getdocsfolder()
clearoutput()
savecurrentopen(directory)
def cleanold(age=3000):
directory = getdocsfolder()
onlyfiles = [f for f in os.listdir(directory) if os.path.isfile(os.path.join(directory, f))]
for file in onlyfiles:
path = os.path.join(directory, file)
x = os.stat(path)
Result = (time.time() - x.st_mtime)
if Result > age:
os.remove(path)
def show_exception(exc_type, exc_value, tb):
traceback.print_exception(exc_type, exc_value, tb)
input("Press key to exit.")
sys.exit(-1)
if __name__ == '__main__':
s = SignalHandler()
signal.signal(signal.SIGINT, s.sig_handler)
sys.excepthook = show_exception
while not s.event.isSet():
mainloop()
cleanold()
timer = 0
print('')
print('10 minute(s) until next save', end='\r')
while timer < 600:
s.event.wait(1)
timer += 1
if timer % 60 == 0:
print(str(int((600 - timer) / 60)) + ' minute(s) until next save ', end='\r')
os.system('cls')
print("Exiting...")
time.sleep(5)
| true | true |
f73a9369bccd1cfb6478e9521b02649fb0d2e321 | 9,148 | py | Python | lib/utils.py | wdzhong/ASTGCN-PyTorch | 4f76d2302b6fd4227c4846e06ff11560d8a8237b | [
"MIT"
] | 17 | 2020-06-20T02:25:05.000Z | 2022-03-27T11:58:50.000Z | lib/utils.py | Neoyanghc/ASTGCN-PyTorch | 4f76d2302b6fd4227c4846e06ff11560d8a8237b | [
"MIT"
] | 1 | 2020-03-05T13:48:31.000Z | 2020-03-05T13:48:31.000Z | lib/utils.py | Neoyanghc/ASTGCN-PyTorch | 4f76d2302b6fd4227c4846e06ff11560d8a8237b | [
"MIT"
] | 8 | 2020-05-14T02:18:30.000Z | 2021-07-20T12:27:03.000Z | # -*- coding:utf-8 -*-
# pylint: disable=no-member
import csv
import numpy as np
from scipy.sparse.linalg import eigs
from .metrics import mean_absolute_error, mean_squared_error, masked_mape_np
def search_data(sequence_length, num_of_batches, label_start_idx,
num_for_predict, units, points_per_hour):
'''
Parameters
----------
sequence_length: int, length of all history data
num_of_batches: int, the number of batches will be used for training
label_start_idx: int, the first index of predicting target
num_for_predict: int,
the number of points will be predicted for each sample
units: int, week: 7 * 24, day: 24, recent(hour): 1
points_per_hour: int, number of points per hour, depends on data
Returns
----------
list[(start_idx, end_idx)]
'''
if points_per_hour < 0:
raise ValueError("points_per_hour should be greater than 0!")
if label_start_idx + num_for_predict > sequence_length:
return None
x_idx = []
for i in range(1, num_of_batches + 1):
start_idx = label_start_idx - points_per_hour * units * i
end_idx = start_idx + num_for_predict # wd: this could overlap with 'label_start_index', e.g. when num_for_predict is larger than 12 (one hour)
if start_idx >= 0:
x_idx.append((start_idx, end_idx))
else:
return None
if len(x_idx) != num_of_batches:
return None
return x_idx[::-1]
def get_sample_indices(data_sequence, num_of_weeks, num_of_days, num_of_hours,
label_start_idx, num_for_predict, points_per_hour=12):
"""
Parameters
----------
data_sequence: np.ndarray
shape is (sequence_length, num_of_vertices, num_of_features)
num_of_weeks, num_of_days, num_of_hours: int
label_start_idx: int, the first index of predicting target
num_for_predict: int,
the number of points will be predicted for each sample
points_per_hour: int, default 12, number of points per hour
Returns
----------
week_sample: np.ndarray
shape is (num_of_weeks * points_per_hour, # wd: points_per_hour should be num_for_predict??
num_of_vertices, num_of_features)
day_sample: np.ndarray
shape is (num_of_days * points_per_hour,
num_of_vertices, num_of_features)
hour_sample: np.ndarray
shape is (num_of_hours * points_per_hour,
num_of_vertices, num_of_features)
target: np.ndarray
shape is (num_for_predict, num_of_vertices, num_of_features)
"""
week_indices = search_data(data_sequence.shape[0], num_of_weeks,
label_start_idx, num_for_predict,
7 * 24, points_per_hour)
if not week_indices:
return None
day_indices = search_data(data_sequence.shape[0], num_of_days,
label_start_idx, num_for_predict,
24, points_per_hour)
if not day_indices:
return None
hour_indices = search_data(data_sequence.shape[0], num_of_hours,
label_start_idx, num_for_predict,
1, points_per_hour)
if not hour_indices:
return None
week_sample = np.concatenate([data_sequence[i: j]
for i, j in week_indices], axis=0)
day_sample = np.concatenate([data_sequence[i: j]
for i, j in day_indices], axis=0)
hour_sample = np.concatenate([data_sequence[i: j]
for i, j in hour_indices], axis=0)
target = data_sequence[label_start_idx: label_start_idx + num_for_predict]
return week_sample, day_sample, hour_sample, target
def get_adjacency_matrix(distance_df_filename, num_of_vertices):
'''
Parameters
----------
distance_df_filename: str, path of the csv file contains edges information
num_of_vertices: int, the number of vertices
Returns
----------
A: np.ndarray, adjacency matrix
'''
with open(distance_df_filename, 'r') as f:
reader = csv.reader(f)
header = f.__next__()
edges = [(int(i[0]), int(i[1])) for i in reader]
A = np.zeros((int(num_of_vertices), int(num_of_vertices)),
dtype=np.float32)
for i, j in edges:
A[i, j] = 1
return A
def scaled_Laplacian(W):
'''
compute \tilde{L}
Parameters
----------
W: np.ndarray, shape is (N, N), N is the num of vertices
Returns
----------
scaled_Laplacian: np.ndarray, shape (N, N)
'''
assert W.shape[0] == W.shape[1]
D = np.diag(np.sum(W, axis=1))
L = D - W
lambda_max = eigs(L, k=1, which='LR')[0].real
return (2 * L) / lambda_max - np.identity(W.shape[0])
def cheb_polynomial(L_tilde, K):
'''
compute a list of chebyshev polynomials from T_0 to T_{K-1}
Parameters
----------
L_tilde: scaled Laplacian, np.ndarray, shape (N, N)
K: the maximum order of chebyshev polynomials
Returns
----------
cheb_polynomials: list[np.ndarray], length: K, from T_0 to T_{K-1}
'''
N = L_tilde.shape[0]
cheb_polynomials = [np.identity(N), L_tilde.copy()]
for i in range(2, K):
cheb_polynomials.append(
2 * L_tilde * cheb_polynomials[i - 1] - cheb_polynomials[i - 2])
return cheb_polynomials
def compute_val_loss(net, val_loader, loss_function, sw, epoch, device):
"""
compute mean loss on validation set
Parameters
----------
net: model
val_loader: DataLoader
loss_function: func
sw: SummaryWriter. TODO: to be implemented
epoch: int, current epoch
"""
val_loader_length = len(val_loader)
tmp = []
for index, (val_w, val_d, val_r, val_t) in enumerate(val_loader):
val_w = val_w.to(device)
val_d = val_d.to(device)
val_r = val_r.to(device)
val_t = val_t.to(device)
output = net([val_w, val_d, val_r])
l = loss_function(output, val_t) # l is a tensor, with single value
tmp.append(l.item())
print('validation batch %s / %s, loss: %.2f' % (
index + 1, val_loader_length, l.item()))
validation_loss = sum(tmp) / len(tmp)
if sw:
sw.add_scalar(tag='validation_loss',
value=validation_loss,
global_step=epoch)
print('epoch: %s, validation loss: %.2f' % (epoch, validation_loss))
def predict(net, test_loader, device):
"""
predict
Parameters
----------
net: model
test_loader: DataLoader
Returns
----------
prediction: np.ndarray,
shape is (num_of_samples, num_of_vertices, num_for_predict)
"""
test_loader_length = len(test_loader)
prediction = []
for index, (test_w, test_d, test_r, _) in enumerate(test_loader):
test_w = test_w.to(device)
test_d = test_d.to(device)
test_r = test_r.to(device)
prediction.append(net([test_w, test_d, test_r]).cpu().numpy())
print('predicting testing set batch %s / %s' % (index + 1, test_loader_length))
prediction = np.concatenate(prediction, 0)
return prediction
def evaluate(net, test_loader, true_value, num_of_vertices, sw, epoch, device):
"""
compute MAE, RMSE, MAPE scores of the prediction
for 3, 6, 12 points on testing set
Parameters
----------
net: model
test_loader: DataLoader
true_value: np.ndarray, all ground truth of testing set
shape is (num_of_samples, num_for_predict, num_of_vertices)
num_of_vertices: int, number of vertices
sw: SummaryWriter. TODO: to be implemented.
epoch: int, current epoch
"""
prediction = predict(net, test_loader, device)
prediction = (prediction.transpose((0, 2, 1))
.reshape(prediction.shape[0], -1))
for i in [3, 6, 12]:
print('current epoch: %s, predict %s points' % (epoch, i))
mae = mean_absolute_error(true_value[:, : i * num_of_vertices],
prediction[:, : i * num_of_vertices])
rmse = mean_squared_error(true_value[:, : i * num_of_vertices],
prediction[:, : i * num_of_vertices]) ** 0.5
mape = masked_mape_np(true_value[:, : i * num_of_vertices],
prediction[:, : i * num_of_vertices], 0)
print('MAE: %.2f' % (mae))
print('RMSE: %.2f' % (rmse))
print('MAPE: %.2f' % (mape))
print()
if sw:
sw.add_scalar(tag='MAE_%s_points' % (i),
value=mae,
global_step=epoch)
sw.add_scalar(tag='RMSE_%s_points' % (i),
value=rmse,
global_step=epoch)
sw.add_scalar(tag='MAPE_%s_points' % (i),
value=mape,
global_step=epoch)
| 29.04127 | 152 | 0.591167 |
import csv
import numpy as np
from scipy.sparse.linalg import eigs
from .metrics import mean_absolute_error, mean_squared_error, masked_mape_np
def search_data(sequence_length, num_of_batches, label_start_idx,
num_for_predict, units, points_per_hour):
if points_per_hour < 0:
raise ValueError("points_per_hour should be greater than 0!")
if label_start_idx + num_for_predict > sequence_length:
return None
x_idx = []
for i in range(1, num_of_batches + 1):
start_idx = label_start_idx - points_per_hour * units * i
end_idx = start_idx + num_for_predict
if start_idx >= 0:
x_idx.append((start_idx, end_idx))
else:
return None
if len(x_idx) != num_of_batches:
return None
return x_idx[::-1]
def get_sample_indices(data_sequence, num_of_weeks, num_of_days, num_of_hours,
label_start_idx, num_for_predict, points_per_hour=12):
week_indices = search_data(data_sequence.shape[0], num_of_weeks,
label_start_idx, num_for_predict,
7 * 24, points_per_hour)
if not week_indices:
return None
day_indices = search_data(data_sequence.shape[0], num_of_days,
label_start_idx, num_for_predict,
24, points_per_hour)
if not day_indices:
return None
hour_indices = search_data(data_sequence.shape[0], num_of_hours,
label_start_idx, num_for_predict,
1, points_per_hour)
if not hour_indices:
return None
week_sample = np.concatenate([data_sequence[i: j]
for i, j in week_indices], axis=0)
day_sample = np.concatenate([data_sequence[i: j]
for i, j in day_indices], axis=0)
hour_sample = np.concatenate([data_sequence[i: j]
for i, j in hour_indices], axis=0)
target = data_sequence[label_start_idx: label_start_idx + num_for_predict]
return week_sample, day_sample, hour_sample, target
def get_adjacency_matrix(distance_df_filename, num_of_vertices):
with open(distance_df_filename, 'r') as f:
reader = csv.reader(f)
header = f.__next__()
edges = [(int(i[0]), int(i[1])) for i in reader]
A = np.zeros((int(num_of_vertices), int(num_of_vertices)),
dtype=np.float32)
for i, j in edges:
A[i, j] = 1
return A
def scaled_Laplacian(W):
assert W.shape[0] == W.shape[1]
D = np.diag(np.sum(W, axis=1))
L = D - W
lambda_max = eigs(L, k=1, which='LR')[0].real
return (2 * L) / lambda_max - np.identity(W.shape[0])
def cheb_polynomial(L_tilde, K):
N = L_tilde.shape[0]
cheb_polynomials = [np.identity(N), L_tilde.copy()]
for i in range(2, K):
cheb_polynomials.append(
2 * L_tilde * cheb_polynomials[i - 1] - cheb_polynomials[i - 2])
return cheb_polynomials
def compute_val_loss(net, val_loader, loss_function, sw, epoch, device):
val_loader_length = len(val_loader)
tmp = []
for index, (val_w, val_d, val_r, val_t) in enumerate(val_loader):
val_w = val_w.to(device)
val_d = val_d.to(device)
val_r = val_r.to(device)
val_t = val_t.to(device)
output = net([val_w, val_d, val_r])
l = loss_function(output, val_t)
tmp.append(l.item())
print('validation batch %s / %s, loss: %.2f' % (
index + 1, val_loader_length, l.item()))
validation_loss = sum(tmp) / len(tmp)
if sw:
sw.add_scalar(tag='validation_loss',
value=validation_loss,
global_step=epoch)
print('epoch: %s, validation loss: %.2f' % (epoch, validation_loss))
def predict(net, test_loader, device):
test_loader_length = len(test_loader)
prediction = []
for index, (test_w, test_d, test_r, _) in enumerate(test_loader):
test_w = test_w.to(device)
test_d = test_d.to(device)
test_r = test_r.to(device)
prediction.append(net([test_w, test_d, test_r]).cpu().numpy())
print('predicting testing set batch %s / %s' % (index + 1, test_loader_length))
prediction = np.concatenate(prediction, 0)
return prediction
def evaluate(net, test_loader, true_value, num_of_vertices, sw, epoch, device):
prediction = predict(net, test_loader, device)
prediction = (prediction.transpose((0, 2, 1))
.reshape(prediction.shape[0], -1))
for i in [3, 6, 12]:
print('current epoch: %s, predict %s points' % (epoch, i))
mae = mean_absolute_error(true_value[:, : i * num_of_vertices],
prediction[:, : i * num_of_vertices])
rmse = mean_squared_error(true_value[:, : i * num_of_vertices],
prediction[:, : i * num_of_vertices]) ** 0.5
mape = masked_mape_np(true_value[:, : i * num_of_vertices],
prediction[:, : i * num_of_vertices], 0)
print('MAE: %.2f' % (mae))
print('RMSE: %.2f' % (rmse))
print('MAPE: %.2f' % (mape))
print()
if sw:
sw.add_scalar(tag='MAE_%s_points' % (i),
value=mae,
global_step=epoch)
sw.add_scalar(tag='RMSE_%s_points' % (i),
value=rmse,
global_step=epoch)
sw.add_scalar(tag='MAPE_%s_points' % (i),
value=mape,
global_step=epoch)
| true | true |
f73a93f354dbfe3d3d9dcc9e0faf39ad2e05048e | 1,939 | py | Python | sdk/resources/azure-mgmt-resource/azure/mgmt/resource/policy/v2018_05_01/models/_paged_models.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | [
"MIT"
] | 1 | 2021-06-02T08:01:35.000Z | 2021-06-02T08:01:35.000Z | sdk/resources/azure-mgmt-resource/azure/mgmt/resource/policy/v2018_05_01/models/_paged_models.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | [
"MIT"
] | 226 | 2019-07-24T07:57:21.000Z | 2019-10-15T01:07:24.000Z | sdk/resources/azure-mgmt-resource/azure/mgmt/resource/policy/v2018_05_01/models/_paged_models.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | [
"MIT"
] | 2 | 2021-05-23T16:46:31.000Z | 2021-05-26T23:51:09.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.paging import Paged
class PolicyAssignmentPaged(Paged):
"""
A paging container for iterating over a list of :class:`PolicyAssignment <azure.mgmt.resource.policy.v2018_05_01.models.PolicyAssignment>` object
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'current_page': {'key': 'value', 'type': '[PolicyAssignment]'}
}
def __init__(self, *args, **kwargs):
super(PolicyAssignmentPaged, self).__init__(*args, **kwargs)
class PolicyDefinitionPaged(Paged):
"""
A paging container for iterating over a list of :class:`PolicyDefinition <azure.mgmt.resource.policy.v2018_05_01.models.PolicyDefinition>` object
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'current_page': {'key': 'value', 'type': '[PolicyDefinition]'}
}
def __init__(self, *args, **kwargs):
super(PolicyDefinitionPaged, self).__init__(*args, **kwargs)
class PolicySetDefinitionPaged(Paged):
"""
A paging container for iterating over a list of :class:`PolicySetDefinition <azure.mgmt.resource.policy.v2018_05_01.models.PolicySetDefinition>` object
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'current_page': {'key': 'value', 'type': '[PolicySetDefinition]'}
}
def __init__(self, *args, **kwargs):
super(PolicySetDefinitionPaged, self).__init__(*args, **kwargs)
| 35.907407 | 155 | 0.623517 |
from msrest.paging import Paged
class PolicyAssignmentPaged(Paged):
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'current_page': {'key': 'value', 'type': '[PolicyAssignment]'}
}
def __init__(self, *args, **kwargs):
super(PolicyAssignmentPaged, self).__init__(*args, **kwargs)
class PolicyDefinitionPaged(Paged):
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'current_page': {'key': 'value', 'type': '[PolicyDefinition]'}
}
def __init__(self, *args, **kwargs):
super(PolicyDefinitionPaged, self).__init__(*args, **kwargs)
class PolicySetDefinitionPaged(Paged):
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'current_page': {'key': 'value', 'type': '[PolicySetDefinition]'}
}
def __init__(self, *args, **kwargs):
super(PolicySetDefinitionPaged, self).__init__(*args, **kwargs)
| true | true |
f73a9408751376983aedf679f68c28c3da0028c9 | 140 | py | Python | config.py | bsoyka/sunset-bot | ea05000e52e1883ddba77ab754e5f733c8b3375c | [
"MIT"
] | 1 | 2021-06-21T16:58:48.000Z | 2021-06-21T16:58:48.000Z | config.py | bsoyka/sunset-bot | ea05000e52e1883ddba77ab754e5f733c8b3375c | [
"MIT"
] | 4 | 2021-08-13T16:52:51.000Z | 2021-09-01T13:05:42.000Z | config.py | sunset-vacation/bot | ea05000e52e1883ddba77ab754e5f733c8b3375c | [
"MIT"
] | 4 | 2021-06-21T22:16:12.000Z | 2021-08-11T21:01:19.000Z | from pathlib import Path
from dynamic_yaml import load
with (Path(__file__).parent / 'config.yaml').open() as f:
CONFIG = load(f)
| 23.333333 | 58 | 0.692857 | from pathlib import Path
from dynamic_yaml import load
with (Path(__file__).parent / 'config.yaml').open() as f:
CONFIG = load(f)
| true | true |
f73a94c73b0acceaa44f08e9b07ab0a550ed9eeb | 3,386 | py | Python | problem_solving/maximum_subarray_sum/code.py | Chicco94/hacker-rank-code | 998d3abd7a40d85ec3d8fc4afe600b65978984db | [
"MIT"
] | null | null | null | problem_solving/maximum_subarray_sum/code.py | Chicco94/hacker-rank-code | 998d3abd7a40d85ec3d8fc4afe600b65978984db | [
"MIT"
] | null | null | null | problem_solving/maximum_subarray_sum/code.py | Chicco94/hacker-rank-code | 998d3abd7a40d85ec3d8fc4afe600b65978984db | [
"MIT"
] | null | null | null | #!/bin/python3
import math
import os
import random
import re
import sys
from functools import cache
import time
from bisect import bisect,insort
@cache
def get_sub_sum(temp_sum,removed,added,modulo):
return (temp_sum-removed+added)%modulo
def maximumSum_iter_2(a, m, a_sum):
if len(a) == 0: return 0
if len(a) == 1: return a[0]%m
first = a[0]
last = a[-1]
return max(
a_sum,
maximumSum_iter_2(a[1:],m,get_sub_sum(a_sum,first,0,m)),
maximumSum_iter_2(a[0:-1],m,get_sub_sum(a_sum,last,0,m))
)
def maximumSum_2(a, m):
return maximumSum_iter_2(a,m,sum(a)%m)
def maximumSum_iter_3(a, m, a_sum, do_left=True):
if len(a) == 0: return 0
if len(a) == 1: return a[0]%m
first = a[0]
last = a[-1]
return max(
a_sum,
maximumSum_iter_3(a[1:],m,get_sub_sum(a_sum,first,0,m)) if do_left else a_sum,
maximumSum_iter_3(a[0:-1],m,get_sub_sum(a_sum,last,0,m),do_left=False)
)
def maximumSum_3(a, m):
return maximumSum_iter_3(a,m,sum(a)%m)
def maxSubarray(a,m):
N = len(a)
cumulative_sums = []
sum_so_far = 0
max_sum = 0
for i in range(N):
sum_so_far = (sum_so_far + a[i]) % m
pos = bisect(cumulative_sums, sum_so_far)
d = 0 if pos == i else cumulative_sums[pos]
max_sum = max(max_sum, (sum_so_far + m - d) % m)
insort(cumulative_sums, sum_so_far)
return max_sum
def maximumSum_1(a, m):
best_sub_a_sum = 0
for l in range(1,len(a)+1):
temp_sum = sum(a[0:l])%m
if temp_sum>best_sub_a_sum:
best_sub_a_sum = temp_sum
for i in range(1,len(a)-l+1):
temp_sum = get_sub_sum(temp_sum,a[i-1],a[i+l-1],m)
if temp_sum>best_sub_a_sum:
best_sub_a_sum = temp_sum
return best_sub_a_sum
if __name__ == '__main__':
with open("./test_cases/case_1.txt") as test_case:
with open("./test_cases/case_1_solutions.txt") as solutions:
q = int(test_case.readline().strip())
print("tot cases: ",q)
max_1_time = 0
max_2_time = 0
max_3_time = 0
for i in range(q):
first_multiple_input = test_case.readline().rstrip().split()
n = int(first_multiple_input[0])
m = int(first_multiple_input[1])
a = list(map(int, test_case.readline().rstrip().split()))
solution = int(solutions.readline().rstrip())
start_time = time.time()
r1 = maximumSum_1(a, m)
time_1 = time.time()-start_time
max_1_time += time_1
start_time = time.time()
r2= maxSubarray(a, m)
time_2 = time.time()-start_time
max_2_time += time_2
start_time = time.time()
r3= maximumSum_3(a, m)
time_3 = time.time()-start_time
max_3_time += time_3
if (time_1 > 0.5 or time_2 > 0.5 or time_3 > 0.5):
print(f"{i} {time_1} {time_2} {time_3}")
if (r1 != solution or r2 != solution or r3 != solution):
print(f"{i} {r1} {r2} {r3} {solution}")
print("1:{} 2:{} 3:{}".format(max_1_time/q,max_2_time/q,max_3_time/q))
| 29.701754 | 90 | 0.551683 |
import math
import os
import random
import re
import sys
from functools import cache
import time
from bisect import bisect,insort
@cache
def get_sub_sum(temp_sum,removed,added,modulo):
return (temp_sum-removed+added)%modulo
def maximumSum_iter_2(a, m, a_sum):
if len(a) == 0: return 0
if len(a) == 1: return a[0]%m
first = a[0]
last = a[-1]
return max(
a_sum,
maximumSum_iter_2(a[1:],m,get_sub_sum(a_sum,first,0,m)),
maximumSum_iter_2(a[0:-1],m,get_sub_sum(a_sum,last,0,m))
)
def maximumSum_2(a, m):
return maximumSum_iter_2(a,m,sum(a)%m)
def maximumSum_iter_3(a, m, a_sum, do_left=True):
if len(a) == 0: return 0
if len(a) == 1: return a[0]%m
first = a[0]
last = a[-1]
return max(
a_sum,
maximumSum_iter_3(a[1:],m,get_sub_sum(a_sum,first,0,m)) if do_left else a_sum,
maximumSum_iter_3(a[0:-1],m,get_sub_sum(a_sum,last,0,m),do_left=False)
)
def maximumSum_3(a, m):
return maximumSum_iter_3(a,m,sum(a)%m)
def maxSubarray(a,m):
N = len(a)
cumulative_sums = []
sum_so_far = 0
max_sum = 0
for i in range(N):
sum_so_far = (sum_so_far + a[i]) % m
pos = bisect(cumulative_sums, sum_so_far)
d = 0 if pos == i else cumulative_sums[pos]
max_sum = max(max_sum, (sum_so_far + m - d) % m)
insort(cumulative_sums, sum_so_far)
return max_sum
def maximumSum_1(a, m):
best_sub_a_sum = 0
for l in range(1,len(a)+1):
temp_sum = sum(a[0:l])%m
if temp_sum>best_sub_a_sum:
best_sub_a_sum = temp_sum
for i in range(1,len(a)-l+1):
temp_sum = get_sub_sum(temp_sum,a[i-1],a[i+l-1],m)
if temp_sum>best_sub_a_sum:
best_sub_a_sum = temp_sum
return best_sub_a_sum
if __name__ == '__main__':
with open("./test_cases/case_1.txt") as test_case:
with open("./test_cases/case_1_solutions.txt") as solutions:
q = int(test_case.readline().strip())
print("tot cases: ",q)
max_1_time = 0
max_2_time = 0
max_3_time = 0
for i in range(q):
first_multiple_input = test_case.readline().rstrip().split()
n = int(first_multiple_input[0])
m = int(first_multiple_input[1])
a = list(map(int, test_case.readline().rstrip().split()))
solution = int(solutions.readline().rstrip())
start_time = time.time()
r1 = maximumSum_1(a, m)
time_1 = time.time()-start_time
max_1_time += time_1
start_time = time.time()
r2= maxSubarray(a, m)
time_2 = time.time()-start_time
max_2_time += time_2
start_time = time.time()
r3= maximumSum_3(a, m)
time_3 = time.time()-start_time
max_3_time += time_3
if (time_1 > 0.5 or time_2 > 0.5 or time_3 > 0.5):
print(f"{i} {time_1} {time_2} {time_3}")
if (r1 != solution or r2 != solution or r3 != solution):
print(f"{i} {r1} {r2} {r3} {solution}")
print("1:{} 2:{} 3:{}".format(max_1_time/q,max_2_time/q,max_3_time/q))
| true | true |
f73a952b9290dc76a7f0dc3b96e6c90358a9fc2c | 1,063 | py | Python | api/admin.py | PatrickCmd/Recipe-API-Django-GraphQL | bed1f1ebab88615ca62ea3846fbeb8e1a69c09e6 | [
"MIT"
] | null | null | null | api/admin.py | PatrickCmd/Recipe-API-Django-GraphQL | bed1f1ebab88615ca62ea3846fbeb8e1a69c09e6 | [
"MIT"
] | null | null | null | api/admin.py | PatrickCmd/Recipe-API-Django-GraphQL | bed1f1ebab88615ca62ea3846fbeb8e1a69c09e6 | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import Category, Recipe, Ingredient, RecipeVote
@admin.register(Category)
class CategoryAdmin(admin.ModelAdmin):
list_display = (
"id",
"name",
"owner",
"description",
)
search_fields = (
"name",
"owner"
)
readonly_fields = (
"created_at",
"updated_at",
)
@admin.register(Recipe)
class RecipeAdmin(admin.ModelAdmin):
list_display = (
"id",
"title",
"owner",
"category",
"description",
"is_public",
)
search_fields = (
"title",
"category",
"description",
"owner",
)
readonly_fields = (
"created_at",
"updated_at",
)
@admin.register(Ingredient)
class IngredientAdmin(admin.ModelAdmin):
list_display = (
"id",
"name",
"recipe",
"amount",
"instruction_notes",
)
search_fields = (
"name",
"recipe",
)
admin.site.register(RecipeVote)
| 17.145161 | 60 | 0.523989 | from django.contrib import admin
from .models import Category, Recipe, Ingredient, RecipeVote
@admin.register(Category)
class CategoryAdmin(admin.ModelAdmin):
list_display = (
"id",
"name",
"owner",
"description",
)
search_fields = (
"name",
"owner"
)
readonly_fields = (
"created_at",
"updated_at",
)
@admin.register(Recipe)
class RecipeAdmin(admin.ModelAdmin):
list_display = (
"id",
"title",
"owner",
"category",
"description",
"is_public",
)
search_fields = (
"title",
"category",
"description",
"owner",
)
readonly_fields = (
"created_at",
"updated_at",
)
@admin.register(Ingredient)
class IngredientAdmin(admin.ModelAdmin):
list_display = (
"id",
"name",
"recipe",
"amount",
"instruction_notes",
)
search_fields = (
"name",
"recipe",
)
admin.site.register(RecipeVote)
| true | true |
f73a95654fa3601d6df72f989dd2887bc9d735c2 | 1,867 | py | Python | array_queue.py | KAIKAIZHANG/Algorithm | 755547dea7f055919abfe9165279fc08e120b75d | [
"MIT"
] | 1 | 2019-03-01T09:00:40.000Z | 2019-03-01T09:00:40.000Z | array_queue.py | KAIKAIZHANG/Algorithm | 755547dea7f055919abfe9165279fc08e120b75d | [
"MIT"
] | null | null | null | array_queue.py | KAIKAIZHANG/Algorithm | 755547dea7f055919abfe9165279fc08e120b75d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
用数组实现队列queue:用数组实现的队列是顺序队列,主要操作有入队和出队操作。
"""
from typing import Optional
class DynamicQueue:
"""
算法步骤:
1.入队列
(1)判断尾指针大小是否位等于队列存储容量
是
(1.2)判断头指针是否位于队列开头,
是,队列已满,无法插入
否,队列未满,将队列进行前部搬移
a.数据整段往前重新复制搬移
b.将尾指针重新赋值,将头指针指0
否
(2)判断尾指针大小是否等于现有队列的长度
是,直接在队列尾部插入数据
否,在尾指针位置插入数据
尾指针加1
2.出队列
(1)判断头指针和尾指针是否相同
否,找到队列头并返回结果,将头指针后移进行赋值
是,队列已经为空,无法进行删除操作
"""
def __init__(self, capacity: int):
self._items = []
self._head = 0
self._tail = 0
self._capacity = capacity
def enqueue(self, item: str):
if self._tail == self._capacity:
if self._head == 0:
return False
self._items[0:self._tail-self._head] = self._items[self._head:self._tail]
self._tail -= self._head
self._head = 0
if self._tail == len(self._items):
self._items.append(item)
else:
self._items[self._tail] = item
self._tail += 1
return True
def dequeue(self) -> Optional[str]:
if self._head != self._tail:
temp = self._items[self._head]
self._head += 1
return temp
def __repr__(self):
return " ".join(item for item in self._items[self._head:self._tail])
if __name__ == '__main__':
dynamic_queue = DynamicQueue(10)
for i in range(10):
dynamic_queue.enqueue(str(i))
print(dynamic_queue)
for _ in range(3):
dynamic_queue.dequeue()
print(dynamic_queue)
dynamic_queue.enqueue("7")
print(dynamic_queue)
dynamic_queue.enqueue("8")
print(dynamic_queue)
| 21.709302 | 86 | 0.528656 |
from typing import Optional
class DynamicQueue:
def __init__(self, capacity: int):
self._items = []
self._head = 0
self._tail = 0
self._capacity = capacity
def enqueue(self, item: str):
if self._tail == self._capacity:
if self._head == 0:
return False
self._items[0:self._tail-self._head] = self._items[self._head:self._tail]
self._tail -= self._head
self._head = 0
if self._tail == len(self._items):
self._items.append(item)
else:
self._items[self._tail] = item
self._tail += 1
return True
def dequeue(self) -> Optional[str]:
if self._head != self._tail:
temp = self._items[self._head]
self._head += 1
return temp
def __repr__(self):
return " ".join(item for item in self._items[self._head:self._tail])
if __name__ == '__main__':
dynamic_queue = DynamicQueue(10)
for i in range(10):
dynamic_queue.enqueue(str(i))
print(dynamic_queue)
for _ in range(3):
dynamic_queue.dequeue()
print(dynamic_queue)
dynamic_queue.enqueue("7")
print(dynamic_queue)
dynamic_queue.enqueue("8")
print(dynamic_queue)
| true | true |
f73a960f610486f84a47840f56531280ba7619ca | 34,914 | py | Python | tests/test_resources_job.py | Spredzy/tower-cli | 2c115877a36238b94fb8af1ff32915ebaf868e60 | [
"Apache-2.0"
] | 1 | 2019-03-20T20:58:23.000Z | 2019-03-20T20:58:23.000Z | tests/test_resources_job.py | Spredzy/tower-cli | 2c115877a36238b94fb8af1ff32915ebaf868e60 | [
"Apache-2.0"
] | null | null | null | tests/test_resources_job.py | Spredzy/tower-cli | 2c115877a36238b94fb8af1ff32915ebaf868e60 | [
"Apache-2.0"
] | null | null | null | # Copyright 2015, Ansible, Inc.
# Luke Sneeringer <lsneeringer@ansible.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import yaml
import time
from copy import copy
import click
import tower_cli
from tower_cli.api import client
from tower_cli import exceptions as exc
from tower_cli.cli.resource import ResSubcommand
from tests.compat import unittest, mock
from tower_cli.conf import settings
from tower_cli.constants import CUR_API_VERSION
# Standard functions used for space and readability
# these operate on the test client, t
def register_get(t):
""" After starting job, the launch method may grab info about
the job just launched from this endpoint """
t.register_json('/jobs/42/',
{
'id': 42, 'job_template': 1, 'status': 'pending',
'created': 1234, 'elapsed': 0.0,
}, method='GET')
def standard_registration(t, **kwargs):
""" Endpoints common to launching any job with template #1 and
is automatically assigned to job #42. kwargs is used to provide
extra return fields of job launch"""
# A GET to the template endpoint is made to find the extra_vars to combine
t.register_json('/job_templates/1/', {
'id': 1,
'name': 'frobnicate',
'related': {'launch': '/job_templates/1/launch/'},
})
register_get(t)
# A GET to the launch endpoint is needed to check if
# a password prompt is needed
t.register_json('/job_templates/1/launch/', {}, method='GET')
# A POST to the launch endpoint will launch a job, and we
# expect that the tower server will return the job number
data = {'id': 42}
data.update(kwargs)
t.register_json('/job_templates/1/launch/', data, method='POST')
def jt_vars_registration(t, extra_vars):
""" Endpoints that are needed to get information from job template.
This particular combination also entails
1) version of Tower - 2.2.0
2) successful job launch, id=42
3) prompts user for variables on launch """
t.register_json('/job_templates/1/', {
'ask_variables_on_launch': True,
'extra_vars': extra_vars,
'id': 1,
'name': 'frobnicate',
'related': {'launch': '/job_templates/1/launch/'},
})
register_get(t)
t.register_json('/config/', {'version': '2.2.0'}, method='GET')
t.register_json('/job_templates/1/launch/', {}, method='GET')
t.register_json('/job_templates/1/launch/', {'id': 42},
method='POST')
class LaunchTests(unittest.TestCase):
"""A set of tests for ensuring that the job resource's launch command
works in the way we expect.
"""
def setUp(self):
self.res = tower_cli.get_resource('job')
def test_basic_launch(self):
"""Establish that we are able to create a job that doesn't require
any invocation-time input.
"""
with client.test_mode as t:
standard_registration(t)
result = self.res.launch(1)
self.assertDictContainsSubset({'changed': True, 'id': 42}, result)
def test_basic_launch_with_echo(self):
"""Establish that we are able to create a job and echo the output
to the command line without it breaking.
"""
with client.test_mode as t:
standard_registration(t)
result = self.res.launch(1)
self.assertDictContainsSubset({'changed': True, 'id': 42}, result)
f = ResSubcommand(self.res)._echo_method(self.res.launch)
with mock.patch.object(click, 'secho'):
with settings.runtime_values(format='human'):
f(job_template=1)
def test_launch_w_tags(self):
"""Establish that we are able to create a job and attach tags to it.
"""
with client.test_mode as t:
standard_registration(t)
self.res.launch(1, tags="a, b, c")
self.assertEqual(
json.loads(t.requests[2].body)['job_tags'], 'a, b, c',
)
def test_launch_w_tuple_extra_vars(self):
"""Establish that if the click library gives a tuple, than the job
will run normally.
"""
with client.test_mode as t:
standard_registration(t)
result = self.res.launch(1, extra_vars=())
self.assertDictContainsSubset({'changed': True, 'id': 42}, result)
def test_basic_launch_wait_option(self):
"""Establish that we are able to create a job that doesn't require
any invocation-time input, and that wait is called if requested.
"""
with client.test_mode as t:
standard_registration(t)
with mock.patch.object(type(self.res), 'wait') as wait:
self.res.launch(1, wait=True)
wait.assert_called_once_with(42, timeout=None)
def test_extra_vars_at_runtime(self):
"""Establish that if we should be asking for extra variables at
runtime, that we do.
"""
with client.test_mode as t:
# test with JSON job template extra_vars
jt_vars_registration(t, '{"spam": "eggs"}')
with mock.patch.object(click, 'edit') as edit:
edit.return_value = '# Nothing.\nfoo: bar'
result = self.res.launch(1, no_input=False)
self.assertDictContainsSubset(
{"spam": "eggs"},
yaml.load(edit.mock_calls[0][1][0])
)
self.assertDictContainsSubset(
{'foo': 'bar'},
json.loads(json.loads(t.requests[2].body)['extra_vars'])
)
self.assertDictContainsSubset({'changed': True, 'id': 42}, result)
def test_extra_vars_at_runtime_YAML_JT(self):
"""Establish that if we should be asking for extra variables at
runtime, that we do.
"""
with client.test_mode as t:
# test with YAML and comments
jt_vars_registration(t, 'spam: eggs\n# comment')
with mock.patch.object(click, 'edit') as edit:
edit.return_value = '# Nothing.\nfoo: bar'
self.res.launch(1, no_input=False)
self.assertIn('# comment', edit.mock_calls[0][1][0])
self.assertDictContainsSubset(
{"spam": "eggs"},
yaml.load(edit.mock_calls[0][1][0])
)
def test_extra_vars_at_runtime_no_user_data(self):
"""User launches a job that prompts for variables. User closes
editor without adding any text.
Establish that we launch the job as-is.
"""
with client.test_mode as t:
# No job template variables
jt_vars_registration(t, '')
initial = '\n'.join((
'# Specify extra variables (if any) here as YAML.',
'# Lines beginning with "#" denote comments.',
'',
))
with mock.patch.object(click, 'edit') as edit:
edit.return_value = initial
self.res.launch(1, no_input=False)
self.assertEqual(t.requests[2].method, 'POST')
self.assertEqual(t.requests[2].body, '{}')
def test_job_template_variables_post_24(self):
""" Check that in Tower versions past 2.4,
it does not include job template
variables along with the rest """
with client.test_mode as t:
jt_vars_registration(t, 'spam: eggs')
t.register_json('/config/', {'version': '2.4'}, method='GET')
result = self.res.launch(1, extra_vars=['foo: bar'])
response_json = yaml.load(t.requests[2].body)
ev_json = yaml.load(response_json['extra_vars'])
self.assertTrue('foo' in ev_json)
self.assertTrue('spam' not in ev_json)
self.assertDictContainsSubset({'changed': True, 'id': 42}, result)
def test_extra_vars_at_call_time(self):
"""Establish that extra variables specified at call time are
appropriately specified.
"""
with client.test_mode as t:
t.register_json('/job_templates/1/', {
'id': 1,
'name': 'frobnicate',
'related': {'launch': '/job_templates/1/launch/'},
})
register_get(t)
t.register_json('/job_templates/1/launch/', {}, method='GET')
t.register_json('/job_templates/1/launch/', {'id': 42},
method='POST')
result = self.res.launch(1, extra_vars=['foo: bar'])
self.assertDictContainsSubset(
{'foo': 'bar'},
json.loads(json.loads(t.requests[2].body)['extra_vars'])
)
self.assertDictContainsSubset({'changed': True, 'id': 42}, result)
def test_extra_vars_file_at_call_time(self):
"""Establish that extra variables specified at call time as a file are
appropriately specified.
"""
with client.test_mode as t:
t.register_json('/job_templates/1/', {
'id': 1,
'name': 'frobnicate',
'related': {'launch': '/job_templates/1/launch/'},
})
register_get(t)
t.register_json('/job_templates/1/launch/', {}, method='GET')
t.register_json('/job_templates/1/launch/', {'id': 42},
method='POST')
result = self.res.launch(1, extra_vars=['foo: bar'])
self.assertDictContainsSubset(
{'foo': 'bar'},
json.loads(json.loads(t.requests[2].body)['extra_vars'])
)
self.assertDictContainsSubset({'changed': True, 'id': 42}, result)
def test_passwords_needed_at_start(self):
"""Establish that we are able to create a job that doesn't require
any invocation-time input.
"""
with client.test_mode as t:
t.register_json('/job_templates/1/', {
'id': 1,
'name': 'frobnicate',
'related': {'launch': '/job_templates/1/launch/'},
})
register_get(t)
t.register_json('/job_templates/1/launch/', {
'passwords_needed_to_start': ['foo'],
}, method='GET')
t.register_json('/job_templates/1/launch/', {'id': 42},
method='POST')
with mock.patch('tower_cli.resources.job.getpass') as getpass:
getpass.return_value = 'bar'
result = self.res.launch(1)
getpass.assert_called_once_with('Password for foo: ')
self.assertDictContainsSubset({'changed': True, 'id': 42}, result)
def test_ignored_fields(self):
"""Establish that if ignored_fields is returned when launching job,
it will be displayed in verbose mode.
"""
echo_count_with_ignore = 0
echo_count = 0
with client.test_mode as t:
standard_registration(t)
with settings.runtime_values(verbose=True):
with mock.patch.object(click, 'secho') as secho:
self.res.launch(job_template=1)
echo_count = secho.call_count
with client.test_mode as t:
standard_registration(t, ignored_fields={'foo': 'bar'})
with settings.runtime_values(verbose=True):
with mock.patch.object(click, 'secho') as secho:
self.res.launch(job_template=1)
echo_count_with_ignore = secho.call_count
self.assertEqual(echo_count_with_ignore - echo_count, 2)
class StatusTests(unittest.TestCase):
"""A set of tests to establish that the job status command works in the
way that we expect.
"""
def setUp(self):
self.res = tower_cli.get_resource('job')
def test_normal(self):
"""Establish that the data about a job retrieved from the jobs
endpoint is provided.
"""
with client.test_mode as t:
t.register_json('/jobs/42/', {
'elapsed': 1335024000.0,
'extra': 'ignored',
'failed': False,
'status': 'successful',
})
result = self.res.status(42)
self.assertEqual(result, {
'elapsed': 1335024000.0,
'failed': False,
'status': 'successful',
})
self.assertEqual(len(t.requests), 1)
def test_normal_with_lookup(self):
"""Establish that the data about job specified by query is
returned correctly.
"""
with client.test_mode as t:
t.register_json('/jobs/?name=bar', {"count": 1, "results": [
{"id": 42, "name": "bar",
'elapsed': 1335024000.0,
'extra': 'ignored',
'failed': False,
'status': 'successful', },
], "next": None, "previous": None}, method='GET')
result = self.res.status(name="bar")
self.assertEqual(result, {
'elapsed': 1335024000.0,
'failed': False,
'status': 'successful',
})
self.assertEqual(len(t.requests), 1)
def test_detailed(self):
with client.test_mode as t:
t.register_json('/jobs/42/', {
'elapsed': 1335024000.0,
'extra': 'ignored',
'failed': False,
'status': 'successful',
})
result = self.res.status(42, detail=True)
self.assertEqual(result, {
'elapsed': 1335024000.0,
'extra': 'ignored',
'failed': False,
'status': 'successful',
})
self.assertEqual(len(t.requests), 1)
class ListStatusesTests(unittest.TestCase):
"""A set of tests to establish that the job list command works in the
way that we expect when passing a single status or multiple statuses.
"""
def setUp(self):
self.res = tower_cli.get_resource('job')
def test_list_lone_status(self):
"""Establish that the list command is still able to handle single
status.
"""
with client.test_mode as t:
t.register_json('/jobs/?status=running', {
'elapsed': 4567.0,
'extra': 'ignored',
'failed': False,
'status': 'running',
'extra': 'ignored'
})
result = self.res.list(status='running')
self.assertEqual(result, {
'elapsed': 4567.0,
'failed': False,
'status': 'running',
'extra': 'ignored'
})
self.assertEqual(len(t.requests), 1)
def test_list_multiple_statuses_with_bad_one(self):
"""Establish that when passing multiple statuses, the list command errors
when a status is not found among the registered statuses.
"""
with client.test_mode as t:
with self.assertRaises(exc.TowerCLIError) as e:
self.res.list(status='pending,runin')
self.assertEqual(len(t.requests), 0)
self.assertEqual(str(e.exception), 'This status does not exist: runin')
def test_list_multiple_statuses_first_page(self):
"""Establish that when passing multiple statuses, the list command returns
only the first page of jobs matching the requested statuses.
"""
with client.test_mode as t:
t.register_json('/jobs/?or__status=pending&or__status=running', {
'count': 3,
'previous': None,
'next': '/api/%s/jobs/?or__status=pending&or__status=running&page=2' % CUR_API_VERSION,
'results': [{
'elapsed': 0.0,
'failed': False,
'status': 'pending',
'extra': 'ignored',
}, {
'elapsed': 7823.0,
'failed': False,
'status': 'running',
'extra': 'ignored',
}, {
'elapsed': 0.0,
'failed': False,
'status': 'pending',
'extra': 'ignored',
}]
})
t.register_json('/jobs/?or__status=pending&or__status=running&page=2', {
'count': 2,
'previous': '/api/%s/jobs/?or__status=pending&or__status=running&page=2' % CUR_API_VERSION,
'next': None,
'results': [{
'elapsed': 0.0,
'failed': False,
'status': 'pending',
'extra': 'ignored',
}, {
'elapsed': 0.0,
'failed': False,
'status': 'pending',
'extra': 'ignored',
}]
})
result = self.res.list(status='pending,running')
self.assertEqual(result, {
'count': 3,
'previous': None,
'next': 2,
'results': [{
'elapsed': 0.0,
'failed': False,
'status': 'pending',
'extra': 'ignored',
}, {
'elapsed': 7823.0,
'failed': False,
'status': 'running',
'extra': 'ignored',
}, {
'elapsed': 0.0,
'failed': False,
'status': 'pending',
'extra': 'ignored',
}]
})
self.assertEqual(len(t.requests), 1)
def test_list_multiple_statuses_second_page(self):
"""Establish that when passing multiple statuses, the list command returns
only the second page of jobs matching the requested statuses.
"""
with client.test_mode as t:
t.register_json('/jobs/?or__status=pending&or__status=running', {
'count': 3,
'previous': None,
'next': '/api/%s/jobs/?or__status=pending&or__status=running&page=2' % CUR_API_VERSION,
'results': [{
'elapsed': 0.0,
'failed': False,
'status': 'pending',
'extra': 'ignored',
}, {
'elapsed': 7823.0,
'failed': False,
'status': 'running',
'extra': 'ignored',
}, {
'elapsed': 0.0,
'failed': False,
'status': 'pending',
'extra': 'ignored',
}]
})
t.register_json('/jobs/?or__status=pending&or__status=running&page=2', {
'count': 2,
'previous': '/api/%s/jobs/?or__status=pending&or__status=running&page=1' % CUR_API_VERSION,
'next': None,
'results': [{
'elapsed': 0.0,
'failed': False,
'status': 'pending',
'extra': 'ignored',
}, {
'elapsed': 0.0,
'failed': False,
'status': 'pending',
'extra': 'ignored',
}]
})
result = self.res.list(status='pending,running', page=2)
self.assertEqual(result, {
'count': 2,
'previous': 1,
'next': None,
'results': [{
'elapsed': 0.0,
'failed': False,
'status': 'pending',
'extra': 'ignored',
}, {
'elapsed': 0.0,
'failed': False,
'status': 'pending',
'extra': 'ignored',
}]
})
self.assertEqual(len(t.requests), 1)
def test_list_multiple_statuses_all_pages(self):
"""Establish that when passing multiple statuses, the list command returns
the entire set of jobs matching the requested statuses.
"""
with client.test_mode as t:
t.register_json('/jobs/?or__status=pending&or__status=running', {
'count': 3,
'previous': None,
'next': '/api/%s/jobs/?or__status=pending&or__status=running&page=2' % CUR_API_VERSION,
'results': [{
'elapsed': 0.0,
'failed': False,
'status': 'pending',
'extra': 'ignored',
}, {
'elapsed': 7823.0,
'failed': False,
'status': 'running',
'extra': 'ignored',
}, {
'elapsed': 0.0,
'failed': False,
'status': 'pending',
'extra': 'ignored',
}]
})
t.register_json('/jobs/?or__status=pending&or__status=running&page=2', {
'count': 2,
'previous': '/api/%s/jobs/?or__status=pending&or__status=running&page=1' % CUR_API_VERSION,
'next': None,
'results': [{
'elapsed': 0.0,
'failed': False,
'status': 'pending',
'extra': 'ignored',
}, {
'elapsed': 0.0,
'failed': False,
'status': 'pending',
'extra': 'ignored',
}]
})
result = self.res.list(status='pending,running', all_pages=True)
self.assertEqual(result, {
'count': 5,
'previous': None,
'next': 2,
'results': [{
'elapsed': 0.0,
'failed': False,
'status': 'pending',
'extra': 'ignored',
}, {
'elapsed': 7823.0,
'failed': False,
'status': 'running',
'extra': 'ignored',
}, {
'elapsed': 0.0,
'failed': False,
'status': 'pending',
'extra': 'ignored',
}, {
'elapsed': 0.0,
'failed': False,
'status': 'pending',
'extra': 'ignored',
}, {
'elapsed': 0.0,
'failed': False,
'status': 'pending',
'extra': 'ignored',
}]
})
self.assertEqual(len(t.requests), 2)
class MonitorWaitTests(unittest.TestCase):
"""A set of tests to establish that the job monitor and wait commands
works in the way that we expect.
"""
def setUp(self):
self.res = tower_cli.get_resource('job')
def test_already_successful(self):
"""Establish that if we attempt to wait an already successful job,
we simply get back the job success report.
"""
with client.test_mode as t:
t.register_json('/jobs/42/', {
'elapsed': 1335024000.0,
'failed': False,
'status': 'successful',
})
with mock.patch.object(time, 'sleep') as sleep:
result = self.res.wait(42)
self.assertEqual(sleep.call_count, 0)
self.assertEqual(result['status'], 'successful')
def test_already_successful_monitor(self):
"""Pass-through successful job with monitor method"""
with client.test_mode as t:
t.register_json('/jobs/42/', {
'elapsed': 1335024000.0,
'failed': False,
'status': 'successful',
})
# Test same for monitor
with mock.patch.object(time, 'sleep') as sleep:
with mock.patch.object(type(self.res), 'wait'):
with mock.patch.object(click, 'echo'):
result = self.res.monitor(42)
self.assertEqual(sleep.call_count, 0)
self.assertEqual(result['status'], 'successful')
def test_failure(self):
"""Establish that if the job has failed, that we raise the
JobFailure exception.
"""
with client.test_mode as t:
t.register_json('/jobs/42/', {
'elapsed': 1335024000.0,
'failed': True,
'status': 'failed',
})
with self.assertRaises(exc.JobFailure):
with mock.patch.object(click, 'secho') as secho:
with mock.patch('tower_cli.models.base.is_tty') as tty:
tty.return_value = True
self.res.wait(42)
self.assertTrue(secho.call_count >= 1)
# Test the same with the monitor method
with self.assertRaises(exc.JobFailure):
with mock.patch.object(click, 'secho') as secho:
with mock.patch('tower_cli.models.base.is_tty') as tty:
tty.return_value = True
self.res.monitor(42)
self.assertTrue(secho.call_count >= 1)
def test_failure_non_tty(self):
"""Establish that if the job has failed, that we raise the
JobFailure exception, and also don't print bad things on non-tty
outfiles.
"""
with client.test_mode as t:
t.register_json('/jobs/42/', {
'elapsed': 1335024000.0,
'failed': True,
'status': 'failed',
})
with self.assertRaises(exc.JobFailure):
with mock.patch.object(click, 'echo') as echo:
with mock.patch('tower_cli.models.base.is_tty') as tty:
tty.return_value = False
self.res.wait(42)
self.assertTrue(echo.call_count >= 1)
def test_waiting(self):
"""Establish that if the first status call returns a pending job,
and the second a success, that both calls are made, and a success
finally returned.
"""
# Set up our data object.
data = {'elapsed': 1335024000.0, 'failed': False, 'status': 'pending'}
# Register the initial request's response.
with client.test_mode as t:
t.register_json('/jobs/42/', copy(data))
# Create a way to assign a successful data object to the request.
def assign_success(*args):
t.clear()
t.register_json('/jobs/42/', dict(data, status='successful'))
# Make the successful state assignment occur when time.sleep()
# is called between requests.
with mock.patch.object(time, 'sleep') as sleep:
sleep.side_effect = assign_success
with mock.patch.object(click, 'secho') as secho:
with mock.patch('tower_cli.models.base.is_tty') as tty:
tty.return_value = True
self.res.wait(42, min_interval=0.21)
self.assertTrue(secho.call_count >= 100)
# We should have gotten two requests total, to the same URL.
self.assertEqual(len(t.requests), 2)
self.assertEqual(t.requests[0].url, t.requests[1].url)
def test_monitor(self):
"""Establish that if the first status call returns a pending job,
and the second a success, that both calls are made, and a success
finally returned.
"""
# Set up our data object.
data = {'elapsed': 1335024000.0, 'failed': False, 'status': 'pending'}
# Register the initial request's response.
with client.test_mode as t:
t.register_json('/jobs/42/', copy(data))
# Create a way to assign a successful data object to the request.
def assign_success(*args):
t.clear()
t.register_json('/jobs/42/', dict(data, status='successful'))
# Make the successful state assignment occur when time.sleep()
# is called between requests.
with mock.patch.object(time, 'sleep') as sleep:
sleep.side_effect = assign_success
with mock.patch.object(click, 'echo'):
with mock.patch.object(type(self.res), 'wait'):
with mock.patch.object(
type(self.res), 'lookup_stdout'):
self.res.monitor(42, min_interval=0.21)
# We should have gotten 3 requests total, to the same URL.
self.assertEqual(len(t.requests), 3)
self.assertEqual(t.requests[0].url, t.requests[1].url)
def test_timeout(self):
"""Establish that the --timeout flag is honored if sent to
`tower-cli job wait`.
"""
# Set up our data object.
# This doesn't have to change; it will always be pending
# (thus the timeout).
data = {'elapsed': 1335024000.0, 'failed': False, 'status': 'pending'}
# Mock out the passage of time.
with client.test_mode as t:
t.register_json('/jobs/42/', copy(data))
with mock.patch.object(click, 'secho') as secho:
with self.assertRaises(exc.Timeout):
with mock.patch('tower_cli.models.base.is_tty') as tty:
tty.return_value = True
self.res.wait(42, min_interval=0.21, timeout=0.1)
self.assertTrue(secho.call_count >= 1)
def test_waiting_not_tty(self):
"""Establish that the wait command prints more useful output
for logging if not connected to a tty.
"""
# Set up our data object.
data = {'elapsed': 1335024000.0, 'failed': False, 'status': 'pending'}
# Register the initial request's response.
with client.test_mode as t:
t.register_json('/jobs/42/', copy(data))
# Create a way to assign a successful data object to the request.
def assign_success(*args):
t.clear()
t.register_json('/jobs/42/', dict(data, status='successful'))
# Make the successful state assignment occur when time.sleep()
# is called between requests.
with mock.patch.object(time, 'sleep') as sleep:
sleep.side_effect = assign_success
with mock.patch.object(click, 'echo') as echo:
with mock.patch('tower_cli.models.base.is_tty') as tty:
tty.return_value = False
self.res.wait(42, min_interval=0.21)
self.assertTrue(echo.call_count >= 1)
# We should have gotten two requests total, to the same URL.
self.assertEqual(len(t.requests), 2)
self.assertEqual(t.requests[0].url, t.requests[1].url)
class CancelTests(unittest.TestCase):
    """Verify the behavior of the `job cancel` command under normal,
    lookup-based, and already-finished conditions.
    """
    def setUp(self):
        self.res = tower_cli.get_resource('job')

    def test_standard_cancelation(self):
        """A plain cancel by primary key POSTs to the cancel endpoint
        and reports a change.
        """
        with client.test_mode as t:
            t.register('/jobs/42/cancel/', '', method='POST')
            outcome = self.res.cancel(42)
            cancel_url = t.requests[0].url
            self.assertTrue(cancel_url.endswith('/jobs/42/cancel/'))
            self.assertTrue(outcome['changed'])

    def test_cancelation_by_lookup(self):
        """A job may be canceled by name; the name is first resolved
        to an id via a GET, then the cancel endpoint is POSTed.
        """
        with client.test_mode as t:
            t.register_json('/jobs/?name=bar', {
                "count": 1,
                "results": [{"id": 42, "name": "bar"}],
                "next": None,
                "previous": None,
            }, method='GET')
            t.register('/jobs/42/cancel/', '', method='POST')
            outcome = self.res.cancel(name="bar")
            self.assertTrue(t.requests[0].url.endswith('/jobs/?name=bar'))
            self.assertTrue(t.requests[1].url.endswith('/jobs/42/cancel/'))
            self.assertTrue(outcome['changed'])

    def test_cancelation_completed(self):
        """Canceling a job that already finished (HTTP 405) is not an
        error by default; it simply reports no change.
        """
        with client.test_mode as t:
            t.register('/jobs/42/cancel/', '', method='POST', status_code=405)
            outcome = self.res.cancel(42)
            self.assertTrue(t.requests[0].url.endswith('/jobs/42/cancel/'))
            self.assertFalse(outcome['changed'])

    def test_cancelation_completed_with_error(self):
        """With fail_if_not_running=True, canceling a finished job
        raises a TowerCLIError instead of reporting no change.
        """
        with client.test_mode as t:
            t.register('/jobs/42/cancel/', '', method='POST', status_code=405)
            with self.assertRaises(exc.TowerCLIError):
                self.res.cancel(42, fail_if_not_running=True)
class RelaunchTests(unittest.TestCase):
    """Verify that `job relaunch` POSTs to the relaunch endpoint and
    reports the newly created job as a change.
    """
    def setUp(self):
        self.res = tower_cli.get_resource('job')

    def test_standard_relaunch(self):
        """A relaunch by primary key hits /jobs/42/relaunch/ and the
        result is flagged as changed.
        """
        with client.test_mode as t:
            relaunched = {'id': 43}
            t.register_json('/jobs/42/relaunch/', relaunched, method='POST')
            outcome = self.res.relaunch(42)
            self.assertTrue(t.requests[0].url.endswith('/jobs/42/relaunch/'))
            self.assertTrue(outcome['changed'])
| 40.131034 | 107 | 0.525319 |
import json
import yaml
import time
from copy import copy
import click
import tower_cli
from tower_cli.api import client
from tower_cli import exceptions as exc
from tower_cli.cli.resource import ResSubcommand
from tests.compat import unittest, mock
from tower_cli.conf import settings
from tower_cli.constants import CUR_API_VERSION
def register_get(t):
    """Register a canned GET response for the job detail endpoint,
    representing a freshly-created, still-pending job #42.
    """
    t.register_json('/jobs/42/',
                    {
                        'id': 42, 'job_template': 1, 'status': 'pending',
                        'created': 1234, 'elapsed': 0.0,
                    }, method='GET')
def standard_registration(t, **kwargs):
    """Register the full request/response cycle needed to launch job
    template #1: the template detail, the job detail (via
    register_get), and both GET and POST on the launch endpoint.

    Extra keyword arguments are merged into the POST response body so
    individual tests can vary the launch result (e.g. ignored_fields).
    """
    t.register_json('/job_templates/1/', {
        'id': 1,
        'name': 'frobnicate',
        'related': {'launch': '/job_templates/1/launch/'},
    })
    register_get(t)
    t.register_json('/job_templates/1/launch/', {}, method='GET')
    data = {'id': 42}
    data.update(kwargs)
    t.register_json('/job_templates/1/launch/', data, method='POST')
def jt_vars_registration(t, extra_vars):
    """Register a job template that prompts for variables at launch
    (ask_variables_on_launch=True) with the given pre-set extra_vars,
    plus the /config/ version endpoint and the launch GET/POST pair.
    """
    t.register_json('/job_templates/1/', {
        'ask_variables_on_launch': True,
        'extra_vars': extra_vars,
        'id': 1,
        'name': 'frobnicate',
        'related': {'launch': '/job_templates/1/launch/'},
    })
    register_get(t)
    t.register_json('/config/', {'version': '2.2.0'}, method='GET')
    t.register_json('/job_templates/1/launch/', {}, method='GET')
    t.register_json('/job_templates/1/launch/', {'id': 42},
                    method='POST')
class LaunchTests(unittest.TestCase):
def setUp(self):
self.res = tower_cli.get_resource('job')
def test_basic_launch(self):
with client.test_mode as t:
standard_registration(t)
result = self.res.launch(1)
self.assertDictContainsSubset({'changed': True, 'id': 42}, result)
def test_basic_launch_with_echo(self):
with client.test_mode as t:
standard_registration(t)
result = self.res.launch(1)
self.assertDictContainsSubset({'changed': True, 'id': 42}, result)
f = ResSubcommand(self.res)._echo_method(self.res.launch)
with mock.patch.object(click, 'secho'):
with settings.runtime_values(format='human'):
f(job_template=1)
def test_launch_w_tags(self):
with client.test_mode as t:
standard_registration(t)
self.res.launch(1, tags="a, b, c")
self.assertEqual(
json.loads(t.requests[2].body)['job_tags'], 'a, b, c',
)
def test_launch_w_tuple_extra_vars(self):
with client.test_mode as t:
standard_registration(t)
result = self.res.launch(1, extra_vars=())
self.assertDictContainsSubset({'changed': True, 'id': 42}, result)
def test_basic_launch_wait_option(self):
with client.test_mode as t:
standard_registration(t)
with mock.patch.object(type(self.res), 'wait') as wait:
self.res.launch(1, wait=True)
wait.assert_called_once_with(42, timeout=None)
def test_extra_vars_at_runtime(self):
with client.test_mode as t:
jt_vars_registration(t, '{"spam": "eggs"}')
with mock.patch.object(click, 'edit') as edit:
edit.return_value = '# Nothing.\nfoo: bar'
result = self.res.launch(1, no_input=False)
self.assertDictContainsSubset(
{"spam": "eggs"},
yaml.load(edit.mock_calls[0][1][0])
)
self.assertDictContainsSubset(
{'foo': 'bar'},
json.loads(json.loads(t.requests[2].body)['extra_vars'])
)
self.assertDictContainsSubset({'changed': True, 'id': 42}, result)
def test_extra_vars_at_runtime_YAML_JT(self):
with client.test_mode as t:
jt_vars_registration(t, 'spam: eggs\n# comment')
with mock.patch.object(click, 'edit') as edit:
edit.return_value = '# Nothing.\nfoo: bar'
self.res.launch(1, no_input=False)
self.assertIn('# comment', edit.mock_calls[0][1][0])
self.assertDictContainsSubset(
{"spam": "eggs"},
yaml.load(edit.mock_calls[0][1][0])
)
def test_extra_vars_at_runtime_no_user_data(self):
with client.test_mode as t:
jt_vars_registration(t, '')
initial = '\n'.join((
'# Specify extra variables (if any) here as YAML.',
'# Lines beginning with "#" denote comments.',
'',
))
with mock.patch.object(click, 'edit') as edit:
edit.return_value = initial
self.res.launch(1, no_input=False)
self.assertEqual(t.requests[2].method, 'POST')
self.assertEqual(t.requests[2].body, '{}')
def test_job_template_variables_post_24(self):
with client.test_mode as t:
jt_vars_registration(t, 'spam: eggs')
t.register_json('/config/', {'version': '2.4'}, method='GET')
result = self.res.launch(1, extra_vars=['foo: bar'])
response_json = yaml.load(t.requests[2].body)
ev_json = yaml.load(response_json['extra_vars'])
self.assertTrue('foo' in ev_json)
self.assertTrue('spam' not in ev_json)
self.assertDictContainsSubset({'changed': True, 'id': 42}, result)
def test_extra_vars_at_call_time(self):
with client.test_mode as t:
t.register_json('/job_templates/1/', {
'id': 1,
'name': 'frobnicate',
'related': {'launch': '/job_templates/1/launch/'},
})
register_get(t)
t.register_json('/job_templates/1/launch/', {}, method='GET')
t.register_json('/job_templates/1/launch/', {'id': 42},
method='POST')
result = self.res.launch(1, extra_vars=['foo: bar'])
self.assertDictContainsSubset(
{'foo': 'bar'},
json.loads(json.loads(t.requests[2].body)['extra_vars'])
)
self.assertDictContainsSubset({'changed': True, 'id': 42}, result)
def test_extra_vars_file_at_call_time(self):
with client.test_mode as t:
t.register_json('/job_templates/1/', {
'id': 1,
'name': 'frobnicate',
'related': {'launch': '/job_templates/1/launch/'},
})
register_get(t)
t.register_json('/job_templates/1/launch/', {}, method='GET')
t.register_json('/job_templates/1/launch/', {'id': 42},
method='POST')
result = self.res.launch(1, extra_vars=['foo: bar'])
self.assertDictContainsSubset(
{'foo': 'bar'},
json.loads(json.loads(t.requests[2].body)['extra_vars'])
)
self.assertDictContainsSubset({'changed': True, 'id': 42}, result)
def test_passwords_needed_at_start(self):
with client.test_mode as t:
t.register_json('/job_templates/1/', {
'id': 1,
'name': 'frobnicate',
'related': {'launch': '/job_templates/1/launch/'},
})
register_get(t)
t.register_json('/job_templates/1/launch/', {
'passwords_needed_to_start': ['foo'],
}, method='GET')
t.register_json('/job_templates/1/launch/', {'id': 42},
method='POST')
with mock.patch('tower_cli.resources.job.getpass') as getpass:
getpass.return_value = 'bar'
result = self.res.launch(1)
getpass.assert_called_once_with('Password for foo: ')
self.assertDictContainsSubset({'changed': True, 'id': 42}, result)
def test_ignored_fields(self):
echo_count_with_ignore = 0
echo_count = 0
with client.test_mode as t:
standard_registration(t)
with settings.runtime_values(verbose=True):
with mock.patch.object(click, 'secho') as secho:
self.res.launch(job_template=1)
echo_count = secho.call_count
with client.test_mode as t:
standard_registration(t, ignored_fields={'foo': 'bar'})
with settings.runtime_values(verbose=True):
with mock.patch.object(click, 'secho') as secho:
self.res.launch(job_template=1)
echo_count_with_ignore = secho.call_count
self.assertEqual(echo_count_with_ignore - echo_count, 2)
class StatusTests(unittest.TestCase):
    """Tests that the job status command returns the expected summary
    fields (or, with detail=True, the full response body).
    """
    def setUp(self):
        # Every test exercises the `job` resource.
        self.res = tower_cli.get_resource('job')
    def test_normal(self):
        """A status lookup by primary key strips extra fields and
        issues exactly one request.
        """
        with client.test_mode as t:
            t.register_json('/jobs/42/', {
                'elapsed': 1335024000.0,
                'extra': 'ignored',
                'failed': False,
                'status': 'successful',
            })
            result = self.res.status(42)
            self.assertEqual(result, {
                'elapsed': 1335024000.0,
                'failed': False,
                'status': 'successful',
            })
            self.assertEqual(len(t.requests), 1)
    def test_normal_with_lookup(self):
        """A status lookup by name resolves the job through the list
        endpoint and still returns only the summary fields.
        """
        with client.test_mode as t:
            t.register_json('/jobs/?name=bar', {"count": 1, "results": [
                {"id": 42, "name": "bar",
                 'elapsed': 1335024000.0,
                 'extra': 'ignored',
                 'failed': False,
                 'status': 'successful', },
            ], "next": None, "previous": None}, method='GET')
            result = self.res.status(name="bar")
            self.assertEqual(result, {
                'elapsed': 1335024000.0,
                'failed': False,
                'status': 'successful',
            })
            self.assertEqual(len(t.requests), 1)
    def test_detailed(self):
        """With detail=True the full response body is returned,
        including fields the summary would drop.
        """
        with client.test_mode as t:
            t.register_json('/jobs/42/', {
                'elapsed': 1335024000.0,
                'extra': 'ignored',
                'failed': False,
                'status': 'successful',
            })
            result = self.res.status(42, detail=True)
            self.assertEqual(result, {
                'elapsed': 1335024000.0,
                'extra': 'ignored',
                'failed': False,
                'status': 'successful',
            })
            self.assertEqual(len(t.requests), 1)
class ListStatusesTests(unittest.TestCase):
def setUp(self):
self.res = tower_cli.get_resource('job')
def test_list_lone_status(self):
with client.test_mode as t:
t.register_json('/jobs/?status=running', {
'elapsed': 4567.0,
'extra': 'ignored',
'failed': False,
'status': 'running',
'extra': 'ignored'
})
result = self.res.list(status='running')
self.assertEqual(result, {
'elapsed': 4567.0,
'failed': False,
'status': 'running',
'extra': 'ignored'
})
self.assertEqual(len(t.requests), 1)
def test_list_multiple_statuses_with_bad_one(self):
with client.test_mode as t:
with self.assertRaises(exc.TowerCLIError) as e:
self.res.list(status='pending,runin')
self.assertEqual(len(t.requests), 0)
self.assertEqual(str(e.exception), 'This status does not exist: runin')
def test_list_multiple_statuses_first_page(self):
with client.test_mode as t:
t.register_json('/jobs/?or__status=pending&or__status=running', {
'count': 3,
'previous': None,
'next': '/api/%s/jobs/?or__status=pending&or__status=running&page=2' % CUR_API_VERSION,
'results': [{
'elapsed': 0.0,
'failed': False,
'status': 'pending',
'extra': 'ignored',
}, {
'elapsed': 7823.0,
'failed': False,
'status': 'running',
'extra': 'ignored',
}, {
'elapsed': 0.0,
'failed': False,
'status': 'pending',
'extra': 'ignored',
}]
})
t.register_json('/jobs/?or__status=pending&or__status=running&page=2', {
'count': 2,
'previous': '/api/%s/jobs/?or__status=pending&or__status=running&page=2' % CUR_API_VERSION,
'next': None,
'results': [{
'elapsed': 0.0,
'failed': False,
'status': 'pending',
'extra': 'ignored',
}, {
'elapsed': 0.0,
'failed': False,
'status': 'pending',
'extra': 'ignored',
}]
})
result = self.res.list(status='pending,running')
self.assertEqual(result, {
'count': 3,
'previous': None,
'next': 2,
'results': [{
'elapsed': 0.0,
'failed': False,
'status': 'pending',
'extra': 'ignored',
}, {
'elapsed': 7823.0,
'failed': False,
'status': 'running',
'extra': 'ignored',
}, {
'elapsed': 0.0,
'failed': False,
'status': 'pending',
'extra': 'ignored',
}]
})
self.assertEqual(len(t.requests), 1)
def test_list_multiple_statuses_second_page(self):
with client.test_mode as t:
t.register_json('/jobs/?or__status=pending&or__status=running', {
'count': 3,
'previous': None,
'next': '/api/%s/jobs/?or__status=pending&or__status=running&page=2' % CUR_API_VERSION,
'results': [{
'elapsed': 0.0,
'failed': False,
'status': 'pending',
'extra': 'ignored',
}, {
'elapsed': 7823.0,
'failed': False,
'status': 'running',
'extra': 'ignored',
}, {
'elapsed': 0.0,
'failed': False,
'status': 'pending',
'extra': 'ignored',
}]
})
t.register_json('/jobs/?or__status=pending&or__status=running&page=2', {
'count': 2,
'previous': '/api/%s/jobs/?or__status=pending&or__status=running&page=1' % CUR_API_VERSION,
'next': None,
'results': [{
'elapsed': 0.0,
'failed': False,
'status': 'pending',
'extra': 'ignored',
}, {
'elapsed': 0.0,
'failed': False,
'status': 'pending',
'extra': 'ignored',
}]
})
result = self.res.list(status='pending,running', page=2)
self.assertEqual(result, {
'count': 2,
'previous': 1,
'next': None,
'results': [{
'elapsed': 0.0,
'failed': False,
'status': 'pending',
'extra': 'ignored',
}, {
'elapsed': 0.0,
'failed': False,
'status': 'pending',
'extra': 'ignored',
}]
})
self.assertEqual(len(t.requests), 1)
def test_list_multiple_statuses_all_pages(self):
with client.test_mode as t:
t.register_json('/jobs/?or__status=pending&or__status=running', {
'count': 3,
'previous': None,
'next': '/api/%s/jobs/?or__status=pending&or__status=running&page=2' % CUR_API_VERSION,
'results': [{
'elapsed': 0.0,
'failed': False,
'status': 'pending',
'extra': 'ignored',
}, {
'elapsed': 7823.0,
'failed': False,
'status': 'running',
'extra': 'ignored',
}, {
'elapsed': 0.0,
'failed': False,
'status': 'pending',
'extra': 'ignored',
}]
})
t.register_json('/jobs/?or__status=pending&or__status=running&page=2', {
'count': 2,
'previous': '/api/%s/jobs/?or__status=pending&or__status=running&page=1' % CUR_API_VERSION,
'next': None,
'results': [{
'elapsed': 0.0,
'failed': False,
'status': 'pending',
'extra': 'ignored',
}, {
'elapsed': 0.0,
'failed': False,
'status': 'pending',
'extra': 'ignored',
}]
})
result = self.res.list(status='pending,running', all_pages=True)
self.assertEqual(result, {
'count': 5,
'previous': None,
'next': 2,
'results': [{
'elapsed': 0.0,
'failed': False,
'status': 'pending',
'extra': 'ignored',
}, {
'elapsed': 7823.0,
'failed': False,
'status': 'running',
'extra': 'ignored',
}, {
'elapsed': 0.0,
'failed': False,
'status': 'pending',
'extra': 'ignored',
}, {
'elapsed': 0.0,
'failed': False,
'status': 'pending',
'extra': 'ignored',
}, {
'elapsed': 0.0,
'failed': False,
'status': 'pending',
'extra': 'ignored',
}]
})
self.assertEqual(len(t.requests), 2)
class MonitorWaitTests(unittest.TestCase):
def setUp(self):
self.res = tower_cli.get_resource('job')
def test_already_successful(self):
with client.test_mode as t:
t.register_json('/jobs/42/', {
'elapsed': 1335024000.0,
'failed': False,
'status': 'successful',
})
with mock.patch.object(time, 'sleep') as sleep:
result = self.res.wait(42)
self.assertEqual(sleep.call_count, 0)
self.assertEqual(result['status'], 'successful')
def test_already_successful_monitor(self):
with client.test_mode as t:
t.register_json('/jobs/42/', {
'elapsed': 1335024000.0,
'failed': False,
'status': 'successful',
})
with mock.patch.object(time, 'sleep') as sleep:
with mock.patch.object(type(self.res), 'wait'):
with mock.patch.object(click, 'echo'):
result = self.res.monitor(42)
self.assertEqual(sleep.call_count, 0)
self.assertEqual(result['status'], 'successful')
def test_failure(self):
with client.test_mode as t:
t.register_json('/jobs/42/', {
'elapsed': 1335024000.0,
'failed': True,
'status': 'failed',
})
with self.assertRaises(exc.JobFailure):
with mock.patch.object(click, 'secho') as secho:
with mock.patch('tower_cli.models.base.is_tty') as tty:
tty.return_value = True
self.res.wait(42)
self.assertTrue(secho.call_count >= 1)
with self.assertRaises(exc.JobFailure):
with mock.patch.object(click, 'secho') as secho:
with mock.patch('tower_cli.models.base.is_tty') as tty:
tty.return_value = True
self.res.monitor(42)
self.assertTrue(secho.call_count >= 1)
def test_failure_non_tty(self):
with client.test_mode as t:
t.register_json('/jobs/42/', {
'elapsed': 1335024000.0,
'failed': True,
'status': 'failed',
})
with self.assertRaises(exc.JobFailure):
with mock.patch.object(click, 'echo') as echo:
with mock.patch('tower_cli.models.base.is_tty') as tty:
tty.return_value = False
self.res.wait(42)
self.assertTrue(echo.call_count >= 1)
def test_waiting(self):
data = {'elapsed': 1335024000.0, 'failed': False, 'status': 'pending'}
with client.test_mode as t:
t.register_json('/jobs/42/', copy(data))
# Create a way to assign a successful data object to the request.
def assign_success(*args):
t.clear()
t.register_json('/jobs/42/', dict(data, status='successful'))
# Make the successful state assignment occur when time.sleep()
# is called between requests.
with mock.patch.object(time, 'sleep') as sleep:
sleep.side_effect = assign_success
with mock.patch.object(click, 'secho') as secho:
with mock.patch('tower_cli.models.base.is_tty') as tty:
tty.return_value = True
self.res.wait(42, min_interval=0.21)
self.assertTrue(secho.call_count >= 100)
# We should have gotten two requests total, to the same URL.
self.assertEqual(len(t.requests), 2)
self.assertEqual(t.requests[0].url, t.requests[1].url)
def test_monitor(self):
# Set up our data object.
data = {'elapsed': 1335024000.0, 'failed': False, 'status': 'pending'}
# Register the initial request's response.
with client.test_mode as t:
t.register_json('/jobs/42/', copy(data))
def assign_success(*args):
t.clear()
t.register_json('/jobs/42/', dict(data, status='successful'))
with mock.patch.object(time, 'sleep') as sleep:
sleep.side_effect = assign_success
with mock.patch.object(click, 'echo'):
with mock.patch.object(type(self.res), 'wait'):
with mock.patch.object(
type(self.res), 'lookup_stdout'):
self.res.monitor(42, min_interval=0.21)
self.assertEqual(len(t.requests), 3)
self.assertEqual(t.requests[0].url, t.requests[1].url)
def test_timeout(self):
# (thus the timeout).
data = {'elapsed': 1335024000.0, 'failed': False, 'status': 'pending'}
# Mock out the passage of time.
with client.test_mode as t:
t.register_json('/jobs/42/', copy(data))
with mock.patch.object(click, 'secho') as secho:
with self.assertRaises(exc.Timeout):
with mock.patch('tower_cli.models.base.is_tty') as tty:
tty.return_value = True
self.res.wait(42, min_interval=0.21, timeout=0.1)
self.assertTrue(secho.call_count >= 1)
def test_waiting_not_tty(self):
# Set up our data object.
data = {'elapsed': 1335024000.0, 'failed': False, 'status': 'pending'}
# Register the initial request's response.
with client.test_mode as t:
t.register_json('/jobs/42/', copy(data))
def assign_success(*args):
t.clear()
t.register_json('/jobs/42/', dict(data, status='successful'))
with mock.patch.object(time, 'sleep') as sleep:
sleep.side_effect = assign_success
with mock.patch.object(click, 'echo') as echo:
with mock.patch('tower_cli.models.base.is_tty') as tty:
tty.return_value = False
self.res.wait(42, min_interval=0.21)
self.assertTrue(echo.call_count >= 1)
self.assertEqual(len(t.requests), 2)
self.assertEqual(t.requests[0].url, t.requests[1].url)
class CancelTests(unittest.TestCase):
    """A set of tests establishing that the job cancel command works
    as expected.
    """
    def setUp(self):
        self.res = tower_cli.get_resource('job')
    def test_standard_cancelation(self):
        """Canceling by primary key POSTs to the cancel endpoint and
        reports a change.
        """
        with client.test_mode as t:
            t.register('/jobs/42/cancel/', '', method='POST')
            result = self.res.cancel(42)
            self.assertTrue(t.requests[0].url.endswith('/jobs/42/cancel/'))
            self.assertTrue(result['changed'])
    def test_cancelation_by_lookup(self):
        """A job can be canceled by name: the name is resolved via a
        GET before POSTing to the cancel endpoint.
        """
        with client.test_mode as t:
            t.register_json('/jobs/?name=bar', {"count": 1, "results": [
                {"id": 42, "name": "bar"},
            ], "next": None, "previous": None}, method='GET')
            t.register('/jobs/42/cancel/', '', method='POST')
            result = self.res.cancel(name="bar")
            self.assertTrue(t.requests[0].url.endswith('/jobs/?name=bar'))
            self.assertTrue(t.requests[1].url.endswith('/jobs/42/cancel/'))
            self.assertTrue(result['changed'])
    def test_cancelation_completed(self):
        """A 405 from the cancel endpoint (job already finished) is
        not an error by default; it reports no change.
        """
        with client.test_mode as t:
            t.register('/jobs/42/cancel/', '', method='POST', status_code=405)
            result = self.res.cancel(42)
            self.assertTrue(t.requests[0].url.endswith('/jobs/42/cancel/'))
            self.assertFalse(result['changed'])
    def test_cancelation_completed_with_error(self):
        """With fail_if_not_running=True, a 405 from the cancel
        endpoint raises TowerCLIError.
        """
        with client.test_mode as t:
            t.register('/jobs/42/cancel/', '', method='POST', status_code=405)
            with self.assertRaises(exc.TowerCLIError):
                self.res.cancel(42, fail_if_not_running=True)
class RelaunchTests(unittest.TestCase):
    """Tests establishing that the job relaunch command works as
    expected.
    """
    def setUp(self):
        self.res = tower_cli.get_resource('job')
    def test_standard_relaunch(self):
        """A relaunch by primary key POSTs to the relaunch endpoint
        and reports the new job as a change.
        """
        with client.test_mode as t:
            data = {'id': 43}
            t.register_json('/jobs/42/relaunch/', data, method='POST')
            result = self.res.relaunch(42)
            self.assertTrue(t.requests[0].url.endswith('/jobs/42/relaunch/'))
            self.assertTrue(result['changed'])
| true | true |
f73a962dd32ccb6fbf113c22d0a8c223db3c7ad5 | 789 | py | Python | resetbg/resetbg.py | pzyyll/fun_tools | 1021d5d838ac2db6d051874fff548fc632c908a8 | [
"MIT"
] | null | null | null | resetbg/resetbg.py | pzyyll/fun_tools | 1021d5d838ac2db6d051874fff548fc632c908a8 | [
"MIT"
] | null | null | null | resetbg/resetbg.py | pzyyll/fun_tools | 1021d5d838ac2db6d051874fff548fc632c908a8 | [
"MIT"
] | null | null | null | """ 一个制作替换头像背景颜色的小工具(可以自制证件照~)
resetbg.py img_file color[blue|red|white]
"""
import sys
import removebg
from removebg import RemoveBg
from PIL import Image
class Color(object):
    """RGB constants for the supported background colors.

    getColor maps a command-line color name ('blue'/'red'/'white') to
    its RGB tuple; any unknown name falls back to blue.
    """
    BLUE = (30, 144, 255)
    RED = (255, 48, 48)
    WHITE = (255, 255, 255)

    @staticmethod
    def getColor(color):
        """Return the RGB tuple for the given color name.

        Unrecognized names default to Color.BLUE.
        """
        # Bug fix: 'white' was defined above but missing from this
        # mapping, so asking for a white background silently produced
        # a blue one.
        return {
            'blue': Color.BLUE,
            'red': Color.RED,
            'white': Color.WHITE,
        }.get(color, Color.BLUE)
# API key for the remove.bg web service; failures are logged to err.log.
API_KEY = 'ZE4zpXPftPoPsx4XoRcBSw46'
rmbg = RemoveBg(API_KEY, 'err.log')
# Usage: resetbg.py <image-file> <color>
jpg = sys.argv[1]
color = sys.argv[2]
# Strip the original background via the remove.bg service; the code
# then opens '<input>_no_bg.png', so the library presumably writes the
# cutout under that name next to the input (verify with removebg docs).
rmbg.remove_background_from_img_file(jpg)
rawpng_path = jpg + '_no_bg.png'
rawpng = Image.open(rawpng_path)
x, y = rawpng.size
# Composite the transparent cutout over a solid background of the
# requested color, using the cutout itself as the alpha mask.
p = Image.new('RGBA', rawpng.size, Color.getColor(color))
p.paste(rawpng, (0, 0, x, y), rawpng)
p.save(jpg + '_' + color + '.png')
| 19.725 | 57 | 0.646388 |
import sys
import removebg
from removebg import RemoveBg
from PIL import Image
class Color(object):
    """RGB constants for the supported background colors.

    getColor maps a command-line color name ('blue'/'red'/'white') to
    its RGB tuple; any unknown name falls back to blue.
    """
    BLUE = (30, 144, 255)
    RED = (255, 48, 48)
    WHITE = (255, 255, 255)

    @staticmethod
    def getColor(color):
        """Return the RGB tuple for the given color name.

        Unrecognized names default to Color.BLUE.
        """
        # Bug fix: 'white' was defined above but missing from this
        # mapping, so asking for a white background silently produced
        # a blue one.
        return {
            'blue': Color.BLUE,
            'red': Color.RED,
            'white': Color.WHITE,
        }.get(color, Color.BLUE)
# API key for the remove.bg web service; failures are logged to err.log.
API_KEY = 'ZE4zpXPftPoPsx4XoRcBSw46'
rmbg = RemoveBg(API_KEY, 'err.log')
# Usage: resetbg.py <image-file> <color>
jpg = sys.argv[1]
color = sys.argv[2]
# Strip the original background via the remove.bg service; the code
# then opens '<input>_no_bg.png', so the library presumably writes the
# cutout under that name next to the input (verify with removebg docs).
rmbg.remove_background_from_img_file(jpg)
rawpng_path = jpg + '_no_bg.png'
rawpng = Image.open(rawpng_path)
x, y = rawpng.size
# Composite the transparent cutout over a solid background of the
# requested color, using the cutout itself as the alpha mask.
p = Image.new('RGBA', rawpng.size, Color.getColor(color))
p.paste(rawpng, (0, 0, x, y), rawpng)
p.save(jpg + '_' + color + '.png')
| true | true |
f73a970a5b30ae54b9427c6dcf06706f67d17dd6 | 2,253 | py | Python | languages/python/src/concepts/P036_Dictionaries.py | vikash-india/DeveloperNotes2Myself | fe277a3c52f73884863f2f72b237365b27a8c882 | [
"MIT"
] | 2 | 2019-05-25T10:09:00.000Z | 2022-03-11T09:06:23.000Z | languages/python/src/concepts/P036_Dictionaries.py | vikash-india/DeveloperNotes2Myself | fe277a3c52f73884863f2f72b237365b27a8c882 | [
"MIT"
] | 2 | 2020-03-31T04:30:17.000Z | 2020-10-30T07:54:28.000Z | languages/python/src/concepts/P036_Dictionaries.py | vikash-india/DeveloperNotes2Myself | fe277a3c52f73884863f2f72b237365b27a8c882 | [
"MIT"
] | 4 | 2019-07-12T13:18:56.000Z | 2021-11-17T08:04:55.000Z | # Description: Dictionaries in Python
# Note
# 1. A dictionary is an UNORDERED key: value pairs, with the requirement that the keys are unique within a dictionary.
# 2. Dictionary in other languages are also called "associative memories", "associative arrays", "hash map" etc.
# 3. Dictionary Keys
# - Dictionaries are indexed by keys, which can be any immutable type; strings and numbers can always be keys.
# - Tuples can be used as keys if they contain only strings, numbers, or tuples; if a tuple contains any mutable
# object either directly or indirectly, it cannot be used as a key.
# - List cannot be used as keys, since lists can be modified in place using index assignments, slice assignments, or
# methods like append() and extend().
# 4. It is an error to extract a value using a non-existent key.
# Create dictionary
# --- Creating dictionaries -------------------------------------------------
info = {}                                 # start from an empty dictionary
info = {'AAAA': 1111, 'BBBB': 2222}       # literal syntax
info['CCCC'] = 3333                       # assigning to a new key inserts it
# dict() can build a mapping from a sequence of key/value pairs...
dictionary1 = dict([('DDDD', 4444), ('EEEE', 5555)])
# ...or, when the keys are simple strings, from keyword arguments.
dictionary2 = dict(sape=4139, guido=4127, jack=4098)

# --- Reading dictionaries --------------------------------------------------
print(info)
print(info['CCCC'])
print(list(info.keys()))    # keys as a list
print(sorted(info.keys()))  # keys, sorted
print(dictionary1)
print(dictionary2)

# --- Updating dictionaries -------------------------------------------------
del info['CCCC']            # remove a pair by key
info['irv'] = 4127          # insert another pair

# --- Membership ------------------------------------------------------------
print('guido' in info)      # 'in' tests against the keys
print('jack' not in info)

# --- Iteration -------------------------------------------------------------
a_dictionary = {'One': 1, 'Two': 2, "Three": 3}
for key, value in a_dictionary.items():  # key and value together
    print(key, value)

# --- Dict comprehensions ---------------------------------------------------
anotherDictionary = {x: x**2 for x in (2, 4, 6)}
print(anotherDictionary)
| 47.93617 | 120 | 0.64403 |
# Creating dictionaries: literal syntax, item assignment, and the
# dict() constructor (from a sequence of pairs or keyword arguments).
info = {}
info = {'AAAA': 1111, 'BBBB': 2222}
info['CCCC'] = 3333
dictionary1 = dict([('DDDD', 4444), ('EEEE', 5555)])
dictionary2 = dict(sape=4139, guido=4127, jack=4098)
# Reading: index by key, list the keys, or sort them.
print(info)
print(info['CCCC'])
print(list(info.keys()))
print(sorted(info.keys()))
print(dictionary1)
print(dictionary2)
# Updating: delete a pair by key, insert a new one.
del info['CCCC']
info['irv'] = 4127
# Membership ('in' / 'not in') is tested against the keys.
print('guido' in info)
print('jack' not in info)
# Iterating key/value pairs together via items().
a_dictionary = {'One': 1, 'Two': 2, "Three": 3}
for key, value in a_dictionary.items():
    print(key, value)
# Dict comprehension: build a mapping from arbitrary expressions.
anotherDictionary = {x: x**2 for x in (2, 4, 6)}
print(anotherDictionary)
| true | true |
f73a975f0d306f0272d72f7050ec5232a8c6d8fc | 13,051 | py | Python | opal_client/client.py | MatanyaStroh/opal | 6b0c61d0bd5d08fc5291827ff6af8587d6dbc775 | [
"Apache-2.0"
] | 367 | 2021-04-11T11:58:35.000Z | 2022-02-24T15:33:54.000Z | opal_client/client.py | asafcsimpleorg/opal | 262a296e5a2d06e9d1b15fcf3cc9adf68e12fd91 | [
"Apache-2.0"
] | 63 | 2021-04-14T07:00:44.000Z | 2021-12-07T12:05:38.000Z | opal_client/client.py | asafcsimpleorg/opal | 262a296e5a2d06e9d1b15fcf3cc9adf68e12fd91 | [
"Apache-2.0"
] | 32 | 2021-04-11T12:53:53.000Z | 2021-12-04T19:57:42.000Z | from logging import disable
import os
import signal
import asyncio
import uuid
import aiohttp
import functools
from typing import List, Optional
from fastapi import FastAPI
import websockets
from opal_common.logger import logger, configure_logs
from opal_common.middleware import configure_middleware
from opal_common.config import opal_common_config
from opal_common.security.sslcontext import get_custom_ssl_context
from opal_common.authentication.verifier import JWTVerifier
from opal_common.authentication.deps import JWTAuthenticator
from opal_client.policy_store.api import init_policy_store_router
from opal_client.config import PolicyStoreTypes, opal_client_config
from opal_client.data.api import init_data_router
from opal_client.data.updater import DataUpdater
from opal_client.data.fetcher import DataFetcher
from opal_client.policy_store.base_policy_store_client import BasePolicyStoreClient
from opal_client.policy_store.policy_store_client_factory import PolicyStoreClientFactory
from opal_client.opa.runner import OpaRunner
from opal_client.opa.options import OpaServerOptions
from opal_client.policy.api import init_policy_router
from opal_client.policy.updater import PolicyUpdater
from opal_client.callbacks.register import CallbacksRegister
from opal_client.callbacks.api import init_callbacks_api
class OpalClient:
    def __init__(
        self,
        policy_store_type:PolicyStoreTypes=None,
        policy_store:BasePolicyStoreClient=None,
        data_updater:DataUpdater=None,
        data_topics: List[str] = None,
        policy_updater:PolicyUpdater=None,
        inline_opa_enabled:bool=None,
        inline_opa_options:OpaServerOptions=None,
        verifier: Optional[JWTVerifier] = None,
    ) -> None:
        """Wire together all client components (policy store, policy/data
        updaters, optional inline OPA runner, JWT verifier) and build the
        FastAPI app that exposes them.

        Args:
            policy_store_type (PolicyStoreTypes, optional): Which policy store
                to use. Defaults to opal_client_config.POLICY_STORE_TYPE.
            data_topics (List[str], optional): Pub/sub topics the data updater
                subscribes to. Defaults to opal_client_config.DATA_TOPICS.
            inline_opa_enabled (bool, optional): Whether to run OPA as a
                managed subprocess. Defaults to
                opal_client_config.INLINE_OPA_ENABLED.
            inline_opa_options (OpaServerOptions, optional): Options for the
                inline OPA server. Defaults to
                opal_client_config.INLINE_OPA_CONFIG.
            verifier (JWTVerifier, optional): Verifier for API auth tokens;
                when omitted one is built from opal_common_config settings.

        Internal components (for each pass None for default init, or False to disable):
            policy_store (BasePolicyStoreClient, optional): The policy store client. Defaults to None.
            data_updater (DataUpdater, optional): Defaults to None.
            policy_updater (PolicyUpdater, optional): Defaults to None.
        """
        # defaults: fall back to the global client config for anything not
        # passed in explicitly.
        policy_store_type: PolicyStoreTypes = policy_store_type or opal_client_config.POLICY_STORE_TYPE
        inline_opa_enabled: bool = inline_opa_enabled or opal_client_config.INLINE_OPA_ENABLED
        inline_opa_options: OpaServerOptions = inline_opa_options or opal_client_config.INLINE_OPA_CONFIG
        # id used when reporting client statistics; random per-process if unset
        opal_client_identifier: str = opal_client_config.OPAL_CLIENT_STAT_ID or f"CLIENT_{uuid.uuid4().hex}"
        # set logs
        configure_logs()
        # Init policy store client
        self.policy_store_type: PolicyStoreTypes = policy_store_type
        self.policy_store: BasePolicyStoreClient = policy_store or PolicyStoreClientFactory.create(policy_store_type)
        # data fetcher, shared by both updaters
        self.data_fetcher = DataFetcher()
        # callbacks register: seeded from config when it declares callbacks
        if hasattr(opal_client_config.DEFAULT_UPDATE_CALLBACKS, 'callbacks'):
            default_callbacks = opal_client_config.DEFAULT_UPDATE_CALLBACKS.callbacks
        else:
            default_callbacks = []
        self._callbacks_register = CallbacksRegister(default_callbacks)
        # Init policy updater (subscribes to policy change notifications)
        if policy_updater is not None:
            self.policy_updater = policy_updater
        else:
            self.policy_updater = PolicyUpdater(policy_store=self.policy_store, data_fetcher=self.data_fetcher, callbacks_register=self._callbacks_register, opal_client_id=opal_client_identifier)
        # Data updating service; may be disabled entirely via config
        if opal_client_config.DATA_UPDATER_ENABLED:
            if data_updater is not None:
                self.data_updater = data_updater
            else:
                data_topics = data_topics if data_topics is not None else opal_client_config.DATA_TOPICS
                self.data_updater = DataUpdater(policy_store=self.policy_store, data_topics=data_topics, data_fetcher=self.data_fetcher, callbacks_register=self._callbacks_register, opal_client_id=opal_client_identifier)
        else:
            self.data_updater = None
        # Internal services
        # Policy store: only an inline OPA needs a managed runner; when the
        # runner restarts OPA, the rehydration callbacks repopulate it.
        if self.policy_store_type == PolicyStoreTypes.OPA and inline_opa_enabled:
            rehydration_callbacks = [
                # refetches policy code (e.g: rego) and static data from server
                functools.partial(self.policy_updater.update_policy, force_full_update=True),
            ]
            if self.data_updater:
                rehydration_callbacks.append(
                    functools.partial(self.data_updater.get_base_policy_data, data_fetch_reason="policy store rehydration")
                )
            self.opa_runner = OpaRunner.setup_opa_runner(options=inline_opa_options, rehydration_callbacks=rehydration_callbacks)
        else:
            # falsy sentinel: no inline OPA process to manage
            self.opa_runner = False
        # warn loudly when configured to trust self-signed certificates
        custom_ssl_context = get_custom_ssl_context()
        if opal_common_config.CLIENT_SELF_SIGNED_CERTIFICATES_ALLOWED and custom_ssl_context is not None:
            logger.warning("OPAL client is configured to trust self-signed certificates")
        # JWT verifier guarding this client's own API endpoints
        if verifier is not None:
            self.verifier = verifier
        else:
            self.verifier = JWTVerifier(
                public_key=opal_common_config.AUTH_PUBLIC_KEY,
                algorithm=opal_common_config.AUTH_JWT_ALGORITHM,
                audience=opal_common_config.AUTH_JWT_AUDIENCE,
                issuer=opal_common_config.AUTH_JWT_ISSUER,
            )
        if not self.verifier.enabled:
            logger.info("API authentication disabled (public encryption key was not provided)")
        # init fastapi app
        self.app: FastAPI = self._init_fast_api_app()
def _init_fast_api_app(self):
"""
inits the fastapi app object
"""
app = FastAPI(
title="OPAL Client",
description="OPAL is an administration layer for Open Policy Agent (OPA), detecting changes" + \
" to both policy and data and pushing live updates to your agents. The opal client is" + \
" deployed alongside a policy-store (e.g: OPA), keeping it up-to-date, by connecting to" + \
" an opal-server and subscribing to pub/sub updates for policy and policy data changes.",
version="0.1.0"
)
configure_middleware(app)
self._configure_api_routes(app)
self._configure_lifecycle_callbacks(app)
return app
def _configure_api_routes(self, app: FastAPI):
"""
mounts the api routes on the app object
"""
authenticator = JWTAuthenticator(self.verifier)
# Init api routers with required dependencies
policy_router = init_policy_router(policy_updater=self.policy_updater)
data_router = init_data_router(data_updater=self.data_updater)
policy_store_router = init_policy_store_router(authenticator)
callbacks_router = init_callbacks_api(authenticator, self._callbacks_register)
# mount the api routes on the app object
app.include_router(policy_router, tags=["Policy Updater"])
app.include_router(data_router, tags=["Data Updater"])
app.include_router(policy_store_router, tags=["Policy Store"])
app.include_router(callbacks_router, tags=["Callbacks"])
# top level routes (i.e: healthchecks)
@app.get("/healthcheck", include_in_schema=False)
@app.get("/", include_in_schema=False)
def healthcheck():
return {"status": "ok"}
return app
def _configure_lifecycle_callbacks(self, app: FastAPI):
"""
registers callbacks on app startup and shutdown.
on app startup we launch our long running processes (async tasks)
on the event loop. on app shutdown we stop these long running tasks.
"""
@app.on_event("startup")
async def startup_event():
asyncio.create_task(self.start_client_background_tasks())
@app.on_event("shutdown")
async def shutdown_event():
await self.stop_client_background_tasks()
return app
async def start_client_background_tasks(self):
"""
Launch OPAL client long-running tasks:
- Policy Store runner (e.g: Opa Runner)
- Policy Updater
- Data Updater
If there is a policy store to run, we wait until its up before launching dependent tasks.
"""
if self.opa_runner:
# runs the policy store dependent tasks after policy store is up
self.opa_runner.register_opa_initial_start_callbacks([self.launch_policy_store_dependent_tasks])
async with self.opa_runner:
await self.opa_runner.wait_until_done()
else:
# we do not run the policy store in the same container
# therefore we can immediately launch dependent tasks
await self.launch_policy_store_dependent_tasks()
async def stop_client_background_tasks(self):
"""
stops all background tasks (called on shutdown event)
"""
logger.info("stopping background tasks...")
# stopping opa runner
if self.opa_runner:
await self.opa_runner.stop()
# stopping updater tasks (each updater runs a pub/sub client)
logger.info("trying to shutdown DataUpdater and PolicyUpdater gracefully...")
tasks: List[asyncio.Task] = []
if self.data_updater:
tasks.append(asyncio.create_task(self.data_updater.stop()))
if self.policy_updater:
tasks.append(asyncio.create_task(self.policy_updater.stop()))
try:
await asyncio.gather(*tasks)
except Exception:
logger.exception("exception while shutting down updaters")
    async def launch_policy_store_dependent_tasks(self):
        """Run the tasks that need a live policy store: healthcheck policy init, then both updaters.

        Any failure here is fatal and shuts the whole worker down via SIGTERM.
        """
        try:
            await self.maybe_init_healthcheck_policy()
        except Exception:
            logger.critical("healthcheck policy enabled but could not be initialized!")
            self._trigger_shutdown()
            return
        try:
            # await each updater coroutine as it finishes; a failed websocket
            # handshake in either one aborts the client
            for task in asyncio.as_completed([self.launch_policy_updater(), self.launch_data_updater()]):
                await task
        except websockets.exceptions.InvalidStatusCode as err:
            logger.error("Failed to launch background task -- {err}", err=repr(err))
            self._trigger_shutdown()
async def maybe_init_healthcheck_policy(self):
"""
This function only runs if OPA_HEALTH_CHECK_POLICY_ENABLED is true.
Puts the healthcheck policy in opa cache and inits the transaction log used by the policy.
If any action fails, opal client will shutdown.
"""
if not opal_client_config.OPA_HEALTH_CHECK_POLICY_ENABLED:
return # skip
healthcheck_policy_relpath = opal_client_config.OPA_HEALTH_CHECK_POLICY_PATH
here = os.path.abspath(os.path.dirname(__file__))
healthcheck_policy_path = os.path.join(here, healthcheck_policy_relpath)
if not os.path.exists(healthcheck_policy_path):
logger.error("Critical: OPA health-check policy is enabled, but cannot find policy at {path}", path=healthcheck_policy_path)
raise ValueError("OPA health check policy not found!")
try:
healthcheck_policy_code = open(healthcheck_policy_path, 'r').read()
except IOError as err:
logger.error("Critical: Cannot read healthcheck policy: {err}", err=repr(err))
raise
try:
await self.policy_store.init_healthcheck_policy(
policy_id=healthcheck_policy_relpath,
policy_code=healthcheck_policy_code,
data_updater_enabled=opal_client_config.DATA_UPDATER_ENABLED
)
except aiohttp.ClientError as err:
logger.error("Failed to connect to OPA agent while init healthcheck policy -- {err}", err=repr(err))
raise
    def _trigger_shutdown(self):
        """
        this will send SIGTERM (Keyboard interrupt) to the worker, making uvicorn
        send "lifespan.shutdown" event to Starlette via the ASGI lifespan interface.
        Starlette will then trigger the @app.on_event("shutdown") callback, which
        in our case (self.stop_client_background_tasks()) will gracefully shutdown
        the background processes and only then will terminate the worker.
        """
        logger.info("triggering shutdown with SIGTERM...")
        # signal ourselves so the ASGI server runs the normal shutdown path
        os.kill(os.getpid(), signal.SIGTERM)
async def launch_policy_updater(self):
if self.policy_updater:
async with self.policy_updater:
await self.policy_updater.wait_until_done()
async def launch_data_updater(self):
if self.data_updater:
async with self.data_updater:
await self.data_updater.wait_until_done() | 43.942761 | 220 | 0.689679 | from logging import disable
import os
import signal
import asyncio
import uuid
import aiohttp
import functools
from typing import List, Optional
from fastapi import FastAPI
import websockets
from opal_common.logger import logger, configure_logs
from opal_common.middleware import configure_middleware
from opal_common.config import opal_common_config
from opal_common.security.sslcontext import get_custom_ssl_context
from opal_common.authentication.verifier import JWTVerifier
from opal_common.authentication.deps import JWTAuthenticator
from opal_client.policy_store.api import init_policy_store_router
from opal_client.config import PolicyStoreTypes, opal_client_config
from opal_client.data.api import init_data_router
from opal_client.data.updater import DataUpdater
from opal_client.data.fetcher import DataFetcher
from opal_client.policy_store.base_policy_store_client import BasePolicyStoreClient
from opal_client.policy_store.policy_store_client_factory import PolicyStoreClientFactory
from opal_client.opa.runner import OpaRunner
from opal_client.opa.options import OpaServerOptions
from opal_client.policy.api import init_policy_router
from opal_client.policy.updater import PolicyUpdater
from opal_client.callbacks.register import CallbacksRegister
from opal_client.callbacks.api import init_callbacks_api
class OpalClient:
    """The OPAL client: keeps a local policy store (e.g. OPA) in sync with an OPAL server.

    Wires together the policy-store client, the policy/data updaters, an optional
    inline OPA runner and a FastAPI app exposing the client's own API.
    """
    def __init__(
        self,
        policy_store_type:PolicyStoreTypes=None,
        policy_store:BasePolicyStoreClient=None,
        data_updater:DataUpdater=None,
        data_topics: List[str] = None,
        policy_updater:PolicyUpdater=None,
        inline_opa_enabled:bool=None,
        inline_opa_options:OpaServerOptions=None,
        verifier: Optional[JWTVerifier] = None,
    ) -> None:
        """Every None argument falls back to the matching ``opal_client_config`` value."""
        policy_store_type: PolicyStoreTypes = policy_store_type or opal_client_config.POLICY_STORE_TYPE
        inline_opa_enabled: bool = inline_opa_enabled or opal_client_config.INLINE_OPA_ENABLED
        inline_opa_options: OpaServerOptions = inline_opa_options or opal_client_config.INLINE_OPA_CONFIG
        opal_client_identifier: str = opal_client_config.OPAL_CLIENT_STAT_ID or f"CLIENT_{uuid.uuid4().hex}"
        configure_logs()
        self.policy_store_type: PolicyStoreTypes = policy_store_type
        self.policy_store: BasePolicyStoreClient = policy_store or PolicyStoreClientFactory.create(policy_store_type)
        self.data_fetcher = DataFetcher()
        # default callbacks fired after updates (if any were configured)
        if hasattr(opal_client_config.DEFAULT_UPDATE_CALLBACKS, 'callbacks'):
            default_callbacks = opal_client_config.DEFAULT_UPDATE_CALLBACKS.callbacks
        else:
            default_callbacks = []
        self._callbacks_register = CallbacksRegister(default_callbacks)
        # policy updater (always created)
        if policy_updater is not None:
            self.policy_updater = policy_updater
        else:
            self.policy_updater = PolicyUpdater(policy_store=self.policy_store, data_fetcher=self.data_fetcher, callbacks_register=self._callbacks_register, opal_client_id=opal_client_identifier)
        # data updater (optional, gated by config)
        if opal_client_config.DATA_UPDATER_ENABLED:
            if data_updater is not None:
                self.data_updater = data_updater
            else:
                data_topics = data_topics if data_topics is not None else opal_client_config.DATA_TOPICS
                self.data_updater = DataUpdater(policy_store=self.policy_store, data_topics=data_topics, data_fetcher=self.data_fetcher, callbacks_register=self._callbacks_register, opal_client_id=opal_client_identifier)
        else:
            self.data_updater = None
        # inline OPA runner: refetches policy (and data) after every OPA restart
        if self.policy_store_type == PolicyStoreTypes.OPA and inline_opa_enabled:
            rehydration_callbacks = [
                # refetches policy code (e.g: rego) and static data from server
                functools.partial(self.policy_updater.update_policy, force_full_update=True),
            ]
            if self.data_updater:
                rehydration_callbacks.append(
                    functools.partial(self.data_updater.get_base_policy_data, data_fetch_reason="policy store rehydration")
                )
            self.opa_runner = OpaRunner.setup_opa_runner(options=inline_opa_options, rehydration_callbacks=rehydration_callbacks)
        else:
            self.opa_runner = False
        custom_ssl_context = get_custom_ssl_context()
        if opal_common_config.CLIENT_SELF_SIGNED_CERTIFICATES_ALLOWED and custom_ssl_context is not None:
            logger.warning("OPAL client is configured to trust self-signed certificates")
        # JWT verifier guarding this client's own API endpoints
        if verifier is not None:
            self.verifier = verifier
        else:
            self.verifier = JWTVerifier(
                public_key=opal_common_config.AUTH_PUBLIC_KEY,
                algorithm=opal_common_config.AUTH_JWT_ALGORITHM,
                audience=opal_common_config.AUTH_JWT_AUDIENCE,
                issuer=opal_common_config.AUTH_JWT_ISSUER,
            )
        if not self.verifier.enabled:
            logger.info("API authentication disabled (public encryption key was not provided)")
        # init fastapi app (must come last: routes depend on the fields above)
        self.app: FastAPI = self._init_fast_api_app()
    def _init_fast_api_app(self):
        """inits the fastapi app object"""
        app = FastAPI(
            title="OPAL Client",
            description="OPAL is an administration layer for Open Policy Agent (OPA), detecting changes" + \
                " to both policy and data and pushing live updates to your agents. The opal client is" + \
                " deployed alongside a policy-store (e.g: OPA), keeping it up-to-date, by connecting to" + \
                " an opal-server and subscribing to pub/sub updates for policy and policy data changes.",
            version="0.1.0"
        )
        configure_middleware(app)
        self._configure_api_routes(app)
        self._configure_lifecycle_callbacks(app)
        return app
    def _configure_api_routes(self, app: FastAPI):
        """mounts the api routes on the app object"""
        authenticator = JWTAuthenticator(self.verifier)
        # init api routers with required dependencies
        policy_router = init_policy_router(policy_updater=self.policy_updater)
        data_router = init_data_router(data_updater=self.data_updater)
        policy_store_router = init_policy_store_router(authenticator)
        callbacks_router = init_callbacks_api(authenticator, self._callbacks_register)
        # mount the api routes on the app object
        app.include_router(policy_router, tags=["Policy Updater"])
        app.include_router(data_router, tags=["Data Updater"])
        app.include_router(policy_store_router, tags=["Policy Store"])
        app.include_router(callbacks_router, tags=["Callbacks"])
        # top level routes (i.e: healthchecks)
        @app.get("/healthcheck", include_in_schema=False)
        @app.get("/", include_in_schema=False)
        def healthcheck():
            return {"status": "ok"}
        return app
    def _configure_lifecycle_callbacks(self, app: FastAPI):
        """registers startup/shutdown hooks that launch/stop the background tasks"""
        @app.on_event("startup")
        async def startup_event():
            # fire-and-forget: background tasks keep running on the event loop
            asyncio.create_task(self.start_client_background_tasks())
        @app.on_event("shutdown")
        async def shutdown_event():
            await self.stop_client_background_tasks()
        return app
    async def start_client_background_tasks(self):
        """Launch the long-running tasks; with an inline policy store, the
        updater tasks wait until the store is up."""
        if self.opa_runner:
            # runs the policy store dependent tasks after policy store is up
            self.opa_runner.register_opa_initial_start_callbacks([self.launch_policy_store_dependent_tasks])
            async with self.opa_runner:
                await self.opa_runner.wait_until_done()
        else:
            # external policy store: dependent tasks can start immediately
            await self.launch_policy_store_dependent_tasks()
    async def stop_client_background_tasks(self):
        """stops all background tasks (called on shutdown event)"""
        logger.info("stopping background tasks...")
        if self.opa_runner:
            await self.opa_runner.stop()
        logger.info("trying to shutdown DataUpdater and PolicyUpdater gracefully...")
        tasks: List[asyncio.Task] = []
        if self.data_updater:
            tasks.append(asyncio.create_task(self.data_updater.stop()))
        if self.policy_updater:
            tasks.append(asyncio.create_task(self.policy_updater.stop()))
        try:
            await asyncio.gather(*tasks)
        except Exception:
            logger.exception("exception while shutting down updaters")
    async def launch_policy_store_dependent_tasks(self):
        """Init healthcheck policy (fatal on failure), then run both updaters."""
        try:
            await self.maybe_init_healthcheck_policy()
        except Exception:
            logger.critical("healthcheck policy enabled but could not be initialized!")
            self._trigger_shutdown()
            return
        try:
            # a failed websocket handshake in either updater aborts the client
            for task in asyncio.as_completed([self.launch_policy_updater(), self.launch_data_updater()]):
                await task
        except websockets.exceptions.InvalidStatusCode as err:
            logger.error("Failed to launch background task -- {err}", err=repr(err))
            self._trigger_shutdown()
    async def maybe_init_healthcheck_policy(self):
        """Install the OPA healthcheck policy; no-op unless enabled via config.

        Raises ValueError (policy file missing), IOError (unreadable) or
        aiohttp.ClientError (policy store unreachable).
        """
        if not opal_client_config.OPA_HEALTH_CHECK_POLICY_ENABLED:
            return
        healthcheck_policy_relpath = opal_client_config.OPA_HEALTH_CHECK_POLICY_PATH
        # the policy file ships inside this package, relative to this module
        here = os.path.abspath(os.path.dirname(__file__))
        healthcheck_policy_path = os.path.join(here, healthcheck_policy_relpath)
        if not os.path.exists(healthcheck_policy_path):
            logger.error("Critical: OPA health-check policy is enabled, but cannot find policy at {path}", path=healthcheck_policy_path)
            raise ValueError("OPA health check policy not found!")
        try:
            healthcheck_policy_code = open(healthcheck_policy_path, 'r').read()
        except IOError as err:
            logger.error("Critical: Cannot read healthcheck policy: {err}", err=repr(err))
            raise
        try:
            await self.policy_store.init_healthcheck_policy(
                policy_id=healthcheck_policy_relpath,
                policy_code=healthcheck_policy_code,
                data_updater_enabled=opal_client_config.DATA_UPDATER_ENABLED
            )
        except aiohttp.ClientError as err:
            logger.error("Failed to connect to OPA agent while init healthcheck policy -- {err}", err=repr(err))
            raise
    def _trigger_shutdown(self):
        """SIGTERM ourselves so uvicorn runs the normal lifespan-shutdown path."""
        logger.info("triggering shutdown with SIGTERM...")
        os.kill(os.getpid(), signal.SIGTERM)
    async def launch_policy_updater(self):
        """run the policy updater (if any) until it completes"""
        if self.policy_updater:
            async with self.policy_updater:
                await self.policy_updater.wait_until_done()
    async def launch_data_updater(self):
        """run the data updater (if any) until it completes"""
        if self.data_updater:
            async with self.data_updater:
                await self.data_updater.wait_until_done()
f73a9a422d3c323f5b13da425abee834a51ec3e8 | 441 | py | Python | reader/opinions/opinion.py | nicolay-r/RuSentRel | 4fc0df1580d3da21f0be1e832e403652f73caed1 | [
"MIT"
] | 5 | 2019-01-28T10:31:07.000Z | 2021-09-08T06:25:30.000Z | reader/opinions/opinion.py | nicolay-r/RuSentRel | 4fc0df1580d3da21f0be1e832e403652f73caed1 | [
"MIT"
] | null | null | null | reader/opinions/opinion.py | nicolay-r/RuSentRel | 4fc0df1580d3da21f0be1e832e403652f73caed1 | [
"MIT"
] | 1 | 2019-08-18T18:30:56.000Z | 2019-08-18T18:30:56.000Z | from ..common.opinions.opinion import Opinion
class RuSentRelOpinion(Opinion):
    """Opinion for the RuSentRel collection.

    Source/target values must not contain commas -- presumably because the
    collection serialises opinions comma-separated (TODO confirm against writer).
    """
    def __init__(self, value_source, value_target, sentiment):
        for value in (value_source, value_target):
            assert ',' not in value
        super(RuSentRelOpinion, self).__init__(source_value=value_source,
                                               target_value=value_target,
                                               sentiment=sentiment)
| 36.75 | 73 | 0.587302 | from ..common.opinions.opinion import Opinion
class RuSentRelOpinion(Opinion):
    """Opinion specialised for RuSentRel; comma characters are rejected in both
    endpoint values (reason not visible here -- likely serialisation; confirm)."""
    def __init__(self, value_source, value_target, sentiment):
        # reject commas in both endpoint values
        assert(',' not in value_source)
        assert(',' not in value_target)
        super(RuSentRelOpinion, self).__init__(source_value=value_source,
                                               target_value=value_target,
                                               sentiment=sentiment)
| true | true |
f73a9acf63d52b11022bc82165e556cf59c8bf4d | 3,768 | py | Python | pyshard/app/app.py | la9ran9e/pyshard | 99a345e01c9e949720c345ed142cae9c7d2839f7 | [
"MIT"
] | 6 | 2019-07-11T14:08:41.000Z | 2021-04-11T03:33:10.000Z | pyshard/app/app.py | la9ran9e/pyshard | 99a345e01c9e949720c345ed142cae9c7d2839f7 | [
"MIT"
] | 2 | 2019-07-19T20:07:32.000Z | 2022-02-12T10:17:52.000Z | pyshard/app/app.py | la9ran9e/pyshard | 99a345e01c9e949720c345ed142cae9c7d2839f7 | [
"MIT"
] | 1 | 2022-02-12T10:11:16.000Z | 2022-02-12T10:11:16.000Z | import abc
from typing import Union
from ..master.master import Master, _Shards
from ..master.client import MasterClient
from ..shard.client import ShardClient
from ..core.client import ClientError
from ..core.typing import Key, Doc, Hash
class AbstractResult(abc.ABC):
    """Interface for an operation result: a payload plus the key hash that routed it."""
    @abc.abstractmethod
    def result(self) -> Union[int, Doc]: ...
    @abc.abstractmethod
    def hash(self) -> Hash: ...
    @abc.abstractmethod
    def __iter__(self): ...
class PyshardABC(abc.ABC):
    """Interface of the Pyshard client: keyed document operations over an index."""
    @abc.abstractmethod
    def write(self, index, key: Key, doc: Doc) -> AbstractResult: ...
    @abc.abstractmethod
    def read(self, index, key: Key) -> AbstractResult: ...
    @abc.abstractmethod
    def pop(self, index, key: Key) -> AbstractResult: ...
    @abc.abstractmethod
    def remove(self, index, key: Key) -> AbstractResult: ...
    @abc.abstractmethod
    def create_index(self, index): ...
def _map_shards(bootstrap_client, **kwargs):
    """Fetch the shard map from the bootstrap master and wrap each address in a ShardClient.

    Extra keyword arguments are forwarded to every ShardClient.
    """
    raw_map = bootstrap_client.get_map()
    clients = {
        float(bin_): ShardClient(*address, **kwargs)
        for bin_, address in raw_map.items()
    }
    return _Shards(clients)
class Result(AbstractResult):
    """Concrete operation result: the returned value paired with the routing hash.

    Iterable, so it unpacks as ``value, key_hash = result``.
    """
    def __init__(self, result, hash_):
        self._result = result
        self._hash = hash_

    @property
    def result(self):
        """The operation's payload (offset, document, ...)."""
        return self._result

    @property
    def hash(self):
        """Hash of the key that selected the shard."""
        return self._hash

    def __iter__(self):
        return iter((self._result, self._hash))
class Pyshard(PyshardABC):
    """Facade over the shard cluster: each (index, key) operation is routed to
    the shard the master selects for the key's hash."""
    def __init__(self, bootstrap_server, buffer_size=1024, master_class=Master,
                 **master_args):
        self._bootstrap_client = MasterClient(*bootstrap_server, buffer_size=buffer_size)
        shards = _map_shards(self._bootstrap_client) # TODO: add ShardClient kwargs
        self._master = master_class(shards=shards, **master_args)
    def write(self, index, key, doc) -> Result:
        """Write *doc* under *key*; result is the written offset, or 0 on shard error."""
        hash_, shard = self._master.get_shard(index, key)
        try:
            offset = shard.write(index, key, hash_, doc)
        except ClientError as err:
            # TODO: log a warning with err instead of swallowing it
            res = 0
        else:
            res = offset
        return Result(res, hash_)
    def has(self, index, key) -> Result:
        """Membership test for *key* on its shard."""
        hash_, shard = self._master.get_shard(index, key)
        return Result(shard.has(index, key), hash_)
    def read(self, index, key) -> Result:
        """Read the document under *key*; result is None on shard error."""
        hash_, shard = self._master.get_shard(index, key)
        try:
            doc = shard.read(index, key)
        except ClientError as err:
            # TODO: log a warning with err instead of swallowing it
            res = None
        else:
            res = doc
        return Result(res, hash_)
    def pop(self, index, key) -> Result:
        """Remove and return the document under *key*; result is None on shard error."""
        hash_, shard = self._master.get_shard(index, key)
        try:
            doc = shard.pop(index, key)
        except ClientError as err:
            # TODO: log a warning with err instead of swallowing it
            res = None
        else:
            res = doc
        return Result(res, hash_)
    def remove(self, index, key) -> Result:
        """Delete *key*; result is the shard's remove() return value, or 0 on error."""
        hash_, shard = self._master.get_shard(index, key)
        try:
            offset = shard.remove(index, key)
        except ClientError as err:
            # TODO: log a warning with err instead of swallowing it
            res = 0
        else:
            res = offset
        return Result(res, hash_)
    def create_index(self, index):
        """Create *index* across the cluster (delegated to the master)."""
        self._master.create_index(index)
    def drop_index(self, index):
        """Drop *index* across the cluster (delegated to the master)."""
        self._master.drop_index(index)
    def keys(self, index):
        """Yield every key of *index* from every shard."""
        for shard in self._master.shards:
            for key in shard.keys(index):
                yield key
    def close(self):
        """Close the bootstrap connection and the master (with its shard clients)."""
        self._bootstrap_client.close()
        self._master.close()
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()
| 27.107914 | 89 | 0.603769 | import abc
from typing import Union
from ..master.master import Master, _Shards
from ..master.client import MasterClient
from ..shard.client import ShardClient
from ..core.client import ClientError
from ..core.typing import Key, Doc, Hash
class AbstractResult(abc.ABC):
    """Interface for an operation result: a payload plus the key hash that routed it."""
    @abc.abstractmethod
    def result(self) -> Union[int, Doc]: ...
    @abc.abstractmethod
    def hash(self) -> Hash: ...
    @abc.abstractmethod
    def __iter__(self): ...
class PyshardABC(abc.ABC):
    """Interface of the Pyshard client: keyed document operations over an index."""
    @abc.abstractmethod
    def write(self, index, key: Key, doc: Doc) -> AbstractResult: ...
    @abc.abstractmethod
    def read(self, index, key: Key) -> AbstractResult: ...
    @abc.abstractmethod
    def pop(self, index, key: Key) -> AbstractResult: ...
    @abc.abstractmethod
    def remove(self, index, key: Key) -> AbstractResult: ...
    @abc.abstractmethod
    def create_index(self, index): ...
def _map_shards(bootstrap_client, **kwargs):
    """Build a _Shards mapping of bin -> ShardClient from the bootstrap master's map.

    Extra keyword arguments are forwarded to every ShardClient.
    """
    shard_map = {}
    map_ = bootstrap_client.get_map()
    for bin, addr in map_.items():
        # bin keys are normalised to float
        shard_map[float(bin)] = ShardClient(*addr, **kwargs)
    return _Shards(shard_map)
class Result(AbstractResult):
    """Operation outcome: the returned value plus the hash that routed the key.

    Unpacks like a 2-tuple: ``value, key_hash = result``.
    """
    def __init__(self, result, hash_):
        self._result = result
        self._hash = hash_

    @property
    def result(self):
        return self._result

    @property
    def hash(self):
        return self._hash

    def __iter__(self):
        yield self._result
        yield self._hash
class Pyshard(PyshardABC):
    """Facade over the shard cluster: each (index, key) operation is routed to
    the shard the master selects for the key's hash."""
    def __init__(self, bootstrap_server, buffer_size=1024, master_class=Master,
                 **master_args):
        self._bootstrap_client = MasterClient(*bootstrap_server, buffer_size=buffer_size)
        shards = _map_shards(self._bootstrap_client)  # TODO: forward ShardClient kwargs
        self._master = master_class(shards=shards, **master_args)
    def write(self, index, key, doc) -> Result:
        """Write *doc* under *key*; result is the written offset, or 0 on shard error."""
        hash_, shard = self._master.get_shard(index, key)
        try:
            offset = shard.write(index, key, hash_, doc)
        except ClientError as err:
            # shard error swallowed; 0 signals the failure to the caller
            res = 0
        else:
            res = offset
        return Result(res, hash_)
    def has(self, index, key) -> Result:
        """Membership test for *key* on its shard."""
        hash_, shard = self._master.get_shard(index, key)
        return Result(shard.has(index, key), hash_)
    def read(self, index, key) -> Result:
        """Read the document under *key*; result is None on shard error."""
        hash_, shard = self._master.get_shard(index, key)
        try:
            doc = shard.read(index, key)
        except ClientError as err:
            res = None
        else:
            res = doc
        return Result(res, hash_)
    def pop(self, index, key) -> Result:
        """Remove and return the document under *key*; result is None on shard error."""
        hash_, shard = self._master.get_shard(index, key)
        try:
            doc = shard.pop(index, key)
        except ClientError as err:
            res = None
        else:
            res = doc
        return Result(res, hash_)
    def remove(self, index, key) -> Result:
        """Delete *key*; result is the shard's remove() return value, or 0 on error."""
        hash_, shard = self._master.get_shard(index, key)
        try:
            offset = shard.remove(index, key)
        except ClientError as err:
            res = 0
        else:
            res = offset
        return Result(res, hash_)
    def create_index(self, index):
        """Create *index* across the cluster (delegated to the master)."""
        self._master.create_index(index)
    def drop_index(self, index):
        """Drop *index* across the cluster (delegated to the master)."""
        self._master.drop_index(index)
    def keys(self, index):
        """Yield every key of *index* from every shard."""
        for shard in self._master.shards:
            for key in shard.keys(index):
                yield key
    def close(self):
        """Close the bootstrap connection and the master (with its shard clients)."""
        self._bootstrap_client.close()
        self._master.close()
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()
| true | true |
f73a9baafa9bf7b4797f518cdcfb94577b0fdb4d | 1,856 | py | Python | top_articles/cron_articles.py | Ramesh7128/hacker-news-clone | f41cca2e2d39b7a610b6261f6f1af6ff24f0e466 | [
"MIT"
] | 1 | 2019-05-28T13:24:28.000Z | 2019-05-28T13:24:28.000Z | top_articles/cron_articles.py | Ramesh7128/hacker-news-clone | f41cca2e2d39b7a610b6261f6f1af6ff24f0e466 | [
"MIT"
] | 2 | 2020-06-05T19:48:32.000Z | 2021-06-10T21:08:21.000Z | top_articles/cron_articles.py | Ramesh7128/hacker-news-clone | f41cca2e2d39b7a610b6261f6f1af6ff24f0e466 | [
"MIT"
] | 1 | 2019-03-19T18:55:29.000Z | 2019-03-19T18:55:29.000Z |
import asyncio
import aiohttp
import requests
from top_articles.models import Story
from django.core.exceptions import ObjectDoesNotExist
def check_db_story_ids(articlesID_list):
    """Return the ids from *articlesID_list* that have no Story row in the DB yet."""
    missing_ids = []
    for story_id in articlesID_list:
        try:
            Story.objects.get(id=story_id)
        except ObjectDoesNotExist:
            # not stored yet -> still needs to be fetched
            missing_ids.append(story_id)
    return missing_ids
def get_article_urls(url):
    """Fetch trending article ids from *url*, keep the ones missing from the DB
    and build their Hacker News item URLs.

    Returns:
        (item_url_list, all_ids, missing_ids)
    """
    articlesID_list = requests.get(url).json()
    print("article length", len(articlesID_list))
    newarticlesID_list = check_db_story_ids(articlesID_list)
    url_list = [
        "https://hacker-news.firebaseio.com/v0/item/%s.json?print=pretty" % article_id
        for article_id in newarticlesID_list
    ]
    return url_list, articlesID_list, newarticlesID_list
async def fetch_url(session, url):
    """GET *url* on *session* and return the parsed JSON body."""
    # per-request timeout of one hour (60 * 60 seconds)
    async with session.get(url, timeout=60 * 60) as response:
        return await response.json()
async def fetch_all_urls(session, urls, loop):
    """Fetch all *urls* concurrently; exceptions are returned in-place in the
    result list (return_exceptions=True)."""
    # NOTE(review): ``loop`` is unused; kept for interface compatibility
    results = await asyncio.gather(*[fetch_url(session, url) for url in urls],
                                   return_exceptions=True)
    return results
def fetch_articles(urls):
    """Fetch all *urls* concurrently; returns the list of parsed JSON bodies
    (or exception objects, per return_exceptions) -- None for fewer than 2 urls."""
    # NOTE(review): a single-element url list returns None and is never fetched;
    # confirm whether ``> 1`` should be ``>= 1``.
    if len(urls) > 1:
        loop = asyncio.get_event_loop()
        connector = aiohttp.TCPConnector(limit=100)
        # NOTE(review): ClientSession entered with a *sync* ``with`` -- deprecated
        # in modern aiohttp, which expects ``async with``.
        with aiohttp.ClientSession(loop=loop, connector=connector) as session:
            articles = loop.run_until_complete(fetch_all_urls(session, urls, loop))
        raw_result = articles
        return raw_result
    else:
        return None
def main(url):
    """Collect item urls + ids, fetch the missing articles and return everything.

    Returns (fetched_articles_or_None, all_ids, missing_ids).
    """
    fetched_urls, all_ids, missing_ids = get_article_urls(url)
    print(fetched_urls, all_ids, missing_ids)
    articles = fetch_articles(fetched_urls)
    return articles, all_ids, missing_ids
| 31.457627 | 83 | 0.709591 |
import asyncio
import aiohttp
import requests
from top_articles.models import Story
from django.core.exceptions import ObjectDoesNotExist
def check_db_story_ids(articlesID_list):
    """Return the ids from *articlesID_list* that have no Story row in the DB yet."""
    new_articleID_list = []
    for id in articlesID_list:
        try:
            Story.objects.get(id=id)
        except ObjectDoesNotExist:
            # not stored yet -> still needs to be fetched
            new_articleID_list.append(id)
    return new_articleID_list
def get_article_urls(url):
    """Fetch trending article ids from *url*, keep the ones missing from the DB
    and build their Hacker News item URLs.

    Returns (item_url_list, all_ids, missing_ids).
    """
    articlesID_list = requests.get(
        url).json()
    url_list = []
    print("article length",len(articlesID_list))
    newarticlesID_list = check_db_story_ids(articlesID_list)
    for id in newarticlesID_list:
        url ="https://hacker-news.firebaseio.com/v0/item/%s.json?print=pretty" % id
        url_list.append(url)
    return url_list, articlesID_list, newarticlesID_list
async def fetch_url(session, url):
    """GET *url* on *session* and return the parsed JSON body (1h per-request timeout)."""
    async with session.get(url, timeout=60 * 60) as response:
        return await response.json()
async def fetch_all_urls(session, urls, loop):
    """Fetch all *urls* concurrently; exceptions come back in-place in the result list."""
    # NOTE(review): ``loop`` is unused; kept for interface compatibility
    results = await asyncio.gather(*[fetch_url(session, url) for url in urls],
                                   return_exceptions=True)
    return results
def fetch_articles(urls):
    """Fetch all *urls* concurrently; returns parsed JSON bodies (or exception
    objects), or None for lists of fewer than 2 urls."""
    # NOTE(review): single-url lists are never fetched (``> 1``) and
    # ClientSession is entered with a *sync* ``with`` -- both look unintended; confirm.
    if len(urls) > 1:
        loop = asyncio.get_event_loop()
        connector = aiohttp.TCPConnector(limit=100)
        with aiohttp.ClientSession(loop=loop, connector=connector) as session:
            articles = loop.run_until_complete(fetch_all_urls(session, urls, loop))
        raw_result = articles
        return raw_result
    else:
        return None
def main(url):
    """Collect item urls + ids, fetch the missing articles and return everything.

    Returns (fetched_articles_or_None, all_ids, missing_ids).
    """
    urls_list, articlesID_list, newarticlesID_list = get_article_urls(url)
    print(urls_list, articlesID_list, newarticlesID_list)
    result_dict = fetch_articles(urls_list)
    return result_dict, articlesID_list, newarticlesID_list
| true | true |
f73a9bc09926655605ead8c6dbff3fb134d9c7f1 | 5,748 | py | Python | utils/utils.py | JacopoBugini/SpotMask | 0be6c35283b89d5bbddcdb2b65a67a59fac4d264 | [
"MIT"
] | 1 | 2021-08-30T13:47:19.000Z | 2021-08-30T13:47:19.000Z | utils/utils.py | JacopoBugini/SpotMask | 0be6c35283b89d5bbddcdb2b65a67a59fac4d264 | [
"MIT"
] | null | null | null | utils/utils.py | JacopoBugini/SpotMask | 0be6c35283b89d5bbddcdb2b65a67a59fac4d264 | [
"MIT"
] | null | null | null | import numpy as np
import cv2
import tensorflow.keras as keras
from tensorflow.keras.preprocessing import image
import numpy as np
# -------------------------------------------------------------------
# Load models
# -------------------------------------------------------------------

# The three trained Keras models are loaded once, at import time.
mask_net = keras.models.load_model('models/facemask-correctness/mask_correctness_model.h5')
print("Model Check Mask imported correctly")

detect_net = keras.models.load_model('models/mask-detection/mask_detection_model.h5')
print("Model Detect Mask imported correctly")
print("*********************************************")

suggest_net = keras.models.load_model('models/suggestions-detection/suggestions_model.h5')
# fix: this message previously said "Model Detect Mask" (copy-paste slip)
print("Model Suggestions imported correctly")
print("*********************************************")

# -------------------------------------------------------------------
# Parameters
# -------------------------------------------------------------------
CONF_THRESHOLD = 0.5   # minimum detection confidence to keep a box
NMS_THRESHOLD = 0.4    # non-maximum-suppression overlap threshold
IMG_WIDTH = 416        # NOTE(review): appears to be the detector input size -- confirm
IMG_HEIGHT = 416
# -------------------------------------------------------------------
# Help functions
# -------------------------------------------------------------------
# Get the names of the output layers
def get_outputs_names(net):
    """Return the names of the network's output layers (layers with unconnected outputs)."""
    # Get the names of all the layers in the network
    layers_names = net.getLayerNames()
    # getUnconnectedOutLayers() returns a column vector of 1-based indices on
    # OpenCV < 4.5.4 and a flat array on newer versions; flatten() handles both
    # (the previous ``i[0]`` indexing broke on newer OpenCV).
    return [layers_names[i - 1] for i in np.array(net.getUnconnectedOutLayers()).flatten()]
def process_frame(frame, outs, conf_threshold, nms_threshold, mode):
    """Turn raw detector outputs into annotated face boxes on *frame*.

    Keeps boxes above *conf_threshold*, applies NMS, classifies each face crop
    with detect_mask_usage and draws the box + label in place on *frame*.
    Returns the list of kept [left, top, width, height] boxes.
    """
    frame_height = frame.shape[0]
    frame_width = frame.shape[1]
    # Scan through all the bounding boxes output from the network and keep only the ones with high confidence scores.
    confidences = []
    boxes = []
    final_boxes = []
    for out in outs:
        for detection in out:
            # detection layout: [cx, cy, w, h, ?, class scores...] (relative coords)
            scores = detection[5:]
            class_id = np.argmax(scores)
            confidence = scores[class_id]
            if confidence > conf_threshold:
                center_x = int(detection[0] * frame_width)
                center_y = int(detection[1] * frame_height)
                width = int(detection[2] * frame_width)
                height = int(detection[3] * frame_height)
                left = int(center_x - width / 2)
                top = int(center_y - height / 2)
                confidences.append(float(confidence))
                boxes.append([left, top, width, height])
    # Perform non maximum suppression to eliminate redundant overlapping boxes with lower confidences.
    indices = cv2.dnn.NMSBoxes(boxes, confidences, conf_threshold,
                               nms_threshold)
    for i in indices:
        # NOTE(review): ``i[0]`` assumes nested indices (older OpenCV); newer
        # OpenCV returns flat indices -- confirm the pinned cv2 version.
        i = i[0]
        box = boxes[i]
        left = box[0]
        top = box[1]
        width = box[2]
        height = box[3]
        final_boxes.append(box)
        colour_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        # crop with a 30px margin around the face
        # NOTE(review): ``top-30``/``left-30`` can go negative and wrap via
        # numpy negative slicing for faces near the frame edge -- confirm intended.
        face_img_crop = colour_frame[top-30:top+height+30, left-30:left+width+30]
        img_array = prepare_frame(face_img_crop)
        output_mask, colour, mask_result = detect_mask_usage(img_array, mode)
        # draw the annotated box and label on the original (BGR) frame
        cv2.rectangle(frame, (left, top), (left+width, top+height), colour, 3)
        cv2.putText(frame, output_mask, (left, top-10), cv2.FONT_HERSHEY_SIMPLEX, 0.6, colour, 2)
    return final_boxes
def prepare_frame(img, size=(150, 150)):
    """Resize *img*, scale it to [0, 1] float32 and add a leading batch axis.

    Args:
        img: image as a numpy array (colour channels as provided by the caller).
        size: (width, height) passed to cv2.resize; default matches the models.

    Returns:
        float32 numpy array with a leading batch dimension of 1, ready for predict.
    """
    # tuple default instead of a mutable list (avoids the shared-default pitfall)
    resized = cv2.resize(img, (size[0], size[1]))
    img_array = image.img_to_array(resized)
    img_array = img_array.astype('float32')
    img_array /= 255.0
    # prepend the batch axis: the models expect a batch of images
    return img_array.reshape((1,) + img_array.shape)
def detect_mask_usage(img_array, mode):
    """Run the three mask models on a prepared face crop and map the scores to a label.

    Args:
        img_array: batch of one preprocessed face image (see prepare_frame).
        mode: 'simple' (good / wear-it-correctly) or 'suggestions' (detailed hints).

    Returns:
        (label, bgr_colour, raw_mask_detection_output)
    """
    # run all three classifiers on the same crop
    mask_result = detect_net.predict_on_batch(img_array)    # mask present?
    mask_is_proper = mask_net.predict_on_batch(img_array)   # worn correctly?
    suggestions = suggest_net.predict_on_batch(img_array)   # detailed hint class

    # index of the highest-scoring class for each model
    # (np.argmax replaces the previous amax + list.index dance, same result)
    mask_detection_result_index = int(np.argmax(mask_result[0]))
    correctness_result_index = int(np.argmax(mask_is_proper[0]))
    suggestions_result_index = int(np.argmax(suggestions[0]))

    # defaults guard against an unrecognized mode, which previously left
    # output_mask/colour unbound and crashed with NameError at the return
    output_mask = 'Good!'
    colour = (0, 255, 0)
    if mask_detection_result_index == 1:
        output_mask = 'Wear a Mask!'
        colour = (0, 0, 255)
    elif mode == 'simple':
        if correctness_result_index == 1:
            output_mask = 'Good!'
            colour = (0, 255, 0)
        else:
            output_mask = 'Wear it correctly!'
            colour = (0, 152, 232)
    elif mode == 'suggestions':
        if suggestions_result_index == 0:
            output_mask = 'Adjust on Chin!'
            colour = (0, 152, 232)
        elif suggestions_result_index == 1:
            output_mask = 'Cover your Nose!'
            colour = (0, 152, 232)
        elif suggestions_result_index == 2:
            output_mask = 'Cover Mouth and Nose!'
            colour = (0, 152, 232)
        elif suggestions_result_index == 3:
            output_mask = 'Good!'
            colour = (0, 255, 0)
    else:
        # fix: "recongized" typo in the user-facing message
        print('Mode not recognized. Please consider giving --mode "suggestions" or --mode "simple"')
    return output_mask, colour, mask_result
| 34.836364 | 117 | 0.586639 | import numpy as np
import cv2
import tensorflow.keras as keras
from tensorflow.keras.preprocessing import image
import numpy as np
# trained Keras models, loaded once at import time
mask_net = keras.models.load_model('models/facemask-correctness/mask_correctness_model.h5')
print("Model Check Mask imported correctly")
detect_net = keras.models.load_model('models/mask-detection/mask_detection_model.h5')
print("Model Detect Mask imported correctly")
print("*********************************************")
suggest_net = keras.models.load_model('models/suggestions-detection/suggestions_model.h5')
# NOTE(review): message below looks copy-pasted from detect_net's print
print("Model Detect Mask imported correctly")
print("*********************************************")
# detection thresholds; IMG_* presumably the detector's input size (confirm)
CONF_THRESHOLD = 0.5
NMS_THRESHOLD = 0.4
IMG_WIDTH = 416
IMG_HEIGHT = 416
def get_outputs_names(net):
    """Return the names of the network's unconnected (output) layers.

    OpenCV reports output layers as 1-based indices into the full layer
    list; map each index back to its layer name.
    """
    all_names = net.getLayerNames()
    out_layer_ids = net.getUnconnectedOutLayers()
    return [all_names[layer_id[0] - 1] for layer_id in out_layer_ids]
def process_frame(frame, outs, conf_threshold, nms_threshold, mode):
    """Post-process detector outputs and annotate `frame` in place.

    For each detection above `conf_threshold`, builds a pixel-space box,
    applies non-maximum suppression, classifies each surviving face crop
    with detect_mask_usage(), and draws the box plus message onto `frame`.

    Args:
        frame: BGR image array (mutated: rectangles/text are drawn on it).
        outs: raw network outputs; each detection row is assumed to be
            [cx, cy, w, h, objectness, class scores...] -- TODO confirm
            against the model in use.
        conf_threshold: minimum class score for a detection to be kept.
        nms_threshold: IoU threshold passed to cv2.dnn.NMSBoxes.
        mode: forwarded to detect_mask_usage ('simple' or 'suggestions').

    Returns:
        list: the kept [left, top, width, height] boxes.
    """
    frame_height = frame.shape[0]
    frame_width = frame.shape[1]
    confidences = []
    boxes = []
    final_boxes = []
    for out in outs:
        for detection in out:
            # Class scores start after the 4 box coords + objectness.
            scores = detection[5:]
            class_id = np.argmax(scores)
            confidence = scores[class_id]
            if confidence > conf_threshold:
                # Coordinates are normalized; scale to pixels and convert
                # the center/size form into a top-left corner box.
                center_x = int(detection[0] * frame_width)
                center_y = int(detection[1] * frame_height)
                width = int(detection[2] * frame_width)
                height = int(detection[3] * frame_height)
                left = int(center_x - width / 2)
                top = int(center_y - height / 2)
                confidences.append(float(confidence))
                boxes.append([left, top, width, height])
    indices = cv2.dnn.NMSBoxes(boxes, confidences, conf_threshold,
                               nms_threshold)
    for i in indices:
        # NOTE(review): `i[0]` assumes NMSBoxes returns Nx1 arrays
        # (OpenCV < 4.5.4); newer OpenCV returns a flat array -- verify
        # against the pinned OpenCV version.
        i = i[0]
        box = boxes[i]
        left = box[0]
        top = box[1]
        width = box[2]
        height = box[3]
        final_boxes.append(box)
        colour_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        # Crop the face with a 30px margin.
        # NOTE(review): near the image border `top-30`/`left-30` can go
        # negative, which Python slicing treats as wrap-around -- confirm
        # faces are expected away from the edges.
        face_img_crop = colour_frame[top-30:top+height+30, left-30:left+width+30]
        img_array = prepare_frame(face_img_crop)
        output_mask, colour, mask_result = detect_mask_usage(img_array, mode)
        cv2.rectangle(frame, (left, top), (left+width, top+height), colour, 3)
        cv2.putText(frame, output_mask, (left, top-10), cv2.FONT_HERSHEY_SIMPLEX, 0.6, colour, 2)
    return final_boxes
def prepare_frame(img, size=(150, 150)):
    """Resize a face crop and scale it into a float batch for the models.

    Args:
        img: image array (as produced by OpenCV slicing/cvtColor).
        size: target (width, height).  Changed from the mutable default
            ``[150, 150]`` to a tuple to avoid the shared-mutable-default
            pitfall; indexing behaviour is identical for callers.

    Returns:
        float32 array of shape ``(1,) + resized image shape`` with values
        scaled into [0, 1], ready for ``predict_on_batch``.
    """
    img_reshaped = cv2.resize(img, (size[0], size[1]))
    img_array = image.img_to_array(img_reshaped)
    img_array = img_array.astype('float32')
    img_array /= 255.0
    # Prepend a batch dimension of 1.
    img_array = img_array.reshape((1,) + img_array.shape)
    return img_array
def detect_mask_usage(img_array, mode):
    """Classify a prepared face crop and return (label, BGR colour, raw scores).

    Runs the three module-level models (mask presence, correctness,
    suggestions) on `img_array` and maps the winning class of each model
    to a user-facing message.  `mode` chooses between the terse
    correctness message ('simple') and the detailed hints ('suggestions').

    Returns:
        tuple: (message str, BGR colour tuple, mask-presence model output).
    """
    mask_result = detect_net.predict_on_batch(img_array)
    mask_is_proper = mask_net.predict_on_batch(img_array)
    suggestions = suggest_net.predict_on_batch(img_array)
    # Winning class = index of the maximum score in each output row.
    score = np.amax(mask_result[0], axis=0)
    list_scores = list(mask_result[0])
    mask_detection_result_index = list_scores.index(score)
    score_2 = np.amax(mask_is_proper[0], axis=0)
    list_scores_2 = list(mask_is_proper[0])
    correctness_result_index = list_scores_2.index(score_2)
    score_3 = np.amax(suggestions[0], axis=0)
    list_scores_3 = list(suggestions[0])
    suggestions_result_index = list_scores_3.index(score_3)
    # Fix: give both outputs safe defaults.  Previously an unrecognized
    # `mode` left output_mask/colour unbound, so the final return raised
    # UnboundLocalError instead of reporting the configuration problem.
    output_mask = ''
    colour = (0, 152, 232)
    if mask_detection_result_index == 1:
        # No mask detected at all.
        output_mask = 'Wear a Mask!'
        colour = (0, 0, 255)
    elif mode == 'simple':
        if correctness_result_index == 1:
            output_mask = 'Good!'
            colour = (0, 255, 0)
        else:
            output_mask = 'Wear it correctly!'
            colour = (0, 152, 232)
    elif mode == 'suggestions':
        if suggestions_result_index == 0:
            output_mask = 'Adjust on Chin!'
            colour = (0, 152, 232)
        elif suggestions_result_index == 1:
            output_mask = 'Cover your Nose!'
            colour = (0, 152, 232)
        elif suggestions_result_index == 2:
            output_mask = 'Cover Mouth and Nose!'
            colour = (0, 152, 232)
        elif suggestions_result_index == 3:
            output_mask = 'Good!'
            colour = (0, 255, 0)
    else:
        print('Mode not recongized. Please consider giving --mode "suggestions" or --mode "simple"')
    return output_mask, colour, mask_result
| true | true |
f73a9d6f13d293b5a4eaf1b31cd6470abcde107a | 1,280 | py | Python | dane/utils.py | CLARIAH/DANE-util | 8a3edec69be18ac3bdee476b65059409af05c1bb | [
"Apache-2.0"
] | null | null | null | dane/utils.py | CLARIAH/DANE-util | 8a3edec69be18ac3bdee476b65059409af05c1bb | [
"Apache-2.0"
] | 1 | 2019-12-11T19:46:20.000Z | 2019-12-11T21:30:38.000Z | dane/utils.py | CLARIAH/DANE-util | 8a3edec69be18ac3bdee476b65059409af05c1bb | [
"Apache-2.0"
] | null | null | null | # Copyright 2020-present, Netherlands Institute for Sound and Vision (Nanne van Noord)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
import json
import sys
import subprocess
import dane
def get_git_revision():
    """Return the short hash of the current git HEAD, or ``"NO-REV"``.

    Falls back to the sentinel when git is not installed or the working
    directory is not inside a repository, instead of raising.
    """
    try:
        return subprocess.check_output(
            ['git', 'rev-parse', '--short', 'HEAD'],
            stderr=subprocess.STDOUT).decode('ascii').strip()
    except (subprocess.CalledProcessError, OSError):
        # Narrowed from a bare `except:` so genuine bugs (e.g. decode
        # errors would still surface) -- only the expected git failure
        # modes map to the sentinel.
        return "NO-REV"
def get_git_remote():
    """Return the URL of the ``origin`` remote for the repo in the CWD.

    NOTE(review): unlike get_git_revision(), this has no fallback -- it
    raises subprocess.CalledProcessError outside a git repository (and
    OSError when git is not installed).  Callers should guard with
    cwd_is_git() first; confirm that is the intended contract.
    """
    return subprocess.check_output(['git',
            'config', '--get', 'remote.origin.url']).decode('ascii').strip()
def cwd_is_git():
    """Return True when the current working directory is a git repository."""
    try:
        # `git branch` exits non-zero outside a repo; output is discarded.
        subprocess.check_output(['git', 'branch'], stderr=subprocess.DEVNULL)
        return True
    except (subprocess.CalledProcessError, OSError):
        # Narrowed from a bare `except:`; OSError also covers git missing
        # from PATH (FileNotFoundError).
        return False
| 33.684211 | 94 | 0.657031 | true | true | |
f73a9d79dcbad3b1c1df4232ee70738ec1694d7c | 2,125 | py | Python | optuna/visualization/_utils.py | srijan-deepsource/optuna | 2a83adf1e5104a4cde2f8f275788dc1aaf246097 | [
"MIT"
] | 2 | 2021-02-28T10:35:23.000Z | 2021-06-16T10:01:44.000Z | optuna/visualization/_utils.py | srijan-deepsource/optuna | 2a83adf1e5104a4cde2f8f275788dc1aaf246097 | [
"MIT"
] | 12 | 2021-03-28T06:50:16.000Z | 2022-03-07T15:07:23.000Z | optuna/visualization/_utils.py | crcrpar/optuna | d25c7ee8e103575207978ec09a14ad9a4fefa53d | [
"MIT"
] | 1 | 2021-03-17T04:28:42.000Z | 2021-03-17T04:28:42.000Z | from typing import Callable
from typing import List
from typing import Optional
from typing import Sequence
from typing import Union
import warnings
from optuna.distributions import CategoricalDistribution
from optuna.distributions import LogUniformDistribution
from optuna.study import Study
from optuna.trial import FrozenTrial
from optuna.visualization import _plotly_imports
__all__ = ["is_available"]
def is_available() -> bool:
    """Return whether plotly-based visualization can be used.

    .. note::
        :mod:`~optuna.visualization` depends on plotly version 4.0.0 or
        higher.  When a supported plotly is not installed this returns
        :obj:`False`; install one with ``$ pip install -U plotly>=4.0.0``.

    Returns:
        :obj:`True` if visualization with plotly is available, :obj:`False` otherwise.
    """
    plotly_ok = _plotly_imports._imports.is_successful()
    return plotly_ok
def _check_plot_args(
    study: Union[Study, Sequence[Study]],
    target: Optional[Callable[[FrozenTrial], float]],
    target_name: str,
) -> None:
    """Validate the shared arguments of the plotting helpers.

    Raises when a multi-objective study is passed without an explicit
    ``target``; warns when ``target`` is given but ``target_name`` was
    left at its default.
    """
    if isinstance(study, Study):
        studies: Sequence[Study] = [study]
    else:
        studies = study
    has_multi_objective = any(s._is_multi_objective() for s in studies)
    if target is None and has_multi_objective:
        raise ValueError(
            "If the `study` is being used for multi-objective optimization, "
            "please specify the `target`."
        )
    if target is not None and target_name == "Objective Value":
        warnings.warn(
            "`target` is specified, but `target_name` is the default value, 'Objective Value'."
        )
def _is_log_scale(trials: List[FrozenTrial], param: str) -> bool:
    """Return True if any trial sampled *param* from a log-uniform distribution."""
    for trial in trials:
        if param not in trial.params:
            continue
        if isinstance(trial.distributions[param], LogUniformDistribution):
            return True
    return False
def _is_categorical(trials: List[FrozenTrial], param: str) -> bool:
    """Return True if any trial sampled *param* from a categorical distribution."""
    for trial in trials:
        if param not in trial.params:
            continue
        if isinstance(trial.distributions[param], CategoricalDistribution):
            return True
    return False
| 28.333333 | 98 | 0.686588 | from typing import Callable
from typing import List
from typing import Optional
from typing import Sequence
from typing import Union
import warnings
from optuna.distributions import CategoricalDistribution
from optuna.distributions import LogUniformDistribution
from optuna.study import Study
from optuna.trial import FrozenTrial
from optuna.visualization import _plotly_imports
__all__ = ["is_available"]
def is_available() -> bool:
return _plotly_imports._imports.is_successful()
def _check_plot_args(
study: Union[Study, Sequence[Study]],
target: Optional[Callable[[FrozenTrial], float]],
target_name: str,
) -> None:
studies: Sequence[Study]
if isinstance(study, Study):
studies = [study]
else:
studies = study
if target is None and any(study._is_multi_objective() for study in studies):
raise ValueError(
"If the `study` is being used for multi-objective optimization, "
"please specify the `target`."
)
if target is not None and target_name == "Objective Value":
warnings.warn(
"`target` is specified, but `target_name` is the default value, 'Objective Value'."
)
def _is_log_scale(trials: List[FrozenTrial], param: str) -> bool:
return any(
isinstance(t.distributions[param], LogUniformDistribution)
for t in trials
if param in t.params
)
def _is_categorical(trials: List[FrozenTrial], param: str) -> bool:
return any(
isinstance(t.distributions[param], CategoricalDistribution)
for t in trials
if param in t.params
)
| true | true |
f73a9f7139a49cf163c274e062f1e08230a93421 | 2,353 | py | Python | tests/product/tests/test_process_kubelet_rules.py | jeniawhite/cloudbeat | 5306ef6f5750b57c8a523fd76283b22da80a140f | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2022-03-07T09:20:47.000Z | 2022-03-07T09:20:47.000Z | tests/product/tests/test_process_kubelet_rules.py | jeniawhite/cloudbeat | 5306ef6f5750b57c8a523fd76283b22da80a140f | [
"ECL-2.0",
"Apache-2.0"
] | 25 | 2022-02-22T15:16:43.000Z | 2022-03-31T15:15:56.000Z | tests/product/tests/test_process_kubelet_rules.py | jeniawhite/cloudbeat | 5306ef6f5750b57c8a523fd76283b22da80a140f | [
"ECL-2.0",
"Apache-2.0"
] | 7 | 2022-03-02T15:19:28.000Z | 2022-03-29T12:45:34.000Z | """
Kubernetes CIS rules verification.
This module verifies correctness of retrieved findings by manipulating audit and remediation actions
"""
from datetime import datetime
import pytest
import time
from commonlib.utils import get_evaluation
from product.tests.tests.process.process_test_cases import *
@pytest.mark.rules
@pytest.mark.parametrize(
    ("rule_tag", "dictionary", "resource", "expected"),
    kubelet_rules,
)
def test_process_kubelet(config_node_pre_test,
                         rule_tag,
                         dictionary,
                         resource,
                         expected):
    """
    Data-driven check that cloudbeat evaluates kubelet CIS rules correctly.

    Edits the kubelet config on a cluster node (applying the set/unset
    entries from `dictionary` to `resource`), waits for cloudbeat to
    re-fetch the file, then asserts the finding for `rule_tag` equals
    `expected`.  New cases are added to the parametrize table
    (kubelet_rules); setup and teardown live in the config_node_pre_test
    fixture.

    @param rule_tag: Name of rule to be verified.
    @param dictionary: Set and Unset dictionary
    @param resource: Full path to resource / file
    @param expected: Result to be found in finding evaluation field.
    @return: None - Test Pass / Fail result is generated.
    """
    k8s_client, api_client, cloudbeat_agent = config_node_pre_test
    # Config-file editing only exists on containerized api clients.
    if not "edit_config_file" in dir(api_client):
        pytest.skip("skipping process rules run in non-containerized api_client")
    # Currently, single node is used, in the future may be extended for all nodes.
    node = k8s_client.get_cluster_nodes()[0]
    pods = k8s_client.get_agent_pod_instances(agent_name=cloudbeat_agent.name, namespace=cloudbeat_agent.namespace)
    api_client.edit_config_file(container_name=node.metadata.name,
                                dictionary=dictionary,
                                resource=resource)
    # Wait for updated file fetch
    # TODO: Implement a more optimal way of waiting
    time.sleep(60)
    evaluation = get_evaluation(
        k8s=k8s_client,
        timeout=cloudbeat_agent.findings_timeout,
        pod_name=pods[0].metadata.name,
        namespace=cloudbeat_agent.namespace,
        rule_tag=rule_tag,
        exec_timestamp=datetime.utcnow()
    )
    assert evaluation == expected, f"Rule {rule_tag} verification failed."
| 37.349206 | 115 | 0.697408 | from datetime import datetime
import pytest
import time
from commonlib.utils import get_evaluation
from product.tests.tests.process.process_test_cases import *
@pytest.mark.rules
@pytest.mark.parametrize(
("rule_tag", "dictionary", "resource", "expected"),
kubelet_rules,
)
def test_process_kubelet(config_node_pre_test,
rule_tag,
dictionary,
resource,
expected):
k8s_client, api_client, cloudbeat_agent = config_node_pre_test
if not "edit_config_file" in dir(api_client):
pytest.skip("skipping process rules run in non-containerized api_client")
node = k8s_client.get_cluster_nodes()[0]
pods = k8s_client.get_agent_pod_instances(agent_name=cloudbeat_agent.name, namespace=cloudbeat_agent.namespace)
api_client.edit_config_file(container_name=node.metadata.name,
dictionary=dictionary,
resource=resource)
time.sleep(60)
evaluation = get_evaluation(
k8s=k8s_client,
timeout=cloudbeat_agent.findings_timeout,
pod_name=pods[0].metadata.name,
namespace=cloudbeat_agent.namespace,
rule_tag=rule_tag,
exec_timestamp=datetime.utcnow()
)
assert evaluation == expected, f"Rule {rule_tag} verification failed."
| true | true |
f73aa07486f160e58c64c36fd743339e78454a80 | 888 | py | Python | setup.py | mjoblin/neotiles | f7370aefb74d4d57692d0e8a302c8c95f817a61a | [
"MIT"
] | 1 | 2021-04-25T19:27:12.000Z | 2021-04-25T19:27:12.000Z | setup.py | mjoblin/neotiles | f7370aefb74d4d57692d0e8a302c8c95f817a61a | [
"MIT"
] | 1 | 2016-12-27T00:35:51.000Z | 2017-01-02T06:30:04.000Z | setup.py | mjoblin/neotiles | f7370aefb74d4d57692d0e8a302c8c95f817a61a | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from setuptools import setup, find_packages

# Packaging metadata for neotiles.

# Ship only the `neotiles` package (and its subpackages).
packages = [pkg for pkg in find_packages() if pkg.startswith('neotiles')]

version = '0.4.0'

# Runtime dependency.
install_requires = ['wrapt']

# Needed at setup time for `python setup.py test`.
setup_requires = ['pytest-runner']

tests_require = ['pytest', 'pytest-cov', 'pytest-sugar']

# `pip install .[dev]` pulls in the docs/lint tooling plus the test stack.
extras_require = {'dev': ['sphinx', 'flake8'] + tests_require}

setup(
    name='neotiles',
    version=version,
    description='Treat a neopixel or RGB matrix as a collection of separate tiles',
    author='Mike Joblin',
    author_email='mike@tastymoss.com',
    url='https://github.com/mjoblin/neotiles',
    packages=packages,
    install_requires=install_requires,
    extras_require=extras_require,
    setup_requires=setup_requires,
    tests_require=tests_require,
    license='MIT',
    classifiers=[],
)
| 18.122449 | 83 | 0.653153 |
from setuptools import setup, find_packages
packages = [package for package in find_packages()
if package.startswith('neotiles')]
version = '0.4.0'
install_requires = [
'wrapt',
]
setup_requires = [
'pytest-runner'
]
tests_require = [
'pytest',
'pytest-cov',
'pytest-sugar'
]
extras_require = {
'dev': [
'sphinx',
'flake8',
] + tests_require,
}
setup(
name='neotiles',
version=version,
description='Treat a neopixel or RGB matrix as a collection of separate tiles',
author='Mike Joblin',
author_email='mike@tastymoss.com',
url='https://github.com/mjoblin/neotiles',
packages=packages,
install_requires=install_requires,
extras_require=extras_require,
setup_requires=setup_requires,
tests_require=tests_require,
license='MIT',
classifiers=[
]
)
| true | true |
f73aa12f6985d1f2e9876af55a40726426258f6e | 15,109 | py | Python | src/application.py | hiitsmeme/BookOrganizer | 666f2568a5bb89b6c6c012d4af7fd63a1f662bca | [
"MIT"
] | 1 | 2021-09-10T19:43:10.000Z | 2021-09-10T19:43:10.000Z | src/application.py | hiitsmeme/BookOrganizer | 666f2568a5bb89b6c6c012d4af7fd63a1f662bca | [
"MIT"
] | 1 | 2021-09-10T19:44:06.000Z | 2021-09-10T19:44:06.000Z | src/application.py | hiitsmeme/BookOrganizer | 666f2568a5bb89b6c6c012d4af7fd63a1f662bca | [
"MIT"
] | 1 | 2021-10-02T13:37:51.000Z | 2021-10-02T13:37:51.000Z | from flask import Flask, flash, render_template, request, session, redirect, url_for
import flask_login
from sqlalchemy import create_engine, Column, Integer, String, ForeignKey
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import Session, sessionmaker
from flask_session import Session
from tempfile import mkdtemp
from werkzeug.security import check_password_hash, generate_password_hash
from functools import wraps
#------Configs------#
app = Flask(__name__)
app.config["TEMPLATES_AUTO_RELOAD"] = True
@app.after_request
def after_request(response):
    """Stamp every response with headers that disable client-side caching."""
    headers = response.headers
    headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
    headers["Expires"] = 0
    headers["Pragma"] = "no-cache"
    return response
app.config["SESSION_FILE_DIR"] = mkdtemp()
app.config["SESSION_PERMANENT"] = False
app.config["SESSION_TYPE"] = "filesystem"
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///./src/data.db"
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
login_manager = flask_login.LoginManager()
login_manager.init_app(app)
MySession = sessionmaker()
Base = declarative_base()
engine = create_engine('sqlite:///./src/data.db')
Base.metadata.create_all(bind=engine)
MySession.configure(bind=engine)
session = MySession()
Session(app)
#------------------#
#--------Routes---------#
@app.route("/")
def index():
    """Home page: reading stats for the logged-in user, else the login page."""
    user = flask_login.current_user
    if not user.is_authenticated:
        return redirect("/login")
    return render_template("index.html",
                           username=user.username,
                           total_books=user.total_books,
                           total_pages=user.total_pages)
@app.route("/register", methods=["GET", "POST"])
def register():
    """Registration form (GET) and account creation + auto-login (POST)."""
    if request.method == "GET":
        return render_template("register.html")
    name = request.form.get("name").strip()
    password = request.form.get("password")
    confirmation = request.form.get("confirmation")
    # Guard clauses: all fields present, passwords agree, name unused.
    if not name or not password or not confirmation:
        return render_template("apology.html", message="Fill in all the fields")
    if password != confirmation:
        return render_template("apology.html", message="Passwords don't match")
    if User.findUser("username", name) is not None:
        return render_template("apology.html", message="Username taken")
    new_user = User(name, password)
    flask_login.login_user(new_user)
    return redirect("/")
@app.route("/login", methods=["GET", "POST"])
def login():
    """Login form (GET) and credential check + session start (POST)."""
    if request.method == "GET":
        return render_template("login.html")
    name = request.form.get("name").strip()
    password = request.form.get("password")
    if not name or not password:
        return render_template("apology.html", message="Fill in all the fields")
    # checkUser returns the User on success, False otherwise.
    user = User.checkUser(name, password)
    if user is False:
        return render_template("apology.html", message="Password or username wrong. Are you already registered?")
    flask_login.login_user(user)
    return redirect("/")
@app.route("/logout")
@flask_login.login_required
def logout():
    """End the current session and return to the home page."""
    flask_login.logout_user()
    return redirect("/")
@app.route("/add", methods=["GET", "POST"])
@flask_login.login_required
def add():
    """Render the add-book form (GET) or create a new Book (POST)."""
    if request.method == 'GET':
        return render_template("add.html")
    user_id = flask_login.current_user.id
    title = request.form.get('title').strip()
    author = request.form.get('author').strip()
    pages = request.form.get('pages').strip()
    rating = request.form.get('rating')
    month = request.form.get('month')
    year = request.form.get('year').strip()
    if len(title) == 0 or len(author) == 0 or len(pages) == 0 or len(rating) == 0 or len(month) == 0 or len(year) == 0:
        return render_template("apology_home.html", message="Fill in all the fields")
    # Fix: pages/year are free-text inputs and Book() calls int() on them;
    # non-numeric input previously escaped as a ValueError (HTTP 500).
    if not pages.isdigit() or not year.isdigit():
        return render_template("apology_home.html", message="Pages and year must be numbers")
    Book(user_id, title, author, pages, rating, month, year)
    return redirect("/")
@app.route("/library", methods=["GET", "POST"])
@flask_login.login_required
def library():
    """Show the user's library, optionally filtered by rating/month/year.

    GET renders every book for the logged-in user, newest first.
    POST applies any combination of the three filter fields; with no
    filter selected it reloads the unfiltered library.

    Refactor: the original enumerated every filter combination as its own
    nested branch with a near-identical query.  Collecting the selected
    filters into one ``filter_by(**filters)`` call is equivalent and
    removes the duplication.
    """
    sess = sessionmaker(bind=engine)()
    user_id = flask_login.current_user.id
    if request.method == 'GET':
        books = (sess.query(Book).filter_by(user_id=user_id)
                 .order_by(Book.year.desc()).order_by(Book.month.desc()).all())
        if len(books) == 0:
            return render_template("apology_home.html", message="You haven't added any books yet")
        return render_template("library.html", books=books)
    # POST: an untouched select box submits the literal string 'None'.
    rating_filter = request.form.get("rating_filter")
    month_filter = request.form.get("month_filter")
    year_filter = request.form.get("year_filter")
    filters = {}
    if rating_filter != 'None':
        filters['rating'] = rating_filter
    if month_filter != 'None':
        filters['month'] = month_filter
    if len(year_filter) > 0:
        filters['year'] = year_filter
    if not filters:
        # Nothing selected: fall back to the plain library view.
        return redirect("/library")
    books = (sess.query(Book).filter_by(user_id=user_id, **filters)
             .order_by(Book.year.desc()).order_by(Book.month.desc()).all())
    return render_template("library.html", books=books)
def rating_f(rating_f):
    """Return a Query of the current user's books with the given rating.

    NOTE(review): returns a Query object (no ``.all()``), so callers must
    iterate or materialize it.  Appears unused -- the /library route
    inlines equivalent queries; the name also shadows a local variable
    there.  Confirm before relying on it.
    """
    current_session = sessionmaker()
    current_session.configure(bind=engine)
    sess = current_session()
    books = sess.query(Book).filter_by(user_id=flask_login.current_user.id).filter_by(rating=rating_f).order_by(Book.year.desc()).order_by(Book.month.desc())
    return books
def month_f(month_f):
    """Return a Query of the current user's books finished in a given month.

    NOTE(review): returns a Query (no ``.all()``) and appears unused; see
    rating_f for the same caveats.
    """
    current_session = sessionmaker()
    current_session.configure(bind=engine)
    sess = current_session()
    books = sess.query(Book).filter_by(user_id=flask_login.current_user.id).filter_by(month=month_f).order_by(Book.year.desc()).order_by(Book.month.desc())
    return books
def year_f(year_f):
    """Return a Query of the current user's books finished in a given year.

    NOTE(review): returns a Query (no ``.all()``) and appears unused; see
    rating_f for the same caveats.
    """
    current_session = sessionmaker()
    current_session.configure(bind=engine)
    sess = current_session()
    books = sess.query(Book).filter_by(user_id=flask_login.current_user.id).filter_by(year=year_f).order_by(Book.year.desc()).order_by(Book.month.desc())
    return books
@app.route("/search", methods=["GET", "POST"])
@flask_login.login_required
def search():
    """Search the user's library by exact title and/or author match.

    Refactor: the original ran an unused query for *all* books first, then
    duplicated the filtered query across four branches; collecting the
    filled-in fields into one ``filter_by(**filters)`` call is equivalent.
    """
    if request.method == "GET":
        return render_template("search.html")
    title = request.form.get("title").strip()
    author = request.form.get("author").strip()
    filters = {}
    if len(title) > 0:
        filters['title'] = title
    if len(author) > 0:
        filters['author'] = author
    if not filters:
        return render_template("apology_home.html", message="You have to enter something")
    sess = sessionmaker(bind=engine)()
    books = sess.query(Book).filter_by(user_id=flask_login.current_user.id, **filters).all()
    if len(books) == 0:
        return render_template("apology_home.html", message="No match found")
    return render_template("library.html", books=books)
@app.route("/remove/<id>")
@flask_login.login_required
def remove(id):
    """Delete one of the current user's books, then return to the library.

    Fixes two defects in the original: a missing id raised AttributeError
    (book is None), and there was no ownership check, so any logged-in
    user could delete any other user's book by guessing ids (IDOR).
    """
    book = Book.find_by_id(id)
    if book is None or book.user_id != flask_login.current_user.id:
        return render_template("apology_home.html", message="Book not found")
    book.removeBook()
    return redirect("/library")
#-----------------#
#------Login Manager------#
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login callback: reload the User for a session-stored id."""
    return User.findUser("id", int(user_id))
def unauthorized():
    """Send anonymous visitors to the home page (which bounces to /login)."""
    return redirect("/")
# Register as Flask-Login's handler for @login_required failures.
login_manager.unauthorized_handler(unauthorized)
#-------------------------#
#-------User class------#
class User(flask_login.UserMixin, Base):
    """ORM model and Flask-Login principal for one account.

    Tracks aggregate reading stats (total_books/total_pages) that the
    Book model keeps in sync on add/remove.  Writes go through the
    module-level shared ``session``; reads in findUser use short-lived
    ``MySession()`` sessions.
    """
    __tablename__ = 'User'
    id = Column(Integer, primary_key=True)
    username = Column(String, unique=True, nullable=False)
    # Werkzeug hash only; the raw password is never stored.
    passwordhash = Column(String, nullable=False)
    total_books = Column(Integer, nullable=False, default=0)
    total_pages = Column(Integer, nullable=False, default=0)
    def __init__(self, username, password, addToDatabase=True):
        # Hashes the password immediately; persists via the shared
        # module-level session unless addToDatabase is False.
        self.username = username
        self.passwordhash = generate_password_hash(password)
        if addToDatabase:
            User.addUser(self)
    def addUser(self):
        # Persist this instance through the shared module-level session.
        session.add(self)
        session.commit()
    def removeUser(self):
        # Delete this instance through the shared module-level session.
        session.delete(self)
        session.commit()
    def findUser(Type, Value):
        # Static-style lookup (called as User.findUser(...)); works
        # without @staticmethod in Python 3.  Type is "username" or "id".
        # NOTE(review): any other Type leaves `user` unbound and raises
        # UnboundLocalError at the return -- callers only pass those two.
        Type = str.lower(Type)
        if (Type == "username"):
            with MySession() as sess:
                user = sess.query(User).filter_by(username = Value).first()
        if (Type == "id"):
            with MySession() as sess:
                user = sess.query(User).filter_by(id = Value).first()
        return user
    def checkUser(username, password):
        # Static-style credential check: returns the User on success,
        # False for an unknown name or a wrong password.
        user = User.findUser("username", username)
        if user == None:
            return False
        if check_password_hash(user.passwordhash, password):
            return user
        else:
            return False
    def updateBookCount(self, add=True):
        # Increment (or, with add=False, decrement) total_books.
        # NOTE(review): updates the *logged-in* user via
        # flask_login.current_user rather than `self` -- confirm this is
        # intended before calling it on any other User instance.
        username = flask_login.current_user.username
        if not add:
            session.query(User).filter(User.username == username).update({
                User.total_books: User.total_books - 1
            }, synchronize_session=False)
        else:
            session.query(User).filter(User.username == username).update({
                User.total_books: User.total_books + 1
            }, synchronize_session=False)
        session.commit()
    def updatePageCount(self, pages, add=True):
        # Same pattern as updateBookCount, applied to total_pages with a
        # delta of `pages`; the current_user caveat above applies here too.
        username = flask_login.current_user.username
        if not add:
            session.query(User).filter(User.username == username).update({
                User.total_pages: User.total_pages - pages
            }, synchronize_session=False)
        else:
            session.query(User).filter(User.username == username).update({
                User.total_pages: User.total_pages + pages
            }, synchronize_session=False)
        session.commit()
    def currentUser(self):
        # Summary triple used by templates: [username, books, pages].
        return [self.username, self.total_books, self.total_pages]
#----------------------------#
#--------Book Class----------#
class Book(flask_login.UserMixin, Base):
    """ORM model for one finished book belonging to one User.

    NOTE(review): inheriting flask_login.UserMixin here looks accidental
    (books are not login principals); harmless, but worth confirming.
    """
    __tablename__ = 'Books'
    id = Column(Integer, primary_key=True)
    # Owning account.
    user_id = Column(Integer, ForeignKey('User.id'))
    title = Column(String, nullable= False)
    author = Column(String, nullable=False)
    pages = Column(Integer, nullable=False)
    # Rating/month come from select boxes in add.html -- presumably
    # rating 1-5 and month 1-12; verify against the template.
    rating = Column(Integer, nullable=False)
    month = Column(Integer, nullable=False)
    year = Column(Integer, nullable=False)
    def __init__(self, user_id, title, author, pages, rating, month, year, addToDatabase=True):
        # Coerces the numeric form fields with int(); raises ValueError on
        # non-numeric input, so callers should validate first.
        self.user_id = user_id
        self.title = title
        self.author = author
        self.pages = int(pages)
        self.rating = int(rating)
        self.month = int(month)
        self.year = int(year)
        if addToDatabase:
            Book.addBook(self)
    def addBook(self):
        # Persist the book and bump the logged-in user's aggregate counters.
        flask_login.current_user.updateBookCount()
        flask_login.current_user.updatePageCount(self.pages)
        session.add(self)
        session.commit()
    def removeBook(self):
        # Inverse of addBook: decrement the counters, then delete the row.
        flask_login.current_user.updateBookCount(add=False)
        flask_login.current_user.updatePageCount(self.pages, add=False)
        session.delete(self)
        session.commit()
    def findBook(title, author):
        # Static-style exact-match lookup across *all* users.
        # NOTE(review): O(n*m) double scan intersecting two result sets; a
        # single filter_by(title=..., author=...) would be equivalent.
        for i in session.query(Book).filter_by(title=title).all():
            for x in session.query(Book).filter_by(author=author).all():
                if x == i:
                    return x
        return None
    def find_by_id(id):
        # Static-style primary-key lookup; returns None when absent.
        return session.query(Book).filter_by(id=id).first()
    def currentBook(self):
        # Plain-dict snapshot of this row (template/JSON friendly).
        dic = {
            'id' : self.id,
            'user_id' : self.user_id,
            'title' : self.title,
            'author' : self.author,
            'pages' : self.pages,
            'rating' : self.rating,
            'month' : self.month,
            'year' : self.year
        }
        return dic
    def allBooks(user_id):
        # Static-style: list of currentBook() dicts for one user.
        book_list = []
        for book in session.query(Book).filter_by(user_id=user_id).all():
            book_list.append(book.currentBook())
        return book_list
| 38.445293 | 227 | 0.630419 | from flask import Flask, flash, render_template, request, session, redirect, url_for
import flask_login
from sqlalchemy import create_engine, Column, Integer, String, ForeignKey
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import Session, sessionmaker
from flask_session import Session
from tempfile import mkdtemp
from werkzeug.security import check_password_hash, generate_password_hash
from functools import wraps
app = Flask(__name__)
app.config["TEMPLATES_AUTO_RELOAD"] = True
@app.after_request
def after_request(response):
response.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
response.headers["Expires"] = 0
response.headers["Pragma"] = "no-cache"
return response
app.config["SESSION_FILE_DIR"] = mkdtemp()
app.config["SESSION_PERMANENT"] = False
app.config["SESSION_TYPE"] = "filesystem"
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///./src/data.db"
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
login_manager = flask_login.LoginManager()
login_manager.init_app(app)
MySession = sessionmaker()
Base = declarative_base()
engine = create_engine('sqlite:///./src/data.db')
Base.metadata.create_all(bind=engine)
MySession.configure(bind=engine)
session = MySession()
Session(app)
@app.route("/")
def index():
if flask_login.current_user.is_authenticated:
return render_template("index.html",
username = flask_login.current_user.username,
total_books = flask_login.current_user.total_books,
total_pages = flask_login.current_user.total_pages)
else:
return redirect("/login")
@app.route("/register", methods=["GET", "POST"])
def register():
if request.method == "GET":
return render_template("register.html")
else:
name = request.form.get("name").strip()
password = request.form.get("password")
confirmation = request.form.get("confirmation")
if len(name) == 0 or len(password) == 0 or len(confirmation) == 0:
return render_template("apology.html", message="Fill in all the fields")
if password != confirmation:
return render_template("apology.html", message="Passwords don't match")
if User.findUser("username", name) != None:
return render_template("apology.html", message="Username taken")
user = User(name, password)
flask_login.login_user(user)
return redirect("/")
@app.route("/login", methods=["GET", "POST"])
def login():
if request.method == "GET":
return render_template("login.html")
else:
name = request.form.get("name").strip()
password = request.form.get("password")
if len(name) == 0 or len(password) == 0:
return render_template("apology.html", message="Fill in all the fields")
user = User.checkUser(name, password)
if user == False:
return render_template("apology.html", message="Password or username wrong. Are you already registered?")
else:
flask_login.login_user(user)
return redirect("/")
@app.route("/logout")
@flask_login.login_required
def logout():
flask_login.logout_user()
return redirect("/")
@app.route("/add", methods=["GET", "POST"])
@flask_login.login_required
def add():
if request.method == 'GET':
return render_template("add.html")
else:
user_id = flask_login.current_user.id
title = request.form.get('title').strip()
author = request.form.get('author').strip()
pages = request.form.get('pages').strip()
rating = request.form.get('rating')
month = request.form.get('month')
year = request.form.get('year').strip()
if len(title) == 0 or len(author) == 0 or len(pages) == 0 or len(rating) == 0 or len(month) == 0 or len(year) == 0:
return render_template("apology_home.html", message="Fill in all the fields")
Book(user_id, title, author, pages, rating, month, year)
return redirect("/")
@app.route("/library", methods=["GET", "POST"])
@flask_login.login_required
def library():
    """Show the current user's books (GET) or a filtered subset (POST).

    POST accepts optional ``rating_filter``, ``month_filter`` and
    ``year_filter`` form fields; any combination may be supplied. With no
    filters selected the client is redirected back to the unfiltered view.
    """
    current_session = sessionmaker()
    current_session.configure(bind=engine)
    sess = current_session()
    user_id = flask_login.current_user.id
    if request.method == 'GET':
        books = (sess.query(Book).filter_by(user_id=user_id)
                 .order_by(Book.year.desc()).order_by(Book.month.desc()).all())
        if not books:
            return render_template("apology_home.html", message="You haven't added any books yet")
        return render_template("library.html", books=books)
    # POST: build the column filters dynamically instead of enumerating
    # every rating/month/year combination in nested branches.
    rating_f = request.form.get("rating_filter")
    month_f = request.form.get("month_filter")
    year_f = request.form.get("year_filter")
    filters = {'user_id': user_id}
    if rating_f != 'None':
        filters['rating'] = rating_f
    if month_f != 'None':
        filters['month'] = month_f
    if year_f:  # also guards against a missing field (None), unlike len(year_f)
        filters['year'] = year_f
    if len(filters) == 1:
        # No filter selected at all: fall back to the plain library view.
        return redirect("/library")
    books = (sess.query(Book).filter_by(**filters)
             .order_by(Book.year.desc()).order_by(Book.month.desc()).all())
    return render_template("library.html", books=books)
def rating_f(rating_f):
    """Return a query of the current user's books with ``rating_f``, newest first."""
    maker = sessionmaker()
    maker.configure(bind=engine)
    db = maker()
    query = db.query(Book).filter_by(user_id=flask_login.current_user.id).filter_by(rating=rating_f)
    return query.order_by(Book.year.desc()).order_by(Book.month.desc())
def month_f(month_f):
    """Return a query of the current user's books read in ``month_f``, newest first."""
    maker = sessionmaker()
    maker.configure(bind=engine)
    db = maker()
    query = db.query(Book).filter_by(user_id=flask_login.current_user.id).filter_by(month=month_f)
    return query.order_by(Book.year.desc()).order_by(Book.month.desc())
def year_f(year_f):
    """Return a query of the current user's books read in ``year_f``, newest first."""
    maker = sessionmaker()
    maker.configure(bind=engine)
    db = maker()
    query = db.query(Book).filter_by(user_id=flask_login.current_user.id).filter_by(year=year_f)
    return query.order_by(Book.year.desc()).order_by(Book.month.desc())
@app.route("/search", methods=["GET", "POST"])
@flask_login.login_required
def search():
    """Render the search form (GET) or find books by title and/or author (POST)."""
    if request.method == "GET":
        return render_template("search.html")
    title = request.form.get("title").strip()
    author = request.form.get("author").strip()
    # Guard the empty-form case first instead of checking it after every
    # other branch, which left it as the function's last fall-through.
    if not title and not author:
        return render_template("apology_home.html", message="You have to enter something")
    current_session = sessionmaker()
    current_session.configure(bind=engine)
    sess = current_session()
    # Compose the filters dynamically; this collapses the three duplicated
    # title-only / author-only / both branches into one query.
    filters = {'user_id': flask_login.current_user.id}
    if title:
        filters['title'] = title
    if author:
        filters['author'] = author
    books = sess.query(Book).filter_by(**filters).all()
    if not books:
        return render_template("apology_home.html", message="No match found")
    return render_template("library.html", books=books)
@app.route("/remove/<id>")
@flask_login.login_required
def remove(id):
    """Delete a book by id; only the owning user may remove it."""
    book = Book.find_by_id(id)
    # Guard against unknown ids (find_by_id returns None -> AttributeError)
    # and against deleting another user's book by guessing ids (IDOR).
    if book is None or book.user_id != flask_login.current_user.id:
        return redirect("/library")
    book.removeBook()
    return redirect("/library")
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login callback: resolve a stored session id to a User row."""
    return User.findUser("id", int(user_id))
def unauthorized():
    """Send anonymous visitors to the landing page instead of a 401 page."""
    return redirect("/")


login_manager.unauthorized_handler(unauthorized)
class User(flask_login.UserMixin, Base):
    """ORM model for an application user, with password hashing and
    running totals of books and pages read."""

    __tablename__ = 'User'
    id = Column(Integer, primary_key=True)
    username = Column(String, unique=True, nullable=False)
    passwordhash = Column(String, nullable=False)       # werkzeug hash, never the raw password
    total_books = Column(Integer, nullable=False, default=0)
    total_pages = Column(Integer, nullable=False, default=0)

    def __init__(self, username, password, addToDatabase=True):
        self.username = username
        self.passwordhash = generate_password_hash(password)
        if addToDatabase:
            User.addUser(self)

    def addUser(self):
        """Persist this user in the database."""
        session.add(self)
        session.commit()

    def removeUser(self):
        """Delete this user from the database."""
        session.delete(self)
        session.commit()

    # Called as User.findUser(...) — deliberately has no `self` parameter.
    def findUser(Type, Value):
        """Look up a user by "username" or "id".

        Returns the User row, or None when nothing matches or the lookup
        type is unknown (previously an unknown type raised
        UnboundLocalError because `user` was never assigned).
        """
        Type = str.lower(Type)
        user = None
        if Type == "username":
            with MySession() as sess:
                user = sess.query(User).filter_by(username=Value).first()
        elif Type == "id":
            with MySession() as sess:
                user = sess.query(User).filter_by(id=Value).first()
        return user

    # Called as User.checkUser(...) — deliberately has no `self` parameter.
    def checkUser(username, password):
        """Return the User row if the credentials match, else False."""
        user = User.findUser("username", username)
        if user is None:
            return False
        if check_password_hash(user.passwordhash, password):
            return user
        return False

    def updateBookCount(self, add=True):
        """Increment (or with add=False decrement) the current user's book total."""
        username = flask_login.current_user.username
        delta = 1 if add else -1
        session.query(User).filter(User.username == username).update({
            User.total_books: User.total_books + delta
        }, synchronize_session=False)
        session.commit()

    def updatePageCount(self, pages, add=True):
        """Add (or with add=False subtract) `pages` to the current user's page total."""
        username = flask_login.current_user.username
        delta = pages if add else -pages
        session.query(User).filter(User.username == username).update({
            User.total_pages: User.total_pages + delta
        }, synchronize_session=False)
        session.commit()

    def currentUser(self):
        """Return [username, total_books, total_pages] for display."""
        return [self.username, self.total_books, self.total_pages]
class Book(flask_login.UserMixin, Base):
    """ORM model for a single logged book, owned by a User row.

    Creating an instance with addToDatabase=True also bumps the owning
    user's book/page totals; removeBook reverses them.
    """

    __tablename__ = 'Books'
    id = Column(Integer, primary_key=True)
    user_id = Column(Integer, ForeignKey('User.id'))
    title = Column(String, nullable=False)
    author = Column(String, nullable=False)
    pages = Column(Integer, nullable=False)
    rating = Column(Integer, nullable=False)
    month = Column(Integer, nullable=False)
    year = Column(Integer, nullable=False)

    def __init__(self, user_id, title, author, pages, rating, month, year, addToDatabase=True):
        self.user_id = user_id
        self.title = title
        self.author = author
        # Form values arrive as strings; int() raises ValueError on bad input.
        self.pages = int(pages)
        self.rating = int(rating)
        self.month = int(month)
        self.year = int(year)
        if addToDatabase:
            Book.addBook(self)

    def addBook(self):
        """Persist the book and update the owner's running totals."""
        flask_login.current_user.updateBookCount()
        flask_login.current_user.updatePageCount(self.pages)
        session.add(self)
        session.commit()

    def removeBook(self):
        """Delete the book and roll back the owner's running totals."""
        flask_login.current_user.updateBookCount(add=False)
        flask_login.current_user.updatePageCount(self.pages, add=False)
        session.delete(self)
        session.commit()

    # Called as Book.findBook(...) — deliberately has no `self` parameter.
    def findBook(title, author):
        """Return the first book matching both title and author, or None.

        Single combined query instead of the previous O(n^2) nested scan
        over two separate result lists.
        """
        return session.query(Book).filter_by(title=title, author=author).first()

    # Called as Book.find_by_id(...) — deliberately has no `self` parameter.
    def find_by_id(id):
        """Return the book with the given primary key, or None."""
        return session.query(Book).filter_by(id=id).first()

    def currentBook(self):
        """Return this book's columns as a plain dict (template/JSON friendly)."""
        return {
            'id': self.id,
            'user_id': self.user_id,
            'title': self.title,
            'author': self.author,
            'pages': self.pages,
            'rating': self.rating,
            'month': self.month,
            'year': self.year
        }

    # Called as Book.allBooks(...) — deliberately has no `self` parameter.
    def allBooks(user_id):
        """Return every book of `user_id` as a list of dicts."""
        return [book.currentBook()
                for book in session.query(Book).filter_by(user_id=user_id).all()]
| true | true |
f73aa4743bbf6e7484d7968e7f251af60f29aa80 | 2,326 | py | Python | shared/CastInPipeline.py | mssalvador/NextProject | b9e223f8f1de803fd3865c3f2148a417f88556da | [
"Apache-2.0"
] | 1 | 2017-10-10T07:00:46.000Z | 2017-10-10T07:00:46.000Z | shared/CastInPipeline.py | mssalvador/NextProject | b9e223f8f1de803fd3865c3f2148a417f88556da | [
"Apache-2.0"
] | null | null | null | shared/CastInPipeline.py | mssalvador/NextProject | b9e223f8f1de803fd3865c3f2148a417f88556da | [
"Apache-2.0"
] | 2 | 2018-11-19T09:07:49.000Z | 2018-11-28T12:54:25.000Z | '''
Created on Oct 13, 2017
@author: svanhmic
'''
from pyspark.ml.param.shared import HasInputCol, HasOutputCol
from pyspark.ml import Transformer
from pyspark.sql import functions as F
from pyspark import keyword_only
from pyspark.ml.param import Params, Param, TypeConverters
class CastInPipeline(Transformer, HasInputCol):
    """Pipeline stage that casts the input column of a DataFrame to another
    primitive type (e.g. int -> double).

    The docstring previously claimed this converts a vector column to an
    ml dense vector; it actually performs a plain column type cast. Only
    the source/target pairs listed in ``applicable_casts`` are permitted.
    """

    # Allowed conversions, encoded as "<from-type><to-type>" name pairs.
    applicable_casts = ['intstring',
                        'intfloat',
                        'intdouble',
                        'doublefloat',
                        'floatdouble',
                        'stringdouble',
                        'stringint'
                        ]

    # Target type names a caller may request; shared by __init__ and
    # setCastTo instead of duplicating the literal list in both places.
    _valid_types = ('string', 'int', 'float', 'double', 'boolean')

    castTo = Param(
        parent=Params._dummy(),
        name='castTo',
        doc='Indicates the what we want to cast to.',
        typeConverter=TypeConverters.toString
    )

    @keyword_only
    def __init__(self, inputCol=None, castTo=None):
        if castTo not in CastInPipeline._valid_types:
            raise TypeError('new type must be a valid type!')
        super(CastInPipeline, self).__init__()
        kwargs = self.__init__._input_kwargs
        self.setParams(**kwargs)

    @keyword_only
    def setParams(self, inputCol=None, castTo=None):
        kwargs = self.setParams._input_kwargs
        return self._set(**kwargs)

    def setCastTo(self, value):
        """Set the target type; raises TypeError for unknown type names."""
        if value in CastInPipeline._valid_types:
            return self._set(castTo=value)
        raise TypeError('new type must be a valid type!')

    def getCastTo(self):
        """Return the configured target type name."""
        return self.getOrDefault(self.castTo)

    def _transform(self, dataset):
        """Cast the input column in place, validating the from/to pair."""
        column_types = dict(dataset.dtypes)
        if str(column_types[self.getInputCol()]) + str(self.getCastTo()) not in self.applicable_casts:
            raise Exception(
                'The desired conversion from {} to {}, cannot be applied, sorry!'
                .format(column_types[self.getInputCol()], self.getCastTo())
            )
        return dataset.withColumn(
            self.getInputCol(),
            F.col(self.getInputCol()).cast(self.getCastTo()))
from pyspark.ml import Transformer
from pyspark.sql import functions as F
from pyspark import keyword_only
from pyspark.ml.param import Params, Param, TypeConverters
class CastInPipeline(Transformer, HasInputCol):
applicable_casts = ['intstring',
'intfloat',
'intdouble',
'doublefloat',
'floatdouble',
'stringdouble',
'stringint'
]
castTo = Param(
parent=Params._dummy(),
name='castTo',
doc='Indicates the what we want to cast to.',
typeConverter=TypeConverters.toString
)
@keyword_only
def __init__(self, inputCol=None, castTo=None,):
if castTo not in ['string', 'int', 'float', 'double', 'boolean']:
raise TypeError('new type must be a valid type!')
super(CastInPipeline, self).__init__()
kwargs = self.__init__._input_kwargs
self.setParams(**kwargs)
@keyword_only
def setParams(self, inputCol=None, castTo=None):
kwargs = self.setParams._input_kwargs
return self._set(**kwargs)
def setCastTo(self, value):
if value in ['string', 'int', 'float', 'double', 'boolean']:
return self._set(castTo=value)
else:
raise TypeError('new type must be a valid type!')
def getCastTo(self):
return self.getOrDefault(self.castTo)
def _transform(self, dataset):
column_types = dict(dataset.dtypes)
if str(column_types[self.getInputCol()])+str(self.getCastTo()) not in self.applicable_casts:
raise Exception(
'The desired conversion from {} to {}, cannot be applied, sorry!'
.format(column_types[self.getInputCol()], self.getCastTo())
)
return dataset.withColumn(
self.getInputCol(),
F.col(self.getInputCol()).cast(self.getCastTo())) | true | true |
f73aa4a1ef8ce18a908f665a0856c28d81ed879a | 5,971 | py | Python | python/ray/tune/logger.py | visatish/ray | dc76e51a60652b3210c93f81df6dafcf461d4431 | [
"Apache-2.0"
] | 29 | 2019-05-18T12:18:34.000Z | 2022-03-30T01:46:48.000Z | python/ray/tune/logger.py | visatish/ray | dc76e51a60652b3210c93f81df6dafcf461d4431 | [
"Apache-2.0"
] | 8 | 2019-08-15T05:42:10.000Z | 2021-05-21T09:41:15.000Z | python/ray/tune/logger.py | visatish/ray | dc76e51a60652b3210c93f81df6dafcf461d4431 | [
"Apache-2.0"
] | 8 | 2019-07-15T22:36:20.000Z | 2020-08-09T07:03:26.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import json
import logging
import numpy as np
import os
import yaml
from ray.tune.log_sync import get_syncer
from ray.tune.result import NODE_IP, TRAINING_ITERATION, TIME_TOTAL_S, \
TIMESTEPS_TOTAL
logger = logging.getLogger(__name__)
try:
import tensorflow as tf
except ImportError:
tf = None
logger.warning("Couldn't import TensorFlow - "
"disabling TensorBoard logging.")
class Logger(object):
    """Logging interface for ray.tune; specialized implementations follow.

    By default, the UnifiedLogger implementation is used which logs results in
    multiple formats (TensorBoard, rllab/viskit, plain json) at once.
    """

    def __init__(self, config, logdir, upload_uri=None):
        self.config = config
        self.logdir = logdir
        self.uri = upload_uri
        self._init()

    def _init(self):
        """Hook for subclasses to open files/writers; no-op by default."""
        pass

    def on_result(self, result):
        """Given a result, appends it to the existing log."""
        raise NotImplementedError

    def close(self):
        """Releases all resources used by this logger."""
        pass

    def flush(self):
        """Flushes all disk writes to storage."""
        pass
class UnifiedLogger(Logger):
    """Unified result logger for TensorBoard, rllab/viskit, plain json.

    This class also periodically syncs output to the given upload uri."""

    def _init(self):
        self._loggers = []
        # Loop variable renamed so it no longer shadows the module-level
        # `logger` used for the info message below.
        for logger_cls in (_JsonLogger, _TFLogger, _VisKitLogger):
            if logger_cls is _TFLogger and tf is None:
                logger.info("TF not installed - "
                            "cannot log with {}...".format(logger_cls))
                continue
            self._loggers.append(logger_cls(self.config, self.logdir, self.uri))
        self._log_syncer = get_syncer(self.logdir, self.uri)

    def on_result(self, result):
        for sub in self._loggers:
            sub.on_result(result)
        self._log_syncer.set_worker_ip(result.get(NODE_IP))
        self._log_syncer.sync_if_needed()

    def close(self):
        for sub in self._loggers:
            sub.close()
        self._log_syncer.sync_now(force=True)

    def flush(self):
        for sub in self._loggers:
            sub.flush()
        self._log_syncer.sync_now(force=True)
        self._log_syncer.wait()
class NoopLogger(Logger):
    """Logger that silently discards every result."""

    def on_result(self, result):
        pass
class _JsonLogger(Logger):
    """Writes the config to params.json and each result as one JSON line
    in result.json (newline-delimited JSON)."""

    def _init(self):
        with open(os.path.join(self.logdir, "params.json"), "w") as f:
            json.dump(self.config, f, sort_keys=True, cls=_SafeFallbackEncoder)
        self.local_out = open(os.path.join(self.logdir, "result.json"), "w")

    def on_result(self, result):
        # `self` acts as the file-like sink: json.dump calls self.write().
        json.dump(result, self, cls=_SafeFallbackEncoder)
        self.write("\n")

    def write(self, b):
        self.local_out.write(b)
        self.local_out.flush()

    def close(self):
        self.local_out.close()
def to_tf_values(result, path):
    """Flatten a (possibly nested) result dict into a list of TF Summary values.

    Scalar leaves become Summary.Value entries whose tag is the "/"-joined
    key path; nested dicts recurse; other value types are skipped.

    Previously np.int64 was missing from the accepted scalar types, so
    integer metrics produced on 64-bit platforms (numpy's default int)
    were silently dropped from TensorBoard.
    """
    values = []
    for attr, value in result.items():
        if value is not None:
            # Exact-type check (not isinstance) intentionally excludes bool.
            if type(value) in [
                    int, float, np.float32, np.float64, np.int32, np.int64
            ]:
                values.append(
                    tf.Summary.Value(
                        tag="/".join(path + [attr]), simple_value=value))
            elif type(value) is dict:
                values.extend(to_tf_values(value, path + [attr]))
    return values
class _TFLogger(Logger):
    """Writes scalar results to TensorBoard event files via tf.summary."""

    def _init(self):
        self._file_writer = tf.summary.FileWriter(self.logdir)

    def on_result(self, result):
        tmp = result.copy()
        for k in [
                "config", "pid", "timestamp", TIME_TOTAL_S, TRAINING_ITERATION
        ]:
            # pop() instead of del: a result dict missing one of these keys
            # previously raised KeyError. Not useful to tf log these.
            tmp.pop(k, None)
        values = to_tf_values(tmp, ["ray", "tune"])
        train_stats = tf.Summary(value=values)
        # Prefer total timesteps as the x-axis; fall back to iteration count.
        t = result.get(TIMESTEPS_TOTAL) or result[TRAINING_ITERATION]
        self._file_writer.add_summary(train_stats, t)
        iteration_value = to_tf_values({
            "training_iteration": result[TRAINING_ITERATION]
        }, ["ray", "tune"])
        iteration_stats = tf.Summary(value=iteration_value)
        self._file_writer.add_summary(iteration_stats, t)
        self._file_writer.flush()

    def flush(self):
        self._file_writer.flush()

    def close(self):
        self._file_writer.close()
class _VisKitLogger(Logger):
    """Writes results to progress.csv in rllab/viskit format."""

    def _init(self):
        """CSV outputted with Headers as first set of results."""
        # Note that we assume params.json was already created by JsonLogger
        self._file = open(os.path.join(self.logdir, "progress.csv"), "w")
        self._csv_out = None

    def on_result(self, result):
        if self._csv_out is None:
            # Header is fixed from the first result; ignore (rather than
            # raise ValueError on) keys that appear only in later results.
            self._csv_out = csv.DictWriter(
                self._file, result.keys(), extrasaction="ignore")
            self._csv_out.writeheader()
        self._csv_out.writerow(result.copy())

    def close(self):
        self._file.close()
class _SafeFallbackEncoder(json.JSONEncoder):
def __init__(self, nan_str="null", **kwargs):
super(_SafeFallbackEncoder, self).__init__(**kwargs)
self.nan_str = nan_str
def default(self, value):
try:
if np.isnan(value):
return None
if np.issubdtype(value, float):
return float(value)
if np.issubdtype(value, int):
return int(value)
except Exception:
return str(value) # give up, just stringify it (ok for logs)
def pretty_print(result):
    """Render a result dict as YAML, dropping ``config`` and None values."""
    result = dict(result, config=None)  # drop config from pretty print
    out = {k: v for k, v in result.items() if v is not None}
    serialized = json.dumps(out, cls=_SafeFallbackEncoder)
    return yaml.safe_dump(json.loads(serialized), default_flow_style=False)
| 29.706468 | 79 | 0.624519 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import json
import logging
import numpy as np
import os
import yaml
from ray.tune.log_sync import get_syncer
from ray.tune.result import NODE_IP, TRAINING_ITERATION, TIME_TOTAL_S, \
TIMESTEPS_TOTAL
logger = logging.getLogger(__name__)
try:
import tensorflow as tf
except ImportError:
tf = None
logger.warning("Couldn't import TensorFlow - "
"disabling TensorBoard logging.")
class Logger(object):
    """Base interface for tune result loggers (no-op hooks by default)."""

    def __init__(self, config, logdir, upload_uri=None):
        self.config = config
        self.logdir = logdir
        self.uri = upload_uri
        self._init()

    def _init(self):
        pass

    def on_result(self, result):
        raise NotImplementedError

    def close(self):
        pass

    def flush(self):
        pass
class UnifiedLogger(Logger):
    """Fans results out to every available sub-logger and syncs the logdir."""

    def _init(self):
        self._loggers = []
        for logger_cls in (_JsonLogger, _TFLogger, _VisKitLogger):
            if logger_cls is _TFLogger and tf is None:
                logger.info("TF not installed - "
                            "cannot log with {}...".format(logger_cls))
                continue
            self._loggers.append(logger_cls(self.config, self.logdir, self.uri))
        self._log_syncer = get_syncer(self.logdir, self.uri)

    def on_result(self, result):
        for sub in self._loggers:
            sub.on_result(result)
        self._log_syncer.set_worker_ip(result.get(NODE_IP))
        self._log_syncer.sync_if_needed()

    def close(self):
        for sub in self._loggers:
            sub.close()
        self._log_syncer.sync_now(force=True)

    def flush(self):
        for sub in self._loggers:
            sub.flush()
        self._log_syncer.sync_now(force=True)
        self._log_syncer.wait()
class NoopLogger(Logger):
    """Logger that discards every result."""

    def on_result(self, result):
        pass
class _JsonLogger(Logger):
    """Writes params.json once and each result as a JSON line in result.json."""

    def _init(self):
        with open(os.path.join(self.logdir, "params.json"), "w") as f:
            json.dump(self.config, f, sort_keys=True, cls=_SafeFallbackEncoder)
        self.local_out = open(os.path.join(self.logdir, "result.json"), "w")

    def on_result(self, result):
        # `self` is the sink: json.dump invokes self.write().
        json.dump(result, self, cls=_SafeFallbackEncoder)
        self.write("\n")

    def write(self, b):
        self.local_out.write(b)
        self.local_out.flush()

    def close(self):
        self.local_out.close()
def to_tf_values(result, path):
    """Flatten a nested result dict into TF Summary values.

    np.int64 added to the accepted scalar types: 64-bit platform integer
    metrics were previously dropped silently.
    """
    values = []
    for attr, value in result.items():
        if value is not None:
            if type(value) in [
                    int, float, np.float32, np.float64, np.int32, np.int64
            ]:
                values.append(
                    tf.Summary.Value(
                        tag="/".join(path + [attr]), simple_value=value))
            elif type(value) is dict:
                values.extend(to_tf_values(value, path + [attr]))
    return values
class _TFLogger(Logger):
    """Writes scalar results to TensorBoard event files."""

    def _init(self):
        self._file_writer = tf.summary.FileWriter(self.logdir)

    def on_result(self, result):
        tmp = result.copy()
        for k in [
                "config", "pid", "timestamp", TIME_TOTAL_S, TRAINING_ITERATION
        ]:
            # pop() rather than del: missing keys raised KeyError before.
            tmp.pop(k, None)  # not useful to tf log these
        values = to_tf_values(tmp, ["ray", "tune"])
        train_stats = tf.Summary(value=values)
        t = result.get(TIMESTEPS_TOTAL) or result[TRAINING_ITERATION]
        self._file_writer.add_summary(train_stats, t)
        iteration_value = to_tf_values({
            "training_iteration": result[TRAINING_ITERATION]
        }, ["ray", "tune"])
        iteration_stats = tf.Summary(value=iteration_value)
        self._file_writer.add_summary(iteration_stats, t)
        self._file_writer.flush()

    def flush(self):
        self._file_writer.flush()

    def close(self):
        self._file_writer.close()
class _VisKitLogger(Logger):
    """Writes results to progress.csv (rllab/viskit format)."""

    def _init(self):
        # Note that we assume params.json was already created by JsonLogger
        self._file = open(os.path.join(self.logdir, "progress.csv"), "w")
        self._csv_out = None

    def on_result(self, result):
        if self._csv_out is None:
            # Ignore keys appearing only in later results instead of
            # letting DictWriter raise ValueError.
            self._csv_out = csv.DictWriter(
                self._file, result.keys(), extrasaction="ignore")
            self._csv_out.writeheader()
        self._csv_out.writerow(result.copy())

    def close(self):
        self._file.close()
class _SafeFallbackEncoder(json.JSONEncoder):
def __init__(self, nan_str="null", **kwargs):
super(_SafeFallbackEncoder, self).__init__(**kwargs)
self.nan_str = nan_str
def default(self, value):
try:
if np.isnan(value):
return None
if np.issubdtype(value, float):
return float(value)
if np.issubdtype(value, int):
return int(value)
except Exception:
return str(value) # give up, just stringify it (ok for logs)
def pretty_print(result):
    """Render a result dict as YAML, omitting ``config`` and None values."""
    result = dict(result, config=None)  # drop config from pretty print
    out = {k: v for k, v in result.items() if v is not None}
    serialized = json.dumps(out, cls=_SafeFallbackEncoder)
    return yaml.safe_dump(json.loads(serialized), default_flow_style=False)
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.